Source repository: Linux kernel mirror (for testing), git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git. Tags: kernel, os, linux.

drm/imagination: Add FWIF headers

Changes since v8:
- Corrected license identifiers

Changes since v7:
- Add padding to struct rogue_fwif_ccb_ctl to place read and write offsets
in different cache lines

Changes since v5:
- Split up header commit due to size
- Add BRN 71242 to device info

Changes since v4:
- Add FW header device info

Signed-off-by: Sarah Walker <sarah.walker@imgtec.com>
Signed-off-by: Donald Robson <donald.robson@imgtec.com>
Acked-by: Maxime Ripard <mripard@kernel.org>
Link: https://lore.kernel.org/r/aa681533a02bd2d46af17a6a6010f4d6048fbb0a.1700668843.git.donald.robson@imgtec.com
Signed-off-by: Maxime Ripard <mripard@kernel.org>

Authored by Sarah Walker and committed by Maxime Ripard (commit references: a26f067f, 7900e004).

Diffstat (presumably series total / this file — verify against the original commit page): +3832 insertions, +2188 insertions.
drivers/gpu/drm/imagination/pvr_rogue_fwif.h
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

/*
 * NOTE(review): firmware interface (FWIF) definitions shared between the
 * host kernel driver and the PowerVR Rogue GPU firmware. The structs below
 * are laid out to match what the firmware expects (explicit padding and
 * __aligned annotations) -- field order, sizes and alignment must not be
 * changed without a matching firmware update.
 */

#ifndef PVR_ROGUE_FWIF_H
#define PVR_ROGUE_FWIF_H

#include <linux/bits.h>
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "pvr_rogue_defs.h"
#include "pvr_rogue_fwif_common.h"
#include "pvr_rogue_fwif_shared.h"

/*
 ****************************************************************************
 * Logging type
 ****************************************************************************
 */
#define ROGUE_FWIF_LOG_TYPE_NONE 0x00000000U
#define ROGUE_FWIF_LOG_TYPE_TRACE 0x00000001U
#define ROGUE_FWIF_LOG_TYPE_GROUP_MAIN 0x00000002U
#define ROGUE_FWIF_LOG_TYPE_GROUP_MTS 0x00000004U
#define ROGUE_FWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U
#define ROGUE_FWIF_LOG_TYPE_GROUP_CSW 0x00000010U
#define ROGUE_FWIF_LOG_TYPE_GROUP_BIF 0x00000020U
#define ROGUE_FWIF_LOG_TYPE_GROUP_PM 0x00000040U
#define ROGUE_FWIF_LOG_TYPE_GROUP_RTD 0x00000080U
#define ROGUE_FWIF_LOG_TYPE_GROUP_SPM 0x00000100U
#define ROGUE_FWIF_LOG_TYPE_GROUP_POW 0x00000200U
#define ROGUE_FWIF_LOG_TYPE_GROUP_HWR 0x00000400U
#define ROGUE_FWIF_LOG_TYPE_GROUP_HWP 0x00000800U
#define ROGUE_FWIF_LOG_TYPE_GROUP_RPM 0x00001000U
#define ROGUE_FWIF_LOG_TYPE_GROUP_DMA 0x00002000U
#define ROGUE_FWIF_LOG_TYPE_GROUP_MISC 0x00004000U
#define ROGUE_FWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U
#define ROGUE_FWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU
#define ROGUE_FWIF_LOG_TYPE_MASK 0x80007FFFU

/* String used in pvrdebug -h output */
#define ROGUE_FWIF_LOG_GROUPS_STRING_LIST \
	"main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug"

/* Table entry to map log group strings to log type value */
struct rogue_fwif_log_group_map_entry {
	const char *log_group_name;
	u32 log_group_type;
};

/*
 ****************************************************************************
 * ROGUE FW signature checks
 ****************************************************************************
 */
#define ROGUE_FW_SIG_BUFFER_SIZE_MIN (8192)

#define ROGUE_FWIF_TIMEDIFF_ID ((0x1UL << 28) | ROGUE_CR_TIMER)

/*
 ****************************************************************************
 * Trace Buffer
 ****************************************************************************
 */

/* Default size of ROGUE_FWIF_TRACEBUF_SPACE in DWords */
#define ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U
#define ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE 200U
#define ROGUE_FW_THREAD_NUM 1U
#define ROGUE_FW_THREAD_MAX 2U

#define ROGUE_FW_POLL_TYPE_SET 0x80000000U

struct rogue_fwif_file_info_buf {
	char path[ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE];
	char info[ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE];
	u32 line_num;
	u32 padding;
} __aligned(8);

struct rogue_fwif_tracebuf_space {
	u32 trace_pointer;

	u32 trace_buffer_fw_addr;

	/* To be used by host when reading from trace buffer */
	u32 *trace_buffer;

	struct rogue_fwif_file_info_buf assert_buf;
} __aligned(8);

/* Total number of FW fault logs stored */
#define ROGUE_FWIF_FWFAULTINFO_MAX (8U)

struct rogue_fw_fault_info {
	aligned_u64 cr_timer;
	aligned_u64 os_timer;

	u32 data __aligned(8);
	u32 reserved;
	struct rogue_fwif_file_info_buf fault_buf;
} __aligned(8);

enum rogue_fwif_pow_state {
	ROGUE_FWIF_POW_OFF, /* idle and ready to full power down */
	ROGUE_FWIF_POW_ON, /* running HW commands */
	ROGUE_FWIF_POW_FORCED_IDLE, /* forced idle */
	ROGUE_FWIF_POW_IDLE, /* idle waiting for host handshake */
};

/* Firmware HWR states */
/* The HW state is ok or locked up */
#define ROGUE_FWIF_HWR_HARDWARE_OK BIT(0)
/* Tells if a HWR reset is in progress */
#define ROGUE_FWIF_HWR_RESET_IN_PROGRESS BIT(1)
/* A DM unrelated lockup has been detected */
#define ROGUE_FWIF_HWR_GENERAL_LOCKUP BIT(3)
/* At least one DM is running without being close to a lockup */
#define ROGUE_FWIF_HWR_DM_RUNNING_OK BIT(4)
/* At least one DM is close to lockup */
#define ROGUE_FWIF_HWR_DM_STALLING BIT(5)
/* The FW has faulted and needs to restart */
#define ROGUE_FWIF_HWR_FW_FAULT BIT(6)
/* The FW has requested the host to restart it */
#define ROGUE_FWIF_HWR_RESTART_REQUESTED BIT(7)

#define ROGUE_FWIF_PHR_STATE_SHIFT (8U)
/* The FW has requested the host to restart it, per PHR configuration */
#define ROGUE_FWIF_PHR_RESTART_REQUESTED ((1) << ROGUE_FWIF_PHR_STATE_SHIFT)
/* A PHR triggered GPU reset has just finished */
#define ROGUE_FWIF_PHR_RESTART_FINISHED ((2) << ROGUE_FWIF_PHR_STATE_SHIFT)
#define ROGUE_FWIF_PHR_RESTART_MASK \
	(ROGUE_FWIF_PHR_RESTART_REQUESTED | ROGUE_FWIF_PHR_RESTART_FINISHED)

#define ROGUE_FWIF_PHR_MODE_OFF (0UL)
#define ROGUE_FWIF_PHR_MODE_RD_RESET (1UL)
#define ROGUE_FWIF_PHR_MODE_FULL_RESET (2UL)

/* Firmware per-DM HWR states */
/* DM is working if all flags are cleared */
#define ROGUE_FWIF_DM_STATE_WORKING (0)
/* DM is idle and ready for HWR */
#define ROGUE_FWIF_DM_STATE_READY_FOR_HWR BIT(0)
/* DM need to skip to next cmd before resuming processing */
#define ROGUE_FWIF_DM_STATE_NEEDS_SKIP BIT(2)
/* DM need partial render cleanup before resuming processing */
#define ROGUE_FWIF_DM_STATE_NEEDS_PR_CLEANUP BIT(3)
/* DM need to increment Recovery Count once fully recovered */
#define ROGUE_FWIF_DM_STATE_NEEDS_TRACE_CLEAR BIT(4)
/* DM was identified as locking up and causing HWR */
#define ROGUE_FWIF_DM_STATE_GUILTY_LOCKUP BIT(5)
/* DM was innocently affected by another lockup which caused HWR */
#define ROGUE_FWIF_DM_STATE_INNOCENT_LOCKUP BIT(6)
/* DM was identified as over-running and causing HWR */
#define ROGUE_FWIF_DM_STATE_GUILTY_OVERRUNING BIT(7)
/* DM was innocently affected by another DM over-running which caused HWR */
#define ROGUE_FWIF_DM_STATE_INNOCENT_OVERRUNING BIT(8)
/* DM was forced into HWR as it delayed more important workloads */
#define ROGUE_FWIF_DM_STATE_HARD_CONTEXT_SWITCH BIT(9)
/* DM was forced into HWR due to an uncorrected GPU ECC error */
#define ROGUE_FWIF_DM_STATE_GPU_ECC_HWR BIT(10)

/* Firmware's connection state */
enum rogue_fwif_connection_fw_state {
	/* Firmware is offline */
	ROGUE_FW_CONNECTION_FW_OFFLINE = 0,
	/* Firmware is initialised */
	ROGUE_FW_CONNECTION_FW_READY,
	/* Firmware connection is fully established */
	ROGUE_FW_CONNECTION_FW_ACTIVE,
	/* Firmware is clearing up connection data*/
	ROGUE_FW_CONNECTION_FW_OFFLOADING,
	ROGUE_FW_CONNECTION_FW_STATE_COUNT
};

/* OS' connection state */
enum rogue_fwif_connection_os_state {
	/* OS is offline */
	ROGUE_FW_CONNECTION_OS_OFFLINE = 0,
	/* OS's KM driver is setup and waiting */
	ROGUE_FW_CONNECTION_OS_READY,
	/* OS connection is fully established */
	ROGUE_FW_CONNECTION_OS_ACTIVE,
	ROGUE_FW_CONNECTION_OS_STATE_COUNT
};

struct rogue_fwif_os_runtime_flags {
	unsigned int os_state : 3;
	unsigned int fl_ok : 1;
	unsigned int fl_grow_pending : 1;
	unsigned int isolated_os : 1;
	unsigned int reserved : 26;
};

#define PVR_SLR_LOG_ENTRIES 10
/* MAX_CLIENT_CCB_NAME not visible to this header */
#define PVR_SLR_LOG_STRLEN 30

struct rogue_fwif_slr_entry {
	aligned_u64 timestamp;
	u32 fw_ctx_addr;
	u32 num_ufos;
	char ccb_name[PVR_SLR_LOG_STRLEN];
	char padding[2];
} __aligned(8);

#define MAX_THREAD_NUM 2

/* firmware trace control data */
struct rogue_fwif_tracebuf {
	u32 log_type;
	struct rogue_fwif_tracebuf_space tracebuf[MAX_THREAD_NUM];
	/*
	 * Member initialised only when sTraceBuf is actually allocated (in
	 * ROGUETraceBufferInitOnDemandResources)
	 */
	u32 tracebuf_size_in_dwords;
	/* Compatibility and other flags */
	u32 tracebuf_flags;
} __aligned(8);

/* firmware system data shared with the Host driver */
struct rogue_fwif_sysdata {
	/* Configuration flags from host */
	u32 config_flags;
	/* Extended configuration flags from host */
	u32 config_flags_ext;
	enum rogue_fwif_pow_state pow_state;
	u32 hw_perf_ridx;
	u32 hw_perf_widx;
	u32 hw_perf_wrap_count;
	/* Constant after setup, needed in FW */
	u32 hw_perf_size;
	/* The number of times the FW drops a packet due to buffer full */
	u32 hw_perf_drop_count;

	/*
	 * ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid
	 * when FW is built with ROGUE_HWPERF_UTILIZATION &
	 * ROGUE_HWPERF_DROP_TRACKING defined in rogue_fw_hwperf.c
	 */
	/* Buffer utilisation, high watermark of bytes in use */
	u32 hw_perf_ut;
	/* The ordinal of the first packet the FW dropped */
	u32 first_drop_ordinal;
	/* The ordinal of the last packet the FW dropped */
	u32 last_drop_ordinal;
	/* State flags for each Operating System mirrored from Fw coremem */
	struct rogue_fwif_os_runtime_flags
		os_runtime_flags_mirror[ROGUE_FW_MAX_NUM_OS];

	struct rogue_fw_fault_info fault_info[ROGUE_FWIF_FWFAULTINFO_MAX];
	u32 fw_faults;
	u32 cr_poll_addr[MAX_THREAD_NUM];
	u32 cr_poll_mask[MAX_THREAD_NUM];
	u32 cr_poll_count[MAX_THREAD_NUM];
	aligned_u64 start_idle_time;

#if defined(SUPPORT_ROGUE_FW_STATS_FRAMEWORK)
# define ROGUE_FWIF_STATS_FRAMEWORK_LINESIZE (8)
# define ROGUE_FWIF_STATS_FRAMEWORK_MAX \
	(2048 * ROGUE_FWIF_STATS_FRAMEWORK_LINESIZE)
	u32 fw_stats_buf[ROGUE_FWIF_STATS_FRAMEWORK_MAX] __aligned(8);
#endif
	u32 hwr_state_flags;
	u32 hwr_recovery_flags[PVR_FWIF_DM_MAX];
	/* Compatibility and other flags */
	u32 fw_sys_data_flags;
	/* Identify whether MC config is P-P or P-S */
	u32 mc_config;
} __aligned(8);

/* per-os firmware shared data */
struct rogue_fwif_osdata {
	/* Configuration flags from an OS */
	u32 fw_os_config_flags;
	/* Markers to signal that the host should perform a full sync check */
	u32 fw_sync_check_mark;
	u32 host_sync_check_mark;

	u32 forced_updates_requested;
	u8 slr_log_wp;
	struct rogue_fwif_slr_entry slr_log_first;
	struct rogue_fwif_slr_entry slr_log[PVR_SLR_LOG_ENTRIES];
	aligned_u64 last_forced_update_time;

	/* Interrupt count from Threads > */
	u32 interrupt_count[MAX_THREAD_NUM];
	u32 kccb_cmds_executed;
	u32 power_sync_fw_addr;
	/* Compatibility and other flags */
	u32 fw_os_data_flags;
	u32 padding;
} __aligned(8);

/* Firmware trace time-stamp field breakup */

/* ROGUE_CR_TIMER register read (48 bits) value*/
#define ROGUE_FWT_TIMESTAMP_TIME_SHIFT (0U)
#define ROGUE_FWT_TIMESTAMP_TIME_CLRMSK (0xFFFF000000000000ull)

/* Extra debug-info (16 bits) */
#define ROGUE_FWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U)
#define ROGUE_FWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK

/* Debug-info sub-fields */
/*
 * Bit 0: ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from ROGUE_CR_EVENT_STATUS
 * register
 */
#define ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U)
#define ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SET \
	BIT(ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT)

/* Bit 1: ROGUE_CR_BIF_MMU_ENTRY_PENDING bit from ROGUE_CR_BIF_MMU_ENTRY register */
#define ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U)
#define ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET \
	BIT(ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT)

/* Bit 2: ROGUE_CR_SLAVE_EVENT register is non-zero */
#define ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U)
#define ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SET \
	BIT(ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT)

/* Bit 3-15: Unused bits */

#define ROGUE_FWT_DEBUG_INFO_STR_MAXLEN 64
#define ROGUE_FWT_DEBUG_INFO_STR_PREPEND " (debug info: "
#define ROGUE_FWT_DEBUG_INFO_STR_APPEND ")"

/*
 ******************************************************************************
 * HWR Data
 ******************************************************************************
 */
enum rogue_hwrtype {
	ROGUE_HWRTYPE_UNKNOWNFAILURE = 0,
	ROGUE_HWRTYPE_OVERRUN = 1,
	ROGUE_HWRTYPE_POLLFAILURE = 2,
	ROGUE_HWRTYPE_BIF0FAULT = 3,
	ROGUE_HWRTYPE_BIF1FAULT = 4,
	ROGUE_HWRTYPE_TEXASBIF0FAULT = 5,
	ROGUE_HWRTYPE_MMUFAULT = 6,
	ROGUE_HWRTYPE_MMUMETAFAULT = 7,
	ROGUE_HWRTYPE_MIPSTLBFAULT = 8,
	ROGUE_HWRTYPE_ECCFAULT = 9,
	ROGUE_HWRTYPE_MMURISCVFAULT = 10,
};

#define ROGUE_FWIF_HWRTYPE_BIF_BANK_GET(hwr_type) \
	(((hwr_type) == ROGUE_HWRTYPE_BIF0FAULT) ? 0 : 1)

#define ROGUE_FWIF_HWRTYPE_PAGE_FAULT_GET(hwr_type)       \
	((((hwr_type) == ROGUE_HWRTYPE_BIF0FAULT) ||      \
	  ((hwr_type) == ROGUE_HWRTYPE_BIF1FAULT) ||      \
	  ((hwr_type) == ROGUE_HWRTYPE_TEXASBIF0FAULT) || \
	  ((hwr_type) == ROGUE_HWRTYPE_MMUFAULT) ||       \
	  ((hwr_type) == ROGUE_HWRTYPE_MMUMETAFAULT) ||   \
	  ((hwr_type) == ROGUE_HWRTYPE_MIPSTLBFAULT) ||   \
	  ((hwr_type) == ROGUE_HWRTYPE_MMURISCVFAULT))    \
		 ? true                                   \
		 : false)

struct rogue_bifinfo {
	aligned_u64 bif_req_status;
	aligned_u64 bif_mmu_status;
	aligned_u64 pc_address; /* phys address of the page catalogue */
	aligned_u64 reserved;
};

struct rogue_eccinfo {
	u32 fault_gpu;
};

struct rogue_mmuinfo {
	aligned_u64 mmu_status[2];
	aligned_u64 pc_address; /* phys address of the page catalogue */
	aligned_u64 reserved;
};

struct rogue_pollinfo {
	u32 thread_num;
	u32 cr_poll_addr;
	u32 cr_poll_mask;
	u32 cr_poll_last_value;
	aligned_u64 reserved;
} __aligned(8);

struct rogue_tlbinfo {
	u32 bad_addr;
	u32 entry_lo;
};

struct rogue_hwrinfo {
	union {
		struct rogue_bifinfo bif_info;
		struct rogue_mmuinfo mmu_info;
		struct rogue_pollinfo poll_info;
		struct rogue_tlbinfo tlb_info;
		struct rogue_eccinfo ecc_info;
	} hwr_data;

	aligned_u64 cr_timer;
	aligned_u64 os_timer;
	u32 frame_num;
	u32 pid;
	u32 active_hwrt_data;
	u32 hwr_number;
	u32 event_status;
	u32 hwr_recovery_flags;
	enum rogue_hwrtype hwr_type;
	u32 dm;
	u32 core_id;
	aligned_u64 cr_time_of_kick;
	aligned_u64 cr_time_hw_reset_start;
	aligned_u64 cr_time_hw_reset_finish;
	aligned_u64 cr_time_freelist_ready;
	aligned_u64 reserved[2];
} __aligned(8);

/* Number of first HWR logs recorded (never overwritten by newer logs) */
#define ROGUE_FWIF_HWINFO_MAX_FIRST 8U
/* Number of latest HWR logs (older logs are overwritten by newer logs) */
#define ROGUE_FWIF_HWINFO_MAX_LAST 8U
/* Total number of HWR logs stored in a buffer */
#define ROGUE_FWIF_HWINFO_MAX \
	(ROGUE_FWIF_HWINFO_MAX_FIRST + ROGUE_FWIF_HWINFO_MAX_LAST)
/* Index of the last log in the HWR log buffer */
#define ROGUE_FWIF_HWINFO_LAST_INDEX (ROGUE_FWIF_HWINFO_MAX - 1U)

struct rogue_fwif_hwrinfobuf {
	struct rogue_hwrinfo hwr_info[ROGUE_FWIF_HWINFO_MAX];
	u32 hwr_counter;
	u32 write_index;
	u32 dd_req_count;
	u32 hwr_info_buf_flags; /* Compatibility and other flags */
	u32 hwr_dm_locked_up_count[PVR_FWIF_DM_MAX];
	u32 hwr_dm_overran_count[PVR_FWIF_DM_MAX];
	u32 hwr_dm_recovered_count[PVR_FWIF_DM_MAX];
	u32 hwr_dm_false_detect_count[PVR_FWIF_DM_MAX];
} __aligned(8);

#define ROGUE_FWIF_CTXSWITCH_PROFILE_FAST_EN (1)
#define ROGUE_FWIF_CTXSWITCH_PROFILE_MEDIUM_EN (2)
#define ROGUE_FWIF_CTXSWITCH_PROFILE_SLOW_EN (3)
#define ROGUE_FWIF_CTXSWITCH_PROFILE_NODELAY_EN (4)

#define ROGUE_FWIF_CDM_ARBITRATION_TASK_DEMAND_EN (1)
#define ROGUE_FWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (2)

#define ROGUE_FWIF_ISP_SCHEDMODE_VER1_IPP (1)
#define ROGUE_FWIF_ISP_SCHEDMODE_VER2_ISP (2)
/*
 ******************************************************************************
 * ROGUE firmware Init Config Data
 ******************************************************************************
 */

/* Flag definitions affecting the firmware globally */
#define ROGUE_FWIF_INICFG_CTXSWITCH_MODE_RAND BIT(0)
#define ROGUE_FWIF_INICFG_CTXSWITCH_SRESET_EN BIT(1)
#define ROGUE_FWIF_INICFG_HWPERF_EN BIT(2)
#define ROGUE_FWIF_INICFG_DM_KILL_MODE_RAND_EN BIT(3)
#define ROGUE_FWIF_INICFG_POW_RASCALDUST BIT(4)
/* Bit 5 is reserved. */
#define ROGUE_FWIF_INICFG_FBCDC_V3_1_EN BIT(6)
#define ROGUE_FWIF_INICFG_CHECK_MLIST_EN BIT(7)
#define ROGUE_FWIF_INICFG_DISABLE_CLKGATING_EN BIT(8)
/* Bit 9 is reserved. */
/* Bit 10 is reserved. */
/* Bit 11 is reserved. */
#define ROGUE_FWIF_INICFG_REGCONFIG_EN BIT(12)
#define ROGUE_FWIF_INICFG_ASSERT_ON_OUTOFMEMORY BIT(13)
#define ROGUE_FWIF_INICFG_HWP_DISABLE_FILTER BIT(14)
/* Bit 15 is reserved. */
#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16)
#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_FAST \
	(ROGUE_FWIF_CTXSWITCH_PROFILE_FAST_EN \
	 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM \
	(ROGUE_FWIF_CTXSWITCH_PROFILE_MEDIUM_EN \
	 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SLOW \
	(ROGUE_FWIF_CTXSWITCH_PROFILE_SLOW_EN \
	 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_NODELAY \
	(ROGUE_FWIF_CTXSWITCH_PROFILE_NODELAY_EN \
	 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MASK \
	(7 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
#define ROGUE_FWIF_INICFG_DISABLE_DM_OVERLAP BIT(19)
#define ROGUE_FWIF_INICFG_ASSERT_ON_HWR_TRIGGER BIT(20)
#define ROGUE_FWIF_INICFG_FABRIC_COHERENCY_ENABLED BIT(21)
#define ROGUE_FWIF_INICFG_VALIDATE_IRQ BIT(22)
#define ROGUE_FWIF_INICFG_DISABLE_PDP_EN BIT(23)
#define ROGUE_FWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN BIT(24)
#define ROGUE_FWIF_INICFG_WORKEST BIT(25)
#define ROGUE_FWIF_INICFG_PDVFS BIT(26)
#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT (27)
#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND \
	(ROGUE_FWIF_CDM_ARBITRATION_TASK_DEMAND_EN \
	 << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT)
#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN \
	(ROGUE_FWIF_CDM_ARBITRATION_ROUND_ROBIN_EN \
	 << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT)
#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_MASK \
	(3 << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT)
#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT (29)
#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_NONE (0)
#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER1_IPP \
	(ROGUE_FWIF_ISP_SCHEDMODE_VER1_IPP \
	 << ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT)
#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER2_ISP \
	(ROGUE_FWIF_ISP_SCHEDMODE_VER2_ISP \
	 << ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT)
#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_MASK \
	(ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER1_IPP | \
	 ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER2_ISP)
#define ROGUE_FWIF_INICFG_VALIDATE_SOCUSC_TIMER BIT(31)

#define ROGUE_FWIF_INICFG_ALL (0xFFFFFFFFU)

/* Extended Flag definitions affecting the firmware globally */
#define ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_SHIFT (0)
/* [7] YUV10 override
 * [6:4] Quality
 * [3] Quality enable
 * [2:1] Compression scheme
 * [0] Lossy group
 */
#define ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_MASK (0xFF)
#define ROGUE_FWIF_INICFG_EXT_ALL (ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_MASK)

/* Flag definitions affecting only workloads submitted by a particular OS */
#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_TDM_EN BIT(0)
#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_GEOM_EN BIT(1)
#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_FRAG_EN BIT(2)
#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_CDM_EN BIT(3)

#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_TDM BIT(4)
#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_GEOM BIT(5)
#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_FRAG BIT(6)
#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_CDM BIT(7)

#define ROGUE_FWIF_INICFG_OS_ALL (0xFF)

#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_DM_ALL \
	(ROGUE_FWIF_INICFG_OS_CTXSWITCH_TDM_EN | \
	 ROGUE_FWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \
	 ROGUE_FWIF_INICFG_OS_CTXSWITCH_FRAG_EN | \
	 ROGUE_FWIF_INICFG_OS_CTXSWITCH_CDM_EN)

#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_CLRMSK \
	~(ROGUE_FWIF_INICFG_OS_CTXSWITCH_DM_ALL)

#define ROGUE_FWIF_FILTCFG_TRUNCATE_HALF BIT(3)
#define ROGUE_FWIF_FILTCFG_TRUNCATE_INT BIT(2)
#define ROGUE_FWIF_FILTCFG_NEW_FILTER_MODE BIT(1)

enum rogue_activepm_conf {
	ROGUE_ACTIVEPM_FORCE_OFF = 0,
	ROGUE_ACTIVEPM_FORCE_ON = 1,
	ROGUE_ACTIVEPM_DEFAULT = 2
};

enum rogue_rd_power_island_conf {
	ROGUE_RD_POWER_ISLAND_FORCE_OFF = 0,
	ROGUE_RD_POWER_ISLAND_FORCE_ON = 1,
	ROGUE_RD_POWER_ISLAND_DEFAULT = 2
};

struct rogue_fw_register_list {
	/* Register number */
	u16 reg_num;
	/* Indirect register number (or 0 if not used) */
	u16 indirect_reg_num;
	/* Start value for indirect register */
	u16 indirect_start_val;
	/* End value for indirect register */
	u16 indirect_end_val;
};

struct rogue_fwif_dllist_node {
	u32 p;
	u32 n;
};

/*
 * This number is used to represent an invalid page catalogue physical address
 */
#define ROGUE_FWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU

/* This number is used to represent unallocated page catalog base register */
#define ROGUE_FW_BIF_INVALID_PCSET 0xFFFFFFFFU

/* Firmware memory context. */
struct rogue_fwif_fwmemcontext {
	/* device physical address of context's page catalogue */
	aligned_u64 pc_dev_paddr;
	/*
	 * associated page catalog base register (ROGUE_FW_BIF_INVALID_PCSET ==
	 * unallocated)
	 */
	u32 page_cat_base_reg_set;
	/* breakpoint address */
	u32 breakpoint_addr;
	/* breakpoint handler address */
	u32 bp_handler_addr;
	/* DM and enable control for BP */
	u32 breakpoint_ctl;
	/* Compatibility and other flags */
	u32 fw_mem_ctx_flags;
	u32 padding;
} __aligned(8);

/*
 * FW context state flags
 */
#define ROGUE_FWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U)
#define ROGUE_FWIF_CONTEXT_FLAGS_MC_NEED_RESUME_MASKFULL (0x000000FFU)
#define ROGUE_FWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000100U)
#define ROGUE_FWIF_CONTEXT_FLAGS_LAST_KICK_SECURE (0x00000200U)

#define ROGUE_NUM_GEOM_CORES_MAX 4

/*
 * FW-accessible TA state which must be written out to memory on context store
 */
struct rogue_fwif_geom_ctx_state_per_geom {
	/* To store in mid-TA */
	aligned_u64 geom_reg_vdm_call_stack_pointer;
	/* Initial value (in case is 'lost' due to a lock-up */
	aligned_u64 geom_reg_vdm_call_stack_pointer_init;
	u32 geom_reg_vbs_so_prim[4];
	u16 geom_current_idx;
	u16 padding[3];
} __aligned(8);

struct rogue_fwif_geom_ctx_state {
	/* FW-accessible TA state which must be written out to memory on context store */
	struct rogue_fwif_geom_ctx_state_per_geom geom_core[ROGUE_NUM_GEOM_CORES_MAX];
} __aligned(8);

/*
 * FW-accessible ISP state which must be written out to memory on context store
 */
struct rogue_fwif_frag_ctx_state {
	u32 frag_reg_pm_deallocated_mask_status;
	u32 frag_reg_dm_pds_mtilefree_status;
	/* Compatibility and other flags */
	u32 ctx_state_flags;
	/*
	 * frag_reg_isp_store should be the last element of the structure as this
	 * is an array whose size is determined at runtime after detecting the
	 * ROGUE core
	 */
	u32 frag_reg_isp_store[];
} __aligned(8);

#define ROGUE_FWIF_CTX_USING_BUFFER_A (0)
#define ROGUE_FWIF_CTX_USING_BUFFER_B (1U)

struct rogue_fwif_compute_ctx_state {
	u32 ctx_state_flags; /* Target buffer and other flags */
};

struct rogue_fwif_fwcommoncontext {
	/* CCB details for this firmware context */
	u32 ccbctl_fw_addr; /* CCB control */
	u32 ccb_fw_addr; /* CCB base */
	struct rogue_fwif_dma_addr ccb_meta_dma_addr;

	/* Context suspend state */
	/* geom/frag context suspend state, read/written by FW */
	u32 context_state_addr __aligned(8);

	/* Flags e.g. for context switching */
	u32 fw_com_ctx_flags;
	u32 priority;
	u32 priority_seq_num;

	/* Framework state */
	/* Register updates for Framework */
	u32 rf_cmd_addr __aligned(8);

	/* Statistic updates waiting to be passed back to the host... */
	/* True when some stats are pending */
	bool stats_pending __aligned(4);
	/* Number of stores on this context since last update */
	s32 stats_num_stores;
	/* Number of OOMs on this context since last update */
	s32 stats_num_out_of_memory;
	/* Number of PRs on this context since last update */
	s32 stats_num_partial_renders;
	/* Data Master type */
	u32 dm;
	/* Device Virtual Address of the signal the context is waiting on */
	aligned_u64 wait_signal_address;
	/* List entry for the wait-signal list */
	struct rogue_fwif_dllist_node wait_signal_node __aligned(8);
	/* List entry for the buffer stalled list */
	struct rogue_fwif_dllist_node buf_stalled_node __aligned(8);
	/* Address of the circular buffer queue pointers */
	aligned_u64 cbuf_queue_ctrl_addr;

	aligned_u64 robustness_address;
	/* Max HWR deadline limit in ms */
	u32 max_deadline_ms;
	/* Following HWR circular buffer read-offset needs resetting */
	bool read_offset_needs_reset;

	/* List entry for the waiting list */
	struct rogue_fwif_dllist_node waiting_node __aligned(8);
	/* List entry for the run list */
	struct rogue_fwif_dllist_node run_node __aligned(8);
	/* UFO that last failed (or NULL) */
	struct rogue_fwif_ufo last_failed_ufo;

	/* Memory context */
	u32 fw_mem_context_fw_addr;

	/* References to the host side originators */
	/* the Server Common Context */
	u32 server_common_context_id;
	/* associated process ID */
	u32 pid;

	/* True when Geom DM OOM is not allowed */
	bool geom_oom_disabled __aligned(4);
} __aligned(8);

/* Firmware render context. */
struct rogue_fwif_fwrendercontext {
	/* Geometry firmware context. */
	struct rogue_fwif_fwcommoncontext geom_context;
	/* Fragment firmware context. */
	struct rogue_fwif_fwcommoncontext frag_context;

	struct rogue_fwif_static_rendercontext_state static_render_context_state;

	/* Number of commands submitted to the WorkEst FW CCB */
	u32 work_est_ccb_submitted;

	/* Compatibility and other flags */
	u32 fw_render_ctx_flags;
} __aligned(8);

/* Firmware compute context. */
struct rogue_fwif_fwcomputecontext {
	/* Firmware context for the CDM */
	struct rogue_fwif_fwcommoncontext cdm_context;

	struct rogue_fwif_static_computecontext_state
		static_compute_context_state;

	/* Number of commands submitted to the WorkEst FW CCB */
	u32 work_est_ccb_submitted;

	/* Compatibility and other flags */
	u32 compute_ctx_flags;

	u32 wgp_state;
	u32 wgp_checksum;
	u32 core_mask_a;
	u32 core_mask_b;
} __aligned(8);

/* Firmware TDM context. */
struct rogue_fwif_fwtdmcontext {
	/* Firmware context for the TDM */
	struct rogue_fwif_fwcommoncontext tdm_context;

	/* Number of commands submitted to the WorkEst FW CCB */
	u32 work_est_ccb_submitted;
} __aligned(8);

/* Firmware TQ3D context. */
struct rogue_fwif_fwtransfercontext {
	/* Firmware context for TQ3D. */
	struct rogue_fwif_fwcommoncontext tq_context;
} __aligned(8);

/*
 ******************************************************************************
 * Defines for CMD_TYPE corruption detection and forward compatibility check
 ******************************************************************************
 */

/*
 * CMD_TYPE 32bit contains:
 * 31:16 Reserved for magic value to detect corruption (16 bits)
 * 15 Reserved for ROGUE_CCB_TYPE_TASK (1 bit)
 * 14:0 Bits available for CMD_TYPEs (15 bits)
 */

/* Magic value to detect corruption */
#define ROGUE_CMD_MAGIC_DWORD (0x2ABC)
#define ROGUE_CMD_MAGIC_DWORD_MASK (0xFFFF0000U)
#define ROGUE_CMD_MAGIC_DWORD_SHIFT (16U)
#define ROGUE_CMD_MAGIC_DWORD_SHIFTED \
	(ROGUE_CMD_MAGIC_DWORD << ROGUE_CMD_MAGIC_DWORD_SHIFT)

/* Kernel CCB control for ROGUE */
struct rogue_fwif_ccb_ctl {
	/* write offset into array of commands (MUST be aligned to 16 bytes!) */
	u32 write_offset;
	/* Padding to ensure read and write offsets are in separate cache lines. */
	u8 padding[128 - sizeof(u32)];
	/* read offset into array of commands */
	u32 read_offset;
	/* Offset wrapping mask (Total capacity of the CCB - 1) */
	u32 wrap_mask;
	/* size of each command in bytes */
	u32 cmd_size;
	u32 padding2;
} __aligned(8);

/* Kernel CCB command structure for ROGUE */

#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PT (0x1U) /* MMU_CTRL_INVAL_PT_EN */
#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */
#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */

/*
 * can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT
 * bit from BIF_CTRL reg
 */
#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10)
/* BIF_CTRL_INVAL_TLB1_EN */
#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_TLB \
	(ROGUE_FWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8)
/* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800)

/* indicates FW should interrupt the host */
#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U)

struct rogue_fwif_mmucachedata {
	u32 cache_flags;
	u32 mmu_cache_sync_fw_addr;
	u32 mmu_cache_sync_update_value;
};

#define ROGUE_FWIF_BPDATA_FLAGS_ENABLE BIT(0)
#define ROGUE_FWIF_BPDATA_FLAGS_WRITE BIT(1)
#define ROGUE_FWIF_BPDATA_FLAGS_CTL BIT(2)
#define ROGUE_FWIF_BPDATA_FLAGS_REGS BIT(3)

struct rogue_fwif_bpdata {
	/* Memory context */
	u32 fw_mem_context_fw_addr;
	/* Breakpoint address */
	u32 bp_addr;
	/* Breakpoint handler */
	u32 bp_handler_addr;
	/* Breakpoint control */
	u32 bp_dm;
	u32 bp_data_flags;
	/* Number of temporary registers to overallocate */
	u32 temp_regs;
	/* Number of shared registers to overallocate */
	u32 shared_regs;
	/* DM associated with the breakpoint */
	u32 dm;
};

#define ROGUE_FWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS \
	(ROGUE_FWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */

struct rogue_fwif_kccb_cmd_kick_data {
	/* address of the firmware context */
	u32 context_fw_addr;
	/* Client CCB woff update */
	u32 client_woff_update;
	/* Client CCB wrap mask update after CCCB growth */
	u32 client_wrap_mask_update;
	/* number of CleanupCtl pointers attached */
	u32 num_cleanup_ctl;
	/* CleanupCtl structures associated with command */
	u32 cleanup_ctl_fw_addr
		[ROGUE_FWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS];
	/*
	 * offset to the CmdHeader which houses the workload estimation kick
	 * data.
	 */
	u32 work_est_cmd_header_offset;
};

struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data {
	struct rogue_fwif_kccb_cmd_kick_data geom_cmd_kick_data;
	struct rogue_fwif_kccb_cmd_kick_data frag_cmd_kick_data;
};

struct rogue_fwif_kccb_cmd_force_update_data {
	/* address of the firmware context */
	u32 context_fw_addr;
	/* Client CCB fence offset */
	u32 ccb_fence_offset;
};

enum rogue_fwif_cleanup_type {
	/* FW common context cleanup */
	ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT,
	/* FW HW RT data cleanup */
	ROGUE_FWIF_CLEANUP_HWRTDATA,
	/* FW freelist cleanup */
	ROGUE_FWIF_CLEANUP_FREELIST,
	/* FW ZS Buffer cleanup */
	ROGUE_FWIF_CLEANUP_ZSBUFFER,
};

struct rogue_fwif_cleanup_request {
	/* Cleanup type */
	enum rogue_fwif_cleanup_type cleanup_type;
	union {
		/* FW common context to cleanup */
		u32 context_fw_addr;
		/* HW RT to cleanup */
		u32 hwrt_data_fw_addr;
		/* Freelist to cleanup */
		u32 freelist_fw_addr;
		/* ZS Buffer to cleanup */
		u32 zs_buffer_fw_addr;
	} cleanup_data;
};
+ enum rogue_fwif_power_type { 929 + ROGUE_FWIF_POW_OFF_REQ = 1, 930 + ROGUE_FWIF_POW_FORCED_IDLE_REQ, 931 + ROGUE_FWIF_POW_NUM_UNITS_CHANGE, 932 + ROGUE_FWIF_POW_APM_LATENCY_CHANGE 933 + }; 934 + 935 + enum rogue_fwif_power_force_idle_type { 936 + ROGUE_FWIF_POWER_FORCE_IDLE = 1, 937 + ROGUE_FWIF_POWER_CANCEL_FORCED_IDLE, 938 + ROGUE_FWIF_POWER_HOST_TIMEOUT, 939 + }; 940 + 941 + struct rogue_fwif_power_request { 942 + /* Type of power request */ 943 + enum rogue_fwif_power_type pow_type; 944 + union { 945 + /* Number of active Dusts */ 946 + u32 num_of_dusts; 947 + /* If the operation is mandatory */ 948 + bool forced __aligned(4); 949 + /* 950 + * Type of Request. Consolidating Force Idle, Cancel Forced 951 + * Idle, Host Timeout 952 + */ 953 + enum rogue_fwif_power_force_idle_type pow_request_type; 954 + } power_req_data; 955 + }; 956 + 957 + struct rogue_fwif_slcflushinvaldata { 958 + /* Context to fence on (only useful when bDMContext == TRUE) */ 959 + u32 context_fw_addr; 960 + /* Invalidate the cache as well as flushing */ 961 + bool inval __aligned(4); 962 + /* The data to flush/invalidate belongs to a specific DM context */ 963 + bool dm_context __aligned(4); 964 + /* Optional address of range (only useful when bDMContext == FALSE) */ 965 + aligned_u64 address; 966 + /* Optional size of range (only useful when bDMContext == FALSE) */ 967 + aligned_u64 size; 968 + }; 969 + 970 + enum rogue_fwif_hwperf_update_config { 971 + ROGUE_FWIF_HWPERF_CTRL_TOGGLE = 0, 972 + ROGUE_FWIF_HWPERF_CTRL_SET = 1, 973 + ROGUE_FWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2 974 + }; 975 + 976 + struct rogue_fwif_hwperf_ctrl { 977 + enum rogue_fwif_hwperf_update_config opcode; /* Control operation code */ 978 + aligned_u64 mask; /* Mask of events to toggle */ 979 + }; 980 + 981 + struct rogue_fwif_hwperf_config_enable_blks { 982 + /* Number of ROGUE_HWPERF_CONFIG_MUX_CNTBLK in the array */ 983 + u32 num_blocks; 984 + /* Address of the ROGUE_HWPERF_CONFIG_MUX_CNTBLK array */ 985 + u32 
block_configs_fw_addr; 986 + }; 987 + 988 + struct rogue_fwif_hwperf_config_da_blks { 989 + /* Number of ROGUE_HWPERF_CONFIG_CNTBLK in the array */ 990 + u32 num_blocks; 991 + /* Address of the ROGUE_HWPERF_CONFIG_CNTBLK array */ 992 + u32 block_configs_fw_addr; 993 + }; 994 + 995 + struct rogue_fwif_coreclkspeedchange_data { 996 + u32 new_clock_speed; /* New clock speed */ 997 + }; 998 + 999 + #define ROGUE_FWIF_HWPERF_CTRL_BLKS_MAX 16 1000 + 1001 + struct rogue_fwif_hwperf_ctrl_blks { 1002 + bool enable; 1003 + /* Number of block IDs in the array */ 1004 + u32 num_blocks; 1005 + /* Array of ROGUE_HWPERF_CNTBLK_ID values */ 1006 + u16 block_ids[ROGUE_FWIF_HWPERF_CTRL_BLKS_MAX]; 1007 + }; 1008 + 1009 + struct rogue_fwif_hwperf_select_custom_cntrs { 1010 + u16 custom_block; 1011 + u16 num_counters; 1012 + u32 custom_counter_ids_fw_addr; 1013 + }; 1014 + 1015 + struct rogue_fwif_zsbuffer_backing_data { 1016 + u32 zs_buffer_fw_addr; /* ZS-Buffer FW address */ 1017 + 1018 + bool done __aligned(4); /* action backing/unbacking succeeded */ 1019 + }; 1020 + 1021 + struct rogue_fwif_freelist_gs_data { 1022 + /* Freelist FW address */ 1023 + u32 freelist_fw_addr; 1024 + /* Amount of the Freelist change */ 1025 + u32 delta_pages; 1026 + /* New amount of pages on the freelist (including ready pages) */ 1027 + u32 new_pages; 1028 + /* Number of ready pages to be held in reserve until OOM */ 1029 + u32 ready_pages; 1030 + }; 1031 + 1032 + #define MAX_FREELISTS_SIZE 3 1033 + #define MAX_HW_GEOM_FRAG_CONTEXTS_SIZE 3 1034 + 1035 + #define ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT \ 1036 + (MAX_HW_GEOM_FRAG_CONTEXTS_SIZE * MAX_FREELISTS_SIZE * 2U) 1037 + #define ROGUE_FWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U 1038 + 1039 + struct rogue_fwif_freelists_reconstruction_data { 1040 + u32 freelist_count; 1041 + u32 freelist_ids[ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT]; 1042 + }; 1043 + 1044 + struct rogue_fwif_write_offset_update_data { 1045 + /* 1046 + * Context to that may 
need to be resumed following write offset update 1047 + */ 1048 + u32 context_fw_addr; 1049 + } __aligned(8); 1050 + 1051 + /* 1052 + ****************************************************************************** 1053 + * Proactive DVFS Structures 1054 + ****************************************************************************** 1055 + */ 1056 + #define NUM_OPP_VALUES 16 1057 + 1058 + struct pdvfs_opp { 1059 + u32 volt; /* V */ 1060 + u32 freq; /* Hz */ 1061 + } __aligned(8); 1062 + 1063 + struct rogue_fwif_pdvfs_opp { 1064 + struct pdvfs_opp opp_values[NUM_OPP_VALUES]; 1065 + u32 min_opp_point; 1066 + u32 max_opp_point; 1067 + } __aligned(8); 1068 + 1069 + struct rogue_fwif_pdvfs_max_freq_data { 1070 + u32 max_opp_point; 1071 + } __aligned(8); 1072 + 1073 + struct rogue_fwif_pdvfs_min_freq_data { 1074 + u32 min_opp_point; 1075 + } __aligned(8); 1076 + 1077 + /* 1078 + ****************************************************************************** 1079 + * Register configuration structures 1080 + ****************************************************************************** 1081 + */ 1082 + 1083 + #define ROGUE_FWIF_REG_CFG_MAX_SIZE 512 1084 + 1085 + enum rogue_fwif_regdata_cmd_type { 1086 + ROGUE_FWIF_REGCFG_CMD_ADD = 101, 1087 + ROGUE_FWIF_REGCFG_CMD_CLEAR = 102, 1088 + ROGUE_FWIF_REGCFG_CMD_ENABLE = 103, 1089 + ROGUE_FWIF_REGCFG_CMD_DISABLE = 104 1090 + }; 1091 + 1092 + enum rogue_fwif_reg_cfg_type { 1093 + /* Sidekick power event */ 1094 + ROGUE_FWIF_REG_CFG_TYPE_PWR_ON = 0, 1095 + /* Rascal / dust power event */ 1096 + ROGUE_FWIF_REG_CFG_TYPE_DUST_CHANGE, 1097 + /* Geometry kick */ 1098 + ROGUE_FWIF_REG_CFG_TYPE_GEOM, 1099 + /* Fragment kick */ 1100 + ROGUE_FWIF_REG_CFG_TYPE_FRAG, 1101 + /* Compute kick */ 1102 + ROGUE_FWIF_REG_CFG_TYPE_CDM, 1103 + /* TLA kick */ 1104 + ROGUE_FWIF_REG_CFG_TYPE_TLA, 1105 + /* TDM kick */ 1106 + ROGUE_FWIF_REG_CFG_TYPE_TDM, 1107 + /* Applies to all types. 
Keep as last element */ 1108 + ROGUE_FWIF_REG_CFG_TYPE_ALL 1109 + }; 1110 + 1111 + struct rogue_fwif_reg_cfg_rec { 1112 + u64 sddr; 1113 + u64 mask; 1114 + u64 value; 1115 + }; 1116 + 1117 + struct rogue_fwif_regconfig_data { 1118 + enum rogue_fwif_regdata_cmd_type cmd_type; 1119 + enum rogue_fwif_reg_cfg_type reg_config_type; 1120 + struct rogue_fwif_reg_cfg_rec reg_config __aligned(8); 1121 + }; 1122 + 1123 + struct rogue_fwif_reg_cfg { 1124 + /* 1125 + * PDump WRW command write granularity is 32 bits. 1126 + * Add padding to ensure array size is 32 bit granular. 1127 + */ 1128 + u8 num_regs_type[ALIGN((u32)ROGUE_FWIF_REG_CFG_TYPE_ALL, 1129 + sizeof(u32))] __aligned(8); 1130 + struct rogue_fwif_reg_cfg_rec 1131 + reg_configs[ROGUE_FWIF_REG_CFG_MAX_SIZE] __aligned(8); 1132 + } __aligned(8); 1133 + 1134 + enum rogue_fwif_os_state_change { 1135 + ROGUE_FWIF_OS_ONLINE = 1, 1136 + ROGUE_FWIF_OS_OFFLINE 1137 + }; 1138 + 1139 + struct rogue_fwif_os_state_change_data { 1140 + u32 osid; 1141 + enum rogue_fwif_os_state_change new_os_state; 1142 + } __aligned(8); 1143 + 1144 + enum rogue_fwif_counter_dump_request { 1145 + ROGUE_FWIF_PWR_COUNTER_DUMP_START = 1, 1146 + ROGUE_FWIF_PWR_COUNTER_DUMP_STOP, 1147 + ROGUE_FWIF_PWR_COUNTER_DUMP_SAMPLE, 1148 + }; 1149 + 1150 + struct rogue_fwif_counter_dump_data { 1151 + enum rogue_fwif_counter_dump_request counter_dump_request; 1152 + } __aligned(8); 1153 + 1154 + enum rogue_fwif_kccb_cmd_type { 1155 + /* Common commands */ 1156 + ROGUE_FWIF_KCCB_CMD_KICK = 101U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1157 + ROGUE_FWIF_KCCB_CMD_MMUCACHE = 102U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1158 + ROGUE_FWIF_KCCB_CMD_BP = 103U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1159 + /* SLC flush and invalidation request */ 1160 + ROGUE_FWIF_KCCB_CMD_SLCFLUSHINVAL = 105U | 1161 + ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1162 + /* 1163 + * Requests cleanup of a FW resource (type specified in the command 1164 + * data) 1165 + */ 1166 + ROGUE_FWIF_KCCB_CMD_CLEANUP = 106U | 
ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1167 + /* Power request */ 1168 + ROGUE_FWIF_KCCB_CMD_POW = 107U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1169 + /* Backing for on-demand ZS-Buffer done */ 1170 + ROGUE_FWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 1171 + 108U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1172 + /* Unbacking for on-demand ZS-Buffer done */ 1173 + ROGUE_FWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 1174 + 109U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1175 + /* Freelist Grow done */ 1176 + ROGUE_FWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 1177 + 110U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1178 + /* Freelists Reconstruction done */ 1179 + ROGUE_FWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 1180 + 112U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1181 + /* 1182 + * Informs the firmware that the host has added more data to a CDM2 1183 + * Circular Buffer 1184 + */ 1185 + ROGUE_FWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 1186 + 114U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1187 + /* Health check request */ 1188 + ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK = 115U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1189 + /* Forcing signalling of all unmet UFOs for a given CCB offset */ 1190 + ROGUE_FWIF_KCCB_CMD_FORCE_UPDATE = 116U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1191 + 1192 + /* There is a geometry and a fragment command in this single kick */ 1193 + ROGUE_FWIF_KCCB_CMD_COMBINED_GEOM_FRAG_KICK = 117U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1194 + /* Informs the FW that a Guest OS has come online / offline. 
*/ 1195 + ROGUE_FWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 118U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1196 + 1197 + /* Commands only permitted to the native or host OS */ 1198 + ROGUE_FWIF_KCCB_CMD_REGCONFIG = 200U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1199 + 1200 + /* Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ 1201 + ROGUE_FWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1202 + 1203 + /* Enable or disable multiple HWPerf blocks (reusing existing configuration) */ 1204 + ROGUE_FWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1205 + /* Core clock speed change event */ 1206 + ROGUE_FWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1207 + 1208 + /* 1209 + * Ask the firmware to update its cached ui32LogType value from the (shared) 1210 + * tracebuf control structure 1211 + */ 1212 + ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1213 + /* Set a maximum frequency/OPP point */ 1214 + ROGUE_FWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1215 + /* 1216 + * Changes the relative scheduling priority for a particular OSid. 
It can 1217 + * only be serviced for the Host DDK 1218 + */ 1219 + ROGUE_FWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1220 + /* Set or clear firmware state flags */ 1221 + ROGUE_FWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1222 + 1223 + /* Set a minimum frequency/OPP point */ 1224 + ROGUE_FWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1225 + /* Configure Periodic Hardware Reset behaviour */ 1226 + ROGUE_FWIF_KCCB_CMD_PHR_CFG = 213U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1227 + 1228 + /* Configure Safety Firmware Watchdog */ 1229 + ROGUE_FWIF_KCCB_CMD_WDG_CFG = 215U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1230 + /* Controls counter dumping in the FW */ 1231 + ROGUE_FWIF_KCCB_CMD_COUNTER_DUMP = 216U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1232 + /* Configure, clear and enable multiple HWPerf blocks */ 1233 + ROGUE_FWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 217U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1234 + /* Configure the custom counters for HWPerf */ 1235 + ROGUE_FWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 218U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1236 + 1237 + /* Configure directly addressable counters for HWPerf */ 1238 + ROGUE_FWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 220U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1239 + }; 1240 + 1241 + #define ROGUE_FWIF_LAST_ALLOWED_GUEST_KCCB_CMD \ 1242 + (ROGUE_FWIF_KCCB_CMD_REGCONFIG - 1) 1243 + 1244 + /* Kernel CCB command packet */ 1245 + struct rogue_fwif_kccb_cmd { 1246 + /* Command type */ 1247 + enum rogue_fwif_kccb_cmd_type cmd_type; 1248 + /* Compatibility and other flags */ 1249 + u32 kccb_flags; 1250 + 1251 + /* 1252 + * NOTE: Make sure that uCmdData is the last member of this struct 1253 + * This is to calculate actual command size for device mem copy. 
1254 + * (Refer ROGUEGetCmdMemCopySize()) 1255 + */ 1256 + union { 1257 + /* Data for Kick command */ 1258 + struct rogue_fwif_kccb_cmd_kick_data cmd_kick_data; 1259 + /* Data for combined geom/frag Kick command */ 1260 + struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data 1261 + combined_geom_frag_cmd_kick_data; 1262 + /* Data for MMU cache command */ 1263 + struct rogue_fwif_mmucachedata mmu_cache_data; 1264 + /* Data for Breakpoint Commands */ 1265 + struct rogue_fwif_bpdata bp_data; 1266 + /* Data for SLC Flush/Inval commands */ 1267 + struct rogue_fwif_slcflushinvaldata slc_flush_inval_data; 1268 + /* Data for cleanup commands */ 1269 + struct rogue_fwif_cleanup_request cleanup_data; 1270 + /* Data for power request commands */ 1271 + struct rogue_fwif_power_request pow_data; 1272 + /* Data for HWPerf control command */ 1273 + struct rogue_fwif_hwperf_ctrl hw_perf_ctrl; 1274 + /* 1275 + * Data for HWPerf configure, clear and enable performance 1276 + * counter block command 1277 + */ 1278 + struct rogue_fwif_hwperf_config_enable_blks 1279 + hw_perf_cfg_enable_blks; 1280 + /* 1281 + * Data for HWPerf enable or disable performance counter block 1282 + * commands 1283 + */ 1284 + struct rogue_fwif_hwperf_ctrl_blks hw_perf_ctrl_blks; 1285 + /* Data for HWPerf configure the custom counters to read */ 1286 + struct rogue_fwif_hwperf_select_custom_cntrs 1287 + hw_perf_select_cstm_cntrs; 1288 + /* Data for HWPerf configure Directly Addressable blocks */ 1289 + struct rogue_fwif_hwperf_config_da_blks hw_perf_cfg_da_blks; 1290 + /* Data for core clock speed change */ 1291 + struct rogue_fwif_coreclkspeedchange_data 1292 + core_clk_speed_change_data; 1293 + /* Feedback for Z/S Buffer backing/unbacking */ 1294 + struct rogue_fwif_zsbuffer_backing_data zs_buffer_backing_data; 1295 + /* Feedback for Freelist grow/shrink */ 1296 + struct rogue_fwif_freelist_gs_data free_list_gs_data; 1297 + /* Feedback for Freelists reconstruction*/ 1298 + struct 
rogue_fwif_freelists_reconstruction_data 1299 + free_lists_reconstruction_data; 1300 + /* Data for custom register configuration */ 1301 + struct rogue_fwif_regconfig_data reg_config_data; 1302 + /* Data for informing the FW about the write offset update */ 1303 + struct rogue_fwif_write_offset_update_data 1304 + write_offset_update_data; 1305 + /* Data for setting the max frequency/OPP */ 1306 + struct rogue_fwif_pdvfs_max_freq_data pdvfs_max_freq_data; 1307 + /* Data for setting the min frequency/OPP */ 1308 + struct rogue_fwif_pdvfs_min_freq_data pdvfs_min_freq_data; 1309 + /* Data for updating the Guest Online states */ 1310 + struct rogue_fwif_os_state_change_data cmd_os_online_state_data; 1311 + /* Dev address for TBI buffer allocated on demand */ 1312 + u32 tbi_buffer_fw_addr; 1313 + /* Data for dumping of register ranges */ 1314 + struct rogue_fwif_counter_dump_data counter_dump_config_data; 1315 + /* Data for signalling all unmet fences for a given CCB */ 1316 + struct rogue_fwif_kccb_cmd_force_update_data force_update_data; 1317 + } cmd_data __aligned(8); 1318 + } __aligned(8); 1319 + 1320 + PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_fwif_kccb_cmd); 1321 + 1322 + /* 1323 + ****************************************************************************** 1324 + * Firmware CCB command structure for ROGUE 1325 + ****************************************************************************** 1326 + */ 1327 + 1328 + struct rogue_fwif_fwccb_cmd_zsbuffer_backing_data { 1329 + u32 zs_buffer_id; 1330 + }; 1331 + 1332 + struct rogue_fwif_fwccb_cmd_freelist_gs_data { 1333 + u32 freelist_id; 1334 + }; 1335 + 1336 + struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data { 1337 + u32 freelist_count; 1338 + u32 hwr_counter; 1339 + u32 freelist_ids[ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT]; 1340 + }; 1341 + 1342 + /* 1 if a page fault happened */ 1343 + #define ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF BIT(0) 1344 + /* 1 if applicable to all contexts */ 1345 + #define 
ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS BIT(1) 1346 + 1347 + struct rogue_fwif_fwccb_cmd_context_reset_data { 1348 + /* Context affected by the reset */ 1349 + u32 server_common_context_id; 1350 + /* Reason for reset */ 1351 + enum rogue_context_reset_reason reset_reason; 1352 + /* Data Master affected by the reset */ 1353 + u32 dm; 1354 + /* Job ref running at the time of reset */ 1355 + u32 reset_job_ref; 1356 + /* ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */ 1357 + u32 flags; 1358 + /* At what page catalog address */ 1359 + aligned_u64 pc_address; 1360 + /* Page fault address (only when applicable) */ 1361 + aligned_u64 fault_address; 1362 + }; 1363 + 1364 + struct rogue_fwif_fwccb_cmd_fw_pagefault_data { 1365 + /* Page fault address */ 1366 + u64 fw_fault_addr; 1367 + }; 1368 + 1369 + enum rogue_fwif_fwccb_cmd_type { 1370 + /* Requests ZSBuffer to be backed with physical pages */ 1371 + ROGUE_FWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U | 1372 + ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1373 + /* Requests ZSBuffer to be unbacked */ 1374 + ROGUE_FWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U | 1375 + ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1376 + /* Requests an on-demand freelist grow/shrink */ 1377 + ROGUE_FWIF_FWCCB_CMD_FREELIST_GROW = 103U | 1378 + ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1379 + /* Requests freelists reconstruction */ 1380 + ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION = 1381 + 104U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1382 + /* Notifies host of a HWR event on a context */ 1383 + ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION = 1384 + 105U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1385 + /* Requests an on-demand debug dump */ 1386 + ROGUE_FWIF_FWCCB_CMD_DEBUG_DUMP = 106U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1387 + /* Requests an on-demand update on process stats */ 1388 + ROGUE_FWIF_FWCCB_CMD_UPDATE_STATS = 107U | 1389 + ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1390 + 1391 + ROGUE_FWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 1392 + 108U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1393 + 
ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART = 1394 + 109U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1395 + 1396 + /* Notifies host of a FW pagefault */ 1397 + ROGUE_FWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION = 1398 + 112U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, 1399 + }; 1400 + 1401 + enum rogue_fwif_fwccb_cmd_update_stats_type { 1402 + /* 1403 + * PVRSRVStatsUpdateRenderContextStats should increase the value of the 1404 + * ui32TotalNumPartialRenders stat 1405 + */ 1406 + ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS = 1, 1407 + /* 1408 + * PVRSRVStatsUpdateRenderContextStats should increase the value of the 1409 + * ui32TotalNumOutOfMemory stat 1410 + */ 1411 + ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY, 1412 + /* 1413 + * PVRSRVStatsUpdateRenderContextStats should increase the value of the 1414 + * ui32NumGeomStores stat 1415 + */ 1416 + ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_GEOM_STORES, 1417 + /* 1418 + * PVRSRVStatsUpdateRenderContextStats should increase the value of the 1419 + * ui32NumFragStores stat 1420 + */ 1421 + ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_FRAG_STORES, 1422 + /* 1423 + * PVRSRVStatsUpdateRenderContextStats should increase the value of the 1424 + * ui32NumCDMStores stat 1425 + */ 1426 + ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES, 1427 + /* 1428 + * PVRSRVStatsUpdateRenderContextStats should increase the value of the 1429 + * ui32NumTDMStores stat 1430 + */ 1431 + ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES 1432 + }; 1433 + 1434 + struct rogue_fwif_fwccb_cmd_update_stats_data { 1435 + /* Element to update */ 1436 + enum rogue_fwif_fwccb_cmd_update_stats_type element_to_update; 1437 + /* The pid of the process whose stats are being updated */ 1438 + u32 pid_owner; 1439 + /* Adjustment to be made to the statistic */ 1440 + s32 adjustment_value; 1441 + }; 1442 + 1443 + struct rogue_fwif_fwccb_cmd_core_clk_rate_change_data { 1444 + u32 core_clk_rate; 1445 + } __aligned(8); 1446 + 1447 + struct rogue_fwif_fwccb_cmd { 1448 + /* Command type */ 1449 + enum 
rogue_fwif_fwccb_cmd_type cmd_type; 1450 + /* Compatibility and other flags */ 1451 + u32 fwccb_flags; 1452 + 1453 + union { 1454 + /* Data for Z/S-Buffer on-demand (un)backing*/ 1455 + struct rogue_fwif_fwccb_cmd_zsbuffer_backing_data 1456 + cmd_zs_buffer_backing; 1457 + /* Data for on-demand freelist grow/shrink */ 1458 + struct rogue_fwif_fwccb_cmd_freelist_gs_data cmd_free_list_gs; 1459 + /* Data for freelists reconstruction */ 1460 + struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data 1461 + cmd_freelists_reconstruction; 1462 + /* Data for context reset notification */ 1463 + struct rogue_fwif_fwccb_cmd_context_reset_data 1464 + cmd_context_reset_notification; 1465 + /* Data for updating process stats */ 1466 + struct rogue_fwif_fwccb_cmd_update_stats_data 1467 + cmd_update_stats_data; 1468 + struct rogue_fwif_fwccb_cmd_core_clk_rate_change_data 1469 + cmd_core_clk_rate_change; 1470 + struct rogue_fwif_fwccb_cmd_fw_pagefault_data cmd_fw_pagefault; 1471 + } cmd_data __aligned(8); 1472 + } __aligned(8); 1473 + 1474 + PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_fwif_fwccb_cmd); 1475 + 1476 + /* 1477 + ****************************************************************************** 1478 + * Workload estimation Firmware CCB command structure for ROGUE 1479 + ****************************************************************************** 1480 + */ 1481 + struct rogue_fwif_workest_fwccb_cmd { 1482 + /* Index for return data array */ 1483 + u16 return_data_index; 1484 + /* The cycles the workload took on the hardware */ 1485 + u32 cycles_taken; 1486 + }; 1487 + 1488 + /* 1489 + ****************************************************************************** 1490 + * Client CCB commands for ROGUE 1491 + ****************************************************************************** 1492 + */ 1493 + 1494 + /* 1495 + * Required memory alignment for 64-bit variables accessible by Meta 1496 + * (The gcc meta aligns 64-bit variables to 64-bit; therefore, memory shared 1497 + * 
between the host and meta that contains 64-bit variables has to maintain
 * this alignment)
 */
#define ROGUE_FWIF_FWALLOC_ALIGN sizeof(u64)

/* Set on DM task command types; cleared on generic (fence/update) commands. */
#define ROGUE_CCB_TYPE_TASK BIT(15)
/* Round @size up to the 64-bit FW allocation alignment above. */
#define ROGUE_CCB_FWALLOC_ALIGN(size) \
	(((size) + (ROGUE_FWIF_FWALLOC_ALIGN - 1)) & \
	 ~(ROGUE_FWIF_FWALLOC_ALIGN - 1))

/* Client CCB command types: per-DM task commands (task bit set). */
#define ROGUE_FWIF_CCB_CMD_TYPE_GEOM \
	(201U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_3D \
	(202U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
#define ROGUE_FWIF_CCB_CMD_TYPE_FRAG \
	(203U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
#define ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR \
	(204U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
#define ROGUE_FWIF_CCB_CMD_TYPE_CDM \
	(205U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_TDM \
	(206U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
#define ROGUE_FWIF_CCB_CMD_TYPE_FBSC_INVALIDATE \
	(207U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_2D \
	(208U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
#define ROGUE_FWIF_CCB_CMD_TYPE_PRE_TIMESTAMP \
	(209U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
#define ROGUE_FWIF_CCB_CMD_TYPE_NULL \
	(210U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
#define ROGUE_FWIF_CCB_CMD_TYPE_ABORT \
	(211U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)

/* Leave a gap between CCB specific commands and generic commands */
#define ROGUE_FWIF_CCB_CMD_TYPE_FENCE (212U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
#define ROGUE_FWIF_CCB_CMD_TYPE_UPDATE (213U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
#define ROGUE_FWIF_CCB_CMD_TYPE_RMW_UPDATE \
	(214U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
#define ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR (215U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
#define ROGUE_FWIF_CCB_CMD_TYPE_PRIORITY (216U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
/*
 * Pre and Post timestamp commands are supposed to sandwich the DM cmd. The
 * padding code with the CCB wrap upsets the FW if we don't have the task type
 * bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types.
 */
#define ROGUE_FWIF_CCB_CMD_TYPE_POST_TIMESTAMP \
	(217U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
#define ROGUE_FWIF_CCB_CMD_TYPE_UNFENCED_UPDATE \
	(218U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
#define ROGUE_FWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE \
	(219U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)

#define ROGUE_FWIF_CCB_CMD_TYPE_PADDING (221U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)

/* Workload-estimation data attached to a kicked client CCB command. */
struct rogue_fwif_workest_kick_data {
	/* Index for the KM Workload estimation return data array */
	u16 return_data_index __aligned(8);
	/* Predicted time taken to do the work in cycles */
	u32 cycles_prediction __aligned(8);
	/* Deadline for the workload */
	aligned_u64 deadline;
};

/* Header preceding every command placed in a client CCB. */
struct rogue_fwif_ccb_cmd_header {
	/* One of the ROGUE_FWIF_CCB_CMD_TYPE_* values above. */
	u32 cmd_type;
	/* Size of the command payload following this header, in bytes. */
	u32 cmd_size;
	/*
	 * external job reference - provided by client and used in debug for
	 * tracking submitted work
	 */
	u32 ext_job_ref;
	/*
	 * internal job reference - generated by services and used in debug for
	 * tracking submitted work
	 */
	u32 int_job_ref;
	/* Workload Estimation - Workload Estimation Data */
	struct rogue_fwif_workest_kick_data work_est_kick_data __aligned(8);
};

/*
 ******************************************************************************
 * Client CCB commands which are only required by the kernel
 ******************************************************************************
 */
struct rogue_fwif_cmd_priority {
	/* New context priority requested via a PRIORITY CCB command. */
	s32 priority;
};

/*
 ******************************************************************************
 * Signature and Checksums Buffer
 ******************************************************************************
 */
struct rogue_fwif_sigbuf_ctl {
	/* Ptr to Signature Buffer memory */
	u32 buffer_fw_addr;
	/* Amount of space left for storing regs in the buffer */
	u32 left_size_in_regs;
} __aligned(8);

struct rogue_fwif_counter_dump_ctl {
	/* Ptr to counter dump buffer */
	u32 buffer_fw_addr;
	/* Amount of space for storing in the buffer */
	u32 size_in_dwords;
} __aligned(8);

struct rogue_fwif_firmware_gcov_ctl {
	/* Ptr to firmware gcov buffer */
	u32 buffer_fw_addr;
	/* Amount of space for storing in the buffer */
	u32 size;
} __aligned(8);

/*
 *****************************************************************************
 * ROGUE Compatibility checks
 *****************************************************************************
 */

/*
 * WARNING: Whenever the layout of ROGUE_FWIF_COMPCHECKS_BVNC changes, the
 * following define should be increased by 1 to indicate to the compatibility
 * logic that layout has changed.
 */
#define ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION 3

struct rogue_fwif_compchecks_bvnc {
	/* WARNING: This field must be defined as first one in this structure */
	u32 layout_version;
	/* Packed B.V.N.C identifier (encoding defined by the FW interface). */
	aligned_u64 bvnc;
} __aligned(8);

struct rogue_fwif_init_options {
	u8 os_count_support;
	/* Explicit pad keeps the 8-byte aligned layout stable. */
	u8 padding[7];
} __aligned(8);

/* Declare and initialise a BVNC compcheck with the current layout version. */
#define ROGUE_FWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \
	struct rogue_fwif_compchecks_bvnc(name) = { \
		ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION, \
		0, \
	}

/* Reset an existing BVNC compcheck to the current layout version. */
static inline void rogue_fwif_compchecks_bvnc_init(struct rogue_fwif_compchecks_bvnc *compchecks)
{
	compchecks->layout_version = ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION;
	compchecks->bvnc = 0;
}

struct rogue_fwif_compchecks {
	/* hardware BVNC (from the ROGUE registers) */
	struct rogue_fwif_compchecks_bvnc hw_bvnc;
	/* firmware BVNC */
	struct rogue_fwif_compchecks_bvnc fw_bvnc;
	/* identifier of the FW processor version */
	u32 fw_processor_version;
	/* software DDK version */
	u32 ddk_version;
	/* software DDK build no. */
	u32 ddk_build;
	/* build options bit-field */
	u32 build_options;
	/* initialisation options bit-field */
	struct rogue_fwif_init_options init_options;
	/* Information is valid */
	bool updated __aligned(4);
	u32 padding;
} __aligned(8);

/*
 ******************************************************************************
 * Updated configuration post FW data init.
1671 + ****************************************************************************** 1672 + */ 1673 + struct rogue_fwif_runtime_cfg { 1674 + /* APM latency in ms before signalling IDLE to the host */ 1675 + u32 active_pm_latency_ms; 1676 + /* Compatibility and other flags */ 1677 + u32 runtime_cfg_flags; 1678 + /* 1679 + * If set, APM latency does not reset to system default each GPU power 1680 + * transition 1681 + */ 1682 + bool active_pm_latency_persistant __aligned(4); 1683 + /* Core clock speed, currently only used to calculate timer ticks */ 1684 + u32 core_clock_speed; 1685 + /* Last number of dusts change requested by the host */ 1686 + u32 default_dusts_num_init; 1687 + /* Periodic Hardware Reset configuration values */ 1688 + u32 phr_mode; 1689 + /* New number of milliseconds C/S is allowed to last */ 1690 + u32 hcs_deadline_ms; 1691 + /* The watchdog period in microseconds */ 1692 + u32 wdg_period_us; 1693 + /* Array of priorities per OS */ 1694 + u32 osid_priority[ROGUE_FW_MAX_NUM_OS]; 1695 + /* On-demand allocated HWPerf buffer address, to be passed to the FW */ 1696 + u32 hwperf_buf_fw_addr; 1697 + 1698 + bool padding __aligned(4); 1699 + }; 1700 + 1701 + /* 1702 + ***************************************************************************** 1703 + * Control data for ROGUE 1704 + ***************************************************************************** 1705 + */ 1706 + 1707 + #define ROGUE_FWIF_HWR_DEBUG_DUMP_ALL (99999U) 1708 + 1709 + enum rogue_fwif_tpu_dm { 1710 + ROGUE_FWIF_TPU_DM_PDM = 0, 1711 + ROGUE_FWIF_TPU_DM_VDM = 1, 1712 + ROGUE_FWIF_TPU_DM_CDM = 2, 1713 + ROGUE_FWIF_TPU_DM_TDM = 3, 1714 + ROGUE_FWIF_TPU_DM_LAST 1715 + }; 1716 + 1717 + enum rogue_fwif_gpio_val_mode { 1718 + /* No GPIO validation */ 1719 + ROGUE_FWIF_GPIO_VAL_OFF = 0, 1720 + /* 1721 + * Simple test case that initiates by sending data via the GPIO and then 1722 + * sends back any data received over the GPIO 1723 + */ 1724 + ROGUE_FWIF_GPIO_VAL_GENERAL = 1, 1725 + /* 1726 
+ * More complex test case that writes and reads data across the entire 1727 + * GPIO AP address range. 1728 + */ 1729 + ROGUE_FWIF_GPIO_VAL_AP = 2, 1730 + /* Validates the GPIO Testbench. */ 1731 + ROGUE_FWIF_GPIO_VAL_TESTBENCH = 5, 1732 + /* Send and then receive each byte in the range 0-255. */ 1733 + ROGUE_FWIF_GPIO_VAL_LOOPBACK = 6, 1734 + /* Send and then receive each power-of-2 byte in the range 0-255. */ 1735 + ROGUE_FWIF_GPIO_VAL_LOOPBACK_LITE = 7, 1736 + ROGUE_FWIF_GPIO_VAL_LAST 1737 + }; 1738 + 1739 + enum fw_perf_conf { 1740 + FW_PERF_CONF_NONE = 0, 1741 + FW_PERF_CONF_ICACHE = 1, 1742 + FW_PERF_CONF_DCACHE = 2, 1743 + FW_PERF_CONF_JTLB_INSTR = 5, 1744 + FW_PERF_CONF_INSTRUCTIONS = 6 1745 + }; 1746 + 1747 + enum fw_boot_stage { 1748 + FW_BOOT_STAGE_TLB_INIT_FAILURE = -2, 1749 + FW_BOOT_STAGE_NOT_AVAILABLE = -1, 1750 + FW_BOOT_NOT_STARTED = 0, 1751 + FW_BOOT_BLDR_STARTED = 1, 1752 + FW_BOOT_CACHE_DONE, 1753 + FW_BOOT_TLB_DONE, 1754 + FW_BOOT_MAIN_STARTED, 1755 + FW_BOOT_ALIGNCHECKS_DONE, 1756 + FW_BOOT_INIT_DONE, 1757 + }; 1758 + 1759 + /* 1760 + * Kernel CCB return slot responses. Usage of bit-fields instead of bare 1761 + * integers allows FW to possibly pack-in several responses for each single kCCB 1762 + * command. 
1763 + */ 1764 + /* Command executed (return status from FW) */ 1765 + #define ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED BIT(0) 1766 + /* A cleanup was requested but resource busy */ 1767 + #define ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY BIT(1) 1768 + /* Poll failed in FW for a HW operation to complete */ 1769 + #define ROGUE_FWIF_KCCB_RTN_SLOT_POLL_FAILURE BIT(2) 1770 + /* Reset value of a kCCB return slot (set by host) */ 1771 + #define ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U 1772 + 1773 + struct rogue_fwif_connection_ctl { 1774 + /* Fw-Os connection states */ 1775 + enum rogue_fwif_connection_fw_state connection_fw_state; 1776 + enum rogue_fwif_connection_os_state connection_os_state; 1777 + u32 alive_fw_token; 1778 + u32 alive_os_token; 1779 + } __aligned(8); 1780 + 1781 + struct rogue_fwif_osinit { 1782 + /* Kernel CCB */ 1783 + u32 kernel_ccbctl_fw_addr; 1784 + u32 kernel_ccb_fw_addr; 1785 + u32 kernel_ccb_rtn_slots_fw_addr; 1786 + 1787 + /* Firmware CCB */ 1788 + u32 firmware_ccbctl_fw_addr; 1789 + u32 firmware_ccb_fw_addr; 1790 + 1791 + /* Workload Estimation Firmware CCB */ 1792 + u32 work_est_firmware_ccbctl_fw_addr; 1793 + u32 work_est_firmware_ccb_fw_addr; 1794 + 1795 + u32 rogue_fwif_hwr_info_buf_ctl_fw_addr; 1796 + 1797 + u32 hwr_debug_dump_limit; 1798 + 1799 + u32 fw_os_data_fw_addr; 1800 + 1801 + /* Compatibility checks to be populated by the Firmware */ 1802 + struct rogue_fwif_compchecks rogue_comp_checks; 1803 + } __aligned(8); 1804 + 1805 + /* BVNC Features */ 1806 + struct rogue_hwperf_bvnc_block { 1807 + /* Counter block ID, see ROGUE_HWPERF_CNTBLK_ID */ 1808 + u16 block_id; 1809 + 1810 + /* Number of counters in this block type */ 1811 + u16 num_counters; 1812 + 1813 + /* Number of blocks of this type */ 1814 + u16 num_blocks; 1815 + 1816 + u16 reserved; 1817 + }; 1818 + 1819 + #define ROGUE_HWPERF_MAX_BVNC_LEN (24) 1820 + 1821 + #define ROGUE_HWPERF_MAX_BVNC_BLOCK_LEN (16U) 1822 + 1823 + /* BVNC Features */ 1824 + struct rogue_hwperf_bvnc { 1825 
+ /* BVNC string */ 1826 + char bvnc_string[ROGUE_HWPERF_MAX_BVNC_LEN]; 1827 + /* See ROGUE_HWPERF_FEATURE_FLAGS */ 1828 + u32 bvnc_km_feature_flags; 1829 + /* Number of blocks described in aBvncBlocks */ 1830 + u16 num_bvnc_blocks; 1831 + /* Number of GPU cores present */ 1832 + u16 bvnc_gpu_cores; 1833 + /* Supported Performance Blocks for BVNC */ 1834 + struct rogue_hwperf_bvnc_block 1835 + bvnc_blocks[ROGUE_HWPERF_MAX_BVNC_BLOCK_LEN]; 1836 + }; 1837 + 1838 + PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_hwperf_bvnc); 1839 + 1840 + struct rogue_fwif_sysinit { 1841 + /* Fault read address */ 1842 + aligned_u64 fault_phys_addr; 1843 + 1844 + /* PDS execution base */ 1845 + aligned_u64 pds_exec_base; 1846 + /* UCS execution base */ 1847 + aligned_u64 usc_exec_base; 1848 + /* FBCDC bindless texture state table base */ 1849 + aligned_u64 fbcdc_state_table_base; 1850 + aligned_u64 fbcdc_large_state_table_base; 1851 + /* Texture state base */ 1852 + aligned_u64 texture_heap_base; 1853 + 1854 + /* Event filter for Firmware events */ 1855 + u64 hw_perf_filter; 1856 + 1857 + aligned_u64 slc3_fence_dev_addr; 1858 + 1859 + u32 tpu_trilinear_frac_mask[ROGUE_FWIF_TPU_DM_LAST] __aligned(8); 1860 + 1861 + /* Signature and Checksum Buffers for DMs */ 1862 + struct rogue_fwif_sigbuf_ctl sigbuf_ctl[PVR_FWIF_DM_MAX]; 1863 + 1864 + struct rogue_fwif_pdvfs_opp pdvfs_opp_info; 1865 + 1866 + struct rogue_fwif_dma_addr coremem_data_store; 1867 + 1868 + struct rogue_fwif_counter_dump_ctl counter_dump_ctl; 1869 + 1870 + u32 filter_flags; 1871 + 1872 + u32 runtime_cfg_fw_addr; 1873 + 1874 + u32 trace_buf_ctl_fw_addr; 1875 + u32 fw_sys_data_fw_addr; 1876 + 1877 + u32 gpu_util_fw_cb_ctl_fw_addr; 1878 + u32 reg_cfg_fw_addr; 1879 + u32 hwperf_ctl_fw_addr; 1880 + 1881 + u32 align_checks; 1882 + 1883 + /* Core clock speed at FW boot time */ 1884 + u32 initial_core_clock_speed; 1885 + 1886 + /* APM latency in ms before signalling IDLE to the host */ 1887 + u32 active_pm_latency_ms; 1888 + 1889 + /* Flag 
to be set by the Firmware after successful start */ 1890 + bool firmware_started __aligned(4); 1891 + 1892 + /* Host/FW Trace synchronisation Partition Marker */ 1893 + u32 marker_val; 1894 + 1895 + /* Firmware initialization complete time */ 1896 + u32 firmware_started_timestamp; 1897 + 1898 + u32 jones_disable_mask; 1899 + 1900 + /* Firmware performance counter config */ 1901 + enum fw_perf_conf firmware_perf; 1902 + 1903 + /* 1904 + * FW Pointer to memory containing core clock rate in Hz. 1905 + * Firmware (PDVFS) updates the memory when running on non primary FW 1906 + * thread to communicate to host driver. 1907 + */ 1908 + u32 core_clock_rate_fw_addr; 1909 + 1910 + enum rogue_fwif_gpio_val_mode gpio_validation_mode; 1911 + 1912 + /* Used in HWPerf for decoding BVNC Features */ 1913 + struct rogue_hwperf_bvnc bvnc_km_feature_flags; 1914 + 1915 + /* Value to write into ROGUE_CR_TFBC_COMPRESSION_CONTROL */ 1916 + u32 tfbc_compression_control; 1917 + } __aligned(8); 1918 + 1919 + /* 1920 + ***************************************************************************** 1921 + * Timer correlation shared data and defines 1922 + ***************************************************************************** 1923 + */ 1924 + 1925 + struct rogue_fwif_time_corr { 1926 + aligned_u64 os_timestamp; 1927 + aligned_u64 os_mono_timestamp; 1928 + aligned_u64 cr_timestamp; 1929 + 1930 + /* 1931 + * Utility variable used to convert CR timer deltas to OS timer deltas 1932 + * (nS), where the deltas are relative to the timestamps above: 1933 + * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below 1934 + */ 1935 + aligned_u64 cr_delta_to_os_delta_kns; 1936 + 1937 + u32 core_clock_speed; 1938 + u32 reserved; 1939 + } __aligned(8); 1940 + 1941 + /* 1942 + * The following macros are used to help converting FW timestamps to the Host 1943 + * time domain. 
On the FW the ROGUE_CR_TIMER counter is used to keep track of 1944 + * time; it increments by 1 every 256 GPU clock ticks, so the general 1945 + * formula to perform the conversion is: 1946 + * 1947 + * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS, 1948 + * otherwise if (scale == 10^6) then deltaOS is in uS ] 1949 + * 1950 + * deltaCR * 256 256 * scale 1951 + * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ] 1952 + * GPUclockspeed GPUclockspeed 1953 + * 1954 + * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20) 1955 + * to get some better accuracy and to avoid returning 0 in the integer 1956 + * division 256000000/GPUfreq if GPUfreq is greater than 256MHz. 1957 + * This is the same as keeping K as a decimal number. 1958 + * 1959 + * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies 1960 + * (deltaCR * K is more or less a constant), and it's relative to the base 1961 + * OS timestamp sampled as a part of the timer correlation data. 1962 + * This base is refreshed on GPU power-on, DVFS transition and periodic 1963 + * frequency calibration (executed every few seconds if the FW is doing 1964 + * some work), so as long as the GPU is doing something and one of these 1965 + * events is triggered then deltaCR * K will not overflow and deltaOS will be 1966 + * correct. 
1967 + */ 1968 + 1969 + #define ROGUE_FWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20) 1970 + 1971 + #define ROGUE_FWIF_GET_DELTA_OSTIME_NS(delta_cr, k) \ 1972 + (((delta_cr) * (k)) >> ROGUE_FWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT) 1973 + 1974 + /* 1975 + ****************************************************************************** 1976 + * GPU Utilisation 1977 + ****************************************************************************** 1978 + */ 1979 + 1980 + /* See rogue_common.h for a list of GPU states */ 1981 + #define ROGUE_FWIF_GPU_UTIL_TIME_MASK \ 1982 + (0xFFFFFFFFFFFFFFFFull & ~ROGUE_FWIF_GPU_UTIL_STATE_MASK) 1983 + 1984 + #define ROGUE_FWIF_GPU_UTIL_GET_TIME(word) \ 1985 + ((word)(&ROGUE_FWIF_GPU_UTIL_TIME_MASK)) 1986 + #define ROGUE_FWIF_GPU_UTIL_GET_STATE(word) \ 1987 + ((word)(&ROGUE_FWIF_GPU_UTIL_STATE_MASK)) 1988 + 1989 + /* 1990 + * The OS timestamps computed by the FW are approximations of the real time, 1991 + * which means they could be slightly behind or ahead the real timer on the 1992 + * Host. In some cases we can perform subtractions between FW approximated 1993 + * timestamps and real OS timestamps, so we need a form of protection against 1994 + * negative results if for instance the FW one is a bit ahead of time. 1995 + */ 1996 + #define ROGUE_FWIF_GPU_UTIL_GET_PERIOD(newtime, oldtime) \ 1997 + (((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U) 1998 + 1999 + #define ROGUE_FWIF_GPU_UTIL_MAKE_WORD(time, state) \ 2000 + (ROGUE_FWIF_GPU_UTIL_GET_TIME(time) | \ 2001 + ROGUE_FWIF_GPU_UTIL_GET_STATE(state)) 2002 + 2003 + /* 2004 + * The timer correlation array must be big enough to ensure old entries won't be 2005 + * overwritten before all the HWPerf events linked to those entries are 2006 + * processed by the MISR. The update frequency of this array depends on how fast 2007 + * the system can change state (basically how small the APM latency is) and 2008 + * perform DVFS transitions. 
2009 + * 2010 + * The minimum size is 2 (not 1) to avoid race conditions between the FW reading 2011 + * an entry while the Host is updating it. With 2 entries in the worst case the 2012 + * FW will read old data, which is still quite ok if the Host is updating the 2013 + * timer correlation at that time. 2014 + */ 2015 + #define ROGUE_FWIF_TIME_CORR_ARRAY_SIZE 256U 2016 + #define ROGUE_FWIF_TIME_CORR_CURR_INDEX(seqcount) \ 2017 + ((seqcount) % ROGUE_FWIF_TIME_CORR_ARRAY_SIZE) 2018 + 2019 + /* Make sure the timer correlation array size is a power of 2 */ 2020 + static_assert((ROGUE_FWIF_TIME_CORR_ARRAY_SIZE & 2021 + (ROGUE_FWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U, 2022 + "ROGUE_FWIF_TIME_CORR_ARRAY_SIZE must be a power of two"); 2023 + 2024 + struct rogue_fwif_gpu_util_fwcb { 2025 + struct rogue_fwif_time_corr time_corr[ROGUE_FWIF_TIME_CORR_ARRAY_SIZE]; 2026 + u32 time_corr_seq_count; 2027 + 2028 + /* Compatibility and other flags */ 2029 + u32 gpu_util_flags; 2030 + 2031 + /* Last GPU state + OS time of the last state update */ 2032 + aligned_u64 last_word; 2033 + 2034 + /* Counters for the amount of time the GPU was active/idle/blocked */ 2035 + aligned_u64 stats_counters[PVR_FWIF_GPU_UTIL_STATE_NUM]; 2036 + } __aligned(8); 2037 + 2038 + struct rogue_fwif_rta_ctl { 2039 + /* Render number */ 2040 + u32 render_target_index; 2041 + /* index in RTA */ 2042 + u32 current_render_target; 2043 + /* total active RTs */ 2044 + u32 active_render_targets; 2045 + /* total active RTs from the first TA kick, for OOM */ 2046 + u32 cumul_active_render_targets; 2047 + /* Array of valid RT indices */ 2048 + u32 valid_render_targets_fw_addr; 2049 + /* Array of number of occurred partial renders per render target */ 2050 + u32 rta_num_partial_renders_fw_addr; 2051 + /* Number of render targets in the array */ 2052 + u32 max_rts; 2053 + /* Compatibility and other flags */ 2054 + u32 rta_ctl_flags; 2055 + } __aligned(8); 2056 + 2057 + struct rogue_fwif_freelist { 2058 + aligned_u64 
freelist_dev_addr; 2059 + aligned_u64 current_dev_addr; 2060 + u32 current_stack_top; 2061 + u32 max_pages; 2062 + u32 grow_pages; 2063 + /* HW pages */ 2064 + u32 current_pages; 2065 + u32 allocated_page_count; 2066 + u32 allocated_mmu_page_count; 2067 + u32 freelist_id; 2068 + 2069 + bool grow_pending __aligned(4); 2070 + /* Pages that should be used only when OOM is reached */ 2071 + u32 ready_pages; 2072 + /* Compatibility and other flags */ 2073 + u32 freelist_flags; 2074 + /* PM Global PB on which Freelist is loaded */ 2075 + u32 pm_global_pb; 2076 + u32 padding; 2077 + } __aligned(8); 2078 + 2079 + /* 2080 + ****************************************************************************** 2081 + * HWRTData 2082 + ****************************************************************************** 2083 + */ 2084 + 2085 + /* HWRTData flags */ 2086 + /* Deprecated flags 1:0 */ 2087 + #define HWRTDATA_HAS_LAST_GEOM BIT(2) 2088 + #define HWRTDATA_PARTIAL_RENDERED BIT(3) 2089 + #define HWRTDATA_DISABLE_TILE_REORDERING BIT(4) 2090 + #define HWRTDATA_NEED_BRN65101_BLIT BIT(5) 2091 + #define HWRTDATA_FIRST_BRN65101_STRIP BIT(6) 2092 + #define HWRTDATA_NEED_BRN67182_2ND_RENDER BIT(7) 2093 + 2094 + enum rogue_fwif_rtdata_state { 2095 + ROGUE_FWIF_RTDATA_STATE_NONE = 0, 2096 + ROGUE_FWIF_RTDATA_STATE_KICK_GEOM, 2097 + ROGUE_FWIF_RTDATA_STATE_KICK_GEOM_FIRST, 2098 + ROGUE_FWIF_RTDATA_STATE_GEOM_FINISHED, 2099 + ROGUE_FWIF_RTDATA_STATE_KICK_FRAG, 2100 + ROGUE_FWIF_RTDATA_STATE_FRAG_FINISHED, 2101 + ROGUE_FWIF_RTDATA_STATE_FRAG_CONTEXT_STORED, 2102 + ROGUE_FWIF_RTDATA_STATE_GEOM_OUTOFMEM, 2103 + ROGUE_FWIF_RTDATA_STATE_PARTIALRENDERFINISHED, 2104 + /* 2105 + * In case of HWR, we can't set the RTDATA state to NONE, as this will 2106 + * cause any TA to become a first TA. 
	 * To ensure all related TA's are
	 * skipped, we use the HWR state
	 */
	ROGUE_FWIF_RTDATA_STATE_HWR,
	ROGUE_FWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU
};

struct rogue_fwif_hwrtdata_common {
	bool geom_caches_need_zeroing __aligned(4);

	u32 screen_pixel_max;
	aligned_u64 multi_sample_ctl;
	u64 flipped_multi_sample_ctl;
	u32 tpc_stride;
	u32 tpc_size;
	u32 te_screen;
	u32 mtile_stride;
	u32 teaa;
	u32 te_mtile1;
	u32 te_mtile2;
	u32 isp_merge_lower_x;
	u32 isp_merge_lower_y;
	u32 isp_merge_upper_x;
	u32 isp_merge_upper_y;
	u32 isp_merge_scale_x;
	u32 isp_merge_scale_y;
	u32 rgn_header_size;
	u32 isp_mtile_size;
	u32 padding;
} __aligned(8);

/* Per-render-target hardware data shared with the firmware. */
struct rogue_fwif_hwrtdata {
	/* MList Data Store */
	aligned_u64 pm_mlist_dev_addr;

	aligned_u64 vce_cat_base[4];
	aligned_u64 vce_last_cat_base[4];
	aligned_u64 te_cat_base[4];
	aligned_u64 te_last_cat_base[4];
	aligned_u64 alist_cat_base;
	aligned_u64 alist_last_cat_base;

	aligned_u64 pm_alist_stack_pointer;
	u32 pm_mlist_stack_pointer;

	u32 hwrt_data_common_fw_addr;

	/* Combination of the HWRTDATA_* flag bits above. */
	u32 hwrt_data_flags;
	enum rogue_fwif_rtdata_state state;

	u32 freelists_fw_addr[MAX_FREELISTS_SIZE] __aligned(8);
	u32 freelist_hwr_snapshot[MAX_FREELISTS_SIZE];

	aligned_u64 vheap_table_dev_addr;

	struct rogue_fwif_rta_ctl rta_ctl;

	aligned_u64 tail_ptrs_dev_addr;
	aligned_u64 macrotile_array_dev_addr;
	aligned_u64 rgn_header_dev_addr;
	aligned_u64 rtc_dev_addr;

	u32 owner_geom_not_used_by_host __aligned(8);

	bool geom_caches_need_zeroing __aligned(4);

	/* Cache-line aligned so FW cleanup writes don't share a line. */
	struct rogue_fwif_cleanup_ctl cleanup_state __aligned(64);
} __aligned(8);

/*
 ******************************************************************************
 * Sync checkpoints
 ******************************************************************************
 */

#define PVR_SYNC_CHECKPOINT_UNDEF 0x000
#define PVR_SYNC_CHECKPOINT_ACTIVE 0xac1 /* Checkpoint has not signaled. */
#define PVR_SYNC_CHECKPOINT_SIGNALED 0x519 /* Checkpoint has signaled. */
#define PVR_SYNC_CHECKPOINT_ERRORED 0xeff /* Checkpoint has been errored. */

#include "pvr_rogue_fwif_check.h"

#endif /* PVR_ROGUE_FWIF_H */
+493
drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_ROGUE_FWIF_CHECK_H
#define PVR_ROGUE_FWIF_CHECK_H

#include <linux/build_bug.h>

/*
 * Build-time check that @member of @type sits at byte @offset. These checks
 * pin the host/firmware shared-memory layout against accidental changes.
 */
#define OFFSET_CHECK(type, member, offset) \
	static_assert(offsetof(type, member) == (offset), \
		      "offsetof(" #type ", " #member ") incorrect")

/* Build-time check of a shared structure's total size. */
#define SIZE_CHECK(type, size) \
	static_assert(sizeof(type) == (size), #type " is incorrect size")

OFFSET_CHECK(struct rogue_fwif_file_info_buf, path, 0);
OFFSET_CHECK(struct rogue_fwif_file_info_buf, info, 200);
OFFSET_CHECK(struct rogue_fwif_file_info_buf, line_num, 400);
SIZE_CHECK(struct rogue_fwif_file_info_buf, 408);

OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_pointer, 0);
OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_buffer_fw_addr, 4);
OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_buffer, 8);
OFFSET_CHECK(struct rogue_fwif_tracebuf_space, assert_buf, 16);
SIZE_CHECK(struct rogue_fwif_tracebuf_space, 424);

OFFSET_CHECK(struct rogue_fwif_tracebuf, log_type, 0);
OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf, 8);
OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf_size_in_dwords, 856);
OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf_flags, 860);
SIZE_CHECK(struct rogue_fwif_tracebuf, 864);

OFFSET_CHECK(struct rogue_fw_fault_info, cr_timer, 0);
OFFSET_CHECK(struct rogue_fw_fault_info, os_timer, 8);
OFFSET_CHECK(struct rogue_fw_fault_info, data, 16);
OFFSET_CHECK(struct rogue_fw_fault_info, reserved, 20);
OFFSET_CHECK(struct rogue_fw_fault_info, fault_buf, 24);
SIZE_CHECK(struct rogue_fw_fault_info, 432);

OFFSET_CHECK(struct rogue_fwif_sysdata, config_flags, 0);
OFFSET_CHECK(struct rogue_fwif_sysdata, config_flags_ext, 4);
OFFSET_CHECK(struct rogue_fwif_sysdata, pow_state, 8);
OFFSET_CHECK(struct
rogue_fwif_sysdata, hw_perf_ridx, 12); 44 + OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_widx, 16); 45 + OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_wrap_count, 20); 46 + OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_size, 24); 47 + OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_drop_count, 28); 48 + OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_ut, 32); 49 + OFFSET_CHECK(struct rogue_fwif_sysdata, first_drop_ordinal, 36); 50 + OFFSET_CHECK(struct rogue_fwif_sysdata, last_drop_ordinal, 40); 51 + OFFSET_CHECK(struct rogue_fwif_sysdata, os_runtime_flags_mirror, 44); 52 + OFFSET_CHECK(struct rogue_fwif_sysdata, fault_info, 80); 53 + OFFSET_CHECK(struct rogue_fwif_sysdata, fw_faults, 3536); 54 + OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_addr, 3540); 55 + OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_mask, 3548); 56 + OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_count, 3556); 57 + OFFSET_CHECK(struct rogue_fwif_sysdata, start_idle_time, 3568); 58 + OFFSET_CHECK(struct rogue_fwif_sysdata, hwr_state_flags, 3576); 59 + OFFSET_CHECK(struct rogue_fwif_sysdata, hwr_recovery_flags, 3580); 60 + OFFSET_CHECK(struct rogue_fwif_sysdata, fw_sys_data_flags, 3616); 61 + OFFSET_CHECK(struct rogue_fwif_sysdata, mc_config, 3620); 62 + SIZE_CHECK(struct rogue_fwif_sysdata, 3624); 63 + 64 + OFFSET_CHECK(struct rogue_fwif_slr_entry, timestamp, 0); 65 + OFFSET_CHECK(struct rogue_fwif_slr_entry, fw_ctx_addr, 8); 66 + OFFSET_CHECK(struct rogue_fwif_slr_entry, num_ufos, 12); 67 + OFFSET_CHECK(struct rogue_fwif_slr_entry, ccb_name, 16); 68 + SIZE_CHECK(struct rogue_fwif_slr_entry, 48); 69 + 70 + OFFSET_CHECK(struct rogue_fwif_osdata, fw_os_config_flags, 0); 71 + OFFSET_CHECK(struct rogue_fwif_osdata, fw_sync_check_mark, 4); 72 + OFFSET_CHECK(struct rogue_fwif_osdata, host_sync_check_mark, 8); 73 + OFFSET_CHECK(struct rogue_fwif_osdata, forced_updates_requested, 12); 74 + OFFSET_CHECK(struct rogue_fwif_osdata, slr_log_wp, 16); 75 + OFFSET_CHECK(struct rogue_fwif_osdata, 
slr_log_first, 24); 76 + OFFSET_CHECK(struct rogue_fwif_osdata, slr_log, 72); 77 + OFFSET_CHECK(struct rogue_fwif_osdata, last_forced_update_time, 552); 78 + OFFSET_CHECK(struct rogue_fwif_osdata, interrupt_count, 560); 79 + OFFSET_CHECK(struct rogue_fwif_osdata, kccb_cmds_executed, 568); 80 + OFFSET_CHECK(struct rogue_fwif_osdata, power_sync_fw_addr, 572); 81 + OFFSET_CHECK(struct rogue_fwif_osdata, fw_os_data_flags, 576); 82 + SIZE_CHECK(struct rogue_fwif_osdata, 584); 83 + 84 + OFFSET_CHECK(struct rogue_bifinfo, bif_req_status, 0); 85 + OFFSET_CHECK(struct rogue_bifinfo, bif_mmu_status, 8); 86 + OFFSET_CHECK(struct rogue_bifinfo, pc_address, 16); 87 + OFFSET_CHECK(struct rogue_bifinfo, reserved, 24); 88 + SIZE_CHECK(struct rogue_bifinfo, 32); 89 + 90 + OFFSET_CHECK(struct rogue_eccinfo, fault_gpu, 0); 91 + SIZE_CHECK(struct rogue_eccinfo, 4); 92 + 93 + OFFSET_CHECK(struct rogue_mmuinfo, mmu_status, 0); 94 + OFFSET_CHECK(struct rogue_mmuinfo, pc_address, 16); 95 + OFFSET_CHECK(struct rogue_mmuinfo, reserved, 24); 96 + SIZE_CHECK(struct rogue_mmuinfo, 32); 97 + 98 + OFFSET_CHECK(struct rogue_pollinfo, thread_num, 0); 99 + OFFSET_CHECK(struct rogue_pollinfo, cr_poll_addr, 4); 100 + OFFSET_CHECK(struct rogue_pollinfo, cr_poll_mask, 8); 101 + OFFSET_CHECK(struct rogue_pollinfo, cr_poll_last_value, 12); 102 + OFFSET_CHECK(struct rogue_pollinfo, reserved, 16); 103 + SIZE_CHECK(struct rogue_pollinfo, 24); 104 + 105 + OFFSET_CHECK(struct rogue_tlbinfo, bad_addr, 0); 106 + OFFSET_CHECK(struct rogue_tlbinfo, entry_lo, 4); 107 + SIZE_CHECK(struct rogue_tlbinfo, 8); 108 + 109 + OFFSET_CHECK(struct rogue_hwrinfo, hwr_data, 0); 110 + OFFSET_CHECK(struct rogue_hwrinfo, cr_timer, 32); 111 + OFFSET_CHECK(struct rogue_hwrinfo, os_timer, 40); 112 + OFFSET_CHECK(struct rogue_hwrinfo, frame_num, 48); 113 + OFFSET_CHECK(struct rogue_hwrinfo, pid, 52); 114 + OFFSET_CHECK(struct rogue_hwrinfo, active_hwrt_data, 56); 115 + OFFSET_CHECK(struct rogue_hwrinfo, hwr_number, 60); 116 + 
OFFSET_CHECK(struct rogue_hwrinfo, event_status, 64); 117 + OFFSET_CHECK(struct rogue_hwrinfo, hwr_recovery_flags, 68); 118 + OFFSET_CHECK(struct rogue_hwrinfo, hwr_type, 72); 119 + OFFSET_CHECK(struct rogue_hwrinfo, dm, 76); 120 + OFFSET_CHECK(struct rogue_hwrinfo, core_id, 80); 121 + OFFSET_CHECK(struct rogue_hwrinfo, cr_time_of_kick, 88); 122 + OFFSET_CHECK(struct rogue_hwrinfo, cr_time_hw_reset_start, 96); 123 + OFFSET_CHECK(struct rogue_hwrinfo, cr_time_hw_reset_finish, 104); 124 + OFFSET_CHECK(struct rogue_hwrinfo, cr_time_freelist_ready, 112); 125 + OFFSET_CHECK(struct rogue_hwrinfo, reserved, 120); 126 + SIZE_CHECK(struct rogue_hwrinfo, 136); 127 + 128 + OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_info, 0); 129 + OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_counter, 2176); 130 + OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, write_index, 2180); 131 + OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, dd_req_count, 2184); 132 + OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_info_buf_flags, 2188); 133 + OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_locked_up_count, 2192); 134 + OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_overran_count, 2228); 135 + OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_recovered_count, 2264); 136 + OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_false_detect_count, 2300); 137 + SIZE_CHECK(struct rogue_fwif_hwrinfobuf, 2336); 138 + 139 + OFFSET_CHECK(struct rogue_fwif_fwmemcontext, pc_dev_paddr, 0); 140 + OFFSET_CHECK(struct rogue_fwif_fwmemcontext, page_cat_base_reg_set, 8); 141 + OFFSET_CHECK(struct rogue_fwif_fwmemcontext, breakpoint_addr, 12); 142 + OFFSET_CHECK(struct rogue_fwif_fwmemcontext, bp_handler_addr, 16); 143 + OFFSET_CHECK(struct rogue_fwif_fwmemcontext, breakpoint_ctl, 20); 144 + OFFSET_CHECK(struct rogue_fwif_fwmemcontext, fw_mem_ctx_flags, 24); 145 + SIZE_CHECK(struct rogue_fwif_fwmemcontext, 32); 146 + 147 + OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vdm_call_stack_pointer, 0); 148 + 
/*
 * Build-time layout checks for the firmware interface (FWIF) structures.
 *
 * Each OFFSET_CHECK() pins the byte offset of one member and each
 * SIZE_CHECK() pins the total size of a structure shared with the GPU
 * firmware, so that any kernel-side change that would break the firmware
 * ABI fails at compile time rather than at runtime.
 */
OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vdm_call_stack_pointer_init, 8);
OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vbs_so_prim, 16);
OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_current_idx, 32);
SIZE_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, 40);

OFFSET_CHECK(struct rogue_fwif_geom_ctx_state, geom_core, 0);
SIZE_CHECK(struct rogue_fwif_geom_ctx_state, 160);

OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_pm_deallocated_mask_status, 0);
OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_dm_pds_mtilefree_status, 4);
OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, ctx_state_flags, 8);
OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_isp_store, 12);
SIZE_CHECK(struct rogue_fwif_frag_ctx_state, 16);

OFFSET_CHECK(struct rogue_fwif_compute_ctx_state, ctx_state_flags, 0);
SIZE_CHECK(struct rogue_fwif_compute_ctx_state, 4);

OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccbctl_fw_addr, 0);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccb_fw_addr, 4);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccb_meta_dma_addr, 8);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, context_state_addr, 24);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, fw_com_ctx_flags, 28);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, priority, 32);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, priority_seq_num, 36);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, rf_cmd_addr, 40);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_pending, 44);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_stores, 48);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_out_of_memory, 52);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_partial_renders, 56);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, dm, 60);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, wait_signal_address, 64);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, wait_signal_node, 72);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, buf_stalled_node, 80);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, cbuf_queue_ctrl_addr, 88);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, robustness_address, 96);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, max_deadline_ms, 104);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, read_offset_needs_reset, 108);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, waiting_node, 112);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, run_node, 120);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, last_failed_ufo, 128);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, fw_mem_context_fw_addr, 136);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, server_common_context_id, 140);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, pid, 144);
OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, geom_oom_disabled, 148);
SIZE_CHECK(struct rogue_fwif_fwcommoncontext, 152);

/*
 * Note: read_offset sits at offset 128 so that the read and write offsets
 * live in different cache lines (see the padding member at offset 4).
 */
OFFSET_CHECK(struct rogue_fwif_ccb_ctl, write_offset, 0);
OFFSET_CHECK(struct rogue_fwif_ccb_ctl, padding, 4);
OFFSET_CHECK(struct rogue_fwif_ccb_ctl, read_offset, 128);
OFFSET_CHECK(struct rogue_fwif_ccb_ctl, wrap_mask, 132);
OFFSET_CHECK(struct rogue_fwif_ccb_ctl, cmd_size, 136);
OFFSET_CHECK(struct rogue_fwif_ccb_ctl, padding2, 140);
SIZE_CHECK(struct rogue_fwif_ccb_ctl, 144);

OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, context_fw_addr, 0);
OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, client_woff_update, 4);
OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, client_wrap_mask_update, 8);
OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, num_cleanup_ctl, 12);
OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, cleanup_ctl_fw_addr, 16);
OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, work_est_cmd_header_offset, 28);
SIZE_CHECK(struct rogue_fwif_kccb_cmd_kick_data, 32);

OFFSET_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, geom_cmd_kick_data, 0);
OFFSET_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, frag_cmd_kick_data, 32);
SIZE_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, 64);

OFFSET_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, context_fw_addr, 0);
OFFSET_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, ccb_fence_offset, 4);
SIZE_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, 8);

OFFSET_CHECK(struct rogue_fwif_cleanup_request, cleanup_type, 0);
OFFSET_CHECK(struct rogue_fwif_cleanup_request, cleanup_data, 4);
SIZE_CHECK(struct rogue_fwif_cleanup_request, 8);

OFFSET_CHECK(struct rogue_fwif_power_request, pow_type, 0);
OFFSET_CHECK(struct rogue_fwif_power_request, power_req_data, 4);
SIZE_CHECK(struct rogue_fwif_power_request, 8);

OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, context_fw_addr, 0);
OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, inval, 4);
OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, dm_context, 8);
OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, address, 16);
OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, size, 24);
SIZE_CHECK(struct rogue_fwif_slcflushinvaldata, 32);

OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl, opcode, 0);
OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl, mask, 8);
SIZE_CHECK(struct rogue_fwif_hwperf_ctrl, 16);

OFFSET_CHECK(struct rogue_fwif_hwperf_config_enable_blks, num_blocks, 0);
OFFSET_CHECK(struct rogue_fwif_hwperf_config_enable_blks, block_configs_fw_addr, 4);
SIZE_CHECK(struct rogue_fwif_hwperf_config_enable_blks, 8);

OFFSET_CHECK(struct rogue_fwif_hwperf_config_da_blks, num_blocks, 0);
OFFSET_CHECK(struct rogue_fwif_hwperf_config_da_blks, block_configs_fw_addr, 4);
SIZE_CHECK(struct rogue_fwif_hwperf_config_da_blks, 8);

OFFSET_CHECK(struct rogue_fwif_coreclkspeedchange_data, new_clock_speed, 0);
SIZE_CHECK(struct rogue_fwif_coreclkspeedchange_data, 4);

OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, enable, 0);
OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, num_blocks, 4);
OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, block_ids, 8);
SIZE_CHECK(struct rogue_fwif_hwperf_ctrl_blks, 40);

OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, custom_block, 0);
OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, num_counters, 2);
OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, custom_counter_ids_fw_addr, 4);
SIZE_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, 8);

OFFSET_CHECK(struct rogue_fwif_zsbuffer_backing_data, zs_buffer_fw_addr, 0);
OFFSET_CHECK(struct rogue_fwif_zsbuffer_backing_data, done, 4);
SIZE_CHECK(struct rogue_fwif_zsbuffer_backing_data, 8);

OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, freelist_fw_addr, 0);
OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, delta_pages, 4);
OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, new_pages, 8);
OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, ready_pages, 12);
SIZE_CHECK(struct rogue_fwif_freelist_gs_data, 16);

OFFSET_CHECK(struct rogue_fwif_freelists_reconstruction_data, freelist_count, 0);
OFFSET_CHECK(struct rogue_fwif_freelists_reconstruction_data, freelist_ids, 4);
SIZE_CHECK(struct rogue_fwif_freelists_reconstruction_data, 76);

OFFSET_CHECK(struct rogue_fwif_write_offset_update_data, context_fw_addr, 0);
SIZE_CHECK(struct rogue_fwif_write_offset_update_data, 8);

OFFSET_CHECK(struct rogue_fwif_kccb_cmd, cmd_type, 0);
OFFSET_CHECK(struct rogue_fwif_kccb_cmd, kccb_flags, 4);
OFFSET_CHECK(struct rogue_fwif_kccb_cmd, cmd_data, 8);
SIZE_CHECK(struct rogue_fwif_kccb_cmd, 88);

OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, server_common_context_id, 0);
OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, reset_reason, 4);
OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, dm, 8);
OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, reset_job_ref, 12);
OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, flags, 16);
OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, pc_address, 24);
OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, fault_address, 32);
SIZE_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, 40);

OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_fw_pagefault_data, fw_fault_addr, 0);
SIZE_CHECK(struct rogue_fwif_fwccb_cmd_fw_pagefault_data, 8);

OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, cmd_type, 0);
OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, fwccb_flags, 4);
OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, cmd_data, 8);
SIZE_CHECK(struct rogue_fwif_fwccb_cmd, 88);

OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, cmd_type, 0);
OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, cmd_size, 4);
OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, ext_job_ref, 8);
OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, int_job_ref, 12);
OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, work_est_kick_data, 16);
SIZE_CHECK(struct rogue_fwif_ccb_cmd_header, 40);

OFFSET_CHECK(struct rogue_fwif_runtime_cfg, active_pm_latency_ms, 0);
OFFSET_CHECK(struct rogue_fwif_runtime_cfg, runtime_cfg_flags, 4);
OFFSET_CHECK(struct rogue_fwif_runtime_cfg, active_pm_latency_persistant, 8);
OFFSET_CHECK(struct rogue_fwif_runtime_cfg, core_clock_speed, 12);
OFFSET_CHECK(struct rogue_fwif_runtime_cfg, default_dusts_num_init, 16);
OFFSET_CHECK(struct rogue_fwif_runtime_cfg, phr_mode, 20);
OFFSET_CHECK(struct rogue_fwif_runtime_cfg, hcs_deadline_ms, 24);
OFFSET_CHECK(struct rogue_fwif_runtime_cfg, wdg_period_us, 28);
OFFSET_CHECK(struct rogue_fwif_runtime_cfg, osid_priority, 32);
OFFSET_CHECK(struct rogue_fwif_runtime_cfg, hwperf_buf_fw_addr, 64);
OFFSET_CHECK(struct rogue_fwif_runtime_cfg, padding, 68);
SIZE_CHECK(struct rogue_fwif_runtime_cfg, 72);

OFFSET_CHECK(struct rogue_fwif_connection_ctl, connection_fw_state, 0);
OFFSET_CHECK(struct rogue_fwif_connection_ctl, connection_os_state, 4);
OFFSET_CHECK(struct rogue_fwif_connection_ctl, alive_fw_token, 8);
OFFSET_CHECK(struct rogue_fwif_connection_ctl, alive_os_token, 12);
SIZE_CHECK(struct rogue_fwif_connection_ctl, 16);

OFFSET_CHECK(struct rogue_fwif_compchecks_bvnc, layout_version, 0);
OFFSET_CHECK(struct rogue_fwif_compchecks_bvnc, bvnc, 8);
SIZE_CHECK(struct rogue_fwif_compchecks_bvnc, 16);

OFFSET_CHECK(struct rogue_fwif_init_options, os_count_support, 0);
SIZE_CHECK(struct rogue_fwif_init_options, 8);

OFFSET_CHECK(struct rogue_fwif_compchecks, hw_bvnc, 0);
OFFSET_CHECK(struct rogue_fwif_compchecks, fw_bvnc, 16);
OFFSET_CHECK(struct rogue_fwif_compchecks, fw_processor_version, 32);
OFFSET_CHECK(struct rogue_fwif_compchecks, ddk_version, 36);
OFFSET_CHECK(struct rogue_fwif_compchecks, ddk_build, 40);
OFFSET_CHECK(struct rogue_fwif_compchecks, build_options, 44);
OFFSET_CHECK(struct rogue_fwif_compchecks, init_options, 48);
OFFSET_CHECK(struct rogue_fwif_compchecks, updated, 56);
SIZE_CHECK(struct rogue_fwif_compchecks, 64);

OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccbctl_fw_addr, 0);
OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccb_fw_addr, 4);
OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccb_rtn_slots_fw_addr, 8);
OFFSET_CHECK(struct rogue_fwif_osinit, firmware_ccbctl_fw_addr, 12);
OFFSET_CHECK(struct rogue_fwif_osinit, firmware_ccb_fw_addr, 16);
OFFSET_CHECK(struct rogue_fwif_osinit, work_est_firmware_ccbctl_fw_addr, 20);
OFFSET_CHECK(struct rogue_fwif_osinit, work_est_firmware_ccb_fw_addr, 24);
OFFSET_CHECK(struct rogue_fwif_osinit, rogue_fwif_hwr_info_buf_ctl_fw_addr, 28);
OFFSET_CHECK(struct rogue_fwif_osinit, hwr_debug_dump_limit, 32);
OFFSET_CHECK(struct rogue_fwif_osinit, fw_os_data_fw_addr, 36);
OFFSET_CHECK(struct rogue_fwif_osinit, rogue_comp_checks, 40);
SIZE_CHECK(struct rogue_fwif_osinit, 104);

OFFSET_CHECK(struct rogue_fwif_sigbuf_ctl, buffer_fw_addr, 0);
OFFSET_CHECK(struct rogue_fwif_sigbuf_ctl, left_size_in_regs, 4);
SIZE_CHECK(struct rogue_fwif_sigbuf_ctl, 8);

OFFSET_CHECK(struct pdvfs_opp, volt, 0);
OFFSET_CHECK(struct pdvfs_opp, freq, 4);
SIZE_CHECK(struct pdvfs_opp, 8);

OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, opp_values, 0);
OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, min_opp_point, 128);
OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, max_opp_point, 132);
SIZE_CHECK(struct rogue_fwif_pdvfs_opp, 136);

OFFSET_CHECK(struct rogue_fwif_counter_dump_ctl, buffer_fw_addr, 0);
OFFSET_CHECK(struct rogue_fwif_counter_dump_ctl, size_in_dwords, 4);
SIZE_CHECK(struct rogue_fwif_counter_dump_ctl, 8);

OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_string, 0);
OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_km_feature_flags, 24);
OFFSET_CHECK(struct rogue_hwperf_bvnc, num_bvnc_blocks, 28);
OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_gpu_cores, 30);
OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_blocks, 32);
SIZE_CHECK(struct rogue_hwperf_bvnc, 160);

OFFSET_CHECK(struct rogue_fwif_sysinit, fault_phys_addr, 0);
OFFSET_CHECK(struct rogue_fwif_sysinit, pds_exec_base, 8);
OFFSET_CHECK(struct rogue_fwif_sysinit, usc_exec_base, 16);
OFFSET_CHECK(struct rogue_fwif_sysinit, fbcdc_state_table_base, 24);
OFFSET_CHECK(struct rogue_fwif_sysinit, fbcdc_large_state_table_base, 32);
OFFSET_CHECK(struct rogue_fwif_sysinit, texture_heap_base, 40);
OFFSET_CHECK(struct rogue_fwif_sysinit, hw_perf_filter, 48);
OFFSET_CHECK(struct rogue_fwif_sysinit, slc3_fence_dev_addr, 56);
OFFSET_CHECK(struct rogue_fwif_sysinit, tpu_trilinear_frac_mask, 64);
OFFSET_CHECK(struct rogue_fwif_sysinit, sigbuf_ctl, 80);
OFFSET_CHECK(struct rogue_fwif_sysinit, pdvfs_opp_info, 152);
OFFSET_CHECK(struct rogue_fwif_sysinit, coremem_data_store, 288);
OFFSET_CHECK(struct rogue_fwif_sysinit, counter_dump_ctl, 304);
OFFSET_CHECK(struct rogue_fwif_sysinit, filter_flags, 312);
OFFSET_CHECK(struct rogue_fwif_sysinit, runtime_cfg_fw_addr, 316);
OFFSET_CHECK(struct rogue_fwif_sysinit, trace_buf_ctl_fw_addr, 320);
OFFSET_CHECK(struct rogue_fwif_sysinit, fw_sys_data_fw_addr, 324);
OFFSET_CHECK(struct rogue_fwif_sysinit, gpu_util_fw_cb_ctl_fw_addr, 328);
OFFSET_CHECK(struct rogue_fwif_sysinit, reg_cfg_fw_addr, 332);
OFFSET_CHECK(struct rogue_fwif_sysinit, hwperf_ctl_fw_addr, 336);
OFFSET_CHECK(struct rogue_fwif_sysinit, align_checks, 340);
OFFSET_CHECK(struct rogue_fwif_sysinit, initial_core_clock_speed, 344);
OFFSET_CHECK(struct rogue_fwif_sysinit, active_pm_latency_ms, 348);
OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_started, 352);
OFFSET_CHECK(struct rogue_fwif_sysinit, marker_val, 356);
OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_started_timestamp, 360);
OFFSET_CHECK(struct rogue_fwif_sysinit, jones_disable_mask, 364);
OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_perf, 368);
OFFSET_CHECK(struct rogue_fwif_sysinit, core_clock_rate_fw_addr, 372);
OFFSET_CHECK(struct rogue_fwif_sysinit, gpio_validation_mode, 376);
OFFSET_CHECK(struct rogue_fwif_sysinit, bvnc_km_feature_flags, 380);
OFFSET_CHECK(struct rogue_fwif_sysinit, tfbc_compression_control, 540);
SIZE_CHECK(struct rogue_fwif_sysinit, 544);

OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, time_corr, 0);
OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, time_corr_seq_count, 10240);
OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, gpu_util_flags, 10244);
OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, last_word, 10248);
OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, stats_counters, 10256);
SIZE_CHECK(struct rogue_fwif_gpu_util_fwcb, 10280);

OFFSET_CHECK(struct rogue_fwif_rta_ctl, render_target_index, 0);
OFFSET_CHECK(struct rogue_fwif_rta_ctl, current_render_target, 4);
OFFSET_CHECK(struct rogue_fwif_rta_ctl, active_render_targets, 8);
OFFSET_CHECK(struct rogue_fwif_rta_ctl, cumul_active_render_targets, 12);
OFFSET_CHECK(struct rogue_fwif_rta_ctl, valid_render_targets_fw_addr, 16);
OFFSET_CHECK(struct rogue_fwif_rta_ctl, rta_num_partial_renders_fw_addr, 20);
OFFSET_CHECK(struct rogue_fwif_rta_ctl, max_rts, 24);
OFFSET_CHECK(struct rogue_fwif_rta_ctl, rta_ctl_flags, 28);
SIZE_CHECK(struct rogue_fwif_rta_ctl, 32);

OFFSET_CHECK(struct rogue_fwif_freelist, freelist_dev_addr, 0);
OFFSET_CHECK(struct rogue_fwif_freelist, current_dev_addr, 8);
OFFSET_CHECK(struct rogue_fwif_freelist, current_stack_top, 16);
OFFSET_CHECK(struct rogue_fwif_freelist, max_pages, 20);
OFFSET_CHECK(struct rogue_fwif_freelist, grow_pages, 24);
OFFSET_CHECK(struct rogue_fwif_freelist, current_pages, 28);
OFFSET_CHECK(struct rogue_fwif_freelist, allocated_page_count, 32);
OFFSET_CHECK(struct rogue_fwif_freelist, allocated_mmu_page_count, 36);
OFFSET_CHECK(struct rogue_fwif_freelist, freelist_id, 40);
OFFSET_CHECK(struct rogue_fwif_freelist, grow_pending, 44);
OFFSET_CHECK(struct rogue_fwif_freelist, ready_pages, 48);
OFFSET_CHECK(struct rogue_fwif_freelist, freelist_flags, 52);
OFFSET_CHECK(struct rogue_fwif_freelist, pm_global_pb, 56);
SIZE_CHECK(struct rogue_fwif_freelist, 64);

OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, geom_caches_need_zeroing, 0);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, screen_pixel_max, 4);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, multi_sample_ctl, 8);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, flipped_multi_sample_ctl, 16);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, tpc_stride, 24);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, tpc_size, 28);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_screen, 32);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, mtile_stride, 36);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, teaa, 40);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_mtile1, 44);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_mtile2, 48);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_lower_x, 52);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_lower_y, 56);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_upper_x, 60);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_upper_y, 64);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_scale_x, 68);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_scale_y, 72);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, rgn_header_size, 76);
OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_mtile_size, 80);
SIZE_CHECK(struct rogue_fwif_hwrtdata_common, 88);

OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_mlist_dev_addr, 0);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, vce_cat_base, 8);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, vce_last_cat_base, 40);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, te_cat_base, 72);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, te_last_cat_base, 104);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, alist_cat_base, 136);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, alist_last_cat_base, 144);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_alist_stack_pointer, 152);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_mlist_stack_pointer, 160);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, hwrt_data_common_fw_addr, 164);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, hwrt_data_flags, 168);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, state, 172);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, freelists_fw_addr, 176);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, freelist_hwr_snapshot, 188);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, vheap_table_dev_addr, 200);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, rta_ctl, 208);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, tail_ptrs_dev_addr, 240);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, macrotile_array_dev_addr, 248);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, rgn_header_dev_addr, 256);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, rtc_dev_addr, 264);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, owner_geom_not_used_by_host, 272);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, geom_caches_need_zeroing, 276);
OFFSET_CHECK(struct rogue_fwif_hwrtdata, cleanup_state, 320);
SIZE_CHECK(struct rogue_fwif_hwrtdata, 384);

OFFSET_CHECK(struct rogue_fwif_sync_checkpoint, state, 0);
OFFSET_CHECK(struct rogue_fwif_sync_checkpoint, fw_ref_count, 4);
SIZE_CHECK(struct rogue_fwif_sync_checkpoint, 8);

#endif /* PVR_ROGUE_FWIF_CHECK_H */
+373
drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_ROGUE_FWIF_CLIENT_H
#define PVR_ROGUE_FWIF_CLIENT_H

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "pvr_rogue_fwif_shared.h"

/*
 * Page size used for Parameter Management.
 */
#define ROGUE_PM_PAGE_SIZE SZ_4K

/*
 * Minimum/Maximum PB size.
 *
 * Base page size is dependent on core:
 *   S6/S6XT/S7 = 50 pages
 *   S8XE = 40 pages
 *   S8XE with BRN66011 fixed = 25 pages
 *
 * Minimum PB = Base Pages + (NUM_TE_PIPES-1)*16K + (NUM_VCE_PIPES-1)*64K +
 *              IF_PM_PREALLOC(NUM_TE_PIPES*16K + NUM_VCE_PIPES*16K)
 *
 * Maximum PB size must ensure that no PM address space can be fully used,
 * because if the full address space was used it would wrap and corrupt
 * itself. Since there are two freelists (local is always minimum sized) this
 * can be described as following three conditions being met:
 *
 *   (Minimum PB + Maximum PB) < ALIST PM address space size (16GB)
 *   (Minimum PB + Maximum PB) < TE PM address space size (16GB) / NUM_TE_PIPES
 *   (Minimum PB + Maximum PB) < VCE PM address space size (16GB) / NUM_VCE_PIPES
 *
 * Since the max of NUM_TE_PIPES and NUM_VCE_PIPES is 4, we have a hard limit
 * of 4GB minus the Minimum PB. For convenience we take the smaller power-of-2
 * value of 2GB. This is far more than any current applications use.
 */
#define ROGUE_PM_MAX_FREELIST_SIZE SZ_2G

/*
 * Flags supported by the geometry DM command i.e. &struct rogue_fwif_cmd_geom.
 */

#define ROGUE_GEOM_FLAGS_FIRSTKICK BIT_MASK(0)
#define ROGUE_GEOM_FLAGS_LASTKICK BIT_MASK(1)
/* Use single core in a multi core setup. */
#define ROGUE_GEOM_FLAGS_SINGLE_CORE BIT_MASK(3)

/*
 * Flags supported by the fragment DM command i.e. &struct rogue_fwif_cmd_frag.
 */

/* Use single core in a multi core setup. */
#define ROGUE_FRAG_FLAGS_SINGLE_CORE BIT_MASK(3)
/* Indicates whether this render produces visibility results. */
#define ROGUE_FRAG_FLAGS_GET_VIS_RESULTS BIT_MASK(5)
/* Indicates whether a depth buffer is present. */
#define ROGUE_FRAG_FLAGS_DEPTHBUFFER BIT_MASK(7)
/* Indicates whether a stencil buffer is present. */
#define ROGUE_FRAG_FLAGS_STENCILBUFFER BIT_MASK(8)
/* Disable pixel merging for this render. */
#define ROGUE_FRAG_FLAGS_DISABLE_PIXELMERGE BIT_MASK(15)
/* Indicates whether a scratch buffer is present. */
#define ROGUE_FRAG_FLAGS_SCRATCHBUFFER BIT_MASK(19)
/* Disallow compute overlapped with this render. */
#define ROGUE_FRAG_FLAGS_PREVENT_CDM_OVERLAP BIT_MASK(26)

/*
 * Flags supported by the compute DM command i.e. &struct rogue_fwif_cmd_compute.
 */

#define ROGUE_COMPUTE_FLAG_PREVENT_ALL_OVERLAP BIT_MASK(2)
/* Use single core in a multi core setup. */
#define ROGUE_COMPUTE_FLAG_SINGLE_CORE BIT_MASK(5)

/*
 * Flags supported by the transfer DM command i.e. &struct rogue_fwif_cmd_transfer.
 */

/* Use single core in a multi core setup. */
#define ROGUE_TRANSFER_FLAGS_SINGLE_CORE BIT_MASK(1)

/*
 ************************************************
 * Parameter/HWRTData control structures.
 *
 * These structures are part of the kernel <-> firmware ABI; member order,
 * alignment and padding are fixed and verified at build time by the checks
 * in "pvr_rogue_fwif_client_check.h" (included at the bottom of this file).
 ************************************************
 */

/*
 * Configuration registers which need to be loaded by the firmware before a
 * geometry job can be started.
 */
struct rogue_fwif_geom_regs {
	u64 vdm_ctrl_stream_base;
	u64 tpu_border_colour_table;

	/* Only used when feature VDM_DRAWINDIRECT present. */
	u64 vdm_draw_indirect0;
	/* Only used when feature VDM_DRAWINDIRECT present. */
	u32 vdm_draw_indirect1;

	u32 ppp_ctrl;
	u32 te_psg;
	/* Only used when BRN 49927 present. */
	u32 tpu;

	u32 vdm_context_resume_task0_size;
	/* Only used when feature VDM_OBJECT_LEVEL_LLS present. */
	u32 vdm_context_resume_task3_size;

	/* Only used when BRN 56279 or BRN 67381 present. */
	u32 pds_ctrl;

	u32 view_idx;

	/* Only used when feature TESSELLATION present */
	u32 pds_coeff_free_prog;

	u32 padding;
};

/* Only used when BRN 44455 or BRN 63027 present. */
struct rogue_fwif_dummy_rgnhdr_init_geom_regs {
	u64 te_psgregion_addr;
};

/*
 * Represents a geometry command that can be used to tile a whole scene's
 * objects as per TA behavior.
 */
struct rogue_fwif_cmd_geom {
	/*
	 * rogue_fwif_cmd_geom_frag_shared field must always be at the beginning of the
	 * struct.
	 *
	 * The command struct (rogue_fwif_cmd_geom) is shared between Client and
	 * Firmware. Kernel is unable to perform read/write operations on the
	 * command struct, the SHARED region is the only exception from this rule.
	 * This region must be the first member so that Kernel can easily access it.
	 * For more info, see rogue_fwif_cmd_geom_frag_shared definition.
	 */
	struct rogue_fwif_cmd_geom_frag_shared cmd_shared;

	struct rogue_fwif_geom_regs regs __aligned(8);
	u32 flags __aligned(8);

	/*
	 * Holds the geometry/fragment fence value to allow the fragment partial render command
	 * to go through.
	 */
	struct rogue_fwif_ufo partial_render_geom_frag_fence;

	/* Only used when BRN 44455 or BRN 63027 present. */
	struct rogue_fwif_dummy_rgnhdr_init_geom_regs dummy_rgnhdr_init_geom_regs __aligned(8);

	/* Only used when BRN 61484 or BRN 66333 present. */
	u32 brn61484_66333_live_rt;

	u32 padding;
};

/*
 * Configuration registers which need to be loaded by the firmware before ISP
 * can be started.
 */
struct rogue_fwif_frag_regs {
	u32 usc_pixel_output_ctrl;

#define ROGUE_MAXIMUM_OUTPUT_REGISTERS_PER_PIXEL 8U
	u32 usc_clear_register[ROGUE_MAXIMUM_OUTPUT_REGISTERS_PER_PIXEL];

	u32 isp_bgobjdepth;
	u32 isp_bgobjvals;
	u32 isp_aa;
	/* Only used when feature S7_TOP_INFRASTRUCTURE present. */
	u32 isp_xtp_pipe_enable;

	u32 isp_ctl;

	/* Only used when BRN 49927 present. */
	u32 tpu;

	u32 event_pixel_pds_info;

	/* Only used when feature CLUSTER_GROUPING present. */
	u32 pixel_phantom;

	u32 view_idx;

	u32 event_pixel_pds_data;

	/* Only used when BRN 65101 present. */
	u32 brn65101_event_pixel_pds_data;

	/* Only used when feature GPU_MULTICORE_SUPPORT or BRN 47217 present. */
	u32 isp_oclqry_stride;

	/* Only used when feature ZLS_SUBTILE present. */
	u32 isp_zls_pixels;

	/* Only used when feature ISP_ZLS_D24_S8_PACKING_OGL_MODE present. */
	u32 rgx_cr_blackpearl_fix;

	/* All values below the ALIGN(8) must be 64 bit. */
	aligned_u64 isp_scissor_base;
	u64 isp_dbias_base;
	u64 isp_oclqry_base;
	u64 isp_zlsctl;
	u64 isp_zload_store_base;
	u64 isp_stencil_load_store_base;

	/*
	 * Only used when feature FBCDC_ALGORITHM present and value < 3 or feature
	 * FB_CDC_V4 present. Additionally, BRNs 48754, 60227, 72310 and 72311 must
	 * not be present.
	 */
	u64 fb_cdc_zls;

#define ROGUE_PBE_WORDS_REQUIRED_FOR_RENDERS 3U
	u64 pbe_word[8U][ROGUE_PBE_WORDS_REQUIRED_FOR_RENDERS];
	u64 tpu_border_colour_table;
	u64 pds_bgnd[3U];

	/* Only used when BRN 65101 present. */
	u64 pds_bgnd_brn65101[3U];

	u64 pds_pr_bgnd[3U];

	/* Only used when BRN 62850 or 62865 present. */
	u64 isp_dummy_stencil_store_base;

	/* Only used when BRN 66193 present. */
	u64 isp_dummy_depth_store_base;

	/* Only used when BRN 67182 present. */
	u32 rgnhdr_single_rt_size;
	/* Only used when BRN 67182 present. */
	u32 rgnhdr_scratch_offset;
};

struct rogue_fwif_cmd_frag {
	struct rogue_fwif_cmd_geom_frag_shared cmd_shared __aligned(8);

	struct rogue_fwif_frag_regs regs __aligned(8);
	/* command control flags. */
	u32 flags;
	/* Stride IN BYTES for Z-Buffer in case of RTAs. */
	u32 zls_stride;
	/* Stride IN BYTES for S-Buffer in case of RTAs. */
	u32 sls_stride;

	/* Only used if feature GPU_MULTICORE_SUPPORT present. */
	u32 execute_count;
};

/*
 * Configuration registers which need to be loaded by the firmware before CDM
 * can be started.
 */
struct rogue_fwif_compute_regs {
	u64 tpu_border_colour_table;

	/* Only used when feature CDM_USER_MODE_QUEUE present. */
	u64 cdm_cb_queue;

	/* Only used when feature CDM_USER_MODE_QUEUE present. */
	u64 cdm_cb_base;
	/* Only used when feature CDM_USER_MODE_QUEUE present. */
	u64 cdm_cb;

	/* Only used when feature CDM_USER_MODE_QUEUE is not present. */
	u64 cdm_ctrl_stream_base;

	u64 cdm_context_state_base_addr;

	/* Only used when BRN 49927 is present. */
	u32 tpu;
	u32 cdm_resume_pds1;

	/* Only used when feature COMPUTE_MORTON_CAPABLE present. */
	u32 cdm_item;

	/* Only used when feature CLUSTER_GROUPING present. */
	u32 compute_cluster;

	/* Only used when feature TPU_DM_GLOBAL_REGISTERS present. */
	u32 tpu_tag_cdm_ctrl;

	u32 padding;
};

struct rogue_fwif_cmd_compute {
	/* Common command attributes */
	struct rogue_fwif_cmd_common common __aligned(8);

	/* CDM registers */
	struct rogue_fwif_compute_regs regs;

	/* Control flags */
	u32 flags __aligned(8);

	/* Only used when feature UNIFIED_STORE_VIRTUAL_PARTITIONING present. */
	u32 num_temp_regions;

	/* Only used when feature CDM_USER_MODE_QUEUE present. */
	u32 stream_start_offset;

	/* Only used when feature GPU_MULTICORE_SUPPORT present. */
	u32 execute_count;
};

struct rogue_fwif_transfer_regs {
	/*
	 * All 32 bit values should be added in the top section. This then requires only a
	 * single RGXFW_ALIGN to align all the 64 bit values in the second section.
	 */
	u32 isp_bgobjvals;

	u32 usc_pixel_output_ctrl;
	u32 usc_clear_register0;
	u32 usc_clear_register1;
	u32 usc_clear_register2;
	u32 usc_clear_register3;

	u32 isp_mtile_size;
	u32 isp_render_origin;
	u32 isp_ctl;

	/* Only used when feature S7_TOP_INFRASTRUCTURE present. */
	u32 isp_xtp_pipe_enable;
	u32 isp_aa;

	u32 event_pixel_pds_info;

	u32 event_pixel_pds_code;
	u32 event_pixel_pds_data;

	u32 isp_render;
	u32 isp_rgn;

	/* Only used when feature GPU_MULTICORE_SUPPORT present. */
	u32 frag_screen;

	/* All values below the aligned_u64 must be 64 bit. */
	aligned_u64 pds_bgnd0_base;
	u64 pds_bgnd1_base;
	u64 pds_bgnd3_sizeinfo;

	u64 isp_mtile_base;
#define ROGUE_PBE_WORDS_REQUIRED_FOR_TQS 3
	/* TQ_MAX_RENDER_TARGETS * PBE_STATE_SIZE */
	u64 pbe_wordx_mrty[3U * ROGUE_PBE_WORDS_REQUIRED_FOR_TQS];
};

struct rogue_fwif_cmd_transfer {
	/* Common command attributes */
	struct rogue_fwif_cmd_common common __aligned(8);

	struct rogue_fwif_transfer_regs regs __aligned(8);

	u32 flags;

	u32 padding;
};

#include "pvr_rogue_fwif_client_check.h"

#endif /* PVR_ROGUE_FWIF_CLIENT_H */
+133
drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_ROGUE_FWIF_CLIENT_CHECK_H
#define PVR_ROGUE_FWIF_CLIENT_CHECK_H

#include <linux/build_bug.h>

/* Assert at build time that @member of @type sits at byte @offset. */
#define OFFSET_CHECK(type, member, offset) \
	static_assert(offsetof(type, member) == (offset), \
		      "offsetof(" #type ", " #member ") incorrect")

/* Assert at build time that @type occupies exactly @size bytes. */
#define SIZE_CHECK(type, size) \
	static_assert(sizeof(type) == (size), #type " is incorrect size")

/*
 * The checks below pin the layout of the client command structures shared
 * with the GPU firmware, so any ABI-breaking change fails to compile.
 */
OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_ctrl_stream_base, 0);
OFFSET_CHECK(struct rogue_fwif_geom_regs, tpu_border_colour_table, 8);
OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_draw_indirect0, 16);
OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_draw_indirect1, 24);
OFFSET_CHECK(struct rogue_fwif_geom_regs, ppp_ctrl, 28);
OFFSET_CHECK(struct rogue_fwif_geom_regs, te_psg, 32);
OFFSET_CHECK(struct rogue_fwif_geom_regs, tpu, 36);
OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_context_resume_task0_size, 40);
OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_context_resume_task3_size, 44);
OFFSET_CHECK(struct rogue_fwif_geom_regs, pds_ctrl, 48);
OFFSET_CHECK(struct rogue_fwif_geom_regs, view_idx, 52);
OFFSET_CHECK(struct rogue_fwif_geom_regs, pds_coeff_free_prog, 56);
SIZE_CHECK(struct rogue_fwif_geom_regs, 64);

OFFSET_CHECK(struct rogue_fwif_dummy_rgnhdr_init_geom_regs, te_psgregion_addr, 0);
SIZE_CHECK(struct rogue_fwif_dummy_rgnhdr_init_geom_regs, 8);

OFFSET_CHECK(struct rogue_fwif_cmd_geom, cmd_shared, 0);
OFFSET_CHECK(struct rogue_fwif_cmd_geom, regs, 16);
OFFSET_CHECK(struct rogue_fwif_cmd_geom, flags, 80);
OFFSET_CHECK(struct rogue_fwif_cmd_geom, partial_render_geom_frag_fence, 84);
OFFSET_CHECK(struct rogue_fwif_cmd_geom, dummy_rgnhdr_init_geom_regs, 96);
OFFSET_CHECK(struct rogue_fwif_cmd_geom, brn61484_66333_live_rt, 104);
SIZE_CHECK(struct rogue_fwif_cmd_geom, 112);

OFFSET_CHECK(struct rogue_fwif_frag_regs, usc_pixel_output_ctrl, 0);
OFFSET_CHECK(struct rogue_fwif_frag_regs, usc_clear_register, 4);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_bgobjdepth, 36);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_bgobjvals, 40);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_aa, 44);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_xtp_pipe_enable, 48);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_ctl, 52);
OFFSET_CHECK(struct rogue_fwif_frag_regs, tpu, 56);
OFFSET_CHECK(struct rogue_fwif_frag_regs, event_pixel_pds_info, 60);
OFFSET_CHECK(struct rogue_fwif_frag_regs, pixel_phantom, 64);
OFFSET_CHECK(struct rogue_fwif_frag_regs, view_idx, 68);
OFFSET_CHECK(struct rogue_fwif_frag_regs, event_pixel_pds_data, 72);
OFFSET_CHECK(struct rogue_fwif_frag_regs, brn65101_event_pixel_pds_data, 76);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_oclqry_stride, 80);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zls_pixels, 84);
OFFSET_CHECK(struct rogue_fwif_frag_regs, rgx_cr_blackpearl_fix, 88);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_scissor_base, 96);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dbias_base, 104);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_oclqry_base, 112);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zlsctl, 120);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zload_store_base, 128);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_stencil_load_store_base, 136);
OFFSET_CHECK(struct rogue_fwif_frag_regs, fb_cdc_zls, 144);
OFFSET_CHECK(struct rogue_fwif_frag_regs, pbe_word, 152);
OFFSET_CHECK(struct rogue_fwif_frag_regs, tpu_border_colour_table, 344);
OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_bgnd, 352);
OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_bgnd_brn65101, 376);
OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_pr_bgnd, 400);
OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dummy_stencil_store_base, 424); 70 + OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dummy_depth_store_base, 432); 71 + OFFSET_CHECK(struct rogue_fwif_frag_regs, rgnhdr_single_rt_size, 440); 72 + OFFSET_CHECK(struct rogue_fwif_frag_regs, rgnhdr_scratch_offset, 444); 73 + SIZE_CHECK(struct rogue_fwif_frag_regs, 448); 74 + 75 + OFFSET_CHECK(struct rogue_fwif_cmd_frag, cmd_shared, 0); 76 + OFFSET_CHECK(struct rogue_fwif_cmd_frag, regs, 16); 77 + OFFSET_CHECK(struct rogue_fwif_cmd_frag, flags, 464); 78 + OFFSET_CHECK(struct rogue_fwif_cmd_frag, zls_stride, 468); 79 + OFFSET_CHECK(struct rogue_fwif_cmd_frag, sls_stride, 472); 80 + OFFSET_CHECK(struct rogue_fwif_cmd_frag, execute_count, 476); 81 + SIZE_CHECK(struct rogue_fwif_cmd_frag, 480); 82 + 83 + OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu_border_colour_table, 0); 84 + OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb_queue, 8); 85 + OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb_base, 16); 86 + OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb, 24); 87 + OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_ctrl_stream_base, 32); 88 + OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_context_state_base_addr, 40); 89 + OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu, 48); 90 + OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_resume_pds1, 52); 91 + OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_item, 56); 92 + OFFSET_CHECK(struct rogue_fwif_compute_regs, compute_cluster, 60); 93 + OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu_tag_cdm_ctrl, 64); 94 + SIZE_CHECK(struct rogue_fwif_compute_regs, 72); 95 + 96 + OFFSET_CHECK(struct rogue_fwif_cmd_compute, common, 0); 97 + OFFSET_CHECK(struct rogue_fwif_cmd_compute, regs, 8); 98 + OFFSET_CHECK(struct rogue_fwif_cmd_compute, flags, 80); 99 + OFFSET_CHECK(struct rogue_fwif_cmd_compute, num_temp_regions, 84); 100 + OFFSET_CHECK(struct rogue_fwif_cmd_compute, stream_start_offset, 88); 101 + OFFSET_CHECK(struct 
rogue_fwif_cmd_compute, execute_count, 92); 102 + SIZE_CHECK(struct rogue_fwif_cmd_compute, 96); 103 + 104 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_bgobjvals, 0); 105 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_pixel_output_ctrl, 4); 106 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register0, 8); 107 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register1, 12); 108 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register2, 16); 109 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register3, 20); 110 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_mtile_size, 24); 111 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_render_origin, 28); 112 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_ctl, 32); 113 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_xtp_pipe_enable, 36); 114 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_aa, 40); 115 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_info, 44); 116 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_code, 48); 117 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_data, 52); 118 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_render, 56); 119 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_rgn, 60); 120 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, frag_screen, 64); 121 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd0_base, 72); 122 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd1_base, 80); 123 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd3_sizeinfo, 88); 124 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_mtile_base, 96); 125 + OFFSET_CHECK(struct rogue_fwif_transfer_regs, pbe_wordx_mrty, 104); 126 + SIZE_CHECK(struct rogue_fwif_transfer_regs, 176); 127 + 128 + OFFSET_CHECK(struct rogue_fwif_cmd_transfer, common, 0); 129 + OFFSET_CHECK(struct rogue_fwif_cmd_transfer, regs, 8); 130 + OFFSET_CHECK(struct rogue_fwif_cmd_transfer, flags, 184); 131 + 
SIZE_CHECK(struct rogue_fwif_cmd_transfer, 192); 132 + 133 + #endif /* PVR_ROGUE_FWIF_CLIENT_CHECK_H */
+60
drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_ROGUE_FWIF_COMMON_H
#define PVR_ROGUE_FWIF_COMMON_H

#include <linux/build_bug.h>

/*
 * Constants shared between all FWIF headers: alignment requirements for
 * FW-visible structures, data master (DM) indices and GPU utilisation states.
 */

/*
 * This macro represents a mask of LSBs that must be zero on data structure
 * sizes and offsets to ensure they are 8-byte granular on types shared between
 * the FW and host driver.
 */
#define PVR_FW_ALIGNMENT_LSB 7U

/* Macro to test structure size alignment (fails the build if not 8-byte granular). */
#define PVR_FW_STRUCT_SIZE_ASSERT(_a)                            \
	static_assert((sizeof(_a) & PVR_FW_ALIGNMENT_LSB) == 0U, \
		      "Size of " #_a " is not properly aligned")

/* The master definition for data masters known to the firmware. */

#define PVR_FWIF_DM_GP (0)
/* Either TDM or 2D DM is present. */
/* When the 'tla' feature is present in the hw (as per @pvr_device_features). */
#define PVR_FWIF_DM_2D (1)
/*
 * When the 'fastrender_dm' feature is present in the hw (as per
 * @pvr_device_features).
 *
 * Deliberately shares index 1 with PVR_FWIF_DM_2D: a given GPU has one or the
 * other, never both.
 */
#define PVR_FWIF_DM_TDM (1)

#define PVR_FWIF_DM_GEOM (2)
#define PVR_FWIF_DM_FRAG (3)
#define PVR_FWIF_DM_CDM (4)
#define PVR_FWIF_DM_RAY (5)
#define PVR_FWIF_DM_GEOM2 (6)
#define PVR_FWIF_DM_GEOM3 (7)
#define PVR_FWIF_DM_GEOM4 (8)

#define PVR_FWIF_DM_LAST PVR_FWIF_DM_GEOM4

/* Maximum number of DM in use: GP, 2D/TDM, GEOM, 3D, CDM, RAY, GEOM2, GEOM3, GEOM4 */
#define PVR_FWIF_DM_MAX (PVR_FWIF_DM_LAST + 1U)

/* GPU Utilisation states */
#define PVR_FWIF_GPU_UTIL_STATE_IDLE 0U
#define PVR_FWIF_GPU_UTIL_STATE_ACTIVE 1U
#define PVR_FWIF_GPU_UTIL_STATE_BLOCKED 2U
#define PVR_FWIF_GPU_UTIL_STATE_NUM 3U
/* 2-bit field mask covering the three states above. */
#define PVR_FWIF_GPU_UTIL_STATE_MASK 0x3ULL

/*
 * Maximum amount of register writes that can be done by the register
 * programmer (FW or META DMA). This is not a HW limitation, it is only
 * a protection against malformed inputs to the register programmer.
 */
#define PVR_MAX_NUM_REGISTER_PROGRAMMER_WRITES 128U

#endif /* PVR_ROGUE_FWIF_COMMON_H */
+113
drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

/*
 * Device-info indices reported by/to the firmware. Each enumerator is a
 * stable index identifying one BRN (hardware bug workaround), ERN (hardware
 * enhancement) or HW feature; the *_MAX enumerator in each enum is the number
 * of entries. Do not reorder or insert in the middle — the indices are ABI.
 *
 * Include guard renamed from __PVR_ROGUE_FWIF_DEV_INFO_H__: identifiers
 * containing "__" are reserved for the implementation (C11 7.1.3), and the
 * new name matches the PVR_ROGUE_FWIF_*_H convention used by the sibling
 * FWIF headers.
 */
#ifndef PVR_ROGUE_FWIF_DEV_INFO_H
#define PVR_ROGUE_FWIF_DEV_INFO_H

/* Hardware bug workarounds (BRNs) the firmware may be built with. */
enum {
	PVR_FW_HAS_BRN_44079 = 0,
	PVR_FW_HAS_BRN_47217,
	PVR_FW_HAS_BRN_48492,
	PVR_FW_HAS_BRN_48545,
	PVR_FW_HAS_BRN_49927,
	PVR_FW_HAS_BRN_50767,
	PVR_FW_HAS_BRN_51764,
	PVR_FW_HAS_BRN_62269,
	PVR_FW_HAS_BRN_63142,
	PVR_FW_HAS_BRN_63553,
	PVR_FW_HAS_BRN_66011,
	PVR_FW_HAS_BRN_71242,

	/* Number of BRN entries. */
	PVR_FW_HAS_BRN_MAX
};

/* Hardware enhancements (ERNs) the firmware may be built with. */
enum {
	PVR_FW_HAS_ERN_35421 = 0,
	PVR_FW_HAS_ERN_38020,
	PVR_FW_HAS_ERN_38748,
	PVR_FW_HAS_ERN_42064,
	PVR_FW_HAS_ERN_42290,
	PVR_FW_HAS_ERN_42606,
	PVR_FW_HAS_ERN_47025,
	PVR_FW_HAS_ERN_57596,

	/* Number of ERN entries. */
	PVR_FW_HAS_ERN_MAX
};

/* Hardware features the firmware may be built with. */
enum {
	PVR_FW_HAS_FEATURE_AXI_ACELITE = 0,
	PVR_FW_HAS_FEATURE_CDM_CONTROL_STREAM_FORMAT,
	PVR_FW_HAS_FEATURE_CLUSTER_GROUPING,
	PVR_FW_HAS_FEATURE_COMMON_STORE_SIZE_IN_DWORDS,
	PVR_FW_HAS_FEATURE_COMPUTE,
	PVR_FW_HAS_FEATURE_COMPUTE_MORTON_CAPABLE,
	PVR_FW_HAS_FEATURE_COMPUTE_OVERLAP,
	PVR_FW_HAS_FEATURE_COREID_PER_OS,
	PVR_FW_HAS_FEATURE_DYNAMIC_DUST_POWER,
	PVR_FW_HAS_FEATURE_ECC_RAMS,
	PVR_FW_HAS_FEATURE_FBCDC,
	PVR_FW_HAS_FEATURE_FBCDC_ALGORITHM,
	PVR_FW_HAS_FEATURE_FBCDC_ARCHITECTURE,
	PVR_FW_HAS_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS,
	PVR_FW_HAS_FEATURE_FBC_MAX_LARGE_DESCRIPTORS,
	PVR_FW_HAS_FEATURE_FB_CDC_V4,
	PVR_FW_HAS_FEATURE_GPU_MULTICORE_SUPPORT,
	PVR_FW_HAS_FEATURE_GPU_VIRTUALISATION,
	PVR_FW_HAS_FEATURE_GS_RTA_SUPPORT,
	PVR_FW_HAS_FEATURE_IRQ_PER_OS,
	PVR_FW_HAS_FEATURE_ISP_MAX_TILES_IN_FLIGHT,
	PVR_FW_HAS_FEATURE_ISP_SAMPLES_PER_PIXEL,
	PVR_FW_HAS_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE,
	PVR_FW_HAS_FEATURE_LAYOUT_MARS,
	PVR_FW_HAS_FEATURE_MAX_PARTITIONS,
	PVR_FW_HAS_FEATURE_META,
	PVR_FW_HAS_FEATURE_META_COREMEM_SIZE,
	PVR_FW_HAS_FEATURE_MIPS,
	PVR_FW_HAS_FEATURE_NUM_CLUSTERS,
	PVR_FW_HAS_FEATURE_NUM_ISP_IPP_PIPES,
	PVR_FW_HAS_FEATURE_NUM_OSIDS,
	PVR_FW_HAS_FEATURE_NUM_RASTER_PIPES,
	PVR_FW_HAS_FEATURE_PBE2_IN_XE,
	PVR_FW_HAS_FEATURE_PBVNC_COREID_REG,
	PVR_FW_HAS_FEATURE_PERFBUS,
	PVR_FW_HAS_FEATURE_PERF_COUNTER_BATCH,
	PVR_FW_HAS_FEATURE_PHYS_BUS_WIDTH,
	PVR_FW_HAS_FEATURE_RISCV_FW_PROCESSOR,
	PVR_FW_HAS_FEATURE_ROGUEXE,
	PVR_FW_HAS_FEATURE_S7_TOP_INFRASTRUCTURE,
	PVR_FW_HAS_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT,
	PVR_FW_HAS_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2,
	PVR_FW_HAS_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION,
	PVR_FW_HAS_FEATURE_SLC_BANKS,
	PVR_FW_HAS_FEATURE_SLC_CACHE_LINE_SIZE_BITS,
	PVR_FW_HAS_FEATURE_SLC_SIZE_CONFIGURABLE,
	PVR_FW_HAS_FEATURE_SLC_SIZE_IN_KILOBYTES,
	PVR_FW_HAS_FEATURE_SOC_TIMER,
	PVR_FW_HAS_FEATURE_SYS_BUS_SECURE_RESET,
	PVR_FW_HAS_FEATURE_TESSELLATION,
	PVR_FW_HAS_FEATURE_TILE_REGION_PROTECTION,
	PVR_FW_HAS_FEATURE_TILE_SIZE_X,
	PVR_FW_HAS_FEATURE_TILE_SIZE_Y,
	PVR_FW_HAS_FEATURE_TLA,
	PVR_FW_HAS_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS,
	PVR_FW_HAS_FEATURE_TPU_DM_GLOBAL_REGISTERS,
	PVR_FW_HAS_FEATURE_TPU_FILTERING_MODE_CONTROL,
	PVR_FW_HAS_FEATURE_USC_MIN_OUTPUT_REGISTERS_PER_PIX,
	PVR_FW_HAS_FEATURE_VDM_DRAWINDIRECT,
	PVR_FW_HAS_FEATURE_VDM_OBJECT_LEVEL_LLS,
	PVR_FW_HAS_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS,
	PVR_FW_HAS_FEATURE_WATCHDOG_TIMER,
	PVR_FW_HAS_FEATURE_WORKGROUP_PROTECTION,
	PVR_FW_HAS_FEATURE_XE_ARCHITECTURE,
	PVR_FW_HAS_FEATURE_XE_MEMORY_HIERARCHY,
	PVR_FW_HAS_FEATURE_XE_TPU2,
	PVR_FW_HAS_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH,
	PVR_FW_HAS_FEATURE_XPU_MAX_SLAVES,
	PVR_FW_HAS_FEATURE_XPU_REGISTER_BROADCAST,
	PVR_FW_HAS_FEATURE_XT_TOP_INFRASTRUCTURE,
	PVR_FW_HAS_FEATURE_ZLS_SUBTILE,

	/* Number of feature entries. */
	PVR_FW_HAS_FEATURE_MAX
};

#endif /* PVR_ROGUE_FWIF_DEV_INFO_H */
+28
drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only OR MIT */ 2 + /* Copyright (c) 2023 Imagination Technologies Ltd. */ 3 + 4 + #ifndef PVR_ROGUE_FWIF_RESETFRAMEWORK_H 5 + #define PVR_ROGUE_FWIF_RESETFRAMEWORK_H 6 + 7 + #include <linux/bits.h> 8 + #include <linux/types.h> 9 + 10 + #include "pvr_rogue_fwif_shared.h" 11 + 12 + struct rogue_fwif_rf_registers { 13 + union { 14 + u64 cdmreg_cdm_cb_base; 15 + u64 cdmreg_cdm_ctrl_stream_base; 16 + }; 17 + u64 cdmreg_cdm_cb_queue; 18 + u64 cdmreg_cdm_cb; 19 + }; 20 + 21 + struct rogue_fwif_rf_cmd { 22 + /* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */ 23 + struct rogue_fwif_rf_registers fw_registers __aligned(8); 24 + }; 25 + 26 + #define ROGUE_FWIF_RF_CMD_SIZE sizeof(struct rogue_fwif_rf_cmd) 27 + 28 + #endif /* PVR_ROGUE_FWIF_RESETFRAMEWORK_H */
+258
drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_ROGUE_FWIF_SHARED_H
#define PVR_ROGUE_FWIF_SHARED_H

#include <linux/compiler.h>
#include <linux/types.h>

/*
 * Structures and constants shared between userspace (UM), the kernel driver
 * (KM) and the firmware (FW). All struct layouts below are FW ABI and are
 * verified by the static asserts in pvr_rogue_fwif_shared_check.h.
 */

#define ROGUE_FWIF_NUM_RTDATAS 2U
#define ROGUE_FWIF_NUM_GEOMDATAS 1U
#define ROGUE_FWIF_NUM_RTDATA_FREELISTS 2U
#define ROGUE_NUM_GEOM_CORES 1U

/*
 * Array-size constant used for ctxswitch_regs below; larger than
 * ROGUE_NUM_GEOM_CORES — presumably sized for the maximum multi-core
 * configuration rather than the current one. TODO confirm.
 */
#define ROGUE_NUM_GEOM_CORES_SIZE 2U

/*
 * Maximum number of UFOs in a CCB command.
 * The number is based on having 32 sync prims (as originally), plus 32 sync
 * checkpoints.
 * Once the use of sync prims is no longer supported, we will retain
 * the same total (64) as the number of sync checkpoints which may be
 * supporting a fence is not visible to the client driver and has to
 * allow for the number of different timelines involved in fence merges.
 */
#define ROGUE_FWIF_CCB_CMD_MAX_UFOS (32U + 32U)

/*
 * This is a generic limit imposed on any DM (GEOMETRY,FRAGMENT,CDM,TDM,2D,TRANSFER)
 * command passed through the bridge.
 * Just across the bridge in the server, any incoming kick command size is
 * checked against this maximum limit.
 * In case the incoming command size is larger than the specified limit,
 * the bridge call is retired with error.
 */
#define ROGUE_FWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U)

/* Partial render (PR) buffer slots: Z/S and MSAA scratch buffers. */
#define ROGUE_FWIF_PRBUFFER_START (0)
#define ROGUE_FWIF_PRBUFFER_ZSBUFFER (0)
#define ROGUE_FWIF_PRBUFFER_MSAABUFFER (1)
#define ROGUE_FWIF_PRBUFFER_MAXSUPPORTED (2)

/* Device virtual address together with its FW-view address. */
struct rogue_fwif_dma_addr {
	aligned_u64 dev_addr;
	u32 fw_addr;
	u32 padding;
} __aligned(8);

/* User Fence Object: an address/value pair used for synchronisation. */
struct rogue_fwif_ufo {
	u32 addr;
	u32 value;
};

#define ROGUE_FWIF_UFO_ADDR_IS_SYNC_CHECKPOINT (1)

struct rogue_fwif_sync_checkpoint {
	u32 state;
	u32 fw_ref_count;
};

struct rogue_fwif_cleanup_ctl {
	/* Number of commands received by the FW */
	u32 submitted_commands;
	/* Number of commands executed by the FW */
	u32 executed_commands;
} __aligned(8);

/*
 * Used to share frame numbers across UM-KM-FW,
 * frame number is set in UM,
 * frame number is required in both KM for HTB and FW for FW trace.
 *
 * May be used to house Kick flags in the future.
 */
struct rogue_fwif_cmd_common {
	/* associated frame number */
	u32 frame_num;
};

/*
 * Geometry and fragment commands require set of firmware addresses that are stored in the Kernel.
 * Client has handle(s) to Kernel containers storing these addresses, instead of raw addresses. We
 * have to patch/write these addresses in KM to prevent UM from controlling FW addresses directly.
 * Typedefs for geometry and fragment commands are shared between Client and Firmware (both
 * single-BVNC). Kernel is implemented in a multi-BVNC manner, so it can't use geometry|fragment
 * CMD type definitions directly. Therefore we have a SHARED block that is shared between UM-KM-FW
 * across all BVNC configurations.
 */
struct rogue_fwif_cmd_geom_frag_shared {
	/* Common command attributes */
	struct rogue_fwif_cmd_common cmn;

	/*
	 * RTData associated with this command, this is used for context
	 * selection and for storing out HW-context, when TA is switched out for
	 * continuing later
	 */
	u32 hwrt_data_fw_addr;

	/* Supported PR Buffers like Z/S/MSAA Scratch */
	u32 pr_buffer_fw_addr[ROGUE_FWIF_PRBUFFER_MAXSUPPORTED];
};

/*
 * Client Circular Command Buffer (CCCB) control structure.
 * This is shared between the Server and the Firmware and holds byte offsets
 * into the CCCB as well as the wrapping mask to aid wrap around. A given
 * snapshot of this queue with Cmd 1 running on the GPU might be:
 *
 *          Roff                           Doff                 Woff
 * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........]
 *            <      runnable commands       ><   !ready to run   >
 *
 * Cmd 1    : Currently executing on the GPU data master.
 * Cmd 2,3,4: Fence dependencies met, commands runnable.
 * Cmd 5... : Fence dependency not met yet.
 */
struct rogue_fwif_cccb_ctl {
	/* Host write offset into CCB. This must be aligned to 16 bytes. */
	u32 write_offset;
	/*
	 * Firmware read offset into CCB. Points to the command that is runnable
	 * on GPU, if R!=W
	 */
	u32 read_offset;
	/*
	 * Firmware fence dependency offset. Points to commands not ready, i.e.
	 * fence dependencies are not met.
	 */
	u32 dep_offset;
	/* Offset wrapping mask, total capacity in bytes of the CCB-1 */
	u32 wrap_mask;

	/* Only used if SUPPORT_AGP is present. */
	u32 read_offset2;

	/* Only used if SUPPORT_AGP4 is present. */
	u32 read_offset3;
	/* Only used if SUPPORT_AGP4 is present. */
	u32 read_offset4;

	/* Pads the struct to an 8-byte multiple for the FW ABI. */
	u32 padding;
} __aligned(8);

#define ROGUE_FW_LOCAL_FREELIST (0)
#define ROGUE_FW_GLOBAL_FREELIST (1)
#define ROGUE_FW_FREELIST_TYPE_LAST ROGUE_FW_GLOBAL_FREELIST
#define ROGUE_FW_MAX_FREELISTS (ROGUE_FW_FREELIST_TYPE_LAST + 1U)

/* Geometry registers saved/restored across a context switch. */
struct rogue_fwif_geom_registers_caswitch {
	u64 geom_reg_vdm_context_state_base_addr;
	u64 geom_reg_vdm_context_state_resume_addr;
	u64 geom_reg_ta_context_state_base_addr;

	struct {
		u64 geom_reg_vdm_context_store_task0;
		u64 geom_reg_vdm_context_store_task1;
		u64 geom_reg_vdm_context_store_task2;

		/* VDM resume state update controls */
		u64 geom_reg_vdm_context_resume_task0;
		u64 geom_reg_vdm_context_resume_task1;
		u64 geom_reg_vdm_context_resume_task2;

		u64 geom_reg_vdm_context_store_task3;
		u64 geom_reg_vdm_context_store_task4;

		u64 geom_reg_vdm_context_resume_task3;
		u64 geom_reg_vdm_context_resume_task4;
	} geom_state[2];
};

#define ROGUE_FWIF_GEOM_REGISTERS_CSWITCH_SIZE \
	sizeof(struct rogue_fwif_geom_registers_caswitch)

/* CDM (compute) registers saved/restored across a context switch. */
struct rogue_fwif_cdm_registers_cswitch {
	u64 cdmreg_cdm_context_pds0;
	u64 cdmreg_cdm_context_pds1;
	u64 cdmreg_cdm_terminate_pds;
	u64 cdmreg_cdm_terminate_pds1;

	/* CDM resume controls */
	u64 cdmreg_cdm_resume_pds0;
	u64 cdmreg_cdm_context_pds0_b;
	u64 cdmreg_cdm_resume_pds0_b;
};

struct rogue_fwif_static_rendercontext_state {
	/* Geom registers for ctx switch */
	struct rogue_fwif_geom_registers_caswitch ctxswitch_regs[ROGUE_NUM_GEOM_CORES_SIZE]
		__aligned(8);
};

#define ROGUE_FWIF_STATIC_RENDERCONTEXT_SIZE \
	sizeof(struct rogue_fwif_static_rendercontext_state)

struct rogue_fwif_static_computecontext_state {
	/* CDM registers for ctx switch */
	struct rogue_fwif_cdm_registers_cswitch ctxswitch_regs __aligned(8);
};

#define ROGUE_FWIF_STATIC_COMPUTECONTEXT_SIZE \
	sizeof(struct rogue_fwif_static_computecontext_state)

/* Backing state of an on-demand partial render (PR) buffer. */
enum rogue_fwif_prbuffer_state {
	ROGUE_FWIF_PRBUFFER_UNBACKED = 0,
	ROGUE_FWIF_PRBUFFER_BACKED,
	ROGUE_FWIF_PRBUFFER_BACKING_PENDING,
	ROGUE_FWIF_PRBUFFER_UNBACKING_PENDING,
};

struct rogue_fwif_prbuffer {
	/* Buffer ID*/
	u32 buffer_id;
	/* Needs On-demand Z/S/MSAA Buffer allocation */
	bool on_demand __aligned(4);
	/* Z/S/MSAA -Buffer state */
	enum rogue_fwif_prbuffer_state state;
	/*
	 * Cleanup state.
	 * NOTE(review): "cleanup_sate" looks like a typo for "cleanup_state",
	 * but the member name is FW-shared ABI and may be referenced elsewhere
	 * — do not rename without auditing all users.
	 */
	struct rogue_fwif_cleanup_ctl cleanup_sate;
	/* Compatibility and other flags */
	u32 prbuffer_flags;
} __aligned(8);

/* Last reset reason for a context. */
enum rogue_context_reset_reason {
	/* No reset reason recorded */
	ROGUE_CONTEXT_RESET_REASON_NONE = 0,
	/* Caused a reset due to locking up */
	ROGUE_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1,
	/* Affected by another context locking up */
	ROGUE_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2,
	/* Overran the global deadline */
	ROGUE_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3,
	/* Affected by another context overrunning */
	ROGUE_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4,
	/* Forced reset to ensure scheduling requirements */
	ROGUE_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5,
	/* FW Safety watchdog triggered */
	ROGUE_CONTEXT_RESET_REASON_FW_WATCHDOG = 12,
	/* FW page fault (no HWR) */
	ROGUE_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13,
	/* FW execution error (GPU reset requested) */
	ROGUE_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14,
	/* Host watchdog detected FW error */
	ROGUE_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15,
	/* Geometry DM OOM event is not allowed */
	ROGUE_CONTEXT_GEOM_OOM_DISABLED = 16,
};

struct rogue_context_reset_reason_data {
	enum rogue_context_reset_reason reset_reason;
	u32 reset_ext_job_ref;
};

/* Pulled in last so the layout checks see the definitions above. */
#include "pvr_rogue_fwif_shared_check.h"

#endif /* PVR_ROGUE_FWIF_SHARED_H */
+108
drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_ROGUE_FWIF_SHARED_CHECK_H
#define PVR_ROGUE_FWIF_SHARED_CHECK_H

#include <linux/build_bug.h>

/*
 * Compile-time layout checks for the UM/KM/FW-shared structures declared in
 * pvr_rogue_fwif_shared.h (which includes this header last). Every offset and
 * size below is FW ABI; a failing assert means a struct definition has
 * drifted from the firmware's expectation.
 */

/* Assert that @member of @type sits at byte @offset. */
#define OFFSET_CHECK(type, member, offset) \
	static_assert(offsetof(type, member) == (offset), \
		      "offsetof(" #type ", " #member ") incorrect")

/* Assert that @type is exactly @size bytes. */
#define SIZE_CHECK(type, size) \
	static_assert(sizeof(type) == (size), #type " is incorrect size")

OFFSET_CHECK(struct rogue_fwif_dma_addr, dev_addr, 0);
OFFSET_CHECK(struct rogue_fwif_dma_addr, fw_addr, 8);
SIZE_CHECK(struct rogue_fwif_dma_addr, 16);

OFFSET_CHECK(struct rogue_fwif_ufo, addr, 0);
OFFSET_CHECK(struct rogue_fwif_ufo, value, 4);
SIZE_CHECK(struct rogue_fwif_ufo, 8);

OFFSET_CHECK(struct rogue_fwif_cleanup_ctl, submitted_commands, 0);
OFFSET_CHECK(struct rogue_fwif_cleanup_ctl, executed_commands, 4);
SIZE_CHECK(struct rogue_fwif_cleanup_ctl, 8);

OFFSET_CHECK(struct rogue_fwif_cccb_ctl, write_offset, 0);
OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset, 4);
OFFSET_CHECK(struct rogue_fwif_cccb_ctl, dep_offset, 8);
OFFSET_CHECK(struct rogue_fwif_cccb_ctl, wrap_mask, 12);
OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset2, 16);
OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset3, 20);
OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset4, 24);
SIZE_CHECK(struct rogue_fwif_cccb_ctl, 32);

/* Geometry ctx-switch registers: both geom_state[] entries checked in full. */
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_reg_vdm_context_state_base_addr, 0);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_reg_vdm_context_state_resume_addr, 8);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_reg_ta_context_state_base_addr, 16);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[0].geom_reg_vdm_context_store_task0, 24);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[0].geom_reg_vdm_context_store_task1, 32);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[0].geom_reg_vdm_context_store_task2, 40);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[0].geom_reg_vdm_context_resume_task0, 48);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[0].geom_reg_vdm_context_resume_task1, 56);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[0].geom_reg_vdm_context_resume_task2, 64);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[0].geom_reg_vdm_context_store_task3, 72);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[0].geom_reg_vdm_context_store_task4, 80);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[0].geom_reg_vdm_context_resume_task3, 88);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[0].geom_reg_vdm_context_resume_task4, 96);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[1].geom_reg_vdm_context_store_task0, 104);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[1].geom_reg_vdm_context_store_task1, 112);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[1].geom_reg_vdm_context_store_task2, 120);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[1].geom_reg_vdm_context_resume_task0, 128);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[1].geom_reg_vdm_context_resume_task1, 136);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[1].geom_reg_vdm_context_resume_task2, 144);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[1].geom_reg_vdm_context_store_task3, 152);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[1].geom_reg_vdm_context_store_task4, 160);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[1].geom_reg_vdm_context_resume_task3, 168);
OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
	     geom_state[1].geom_reg_vdm_context_resume_task4, 176);
SIZE_CHECK(struct rogue_fwif_geom_registers_caswitch, 184);

OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0, 0);
OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds1, 8);
OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds, 16);
OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds1, 24);
OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0, 32);
OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0_b, 40);
OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0_b, 48);
SIZE_CHECK(struct rogue_fwif_cdm_registers_cswitch, 56);

OFFSET_CHECK(struct rogue_fwif_static_rendercontext_state, ctxswitch_regs, 0);
SIZE_CHECK(struct rogue_fwif_static_rendercontext_state, 368);

OFFSET_CHECK(struct rogue_fwif_static_computecontext_state, ctxswitch_regs, 0);
SIZE_CHECK(struct rogue_fwif_static_computecontext_state, 56);

OFFSET_CHECK(struct rogue_fwif_cmd_common, frame_num, 0);
SIZE_CHECK(struct rogue_fwif_cmd_common, 4);

OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, cmn, 0);
OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, hwrt_data_fw_addr, 4);
OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, pr_buffer_fw_addr, 8);
SIZE_CHECK(struct rogue_fwif_cmd_geom_frag_shared, 16);

#endif /* PVR_ROGUE_FWIF_SHARED_CHECK_H */
+78
drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_ROGUE_FWIF_STREAM_H
#define PVR_ROGUE_FWIF_STREAM_H

/**
 * DOC: Streams
 *
 * Commands are submitted to the kernel driver in the form of streams.
 *
 * A command stream has the following layout :
 *  - A 64-bit header containing:
 *    * A u32 containing the length of the main stream inclusive of the length of the header.
 *    * A u32 for padding.
 *  - The main stream data.
 *  - The extension stream (optional), which is composed of:
 *    * One or more headers.
 *    * The extension stream data, corresponding to the extension headers.
 *
 * The main stream provides the base command data. This has a fixed layout based on the features
 * supported by a given GPU.
 *
 * The extension stream provides the command parameters that are required for BRNs & ERNs for the
 * current GPU. This stream is comprised of one or more headers, followed by data for each given
 * BRN/ERN.
 *
 * Each header is a u32 containing a bitmask of quirks & enhancements in the extension stream, a
 * "type" field determining the set of quirks & enhancements the bitmask represents, and a
 * continuation bit determining whether any more headers are present. The headers are then followed
 * by command data; this is specific to each quirk/enhancement. All unused / reserved bits in the
 * header must be set to 0.
 *
 * All parameters and headers in the main and extension streams must be naturally aligned.
 *
 * If a parameter appears in both the main and extension streams, then the extension parameter is
 * used.
 */

/*
 * Stream extension header definition
 *
 * NOTE(review): BIT() is provided by <linux/bits.h>, which this header relies
 * on its includers to pull in — consider including it directly.
 */
/* Bits [31:29]: header type (one of the PVR_STREAM_EXTHDR_TYPE_* values). */
#define PVR_STREAM_EXTHDR_TYPE_SHIFT 29U
#define PVR_STREAM_EXTHDR_TYPE_MASK (7U << PVR_STREAM_EXTHDR_TYPE_SHIFT)
#define PVR_STREAM_EXTHDR_TYPE_MAX 8U
/* Bit 28: set when another extension header follows this one. */
#define PVR_STREAM_EXTHDR_CONTINUATION BIT(28U)

/* Bits [27:0]: per-type quirk/enhancement bitmask. */
#define PVR_STREAM_EXTHDR_DATA_MASK ~(PVR_STREAM_EXTHDR_TYPE_MASK | PVR_STREAM_EXTHDR_CONTINUATION)

/*
 * Stream extension header - Geometry 0
 */
#define PVR_STREAM_EXTHDR_TYPE_GEOM0 0U

#define PVR_STREAM_EXTHDR_GEOM0_BRN49927 BIT(0U)

#define PVR_STREAM_EXTHDR_GEOM0_VALID PVR_STREAM_EXTHDR_GEOM0_BRN49927

/*
 * Stream extension header - Fragment 0
 */
#define PVR_STREAM_EXTHDR_TYPE_FRAG0 0U

#define PVR_STREAM_EXTHDR_FRAG0_BRN47217 BIT(0U)
#define PVR_STREAM_EXTHDR_FRAG0_BRN49927 BIT(1U)

/*
 * NOTE(review): the valid mask omits PVR_STREAM_EXTHDR_FRAG0_BRN47217 even
 * though the bit is defined above — verify this is intentional (i.e. BRN47217
 * data is not yet accepted in the extension stream).
 */
#define PVR_STREAM_EXTHDR_FRAG0_VALID PVR_STREAM_EXTHDR_FRAG0_BRN49927

/*
 * Stream extension header - Compute 0
 */
#define PVR_STREAM_EXTHDR_TYPE_COMPUTE0 0U

#define PVR_STREAM_EXTHDR_COMPUTE0_BRN49927 BIT(0U)

#define PVR_STREAM_EXTHDR_COMPUTE0_VALID PVR_STREAM_EXTHDR_COMPUTE0_BRN49927

#endif /* PVR_ROGUE_FWIF_STREAM_H */