Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: add amdgpu.h (v2)

This is the main header file for amdgpu.

v2: remove stable comments

Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jammy Zhou <Jammy.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

+2387
+2387
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1 + /* 2 + * Copyright 2008 Advanced Micro Devices, Inc. 3 + * Copyright 2008 Red Hat Inc. 4 + * Copyright 2009 Jerome Glisse. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the "Software"), 8 + * to deal in the Software without restriction, including without limitation 9 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 + * and/or sell copies of the Software, and to permit persons to whom the 11 + * Software is furnished to do so, subject to the following conditions: 12 + * 13 + * The above copyright notice and this permission notice shall be included in 14 + * all copies or substantial portions of the Software. 15 + * 16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 + * OTHER DEALINGS IN THE SOFTWARE. 
23 + * 24 + * Authors: Dave Airlie 25 + * Alex Deucher 26 + * Jerome Glisse 27 + */ 28 + #ifndef __AMDGPU_H__ 29 + #define __AMDGPU_H__ 30 + 31 + #include <linux/atomic.h> 32 + #include <linux/wait.h> 33 + #include <linux/list.h> 34 + #include <linux/kref.h> 35 + #include <linux/interval_tree.h> 36 + #include <linux/hashtable.h> 37 + #include <linux/fence.h> 38 + 39 + #include <ttm/ttm_bo_api.h> 40 + #include <ttm/ttm_bo_driver.h> 41 + #include <ttm/ttm_placement.h> 42 + #include <ttm/ttm_module.h> 43 + #include <ttm/ttm_execbuf_util.h> 44 + 45 + #include <drm/drm_gem.h> 46 + 47 + #include "amdgpu_family.h" 48 + #include "amdgpu_mode.h" 49 + #include "amdgpu_ih.h" 50 + #include "amdgpu_irq.h" 51 + #include "amdgpu_ucode.h" 52 + #include "amdgpu_gds.h" 53 + 54 + /* 55 + * Modules parameters. 56 + */ 57 + extern int amdgpu_modeset; 58 + extern int amdgpu_vram_limit; 59 + extern int amdgpu_gart_size; 60 + extern int amdgpu_benchmarking; 61 + extern int amdgpu_testing; 62 + extern int amdgpu_audio; 63 + extern int amdgpu_disp_priority; 64 + extern int amdgpu_hw_i2c; 65 + extern int amdgpu_pcie_gen2; 66 + extern int amdgpu_msi; 67 + extern int amdgpu_lockup_timeout; 68 + extern int amdgpu_dpm; 69 + extern int amdgpu_smc_load_fw; 70 + extern int amdgpu_aspm; 71 + extern int amdgpu_runtime_pm; 72 + extern int amdgpu_hard_reset; 73 + extern unsigned amdgpu_ip_block_mask; 74 + extern int amdgpu_bapm; 75 + extern int amdgpu_deep_color; 76 + extern int amdgpu_vm_size; 77 + extern int amdgpu_vm_block_size; 78 + 79 + #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 80 + #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) 81 + /* AMDGPU_IB_POOL_SIZE must be a power of 2 */ 82 + #define AMDGPU_IB_POOL_SIZE 16 83 + #define AMDGPU_DEBUGFS_MAX_COMPONENTS 32 84 + #define AMDGPUFB_CONN_LIMIT 4 85 + #define AMDGPU_BIOS_NUM_SCRATCH 8 86 + 87 + /* fence seq are set to this number when signaled */ 88 + #define AMDGPU_FENCE_SIGNALED_SEQ 0LL 89 + 90 + /* max number of rings */ 91 + #define 
AMDGPU_MAX_RINGS 16 92 + #define AMDGPU_MAX_GFX_RINGS 1 93 + #define AMDGPU_MAX_COMPUTE_RINGS 8 94 + #define AMDGPU_MAX_VCE_RINGS 2 95 + 96 + /* number of hw syncs before falling back on blocking */ 97 + #define AMDGPU_NUM_SYNCS 4 98 + 99 + /* hardcode that limit for now */ 100 + #define AMDGPU_VA_RESERVED_SIZE (8 << 20) 101 + 102 + /* hard reset data */ 103 + #define AMDGPU_ASIC_RESET_DATA 0x39d5e86b 104 + 105 + /* reset flags */ 106 + #define AMDGPU_RESET_GFX (1 << 0) 107 + #define AMDGPU_RESET_COMPUTE (1 << 1) 108 + #define AMDGPU_RESET_DMA (1 << 2) 109 + #define AMDGPU_RESET_CP (1 << 3) 110 + #define AMDGPU_RESET_GRBM (1 << 4) 111 + #define AMDGPU_RESET_DMA1 (1 << 5) 112 + #define AMDGPU_RESET_RLC (1 << 6) 113 + #define AMDGPU_RESET_SEM (1 << 7) 114 + #define AMDGPU_RESET_IH (1 << 8) 115 + #define AMDGPU_RESET_VMC (1 << 9) 116 + #define AMDGPU_RESET_MC (1 << 10) 117 + #define AMDGPU_RESET_DISPLAY (1 << 11) 118 + #define AMDGPU_RESET_UVD (1 << 12) 119 + #define AMDGPU_RESET_VCE (1 << 13) 120 + #define AMDGPU_RESET_VCE1 (1 << 14) 121 + 122 + /* CG block flags */ 123 + #define AMDGPU_CG_BLOCK_GFX (1 << 0) 124 + #define AMDGPU_CG_BLOCK_MC (1 << 1) 125 + #define AMDGPU_CG_BLOCK_SDMA (1 << 2) 126 + #define AMDGPU_CG_BLOCK_UVD (1 << 3) 127 + #define AMDGPU_CG_BLOCK_VCE (1 << 4) 128 + #define AMDGPU_CG_BLOCK_HDP (1 << 5) 129 + #define AMDGPU_CG_BLOCK_BIF (1 << 6) 130 + 131 + /* CG flags */ 132 + #define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0) 133 + #define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1) 134 + #define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2) 135 + #define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3) 136 + #define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4) 137 + #define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5) 138 + #define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6) 139 + #define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7) 140 + #define AMDGPU_CG_SUPPORT_MC_LS (1 << 8) 141 + #define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9) 142 + #define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10) 143 + #define 
AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11) 144 + #define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12) 145 + #define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13) 146 + #define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14) 147 + #define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15) 148 + #define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16) 149 + 150 + /* PG flags */ 151 + #define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0) 152 + #define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1) 153 + #define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2) 154 + #define AMDGPU_PG_SUPPORT_UVD (1 << 3) 155 + #define AMDGPU_PG_SUPPORT_VCE (1 << 4) 156 + #define AMDGPU_PG_SUPPORT_CP (1 << 5) 157 + #define AMDGPU_PG_SUPPORT_GDS (1 << 6) 158 + #define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7) 159 + #define AMDGPU_PG_SUPPORT_SDMA (1 << 8) 160 + #define AMDGPU_PG_SUPPORT_ACP (1 << 9) 161 + #define AMDGPU_PG_SUPPORT_SAMU (1 << 10) 162 + 163 + /* GFX current status */ 164 + #define AMDGPU_GFX_NORMAL_MODE 0x00000000L 165 + #define AMDGPU_GFX_SAFE_MODE 0x00000001L 166 + #define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L 167 + #define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L 168 + #define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L 169 + 170 + /* max cursor sizes (in pixels) */ 171 + #define CIK_CURSOR_WIDTH 128 172 + #define CIK_CURSOR_HEIGHT 128 173 + 174 + struct amdgpu_device; 175 + struct amdgpu_fence; 176 + struct amdgpu_ib; 177 + struct amdgpu_vm; 178 + struct amdgpu_ring; 179 + struct amdgpu_semaphore; 180 + struct amdgpu_cs_parser; 181 + struct amdgpu_irq_src; 182 + 183 + enum amdgpu_cp_irq { 184 + AMDGPU_CP_IRQ_GFX_EOP = 0, 185 + AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP, 186 + AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP, 187 + AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP, 188 + AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP, 189 + AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP, 190 + AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP, 191 + AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP, 192 + AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP, 193 + 194 + AMDGPU_CP_IRQ_LAST 195 + }; 196 + 197 + enum amdgpu_sdma_irq { 198 + AMDGPU_SDMA_IRQ_TRAP0 = 0, 
199 + AMDGPU_SDMA_IRQ_TRAP1, 200 + 201 + AMDGPU_SDMA_IRQ_LAST 202 + }; 203 + 204 + enum amdgpu_thermal_irq { 205 + AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0, 206 + AMDGPU_THERMAL_IRQ_HIGH_TO_LOW, 207 + 208 + AMDGPU_THERMAL_IRQ_LAST 209 + }; 210 + 211 + /* 212 + * IP block functions 213 + */ 214 + enum amdgpu_ip_block_type { 215 + AMDGPU_IP_BLOCK_TYPE_COMMON, 216 + AMDGPU_IP_BLOCK_TYPE_GMC, 217 + AMDGPU_IP_BLOCK_TYPE_IH, 218 + AMDGPU_IP_BLOCK_TYPE_SMC, 219 + AMDGPU_IP_BLOCK_TYPE_DCE, 220 + AMDGPU_IP_BLOCK_TYPE_GFX, 221 + AMDGPU_IP_BLOCK_TYPE_SDMA, 222 + AMDGPU_IP_BLOCK_TYPE_UVD, 223 + AMDGPU_IP_BLOCK_TYPE_VCE, 224 + }; 225 + 226 + enum amdgpu_clockgating_state { 227 + AMDGPU_CG_STATE_GATE = 0, 228 + AMDGPU_CG_STATE_UNGATE, 229 + }; 230 + 231 + enum amdgpu_powergating_state { 232 + AMDGPU_PG_STATE_GATE = 0, 233 + AMDGPU_PG_STATE_UNGATE, 234 + }; 235 + 236 + struct amdgpu_ip_funcs { 237 + /* sets up early driver state (pre sw_init), does not configure hw - Optional */ 238 + int (*early_init)(struct amdgpu_device *adev); 239 + /* sets up late driver/hw state (post hw_init) - Optional */ 240 + int (*late_init)(struct amdgpu_device *adev); 241 + /* sets up driver state, does not configure hw */ 242 + int (*sw_init)(struct amdgpu_device *adev); 243 + /* tears down driver state, does not configure hw */ 244 + int (*sw_fini)(struct amdgpu_device *adev); 245 + /* sets up the hw state */ 246 + int (*hw_init)(struct amdgpu_device *adev); 247 + /* tears down the hw state */ 248 + int (*hw_fini)(struct amdgpu_device *adev); 249 + /* handles IP specific hw/sw changes for suspend */ 250 + int (*suspend)(struct amdgpu_device *adev); 251 + /* handles IP specific hw/sw changes for resume */ 252 + int (*resume)(struct amdgpu_device *adev); 253 + /* returns current IP block idle status */ 254 + bool (*is_idle)(struct amdgpu_device *adev); 255 + /* poll for idle */ 256 + int (*wait_for_idle)(struct amdgpu_device *adev); 257 + /* soft reset the IP block */ 258 + int (*soft_reset)(struct 
amdgpu_device *adev); 259 + /* dump the IP block status registers */ 260 + void (*print_status)(struct amdgpu_device *adev); 261 + /* enable/disable cg for the IP block */ 262 + int (*set_clockgating_state)(struct amdgpu_device *adev, 263 + enum amdgpu_clockgating_state state); 264 + /* enable/disable pg for the IP block */ 265 + int (*set_powergating_state)(struct amdgpu_device *adev, 266 + enum amdgpu_powergating_state state); 267 + }; 268 + 269 + int amdgpu_set_clockgating_state(struct amdgpu_device *adev, 270 + enum amdgpu_ip_block_type block_type, 271 + enum amdgpu_clockgating_state state); 272 + int amdgpu_set_powergating_state(struct amdgpu_device *adev, 273 + enum amdgpu_ip_block_type block_type, 274 + enum amdgpu_powergating_state state); 275 + 276 + struct amdgpu_ip_block_version { 277 + enum amdgpu_ip_block_type type; 278 + u32 major; 279 + u32 minor; 280 + u32 rev; 281 + const struct amdgpu_ip_funcs *funcs; 282 + }; 283 + 284 + int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, 285 + enum amdgpu_ip_block_type type, 286 + u32 major, u32 minor); 287 + 288 + const struct amdgpu_ip_block_version * amdgpu_get_ip_block( 289 + struct amdgpu_device *adev, 290 + enum amdgpu_ip_block_type type); 291 + 292 + /* provided by hw blocks that can move/clear data. 
e.g., gfx or sdma */ 293 + struct amdgpu_buffer_funcs { 294 + /* maximum bytes in a single operation */ 295 + uint32_t copy_max_bytes; 296 + 297 + /* number of dw to reserve per operation */ 298 + unsigned copy_num_dw; 299 + 300 + /* used for buffer migration */ 301 + void (*emit_copy_buffer)(struct amdgpu_ring *ring, 302 + /* src addr in bytes */ 303 + uint64_t src_offset, 304 + /* dst addr in bytes */ 305 + uint64_t dst_offset, 306 + /* number of byte to transfer */ 307 + uint32_t byte_count); 308 + 309 + /* maximum bytes in a single operation */ 310 + uint32_t fill_max_bytes; 311 + 312 + /* number of dw to reserve per operation */ 313 + unsigned fill_num_dw; 314 + 315 + /* used for buffer clearing */ 316 + void (*emit_fill_buffer)(struct amdgpu_ring *ring, 317 + /* value to write to memory */ 318 + uint32_t src_data, 319 + /* dst addr in bytes */ 320 + uint64_t dst_offset, 321 + /* number of byte to fill */ 322 + uint32_t byte_count); 323 + }; 324 + 325 + /* provided by hw blocks that can write ptes, e.g., sdma */ 326 + struct amdgpu_vm_pte_funcs { 327 + /* copy pte entries from GART */ 328 + void (*copy_pte)(struct amdgpu_ib *ib, 329 + uint64_t pe, uint64_t src, 330 + unsigned count); 331 + /* write pte one entry at a time with addr mapping */ 332 + void (*write_pte)(struct amdgpu_ib *ib, 333 + uint64_t pe, 334 + uint64_t addr, unsigned count, 335 + uint32_t incr, uint32_t flags); 336 + /* for linear pte/pde updates without addr mapping */ 337 + void (*set_pte_pde)(struct amdgpu_ib *ib, 338 + uint64_t pe, 339 + uint64_t addr, unsigned count, 340 + uint32_t incr, uint32_t flags); 341 + /* pad the indirect buffer to the necessary number of dw */ 342 + void (*pad_ib)(struct amdgpu_ib *ib); 343 + }; 344 + 345 + /* provided by the gmc block */ 346 + struct amdgpu_gart_funcs { 347 + /* flush the vm tlb via mmio */ 348 + void (*flush_gpu_tlb)(struct amdgpu_device *adev, 349 + uint32_t vmid); 350 + /* write pte/pde updates using the cpu */ 351 + int 
(*set_pte_pde)(struct amdgpu_device *adev, 352 + void *cpu_pt_addr, /* cpu addr of page table */ 353 + uint32_t gpu_page_idx, /* pte/pde to update */ 354 + uint64_t addr, /* addr to write into pte/pde */ 355 + uint32_t flags); /* access flags */ 356 + }; 357 + 358 + /* provided by the ih block */ 359 + struct amdgpu_ih_funcs { 360 + /* ring read/write ptr handling, called from interrupt context */ 361 + u32 (*get_wptr)(struct amdgpu_device *adev); 362 + void (*decode_iv)(struct amdgpu_device *adev, 363 + struct amdgpu_iv_entry *entry); 364 + void (*set_rptr)(struct amdgpu_device *adev); 365 + }; 366 + 367 + /* provided by hw blocks that expose a ring buffer for commands */ 368 + struct amdgpu_ring_funcs { 369 + /* ring read/write ptr handling */ 370 + u32 (*get_rptr)(struct amdgpu_ring *ring); 371 + u32 (*get_wptr)(struct amdgpu_ring *ring); 372 + void (*set_wptr)(struct amdgpu_ring *ring); 373 + /* validating and patching of IBs */ 374 + int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx); 375 + /* command emit functions */ 376 + void (*emit_ib)(struct amdgpu_ring *ring, 377 + struct amdgpu_ib *ib); 378 + void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, 379 + uint64_t seq, bool write64bit); 380 + bool (*emit_semaphore)(struct amdgpu_ring *ring, 381 + struct amdgpu_semaphore *semaphore, 382 + bool emit_wait); 383 + void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, 384 + uint64_t pd_addr); 385 + void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, 386 + uint32_t gds_base, uint32_t gds_size, 387 + uint32_t gws_base, uint32_t gws_size, 388 + uint32_t oa_base, uint32_t oa_size); 389 + /* testing functions */ 390 + int (*test_ring)(struct amdgpu_ring *ring); 391 + int (*test_ib)(struct amdgpu_ring *ring); 392 + bool (*is_lockup)(struct amdgpu_ring *ring); 393 + }; 394 + 395 + /* 396 + * BIOS. 
397 + */ 398 + bool amdgpu_get_bios(struct amdgpu_device *adev); 399 + bool amdgpu_read_bios(struct amdgpu_device *adev); 400 + 401 + /* 402 + * Dummy page 403 + */ 404 + struct amdgpu_dummy_page { 405 + struct page *page; 406 + dma_addr_t addr; 407 + }; 408 + int amdgpu_dummy_page_init(struct amdgpu_device *adev); 409 + void amdgpu_dummy_page_fini(struct amdgpu_device *adev); 410 + 411 + 412 + /* 413 + * Clocks 414 + */ 415 + 416 + #define AMDGPU_MAX_PPLL 3 417 + 418 + struct amdgpu_clock { 419 + struct amdgpu_pll ppll[AMDGPU_MAX_PPLL]; 420 + struct amdgpu_pll spll; 421 + struct amdgpu_pll mpll; 422 + /* 10 Khz units */ 423 + uint32_t default_mclk; 424 + uint32_t default_sclk; 425 + uint32_t default_dispclk; 426 + uint32_t current_dispclk; 427 + uint32_t dp_extclk; 428 + uint32_t max_pixel_clock; 429 + }; 430 + 431 + /* 432 + * Fences. 433 + */ 434 + struct amdgpu_fence_driver { 435 + struct amdgpu_ring *ring; 436 + uint64_t gpu_addr; 437 + volatile uint32_t *cpu_addr; 438 + /* sync_seq is protected by ring emission lock */ 439 + uint64_t sync_seq[AMDGPU_MAX_RINGS]; 440 + atomic64_t last_seq; 441 + bool initialized; 442 + bool delayed_irq; 443 + struct amdgpu_irq_src *irq_src; 444 + unsigned irq_type; 445 + struct delayed_work lockup_work; 446 + }; 447 + 448 + /* some special values for the owner field */ 449 + #define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul) 450 + #define AMDGPU_FENCE_OWNER_VM ((void*)1ul) 451 + #define AMDGPU_FENCE_OWNER_MOVE ((void*)2ul) 452 + 453 + struct amdgpu_fence { 454 + struct fence base; 455 + 456 + /* RB, DMA, etc. 
*/ 457 + struct amdgpu_ring *ring; 458 + uint64_t seq; 459 + 460 + /* filp or special value for fence creator */ 461 + void *owner; 462 + 463 + wait_queue_t fence_wake; 464 + }; 465 + 466 + struct amdgpu_user_fence { 467 + /* write-back bo */ 468 + struct amdgpu_bo *bo; 469 + /* write-back address offset to bo start */ 470 + uint32_t offset; 471 + }; 472 + 473 + int amdgpu_fence_driver_init(struct amdgpu_device *adev); 474 + void amdgpu_fence_driver_fini(struct amdgpu_device *adev); 475 + void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); 476 + 477 + void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); 478 + int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, 479 + struct amdgpu_irq_src *irq_src, 480 + unsigned irq_type); 481 + int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, 482 + struct amdgpu_fence **fence); 483 + void amdgpu_fence_process(struct amdgpu_ring *ring); 484 + int amdgpu_fence_wait_next(struct amdgpu_ring *ring); 485 + int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); 486 + unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); 487 + 488 + bool amdgpu_fence_signaled(struct amdgpu_fence *fence); 489 + int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible); 490 + int amdgpu_fence_wait_any(struct amdgpu_device *adev, 491 + struct amdgpu_fence **fences, 492 + bool intr); 493 + long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, 494 + u64 *target_seq, bool intr, 495 + long timeout); 496 + struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence); 497 + void amdgpu_fence_unref(struct amdgpu_fence **fence); 498 + 499 + bool amdgpu_fence_need_sync(struct amdgpu_fence *fence, 500 + struct amdgpu_ring *ring); 501 + void amdgpu_fence_note_sync(struct amdgpu_fence *fence, 502 + struct amdgpu_ring *ring); 503 + 504 + static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a, 505 + struct amdgpu_fence *b) 506 + { 507 + if (!a) { 508 + return b; 509 + 
} 510 + 511 + if (!b) { 512 + return a; 513 + } 514 + 515 + BUG_ON(a->ring != b->ring); 516 + 517 + if (a->seq > b->seq) { 518 + return a; 519 + } else { 520 + return b; 521 + } 522 + } 523 + 524 + static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a, 525 + struct amdgpu_fence *b) 526 + { 527 + if (!a) { 528 + return false; 529 + } 530 + 531 + if (!b) { 532 + return true; 533 + } 534 + 535 + BUG_ON(a->ring != b->ring); 536 + 537 + return a->seq < b->seq; 538 + } 539 + 540 + int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user, 541 + void *owner, struct amdgpu_fence **fence); 542 + 543 + /* 544 + * TTM. 545 + */ 546 + struct amdgpu_mman { 547 + struct ttm_bo_global_ref bo_global_ref; 548 + struct drm_global_reference mem_global_ref; 549 + struct ttm_bo_device bdev; 550 + bool mem_global_referenced; 551 + bool initialized; 552 + 553 + #if defined(CONFIG_DEBUG_FS) 554 + struct dentry *vram; 555 + struct dentry *gtt; 556 + #endif 557 + 558 + /* buffer handling */ 559 + const struct amdgpu_buffer_funcs *buffer_funcs; 560 + struct amdgpu_ring *buffer_funcs_ring; 561 + }; 562 + 563 + int amdgpu_copy_buffer(struct amdgpu_ring *ring, 564 + uint64_t src_offset, 565 + uint64_t dst_offset, 566 + uint32_t byte_count, 567 + struct reservation_object *resv, 568 + struct amdgpu_fence **fence); 569 + int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); 570 + 571 + struct amdgpu_bo_list_entry { 572 + struct amdgpu_bo *robj; 573 + struct ttm_validate_buffer tv; 574 + struct amdgpu_bo_va *bo_va; 575 + unsigned prefered_domains; 576 + unsigned allowed_domains; 577 + uint32_t priority; 578 + }; 579 + 580 + struct amdgpu_bo_va_mapping { 581 + struct list_head list; 582 + struct interval_tree_node it; 583 + uint64_t offset; 584 + uint32_t flags; 585 + }; 586 + 587 + /* bo virtual addresses in a specific vm */ 588 + struct amdgpu_bo_va { 589 + /* protected by bo being reserved */ 590 + struct list_head bo_list; 591 + uint64_t addr; 592 
+ struct amdgpu_fence *last_pt_update; 593 + unsigned ref_count; 594 + 595 + /* protected by vm mutex */ 596 + struct list_head mappings; 597 + struct list_head vm_status; 598 + 599 + /* constant after initialization */ 600 + struct amdgpu_vm *vm; 601 + struct amdgpu_bo *bo; 602 + }; 603 + 604 + struct amdgpu_bo { 605 + /* Protected by gem.mutex */ 606 + struct list_head list; 607 + /* Protected by tbo.reserved */ 608 + u32 initial_domain; 609 + struct ttm_place placements[4]; 610 + struct ttm_placement placement; 611 + struct ttm_buffer_object tbo; 612 + struct ttm_bo_kmap_obj kmap; 613 + u64 flags; 614 + unsigned pin_count; 615 + void *kptr; 616 + u64 tiling_flags; 617 + u64 metadata_flags; 618 + void *metadata; 619 + u32 metadata_size; 620 + /* list of all virtual address to which this bo 621 + * is associated to 622 + */ 623 + struct list_head va; 624 + /* Constant after initialization */ 625 + struct amdgpu_device *adev; 626 + struct drm_gem_object gem_base; 627 + 628 + struct ttm_bo_kmap_obj dma_buf_vmap; 629 + pid_t pid; 630 + struct amdgpu_mn *mn; 631 + struct list_head mn_list; 632 + }; 633 + #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) 634 + 635 + void amdgpu_gem_object_free(struct drm_gem_object *obj); 636 + int amdgpu_gem_object_open(struct drm_gem_object *obj, 637 + struct drm_file *file_priv); 638 + void amdgpu_gem_object_close(struct drm_gem_object *obj, 639 + struct drm_file *file_priv); 640 + unsigned long amdgpu_gem_timeout(uint64_t timeout_ns); 641 + struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); 642 + struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, 643 + struct dma_buf_attachment *attach, 644 + struct sg_table *sg); 645 + struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, 646 + struct drm_gem_object *gobj, 647 + int flags); 648 + int amdgpu_gem_prime_pin(struct drm_gem_object *obj); 649 + void amdgpu_gem_prime_unpin(struct drm_gem_object 
*obj); 650 + struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *); 651 + void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); 652 + void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 653 + int amdgpu_gem_debugfs_init(struct amdgpu_device *adev); 654 + 655 + /* sub-allocation manager, it has to be protected by another lock. 656 + * By design this is a helper for other parts of the driver, 657 + * like the indirect buffer or semaphore, which both have their 658 + * own locking. 659 + * 660 + * The principle is simple: we keep a list of sub-allocations in offset 661 + * order (first entry has offset == 0, last entry has the highest 662 + * offset). 663 + * 664 + * When allocating a new object we first check if there is room at 665 + * the end total_size - (last_object_offset + last_object_size) >= 666 + * alloc_size. If so, we allocate the new object there. 667 + * 668 + * When there is not enough room at the end, we start waiting for 669 + * each sub object until we reach object_offset+object_size >= 670 + * alloc_size; this object then becomes the sub-object we return. 671 + * 672 + * Alignment can't be bigger than page size. 673 + * 674 + * Holes are not considered for allocation to keep things simple. 675 + * Assumption is that there won't be holes (all objects use the same 676 + * alignment). 677 + */ 678 + struct amdgpu_sa_manager { 679 + wait_queue_head_t wq; 680 + struct amdgpu_bo *bo; 681 + struct list_head *hole; 682 + struct list_head flist[AMDGPU_MAX_RINGS]; 683 + struct list_head olist; 684 + unsigned size; 685 + uint64_t gpu_addr; 686 + void *cpu_ptr; 687 + uint32_t domain; 688 + uint32_t align; 689 + }; 690 + 691 + struct amdgpu_sa_bo; 692 + 693 + /* sub-allocation buffer */ 694 + struct amdgpu_sa_bo { 695 + struct list_head olist; 696 + struct list_head flist; 697 + struct amdgpu_sa_manager *manager; 698 + unsigned soffset; 699 + unsigned eoffset; 700 + struct amdgpu_fence *fence; 701 + }; 702 + 703 + /* 704 + * GEM objects. 
705 + */ 706 + struct amdgpu_gem { 707 + struct mutex mutex; 708 + struct list_head objects; 709 + }; 710 + 711 + int amdgpu_gem_init(struct amdgpu_device *adev); 712 + void amdgpu_gem_fini(struct amdgpu_device *adev); 713 + int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, 714 + int alignment, u32 initial_domain, 715 + u64 flags, bool kernel, 716 + struct drm_gem_object **obj); 717 + 718 + int amdgpu_mode_dumb_create(struct drm_file *file_priv, 719 + struct drm_device *dev, 720 + struct drm_mode_create_dumb *args); 721 + int amdgpu_mode_dumb_mmap(struct drm_file *filp, 722 + struct drm_device *dev, 723 + uint32_t handle, uint64_t *offset_p); 724 + 725 + /* 726 + * Semaphores. 727 + */ 728 + struct amdgpu_semaphore { 729 + struct amdgpu_sa_bo *sa_bo; 730 + signed waiters; 731 + uint64_t gpu_addr; 732 + }; 733 + 734 + int amdgpu_semaphore_create(struct amdgpu_device *adev, 735 + struct amdgpu_semaphore **semaphore); 736 + bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring, 737 + struct amdgpu_semaphore *semaphore); 738 + bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring, 739 + struct amdgpu_semaphore *semaphore); 740 + void amdgpu_semaphore_free(struct amdgpu_device *adev, 741 + struct amdgpu_semaphore **semaphore, 742 + struct amdgpu_fence *fence); 743 + 744 + /* 745 + * Synchronization 746 + */ 747 + struct amdgpu_sync { 748 + struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS]; 749 + struct amdgpu_fence *sync_to[AMDGPU_MAX_RINGS]; 750 + struct amdgpu_fence *last_vm_update; 751 + }; 752 + 753 + void amdgpu_sync_create(struct amdgpu_sync *sync); 754 + void amdgpu_sync_fence(struct amdgpu_sync *sync, 755 + struct amdgpu_fence *fence); 756 + int amdgpu_sync_resv(struct amdgpu_device *adev, 757 + struct amdgpu_sync *sync, 758 + struct reservation_object *resv, 759 + void *owner); 760 + int amdgpu_sync_rings(struct amdgpu_sync *sync, 761 + struct amdgpu_ring *ring); 762 + void amdgpu_sync_free(struct amdgpu_device *adev, 
struct amdgpu_sync *sync, 763 + struct amdgpu_fence *fence); 764 + 765 + /* 766 + * GART structures, functions & helpers 767 + */ 768 + struct amdgpu_mc; 769 + 770 + #define AMDGPU_GPU_PAGE_SIZE 4096 771 + #define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) 772 + #define AMDGPU_GPU_PAGE_SHIFT 12 773 + #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK) 774 + 775 + struct amdgpu_gart { 776 + dma_addr_t table_addr; 777 + struct amdgpu_bo *robj; 778 + void *ptr; 779 + unsigned num_gpu_pages; 780 + unsigned num_cpu_pages; 781 + unsigned table_size; 782 + struct page **pages; 783 + dma_addr_t *pages_addr; 784 + bool ready; 785 + const struct amdgpu_gart_funcs *gart_funcs; 786 + }; 787 + 788 + int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); 789 + void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); 790 + int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); 791 + void amdgpu_gart_table_vram_free(struct amdgpu_device *adev); 792 + int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); 793 + void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); 794 + int amdgpu_gart_init(struct amdgpu_device *adev); 795 + void amdgpu_gart_fini(struct amdgpu_device *adev); 796 + void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, 797 + int pages); 798 + int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, 799 + int pages, struct page **pagelist, 800 + dma_addr_t *dma_addr, uint32_t flags); 801 + 802 + /* 803 + * GPU MC structures, functions & helpers 804 + */ 805 + struct amdgpu_mc { 806 + resource_size_t aper_size; 807 + resource_size_t aper_base; 808 + resource_size_t agp_base; 809 + /* for some chips with <= 32MB we need to lie 810 + * about vram size near mc fb location */ 811 + u64 mc_vram_size; 812 + u64 visible_vram_size; 813 + u64 gtt_size; 814 + u64 gtt_start; 815 + u64 gtt_end; 816 + u64 vram_start; 817 + u64 vram_end; 818 + unsigned vram_width; 819 + u64 
real_vram_size; 820 + int vram_mtrr; 821 + u64 gtt_base_align; 822 + u64 mc_mask; 823 + const struct firmware *fw; /* MC firmware */ 824 + uint32_t fw_version; 825 + struct amdgpu_irq_src vm_fault; 826 + bool is_gddr5; 827 + }; 828 + 829 + /* 830 + * GPU doorbell structures, functions & helpers 831 + */ 832 + typedef enum _AMDGPU_DOORBELL_ASSIGNMENT 833 + { 834 + AMDGPU_DOORBELL_KIQ = 0x000, 835 + AMDGPU_DOORBELL_HIQ = 0x001, 836 + AMDGPU_DOORBELL_DIQ = 0x002, 837 + AMDGPU_DOORBELL_MEC_RING0 = 0x010, 838 + AMDGPU_DOORBELL_MEC_RING1 = 0x011, 839 + AMDGPU_DOORBELL_MEC_RING2 = 0x012, 840 + AMDGPU_DOORBELL_MEC_RING3 = 0x013, 841 + AMDGPU_DOORBELL_MEC_RING4 = 0x014, 842 + AMDGPU_DOORBELL_MEC_RING5 = 0x015, 843 + AMDGPU_DOORBELL_MEC_RING6 = 0x016, 844 + AMDGPU_DOORBELL_MEC_RING7 = 0x017, 845 + AMDGPU_DOORBELL_GFX_RING0 = 0x020, 846 + AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0, 847 + AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1, 848 + AMDGPU_DOORBELL_IH = 0x1E8, 849 + AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF, 850 + AMDGPU_DOORBELL_INVALID = 0xFFFF 851 + } AMDGPU_DOORBELL_ASSIGNMENT; 852 + 853 + struct amdgpu_doorbell { 854 + /* doorbell mmio */ 855 + resource_size_t base; 856 + resource_size_t size; 857 + u32 __iomem *ptr; 858 + u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */ 859 + }; 860 + 861 + void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, 862 + phys_addr_t *aperture_base, 863 + size_t *aperture_size, 864 + size_t *start_offset); 865 + 866 + /* 867 + * IRQS. 868 + */ 869 + 870 + struct amdgpu_flip_work { 871 + struct work_struct flip_work; 872 + struct work_struct unpin_work; 873 + struct amdgpu_device *adev; 874 + int crtc_id; 875 + uint64_t base; 876 + struct drm_pending_vblank_event *event; 877 + struct amdgpu_bo *old_rbo; 878 + struct fence *fence; 879 + }; 880 + 881 + 882 + /* 883 + * CP & rings. 
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo	*sa_bo;	/* sub-allocation holding the IB */
	uint32_t		length_dw;	/* IB length in dwords */
	uint64_t		gpu_addr;
	uint32_t		*ptr;	/* CPU pointer to the IB contents */
	struct amdgpu_ring	*ring;
	struct amdgpu_fence	*fence;	/* fence emitted when the IB completes */
	struct amdgpu_user_fence *user;
	struct amdgpu_vm	*vm;
	bool			is_const_ib;
	bool			flush_hdp_writefifo;
	struct amdgpu_sync	sync;
	bool			gds_needed;
	uint32_t		gds_base, gds_size;
	uint32_t		gws_base, gws_size;
	uint32_t		oa_base, oa_size;
};

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX,
	AMDGPU_RING_TYPE_COMPUTE,
	AMDGPU_RING_TYPE_SDMA,
	AMDGPU_RING_TYPE_UVD,
	AMDGPU_RING_TYPE_VCE
};

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;

	struct mutex		*ring_lock;
	struct amdgpu_bo	*ring_obj;	/* BO backing the ring buffer */
	volatile uint32_t	*ring;		/* CPU mapping of the ring buffer */
	unsigned		rptr_offs;
	u64			next_rptr_gpu_addr;
	volatile u32		*next_rptr_cpu_addr;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		ring_size;	/* ring buffer size in bytes */
	unsigned		ring_free_dw;	/* free space in dwords */
	int			count_dw;
	atomic_t		last_rptr;	/* rptr seen by the last lockup check */
	atomic64_t		last_activity;	/* jiffies of the last observed progress */
	uint64_t		gpu_addr;
	uint32_t		align_mask;
	uint32_t		ptr_mask;
	bool			ready;
	u32			nop;		/* nop packet used for padding */
	u32			idx;
	u64			last_semaphore_signal_addr;
	u64			last_semaphore_wait_addr;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	u32			doorbell_index;
	bool			use_doorbell;
	unsigned		wptr_offs;
	unsigned		next_rptr_offs;
	unsigned		fence_offs;
	struct drm_file		*current_filp;
	bool			need_ctx_switch;
	enum amdgpu_ring_type	type;
	char			name[16];
};

/*
 * VM
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM	16

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)

#define AMDGPU_PTE_VALID	(1 << 0)
#define AMDGPU_PTE_SYSTEM	(1 << 1)
#define AMDGPU_PTE_SNOOPED	(1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1 << 4)

#define AMDGPU_PTE_READABLE	(1 << 5)
#define AMDGPU_PTE_WRITEABLE	(1 << 6)

/* PTE (Page Table Entry) fragment field for different page sizes */
#define AMDGPU_PTE_FRAG_4KB	(0 << 7)
#define AMDGPU_PTE_FRAG_64KB	(4 << 7)
#define AMDGPU_LOG2_PAGES_PER_FRAG 4

struct amdgpu_vm_pt {
	struct amdgpu_bo	*bo;
	uint64_t		addr;
};

struct amdgpu_vm_id {
	unsigned		id;
	uint64_t		pd_gpu_addr;
	/* last flushed PD/PT update */
	struct amdgpu_fence	*flushed_updates;
	/* last use of vmid */
	struct amdgpu_fence	*last_id_use;
};

struct amdgpu_vm {
	struct mutex		mutex;

	struct rb_root		va;	/* interval tree of virtual address mappings */

	/* protecting invalidated and freed */
	spinlock_t		status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head	invalidated;

	/* BOs freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_bo	*page_directory;
	unsigned		max_pde_used;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt	*page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
};

struct amdgpu_vm_manager {
	struct amdgpu_fence		*active[AMDGPU_NUM_VM];
	uint32_t			max_pfn;
	/* number of VMIDs */
	unsigned			nvm;
	/* vram base address for page table entry */
	u64				vram_base_offset;
	/* is vm enabled? */
	bool				enabled;
	/* for hw to save the PD addr on suspend/resume */
	uint32_t			saved_table_addr[AMDGPU_NUM_VM];
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct amdgpu_ring			*vm_pte_funcs_ring;
};

/*
 * context related structures
 */

struct amdgpu_ctx_state {
	uint64_t	flags;
	uint64_t	hangs;
};

struct amdgpu_ctx {
	/* call kref_get() before CS start and kref_put() after CS fence signaled */
	struct kref		refcount;
	struct amdgpu_fpriv	*fpriv;
	struct amdgpu_ctx_state	state;
	uint32_t		id;
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device	*adev;
	struct idr		ctx_handles;
	/* lock for IDR system */
	struct mutex		hlock;
};

/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm	vm;
	struct mutex		bo_list_lock;
	struct idr		bo_list_handles;
	struct amdgpu_ctx_mgr	ctx_mgr;
};

/*
 * residency list
 */

struct amdgpu_bo_list {
	struct mutex lock;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	bool has_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo	*save_restore_obj;
	uint64_t		save_restore_gpu_addr;
	volatile uint32_t	*sr_ptr;
	const u32		*reg_list;
	u32			reg_list_size;
	/* for clear state */
	struct amdgpu_bo	*clear_state_obj;
	uint64_t		clear_state_gpu_addr;
	volatile uint32_t	*cs_ptr;
	const struct cs_section_def *cs_data;
	u32			clear_state_size;
	/* for cp tables */
	struct amdgpu_bo	*cp_table_obj;
	uint64_t		cp_table_gpu_addr;
	volatile uint32_t	*cp_table_ptr;
	u32			cp_table_size;
};

struct amdgpu_mec {
	struct amdgpu_bo	*hpd_eop_obj;
	u64			hpd_eop_gpu_addr;
	u32			num_pipe;
	u32			num_mec;
	u32			num_queue;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned		num_reg;
	uint32_t		reg_base;
	bool			free[32];	/* availability of each scratch register */
	uint32_t		reg[32];
};

/*
 * GFX configurations
 */
struct amdgpu_gca_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];
};

struct amdgpu_gfx {
	struct mutex			gpu_clock_mutex;
	struct amdgpu_gca_config	config;
	struct amdgpu_rlc		rlc;
	struct amdgpu_mec		mec;
	struct amdgpu_scratch		scratch;
	const struct firmware		*me_fw;	/* ME firmware */
	uint32_t			me_fw_version;
	const struct firmware		*pfp_fw; /* PFP firmware */
	uint32_t			pfp_fw_version;
	const struct firmware		*ce_fw;	/* CE firmware */
	uint32_t			ce_fw_version;
	const struct firmware		*rlc_fw; /* RLC firmware */
	uint32_t			rlc_fw_version;
	const struct firmware		*mec_fw; /* MEC firmware */
	uint32_t			mec_fw_version;
	const struct firmware		*mec2_fw; /* MEC2 firmware */
	uint32_t			mec2_fw_version;
	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned			num_gfx_rings;
	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned			num_compute_rings;
	struct amdgpu_irq_src		eop_irq;
	struct amdgpu_irq_src		priv_reg_irq;
	struct amdgpu_irq_src		priv_inst_irq;
	/* gfx status */
	uint32_t			gfx_current_status;
	/* sync signal for const engine */
	unsigned			ce_sync_offs;
};

int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		       struct amdgpu_ib *ib, void *owner);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
/* Ring access between begin & end cannot sleep */
void amdgpu_ring_free_size(struct amdgpu_ring *ring);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
void amdgpu_ring_lockup_update(struct amdgpu_ring *ring);
bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring);
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data);
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t		chunk_id;
	uint32_t		length_dw;
	uint32_t		*kdata;		/* kernel copy of the chunk data */
	void __user		*user_ptr;	/* original userspace pointer */
};

struct amdgpu_cs_parser {
	struct amdgpu_device	*adev;
	struct drm_file		*filp;
	uint32_t		ctx_id;
	struct amdgpu_bo_list	*bo_list;
	/* chunks */
	unsigned		nchunks;
	struct amdgpu_cs_chunk	*chunks;
	/* relocations */
	struct amdgpu_bo_list_entry	*vm_bos;
	struct amdgpu_bo_list_entry	*ib_bos;
	struct list_head	validated;

	struct amdgpu_ib	*ibs;
	uint32_t		num_ibs;

	struct ww_acquire_ctx	ticket;

	/* user fence */
	struct amdgpu_user_fence uf;
};

/* Fetch dword @idx from IB @ib_idx of a CS.
 * No bounds checking; the caller must ensure both indices are valid. */
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
{
	return p->ibs[ib_idx].ptr[idx];
}

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;
	volatile uint32_t	*wb;	/* CPU mapping of the writeback page */
	uint64_t		gpu_addr;
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu.
*/
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);

/**
 * struct amdgpu_pm - power management data
 * It keeps track of various data needed to take power management decisions.
 */

enum amdgpu_pm_state_type {
	/* not used for dpm */
	POWER_STATE_TYPE_DEFAULT,
	POWER_STATE_TYPE_POWERSAVE,
	/* user selectable states */
	POWER_STATE_TYPE_BATTERY,
	POWER_STATE_TYPE_BALANCED,
	POWER_STATE_TYPE_PERFORMANCE,
	/* internal states */
	POWER_STATE_TYPE_INTERNAL_UVD,
	POWER_STATE_TYPE_INTERNAL_UVD_SD,
	POWER_STATE_TYPE_INTERNAL_UVD_HD,
	POWER_STATE_TYPE_INTERNAL_UVD_HD2,
	POWER_STATE_TYPE_INTERNAL_UVD_MVC,
	POWER_STATE_TYPE_INTERNAL_BOOT,
	POWER_STATE_TYPE_INTERNAL_THERMAL,
	POWER_STATE_TYPE_INTERNAL_ACPI,
	POWER_STATE_TYPE_INTERNAL_ULV,
	POWER_STATE_TYPE_INTERNAL_3DPERF,
};

enum amdgpu_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_EXTERNAL,
	THERMAL_TYPE_EXTERNAL_GPIO,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
	THERMAL_TYPE_SI,
	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
	THERMAL_TYPE_CI,
	THERMAL_TYPE_KV,
};

enum amdgpu_dpm_auto_throttle_src {
	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
};

enum amdgpu_dpm_event_src {
	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	/* NOTE(review): "DIGIAL" is a typo for "DIGITAL"; the identifier is
	 * kept unchanged because existing code references this name. */
	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};

#define AMDGPU_MAX_VCE_LEVELS 6

enum amdgpu_vce_level {
	AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
	AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
	AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
	AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
	AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
	AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
};

struct amdgpu_ps {
	u32 caps; /* vbios flags */
	u32 class; /* vbios flags */
	u32 class2; /* vbios flags */
	/* UVD clocks */
	u32 vclk;
	u32 dclk;
	/* VCE clocks */
	u32 evclk;
	u32 ecclk;
	bool vce_active;
	enum amdgpu_vce_level vce_level;
	/* asic priv */
	void *ps_priv;
};

struct amdgpu_dpm_thermal {
	/* thermal interrupt work */
	struct work_struct work;
	/* low temperature threshold */
	int                min_temp;
	/* high temperature threshold */
	int                max_temp;
	/* was last interrupt low to high or high to low */
	bool               high_to_low;
	/* interrupt source */
	struct amdgpu_irq_src	irq;
};

enum amdgpu_clk_action
{
	AMDGPU_SCLK_UP = 1,
	AMDGPU_SCLK_DOWN
};

struct amdgpu_blacklist_clocks
{
	u32 sclk;
	u32 mclk;
	enum amdgpu_clk_action action;
};

struct amdgpu_clock_and_voltage_limits {
	u32 sclk;
	u32 mclk;
	u16 vddc;
	u16 vddci;
};

struct amdgpu_clock_array {
	u32 count;
	u32 *values;
};

struct amdgpu_clock_voltage_dependency_entry {
	u32 clk;
	u16 v;
};

struct amdgpu_clock_voltage_dependency_table {
	u32 count;
	struct amdgpu_clock_voltage_dependency_entry
*entries;
};

union amdgpu_cac_leakage_entry {
	struct {
		u16 vddc;
		u32 leakage;
	};
	struct {
		u16 vddc1;
		u16 vddc2;
		u16 vddc3;
	};
};

struct amdgpu_cac_leakage_table {
	u32 count;
	union amdgpu_cac_leakage_entry *entries;
};

struct amdgpu_phase_shedding_limits_entry {
	u16 voltage;
	u32 sclk;
	u32 mclk;
};

struct amdgpu_phase_shedding_limits_table {
	u32 count;
	struct amdgpu_phase_shedding_limits_entry *entries;
};

struct amdgpu_uvd_clock_voltage_dependency_entry {
	u32 vclk;
	u32 dclk;
	u16 v;
};

struct amdgpu_uvd_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
};

struct amdgpu_vce_clock_voltage_dependency_entry {
	u32 ecclk;
	u32 evclk;
	u16 v;
};

struct amdgpu_vce_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
};

struct amdgpu_ppm_table {
	u8 ppm_design;
	u16 cpu_core_number;
	u32 platform_tdp;
	u32 small_ac_platform_tdp;
	u32 platform_tdc;
	u32 small_ac_platform_tdc;
	u32 apu_tdp;
	u32 dgpu_tdp;
	u32 dgpu_ulv_power;
	u32 tj_max;
};

struct amdgpu_cac_tdp_table {
	u16 tdp;
	u16 configurable_tdp;
	u16 tdc;
	u16 battery_power_limit;
	u16 small_power_limit;
	u16 low_cac_leakage;
	u16 high_cac_leakage;
	u16 maximum_power_delivery_limit;
};

struct amdgpu_dpm_dynamic_state {
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
	struct amdgpu_clock_array valid_sclk_values;
	struct amdgpu_clock_array valid_mclk_values;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
	u32 mclk_sclk_ratio;
	u32 sclk_mclk_delta;
	u16 vddc_vddci_delta;
	u16 min_vddc_for_pcie_gen2;
	struct amdgpu_cac_leakage_table cac_leakage_table;
	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
	struct amdgpu_ppm_table *ppm_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table;
};

struct amdgpu_dpm_fan {
	u16 t_min;
	u16 t_med;
	u16 t_high;
	u16 pwm_min;
	u16 pwm_med;
	u16 pwm_high;
	u8 t_hyst;
	u32 cycle_delay;
	u16 t_max;
	u8 control_mode;
	u16 default_max_fan_pwm;
	u16 default_fan_output_sensitivity;
	u16 fan_output_sensitivity;
	bool ucode_fan_control;	/* true when the SMC firmware drives the fan */
};

enum amdgpu_pcie_gen {
	AMDGPU_PCIE_GEN1 = 0,
	AMDGPU_PCIE_GEN2 = 1,
	AMDGPU_PCIE_GEN3 = 2,
	AMDGPU_PCIE_GEN_INVALID = 0xffff
};

enum amdgpu_dpm_forced_level {
	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
};

struct amdgpu_vce_state {
	/* vce clocks */
	u32 evclk;
	u32 ecclk;
	/* gpu clocks */
	u32 sclk;
	u32 mclk;
	u8 clk_idx;
	u8 pstate;
};

struct amdgpu_dpm_funcs {
	int (*get_temperature)(struct amdgpu_device *adev);
	int (*pre_set_power_state)(struct amdgpu_device *adev);
	int (*set_power_state)(struct amdgpu_device *adev);
	void (*post_set_power_state)(struct amdgpu_device *adev);
	void (*display_configuration_changed)(struct amdgpu_device *adev);
	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
	bool (*vblank_too_short)(struct amdgpu_device *adev);
	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
};

struct amdgpu_dpm {
	struct amdgpu_ps        *ps;
	/* number of valid power states */
	int                     num_ps;
	/* current power state that is active */
	struct amdgpu_ps        *current_ps;
	/* requested power state */
	struct amdgpu_ps        *requested_ps;
	/* boot up power state */
	struct amdgpu_ps        *boot_ps;
	/* default uvd power state */
	struct amdgpu_ps        *uvd_ps;
	/* vce requirements */
	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
	enum amdgpu_vce_level vce_level;
	enum amdgpu_pm_state_type state;
	enum amdgpu_pm_state_type user_state;
	u32                     platform_caps;
	u32                     voltage_response_time;
	u32                     backbias_response_time;
	void                    *priv;
	u32                     new_active_crtcs;
	int                     new_active_crtc_count;
	u32                     current_active_crtcs;
	int                     current_active_crtc_count;
	struct amdgpu_dpm_dynamic_state dyn_state;
	struct amdgpu_dpm_fan fan;
	u32                     tdp_limit;
	u32                     near_tdp_limit;
	u32                     near_tdp_limit_adjusted;
	u32                     sq_ramping_threshold;
	u32                     cac_leakage;
	u16                     tdp_od_limit;
	u32                     tdp_adjustment;
	u16                     load_line_slope;
	bool                    power_control;
	bool                    ac_power;
	/* special states active */
	bool                    thermal_active;
	bool                    uvd_active;
	bool                    vce_active;
	/* thermal handling */
	struct amdgpu_dpm_thermal thermal;
	/* forced levels */
	enum amdgpu_dpm_forced_level forced_level;
};

struct amdgpu_pm {
	struct mutex		mutex;
	/* write locked while reprogramming mclk */
	struct rw_semaphore	mclk_lock;
	u32			current_sclk;
	u32			current_mclk;
	u32			default_sclk;
	u32			default_mclk;
	struct amdgpu_i2c_chan	*i2c_bus;
	/* internal thermal controller on rv6xx+ */
	enum amdgpu_int_thermal_type int_thermal_type;
	struct device		*int_hwmon_dev;
	/* fan control parameters */
	bool			no_fan;
	u8			fan_pulses_per_revolution;
	u8			fan_min_rpm;
	u8			fan_max_rpm;
	/* dpm */
	bool			dpm_enabled;
	struct amdgpu_dpm	dpm;
	const struct firmware	*fw;	/* SMC firmware */
	uint32_t		fw_version;
	const struct amdgpu_dpm_funcs *funcs;
};

/*
 * UVD
 */
#define AMDGPU_MAX_UVD_HANDLES	10
#define AMDGPU_UVD_STACK_SIZE	(1024*1024)
#define AMDGPU_UVD_HEAP_SIZE	(1024*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256

struct amdgpu_uvd {
	struct amdgpu_bo	*vcpu_bo;
	void			*cpu_addr;
	uint64_t		gpu_addr;
	void			*saved_bo;	/* CPU copy of VCPU BO across suspend */
	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
	struct delayed_work	idle_work;
	const struct firmware	*fw;	/* UVD firmware */
	struct amdgpu_ring	ring;
	struct amdgpu_irq_src	irq;
	bool			address_64_bit;
};

/*
 * VCE
 */
#define AMDGPU_MAX_VCE_HANDLES	16
#define AMDGPU_VCE_STACK_SIZE	(1024*1024)
#define AMDGPU_VCE_HEAP_SIZE	(4*1024*1024)
#define AMDGPU_VCE_FIRMWARE_OFFSET 256

struct amdgpu_vce {
	struct amdgpu_bo	*vcpu_bo;
	uint64_t		gpu_addr;
	unsigned		fw_version;
	unsigned		fb_version;
	atomic_t		handles[AMDGPU_MAX_VCE_HANDLES];
	struct drm_file		*filp[AMDGPU_MAX_VCE_HANDLES];
	struct delayed_work	idle_work;
	const struct firmware	*fw;	/* VCE firmware */
	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
	struct amdgpu_irq_src	irq;
};

/*
 * SDMA
 */
struct amdgpu_sdma {
	/* SDMA firmware */
	const struct firmware	*fw;
	uint32_t		fw_version;

	struct amdgpu_ring	ring;
};

/*
 * Firmware
 */
struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	bool smu_load;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
};

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);


/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *cpA,
			   struct amdgpu_ring *cpB);
void amdgpu_test_syncing(struct
amdgpu_device *adev); 1719 + 1720 + /* 1721 + * MMU Notifier 1722 + */ 1723 + #if defined(CONFIG_MMU_NOTIFIER) 1724 + int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); 1725 + void amdgpu_mn_unregister(struct amdgpu_bo *bo); 1726 + #else 1727 + static int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) 1728 + { 1729 + return -ENODEV; 1730 + } 1731 + static void amdgpu_mn_unregister(struct amdgpu_bo *bo) {} 1732 + #endif 1733 + 1734 + /* 1735 + * Debugfs 1736 + */ 1737 + struct amdgpu_debugfs { 1738 + struct drm_info_list *files; 1739 + unsigned num_files; 1740 + }; 1741 + 1742 + int amdgpu_debugfs_add_files(struct amdgpu_device *adev, 1743 + struct drm_info_list *files, 1744 + unsigned nfiles); 1745 + int amdgpu_debugfs_fence_init(struct amdgpu_device *adev); 1746 + 1747 + #if defined(CONFIG_DEBUG_FS) 1748 + int amdgpu_debugfs_init(struct drm_minor *minor); 1749 + void amdgpu_debugfs_cleanup(struct drm_minor *minor); 1750 + #endif 1751 + 1752 + /* 1753 + * amdgpu smumgr functions 1754 + */ 1755 + struct amdgpu_smumgr_funcs { 1756 + int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype); 1757 + int (*request_smu_load_fw)(struct amdgpu_device *adev); 1758 + int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype); 1759 + }; 1760 + 1761 + /* 1762 + * amdgpu smumgr 1763 + */ 1764 + struct amdgpu_smumgr { 1765 + struct amdgpu_bo *toc_buf; 1766 + struct amdgpu_bo *smu_buf; 1767 + /* asic priv smu data */ 1768 + void *priv; 1769 + spinlock_t smu_lock; 1770 + /* smumgr functions */ 1771 + const struct amdgpu_smumgr_funcs *smumgr_funcs; 1772 + /* ucode loading complete flag */ 1773 + uint32_t fw_flags; 1774 + }; 1775 + 1776 + /* 1777 + * ASIC specific register table accessible by UMD 1778 + */ 1779 + struct amdgpu_allowed_register_entry { 1780 + uint32_t reg_offset; 1781 + bool untouched; 1782 + bool grbm_indexed; 1783 + }; 1784 + 1785 + struct amdgpu_cu_info { 1786 + uint32_t number; /* total active CU number 
 */
	uint32_t ao_cu_mask;
	uint32_t bitmap[4][4];
};


/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* wait for mc_idle */
	int (*wait_for_mc_idle)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
};

/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo		*robj;
	volatile uint32_t		*ptr;
	u64				gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

/* ATIF notification events the SBIOS says it will send. */
struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

/* ATIF functions the SBIOS implements. */
struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/* Command submission context management. */
int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		     uint32_t *id, uint32_t flags);
int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		    uint32_t id);
int amdgpu_ctx_query(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		     uint32_t id, struct amdgpu_ctx_state *state);

void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);

extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

/* Per-device state for one amdgpu GPU; the core structure of the driver. */
struct amdgpu_device {
	struct device			*dev;
	struct drm_device		*ddev;
	struct pci_dev			*pdev;
	struct rw_semaphore		exclusive_lock;

	/* ASIC */
	enum amdgpu_asic_type		asic_type;
	uint32_t			family;
	uint32_t			rev_id;
	uint32_t			external_rev_id;
	unsigned long			flags;
	int				usec_timeout;
	const struct amdgpu_asic_funcs	*asic_funcs;
	bool				shutdown;
	bool				suspend;
	bool				need_dma32;
	bool				accel_working;
	bool				needs_reset;
	struct work_struct		reset_work;
	struct notifier_block		acpi_nb;
	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned			debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry			*debugfs_regs;
#endif
	struct amdgpu_atif		atif;
	struct amdgpu_atcs		atcs;
	struct mutex			srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex			grbm_idx_mutex;
	struct dev_pm_domain		vga_pm_domain;
	bool				have_disp_power_ref;

	/* BIOS */
	uint8_t				*bios;
	bool				is_atom_bios;
	uint16_t			bios_header_start;
	/* NOTE(review): "stollen" is presumably a typo of "stolen";
	 * kept as-is since renaming would touch every user. */
	struct amdgpu_bo		*stollen_vga_memory;
	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	void __iomem			*rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t			mmio_idx_lock;
	/* protects concurrent SMC based register access */
	spinlock_t			smc_idx_lock;
	amdgpu_rreg_t			smc_rreg;
	amdgpu_wreg_t			smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t			pcie_idx_lock;
	amdgpu_rreg_t			pcie_rreg;
	amdgpu_wreg_t			pcie_wreg;
	/* protects concurrent UVD register access */
	spinlock_t			uvd_ctx_idx_lock;
	amdgpu_rreg_t			uvd_ctx_rreg;
	amdgpu_wreg_t			uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t			didt_idx_lock;
	amdgpu_rreg_t			didt_rreg;
	amdgpu_wreg_t			didt_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t			audio_endpt_idx_lock;
	amdgpu_block_rreg_t		audio_endpt_rreg;
	amdgpu_block_wreg_t		audio_endpt_wreg;
	void __iomem			*rio_mem;
	resource_size_t			rio_mem_size;
	struct amdgpu_doorbell		doorbell;

	/* clock/pll info */
	struct amdgpu_clock		clock;

	/* MC */
	struct amdgpu_mc		mc;
	struct amdgpu_gart		gart;
	struct amdgpu_dummy_page	dummy_page;
	struct amdgpu_vm_manager	vm_manager;

	/* memory management */
	struct amdgpu_mman		mman;
	struct amdgpu_gem		gem;
	struct amdgpu_vram_scratch	vram_scratch;
	struct amdgpu_wb		wb;
	atomic64_t			vram_usage;
	atomic64_t			vram_vis_usage;
	atomic64_t			gtt_usage;
	atomic64_t			num_bytes_moved;

	/* display */
	struct amdgpu_mode_info		mode_info;
	struct work_struct		hotplug_work;
	struct amdgpu_irq_src		crtc_irq;
	struct amdgpu_irq_src		pageflip_irq;
	struct amdgpu_irq_src		hpd_irq;

	/* rings */
	wait_queue_head_t		fence_queue;
	unsigned			fence_context;
	struct mutex			ring_lock;
	unsigned			num_rings;
	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
	bool				ib_pool_ready;
	struct amdgpu_sa_manager	ring_tmp_bo;

	/* interrupts */
	struct amdgpu_irq		irq;

	/* dpm */
	struct amdgpu_pm		pm;
	u32				cg_flags;
	u32				pg_flags;

	/* amdgpu smumgr */
	struct amdgpu_smumgr		smu;

	/* gfx */
	struct amdgpu_gfx		gfx;

	/* sdma */
	struct amdgpu_sdma		sdma[2];
	struct amdgpu_irq_src		sdma_trap_irq;
	struct amdgpu_irq_src		sdma_illegal_inst_irq;

	/* uvd */
	bool				has_uvd;
	struct amdgpu_uvd		uvd;

	/* vce */
	struct amdgpu_vce		vce;

	/* firmwares */
	struct amdgpu_firmware		firmware;

	/* GDS */
	struct amdgpu_gds		gds;

	const struct amdgpu_ip_block_version *ip_blocks;
	int				num_ip_blocks;
	bool				*ip_block_enabled;
	struct mutex			mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	u64				vram_pin_size;
	u64				gart_pin_size;
};

bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool
always_indirect); 2078 + void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, 2079 + bool always_indirect); 2080 + u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg); 2081 + void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v); 2082 + 2083 + u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index); 2084 + void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); 2085 + 2086 + /* 2087 + * Cast helper 2088 + */ 2089 + extern const struct fence_ops amdgpu_fence_ops; 2090 + static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f) 2091 + { 2092 + struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); 2093 + 2094 + if (__f->base.ops == &amdgpu_fence_ops) 2095 + return __f; 2096 + 2097 + return NULL; 2098 + } 2099 + 2100 + /* 2101 + * Registers read & write functions. 2102 + */ 2103 + #define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false) 2104 + #define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true) 2105 + #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false)) 2106 + #define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false) 2107 + #define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true) 2108 + #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 2109 + #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 2110 + #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg)) 2111 + #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v)) 2112 + #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg)) 2113 + #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v)) 2114 + #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg)) 2115 + #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v)) 2116 + #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg)) 2117 + #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v)) 2118 + #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, 
(block), (reg)) 2119 + #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v)) 2120 + #define WREG32_P(reg, val, mask) \ 2121 + do { \ 2122 + uint32_t tmp_ = RREG32(reg); \ 2123 + tmp_ &= (mask); \ 2124 + tmp_ |= ((val) & ~(mask)); \ 2125 + WREG32(reg, tmp_); \ 2126 + } while (0) 2127 + #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) 2128 + #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) 2129 + #define WREG32_PLL_P(reg, val, mask) \ 2130 + do { \ 2131 + uint32_t tmp_ = RREG32_PLL(reg); \ 2132 + tmp_ &= (mask); \ 2133 + tmp_ |= ((val) & ~(mask)); \ 2134 + WREG32_PLL(reg, tmp_); \ 2135 + } while (0) 2136 + #define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false)) 2137 + #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg)) 2138 + #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v)) 2139 + 2140 + #define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index)) 2141 + #define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v)) 2142 + 2143 + #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT 2144 + #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK 2145 + 2146 + #define REG_SET_FIELD(orig_val, reg, field, field_val) \ 2147 + (((orig_val) & ~REG_FIELD_MASK(reg, field)) | \ 2148 + (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field)))) 2149 + 2150 + #define REG_GET_FIELD(value, reg, field) \ 2151 + (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field)) 2152 + 2153 + /* 2154 + * BIOS helpers. 2155 + */ 2156 + #define RBIOS8(i) (adev->bios[i]) 2157 + #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) 2158 + #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) 2159 + 2160 + /* 2161 + * RING helpers. 
2162 + */ 2163 + static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) 2164 + { 2165 + if (ring->count_dw <= 0) 2166 + DRM_ERROR("radeon: writing more dwords to the ring than expected!\n"); 2167 + ring->ring[ring->wptr++] = v; 2168 + ring->wptr &= ring->ptr_mask; 2169 + ring->count_dw--; 2170 + ring->ring_free_dw--; 2171 + } 2172 + 2173 + /* 2174 + * ASICs macro. 2175 + */ 2176 + #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) 2177 + #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) 2178 + #define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev)) 2179 + #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) 2180 + #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) 2181 + #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) 2182 + #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) 2183 + #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) 2184 + #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) 2185 + #define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info)) 2186 + #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) 2187 + #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) 2188 + #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) 2189 + #define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags))) 2190 + #define amdgpu_vm_set_pte_pde(adev, 
ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) 2191 + #define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib))) 2192 + #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) 2193 + #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) 2194 + #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) 2195 + #define amdgpu_ring_is_lockup(r) (r)->funcs->is_lockup((r)) 2196 + #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) 2197 + #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) 2198 + #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) 2199 + #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib)) 2200 + #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) 2201 + #define amdgpu_ring_emit_fence(r, addr, seq, write64bit) (r)->funcs->emit_fence((r), (addr), (seq), (write64bit)) 2202 + #define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait)) 2203 + #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) 2204 + #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) 2205 + #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) 2206 + #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) 2207 + #define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r)) 2208 + #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) 2209 + #define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc)) 2210 + #define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev)) 2211 + #define 
amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) 2212 + #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e)) 2213 + #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h)) 2214 + #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h)) 2215 + #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev)) 2216 + #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev)) 2217 + #define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base)) 2218 + #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) 2219 + #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) 2220 + #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) 2221 + #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s)) 2222 + #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) 2223 + #define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b)) 2224 + #define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b)) 2225 + #define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev)) 2226 + #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) 2227 + #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) 2228 + #define amdgpu_dpm_post_set_power_state(adev) 
(adev)->pm.funcs->post_set_power_state((adev)) 2229 + #define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev)) 2230 + #define amdgpu_dpm_get_sclk(adev, l) (adev)->pm.funcs->get_sclk((adev), (l)) 2231 + #define amdgpu_dpm_get_mclk(adev, l) (adev)->pm.funcs->get_mclk((adev), (l)) 2232 + #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) 2233 + #define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)) 2234 + #define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l)) 2235 + #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) 2236 + #define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g)) 2237 + #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) 2238 + #define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m)) 2239 + #define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev)) 2240 + #define amdgpu_dpm_set_fan_speed_percent(adev, s) (adev)->pm.funcs->set_fan_speed_percent((adev), (s)) 2241 + #define amdgpu_dpm_get_fan_speed_percent(adev, s) (adev)->pm.funcs->get_fan_speed_percent((adev), (s)) 2242 + 2243 + #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) 2244 + 2245 + /* Common functions */ 2246 + int amdgpu_gpu_reset(struct amdgpu_device *adev); 2247 + void amdgpu_pci_config_reset(struct amdgpu_device *adev); 2248 + bool amdgpu_card_posted(struct amdgpu_device *adev); 2249 + void amdgpu_update_display_priority(struct amdgpu_device *adev); 2250 + bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); 2251 + int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); 2252 + int amdgpu_cs_get_ring(struct 
amdgpu_device *adev, u32 ip_type, 2253 + u32 ip_instance, u32 ring, 2254 + struct amdgpu_ring **out_ring); 2255 + void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain); 2256 + bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); 2257 + int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, 2258 + uint32_t flags); 2259 + bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); 2260 + bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); 2261 + uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, 2262 + struct ttm_mem_reg *mem); 2263 + void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); 2264 + void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); 2265 + void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); 2266 + void amdgpu_program_register_sequence(struct amdgpu_device *adev, 2267 + const u32 *registers, 2268 + const u32 array_size); 2269 + 2270 + bool amdgpu_device_is_px(struct drm_device *dev); 2271 + /* atpx handler */ 2272 + #if defined(CONFIG_VGA_SWITCHEROO) 2273 + void amdgpu_register_atpx_handler(void); 2274 + void amdgpu_unregister_atpx_handler(void); 2275 + #else 2276 + static inline void amdgpu_register_atpx_handler(void) {} 2277 + static inline void amdgpu_unregister_atpx_handler(void) {} 2278 + #endif 2279 + 2280 + /* 2281 + * KMS 2282 + */ 2283 + extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; 2284 + extern int amdgpu_max_kms_ioctl; 2285 + 2286 + int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags); 2287 + int amdgpu_driver_unload_kms(struct drm_device *dev); 2288 + void amdgpu_driver_lastclose_kms(struct drm_device *dev); 2289 + int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); 2290 + void amdgpu_driver_postclose_kms(struct drm_device *dev, 2291 + struct drm_file *file_priv); 2292 + void amdgpu_driver_preclose_kms(struct drm_device *dev, 2293 + struct 
drm_file *file_priv); 2294 + int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon); 2295 + int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon); 2296 + u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc); 2297 + int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc); 2298 + void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc); 2299 + int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, 2300 + int *max_error, 2301 + struct timeval *vblank_time, 2302 + unsigned flags); 2303 + long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, 2304 + unsigned long arg); 2305 + 2306 + /* 2307 + * vm 2308 + */ 2309 + int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); 2310 + void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); 2311 + struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, 2312 + struct amdgpu_vm *vm, 2313 + struct list_head *head); 2314 + struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, 2315 + struct amdgpu_vm *vm); 2316 + void amdgpu_vm_flush(struct amdgpu_ring *ring, 2317 + struct amdgpu_vm *vm, 2318 + struct amdgpu_fence *updates); 2319 + void amdgpu_vm_fence(struct amdgpu_device *adev, 2320 + struct amdgpu_vm *vm, 2321 + struct amdgpu_fence *fence); 2322 + uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); 2323 + int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, 2324 + struct amdgpu_vm *vm); 2325 + int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 2326 + struct amdgpu_vm *vm); 2327 + int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, 2328 + struct amdgpu_vm *vm); 2329 + int amdgpu_vm_bo_update(struct amdgpu_device *adev, 2330 + struct amdgpu_bo_va *bo_va, 2331 + struct ttm_mem_reg *mem); 2332 + void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 2333 + struct amdgpu_bo *bo); 2334 + struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, 
2335 + struct amdgpu_bo *bo); 2336 + struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, 2337 + struct amdgpu_vm *vm, 2338 + struct amdgpu_bo *bo); 2339 + int amdgpu_vm_bo_map(struct amdgpu_device *adev, 2340 + struct amdgpu_bo_va *bo_va, 2341 + uint64_t addr, uint64_t offset, 2342 + uint64_t size, uint32_t flags); 2343 + int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 2344 + struct amdgpu_bo_va *bo_va, 2345 + uint64_t addr); 2346 + void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, 2347 + struct amdgpu_bo_va *bo_va); 2348 + 2349 + /* 2350 + * functions used by amdgpu_encoder.c 2351 + */ 2352 + struct amdgpu_afmt_acr { 2353 + u32 clock; 2354 + 2355 + int n_32khz; 2356 + int cts_32khz; 2357 + 2358 + int n_44_1khz; 2359 + int cts_44_1khz; 2360 + 2361 + int n_48khz; 2362 + int cts_48khz; 2363 + 2364 + }; 2365 + 2366 + struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock); 2367 + 2368 + /* amdgpu_acpi.c */ 2369 + #if defined(CONFIG_ACPI) 2370 + int amdgpu_acpi_init(struct amdgpu_device *adev); 2371 + void amdgpu_acpi_fini(struct amdgpu_device *adev); 2372 + bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev); 2373 + int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, 2374 + u8 perf_req, bool advertise); 2375 + int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); 2376 + #else 2377 + static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } 2378 + static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } 2379 + #endif 2380 + 2381 + struct amdgpu_bo_va_mapping * 2382 + amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, 2383 + uint64_t addr, struct amdgpu_bo **bo); 2384 + 2385 + #include "amdgpu_object.h" 2386 + 2387 + #endif