Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.1-rc3, 1160 lines, 36 kB
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include "amdgpu_ctx.h"

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>

#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
#include "kgd_pp_interface.h"

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"

#define MAX_GPU_INSTANCE 16

struct amdgpu_gpu_instance
{
        struct amdgpu_device *adev;
        int mgpu_fan_enabled;
};

struct amdgpu_mgpu_info
{
        struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE];
        struct mutex mutex;
        uint32_t num_gpu;
        uint32_t num_dgpu;
        uint32_t num_apu;
};

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern uint amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
extern int amdgpu_ngg;
extern int amdgpu_prim_buf_per_se;
extern int amdgpu_pos_buf_per_se;
extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern uint amdgpu_dc_feature_mask;
extern struct amdgpu_mgpu_info mgpu_info;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif

#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 16

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX (1 << 0)
#define AMDGPU_RESET_COMPUTE (1 << 1)
#define AMDGPU_RESET_DMA (1 << 2)
#define AMDGPU_RESET_CP (1 << 3)
#define AMDGPU_RESET_GRBM (1 << 4)
#define AMDGPU_RESET_DMA1 (1 << 5)
#define AMDGPU_RESET_RLC (1 << 6)
#define AMDGPU_RESET_SEM (1 << 7)
#define AMDGPU_RESET_IH (1 << 8)
#define AMDGPU_RESET_VMC (1 << 9)
#define AMDGPU_RESET_MC (1 << 10)
#define AMDGPU_RESET_DISPLAY (1 << 11)
#define AMDGPU_RESET_UVD (1 << 12)
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct amdgpu_atif;

enum amdgpu_cp_irq {
        AMDGPU_CP_IRQ_GFX_EOP = 0,
        AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

        AMDGPU_CP_IRQ_LAST
};

enum amdgpu_thermal_irq {
        AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
        AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

        AMDGPU_THERMAL_IRQ_LAST
};

enum amdgpu_kiq_irq {
        AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
        AMDGPU_CP_KIQ_IRQ_LAST
};

#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */

int amdgpu_device_ip_set_clockgating_state(void *dev,
                enum amd_ip_block_type block_type,
                enum amd_clockgating_state state);
int amdgpu_device_ip_set_powergating_state(void *dev,
                enum amd_ip_block_type block_type,
                enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
                u32 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
                enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
                enum amd_ip_block_type block_type);

#define AMDGPU_MAX_IP_NUM 16

struct amdgpu_ip_block_status {
        bool valid;
        bool sw;
        bool hw;
        bool late_initialized;
        bool hang;
};

struct amdgpu_ip_block_version {
        const enum amd_ip_block_type type;
        const u32 major;
        const u32 minor;
        const u32 rev;
        const struct amd_ip_funcs *funcs;
};

struct amdgpu_ip_block {
        struct amdgpu_ip_block_status status;
        const struct amdgpu_ip_block_version *version;
};

int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
                enum amd_ip_block_type type,
                u32 major, u32 minor);

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
                enum amd_ip_block_type type);

int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
                const struct amdgpu_ip_block_version *ip_block_version);

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
        struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
        struct amdgpu_pll spll;
        struct amdgpu_pll mpll;
        /* 10 Khz units */
        uint32_t default_mclk;
        uint32_t default_sclk;
        uint32_t default_dispclk;
        uint32_t current_dispclk;
        uint32_t dp_extclk;
        uint32_t max_pixel_clock;
};

/* Sub-allocation manager. It has to be protected by another lock;
 * by design it is a helper for other parts of the driver, such as the
 * indirect buffer or semaphore, which both have their own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check whether there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting on each
 * sub-object until we reach object_offset + object_size >= alloc_size;
 * that object then becomes the sub-object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation, to keep things simple.
 * The assumption is that there won't be holes (all objects use the same
 * alignment).
 */

#define AMDGPU_SA_NUM_FENCE_LISTS 32

struct amdgpu_sa_manager {
        wait_queue_head_t wq;
        struct amdgpu_bo *bo;
        struct list_head *hole;
        struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
        struct list_head olist;
        unsigned size;
        uint64_t gpu_addr;
        void *cpu_ptr;
        uint32_t domain;
        uint32_t align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
        struct list_head olist;
        struct list_head flist;
        struct amdgpu_sa_manager *manager;
        unsigned soffset;
        unsigned eoffset;
        struct dma_fence *fence;
};
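
/* Illustrative sketch of the policy described above (simplified, not the
 * driver's actual allocator code; "sa" stands for an amdgpu_sa_manager and
 * "last" for the amdgpu_sa_bo with the highest offset):
 *
 *        hole_start = list_empty(&sa->olist) ? 0 : last->eoffset;
 *        if (sa->size - hole_start >= wanted_size)
 *                place the new amdgpu_sa_bo at hole_start;
 *        else
 *                wait on the fences in sa->flist[] until enough space is
 *                released at the tail, then retry;
 */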

int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
        struct delayed_work flip_work;
        struct work_struct unpin_work;
        struct amdgpu_device *adev;
        int crtc_id;
        u32 target_vblank;
        uint64_t base;
        struct drm_pending_vblank_event *event;
        struct amdgpu_bo *old_abo;
        struct dma_fence *excl;
        unsigned shared_count;
        struct dma_fence **shared;
        struct dma_fence_cb cb;
        bool async;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
        struct amdgpu_sa_bo *sa_bo;
        uint32_t length_dw;
        uint64_t gpu_addr;
        uint32_t *ptr;
        uint32_t flags;
};

extern const struct drm_sched_backend_ops amdgpu_sched_ops;

/*
 * file private structure
 */

struct amdgpu_fpriv {
        struct amdgpu_vm vm;
        struct amdgpu_bo_va *prt_va;
        struct amdgpu_bo_va *csa_va;
        struct mutex bo_list_lock;
        struct idr bo_list_handles;
        struct amdgpu_ctx_mgr ctx_mgr;
};

int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
                struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                struct amdgpu_ib *ibs, struct amdgpu_job *job,
                struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
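
/* Rough IB lifecycle sketch using the helpers above (simplified, error
 * handling omitted; the ring and packet contents are only placeholders):
 *
 *        struct amdgpu_ib ib;
 *        struct dma_fence *f = NULL;
 *
 *        amdgpu_ib_get(adev, NULL, 256, &ib);            // sub-allocate IB memory
 *        ib.ptr[0] = ...;                                // emit packets
 *        ib.length_dw = num_dw;
 *        amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);     // submit directly to a ring
 *        dma_fence_wait(f, false);                       // wait for completion
 *        amdgpu_ib_free(adev, &ib, f);
 *        dma_fence_put(f);
 */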

/*
 * CS.
 */
struct amdgpu_cs_chunk {
        uint32_t chunk_id;
        uint32_t length_dw;
        void *kdata;
};

struct amdgpu_cs_parser {
        struct amdgpu_device *adev;
        struct drm_file *filp;
        struct amdgpu_ctx *ctx;

        /* chunks */
        unsigned nchunks;
        struct amdgpu_cs_chunk *chunks;

        /* scheduler job object */
        struct amdgpu_job *job;
        struct drm_sched_entity *entity;

        /* buffer objects */
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_list *bo_list;
        struct amdgpu_mn *mn;
        struct amdgpu_bo_list_entry vm_pd;
        struct list_head validated;
        struct dma_fence *fence;
        uint64_t bytes_moved_threshold;
        uint64_t bytes_moved_vis_threshold;
        uint64_t bytes_moved;
        uint64_t bytes_moved_vis;
        struct amdgpu_bo_list_entry *evictable;

        /* user fence */
        struct amdgpu_bo_list_entry uf_entry;

        unsigned num_post_dep_syncobjs;
        struct drm_syncobj **post_dep_syncobjs;
};

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
                uint32_t ib_idx, int idx)
{
        return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
                uint32_t ib_idx, int idx,
                uint32_t value)
{
        p->job->ibs[ib_idx].ptr[idx] = value;
}

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
        struct amdgpu_bo *wb_obj;
        volatile uint32_t *wb;
        uint64_t gpu_addr;
        u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
        unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
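
/* Typical writeback usage sketch (simplified; the real users live in the
 * ring and fence code):
 *
 *        u32 wb_idx;
 *
 *        if (!amdgpu_device_wb_get(adev, &wb_idx)) {
 *                volatile uint32_t *cpu_ptr = &adev->wb.wb[wb_idx];
 *                uint64_t gpu_addr = adev->wb.gpu_addr + wb_idx * 4;
 *
 *                point the engine's writeback (rptr/fence/etc.) at gpu_addr,
 *                poll *cpu_ptr from the CPU side, and when done:
 *                amdgpu_device_wb_free(adev, wb_idx);
 *        }
 */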

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);


/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
        uint32_t reg_offset;
        bool grbm_indexed;
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
        bool (*read_disabled_bios)(struct amdgpu_device *adev);
        bool (*read_bios_from_rom)(struct amdgpu_device *adev,
                        u8 *bios, u32 length_bytes);
        int (*read_register)(struct amdgpu_device *adev, u32 se_num,
                        u32 sh_num, u32 reg_offset, u32 *value);
        void (*set_vga_state)(struct amdgpu_device *adev, bool state);
        int (*reset)(struct amdgpu_device *adev);
        /* get the reference clock */
        u32 (*get_xclk)(struct amdgpu_device *adev);
        /* MM block clocks */
        int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
        int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
        /* static power management */
        int (*get_pcie_lanes)(struct amdgpu_device *adev);
        void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
        /* get config memsize register */
        u32 (*get_config_memsize)(struct amdgpu_device *adev);
        /* flush hdp write queue */
        void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
        /* invalidate hdp read cache */
        void (*invalidate_hdp)(struct amdgpu_device *adev,
                        struct amdgpu_ring *ring);
        /* check if the asic needs a full reset or if soft reset will work */
        bool (*need_full_reset)(struct amdgpu_device *adev);
        /* initialize doorbell layout for specific asic */
        void (*init_doorbell_index)(struct amdgpu_device *adev);
        /* PCIe bandwidth usage */
        void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
                        uint64_t *count1);
        /* do we need to reset the asic at init time (e.g., kexec) */
        bool (*need_reset_on_init)(struct amdgpu_device *adev);
};

/*
 * IOCTL.
 */
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
                struct drm_file *filp);

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
                struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
                struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
        struct amdgpu_bo *robj;
        volatile uint32_t *ptr;
        u64 gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atcs_functions {
        bool get_ext_state;
        bool pcie_perf_req;
        bool pcie_dev_rdy;
        bool pcie_bus_width;
};

struct amdgpu_atcs {
        struct amdgpu_atcs_functions functions;
};

/*
 * Firmware VRAM reservation
 */
struct amdgpu_fw_vram_usage {
        u64 start_offset;
        u64 size;
        struct amdgpu_bo *reserved_bo;
        void *va;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);


/*
 * amdgpu nbio functions
 *
 */
struct nbio_hdp_flush_reg {
        u32 ref_and_mask_cp0;
        u32 ref_and_mask_cp1;
        u32 ref_and_mask_cp2;
        u32 ref_and_mask_cp3;
        u32 ref_and_mask_cp4;
        u32 ref_and_mask_cp5;
        u32 ref_and_mask_cp6;
        u32 ref_and_mask_cp7;
        u32 ref_and_mask_cp8;
        u32 ref_and_mask_cp9;
        u32 ref_and_mask_sdma0;
        u32 ref_and_mask_sdma1;
};

struct amdgpu_nbio_funcs {
        const struct nbio_hdp_flush_reg *hdp_flush_reg;
        u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
        u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
        u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
        u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
        u32 (*get_rev_id)(struct amdgpu_device *adev);
        void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
        void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
        u32 (*get_memsize)(struct amdgpu_device *adev);
        void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
                        bool use_doorbell, int doorbell_index, int doorbell_size);
        void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
                        bool enable);
        void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
                        bool enable);
        void (*ih_doorbell_range)(struct amdgpu_device *adev,
                        bool use_doorbell, int doorbell_index);
        void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
                        bool enable);
        void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
                        bool enable);
        void (*get_clockgating_state)(struct amdgpu_device *adev,
                        u32 *flags);
        void (*ih_control)(struct amdgpu_device *adev);
        void (*init_registers)(struct amdgpu_device *adev);
        void (*detect_hw_virt)(struct amdgpu_device *adev);
};

struct amdgpu_df_funcs {
        void (*init)(struct amdgpu_device *adev);
        void (*enable_broadcast_mode)(struct amdgpu_device *adev,
                        bool enable);
        u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
        u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
        void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
                        bool enable);
        void (*get_clockgating_state)(struct amdgpu_device *adev,
                        u32 *flags);
        void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
                        bool enable);
};
/* Define the HW IP blocks that will be used in the driver; add more if necessary */
enum amd_hw_ip_block_type {
        GC_HWIP = 1,
        HDP_HWIP,
        SDMA0_HWIP,
        SDMA1_HWIP,
        MMHUB_HWIP,
        ATHUB_HWIP,
        NBIO_HWIP,
        MP0_HWIP,
        MP1_HWIP,
        UVD_HWIP,
        VCN_HWIP = UVD_HWIP,
        VCE_HWIP,
        DF_HWIP,
        DCE_HWIP,
        OSSSYS_HWIP,
        SMUIO_HWIP,
        PWR_HWIP,
        NBIF_HWIP,
        THM_HWIP,
        CLK_HWIP,
        MAX_HWIP
};

#define HWIP_MAX_INSTANCE 6

struct amd_powerplay {
        void *pp_handle;
        const struct amd_pm_funcs *pp_funcs;
        uint32_t pp_feature;
};

#define AMDGPU_RESET_MAGIC_NUM 64
struct amdgpu_device {
        struct device *dev;
        struct drm_device *ddev;
        struct pci_dev *pdev;

#ifdef CONFIG_DRM_AMD_ACP
        struct amdgpu_acp acp;
#endif

        /* ASIC */
        enum amd_asic_type asic_type;
        uint32_t family;
        uint32_t rev_id;
        uint32_t external_rev_id;
        unsigned long flags;
        int usec_timeout;
        const struct amdgpu_asic_funcs *asic_funcs;
        bool shutdown;
        bool need_dma32;
        bool need_swiotlb;
        bool accel_working;
        struct notifier_block acpi_nb;
        struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
        struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
        unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS)
        struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
        struct amdgpu_atif *atif;
        struct amdgpu_atcs atcs;
        struct mutex srbm_mutex;
        /* GRBM index mutex. Protects concurrent access to GRBM index */
        struct mutex grbm_idx_mutex;
        struct dev_pm_domain vga_pm_domain;
        bool have_disp_power_ref;

        /* BIOS */
        bool is_atom_fw;
        uint8_t *bios;
        uint32_t bios_size;
        struct amdgpu_bo *stolen_vga_memory;
        uint32_t bios_scratch_reg_offset;
        uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

        /* Register/doorbell mmio */
        resource_size_t rmmio_base;
        resource_size_t rmmio_size;
        void __iomem *rmmio;
        /* protects concurrent MM_INDEX/DATA based register access */
        spinlock_t mmio_idx_lock;
        /* protects concurrent SMC based register access */
        spinlock_t smc_idx_lock;
        amdgpu_rreg_t smc_rreg;
        amdgpu_wreg_t smc_wreg;
        /* protects concurrent PCIE register access */
        spinlock_t pcie_idx_lock;
        amdgpu_rreg_t pcie_rreg;
        amdgpu_wreg_t pcie_wreg;
        amdgpu_rreg_t pciep_rreg;
        amdgpu_wreg_t pciep_wreg;
        /* protects concurrent UVD register access */
        spinlock_t uvd_ctx_idx_lock;
        amdgpu_rreg_t uvd_ctx_rreg;
        amdgpu_wreg_t uvd_ctx_wreg;
        /* protects concurrent DIDT register access */
        spinlock_t didt_idx_lock;
        amdgpu_rreg_t didt_rreg;
        amdgpu_wreg_t didt_wreg;
        /* protects concurrent gc_cac register access */
        spinlock_t gc_cac_idx_lock;
        amdgpu_rreg_t gc_cac_rreg;
        amdgpu_wreg_t gc_cac_wreg;
        /* protects concurrent se_cac register access */
        spinlock_t se_cac_idx_lock;
        amdgpu_rreg_t se_cac_rreg;
        amdgpu_wreg_t se_cac_wreg;
        /* protects concurrent ENDPOINT (audio) register access */
        spinlock_t audio_endpt_idx_lock;
        amdgpu_block_rreg_t audio_endpt_rreg;
        amdgpu_block_wreg_t audio_endpt_wreg;
        void __iomem *rio_mem;
        resource_size_t rio_mem_size;
        struct amdgpu_doorbell doorbell;

        /* clock/pll info */
        struct amdgpu_clock clock;

        /* MC */
        struct amdgpu_gmc gmc;
        struct amdgpu_gart gart;
        dma_addr_t dummy_page_addr;
        struct amdgpu_vm_manager vm_manager;
        struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];

        /* memory management */
        struct amdgpu_mman mman;
        struct amdgpu_vram_scratch vram_scratch;
        struct amdgpu_wb wb;
        atomic64_t num_bytes_moved;
        atomic64_t num_evictions;
        atomic64_t num_vram_cpu_page_faults;
        atomic_t gpu_reset_counter;
        atomic_t vram_lost_counter;

        /* data for buffer migration throttling */
        struct {
                spinlock_t lock;
                s64 last_update_us;
                s64 accum_us; /* accumulated microseconds */
                s64 accum_us_vis; /* for visible VRAM */
                u32 log2_max_MBps;
        } mm_stats;

        /* display */
        bool enable_virtual_display;
        struct amdgpu_mode_info mode_info;
        /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
        struct work_struct hotplug_work;
        struct amdgpu_irq_src crtc_irq;
        struct amdgpu_irq_src pageflip_irq;
        struct amdgpu_irq_src hpd_irq;

        /* rings */
        u64 fence_context;
        unsigned num_rings;
        struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
        bool ib_pool_ready;
        struct amdgpu_sa_manager ring_tmp_bo;

        /* interrupts */
        struct amdgpu_irq irq;

        /* powerplay */
        struct amd_powerplay powerplay;
        bool pp_force_state_enabled;

        /* dpm */
        struct amdgpu_pm pm;
        u32 cg_flags;
        u32 pg_flags;

        /* gfx */
        struct amdgpu_gfx gfx;

        /* sdma */
        struct amdgpu_sdma sdma;

        /* uvd */
        struct amdgpu_uvd uvd;

        /* vce */
        struct amdgpu_vce vce;

        /* vcn */
        struct amdgpu_vcn vcn;

        /* firmwares */
        struct amdgpu_firmware firmware;

        /* PSP */
        struct psp_context psp;

        /* GDS */
        struct amdgpu_gds gds;

        /* KFD */
        struct amdgpu_kfd_dev kfd;

        /* display related functionality */
        struct amdgpu_display_manager dm;

        struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
        int num_ip_blocks;
        struct mutex mn_lock;
        DECLARE_HASHTABLE(mn_hash, 7);

        /* tracking pinned memory */
        atomic64_t vram_pin_size;
        atomic64_t visible_pin_size;
        atomic64_t gart_pin_size;

        /* soc15 register offset based on ip, instance and segment */
        uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];

        const struct amdgpu_nbio_funcs *nbio_funcs;
        const struct amdgpu_df_funcs *df_funcs;

        /* delayed work_func for deferring clockgating during resume */
        struct delayed_work late_init_work;

        struct amdgpu_virt virt;
        /* firmware VRAM reservation */
        struct amdgpu_fw_vram_usage fw_vram_usage;

        /* link all shadow bo */
        struct list_head shadow_list;
        struct mutex shadow_list_lock;
        /* keep an lru list of rings by HW IP */
        struct list_head ring_lru_list;
        spinlock_t ring_lru_list_lock;

        /* record that a hw reset was performed */
        bool has_hw_reset;
        u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];

        /* s3/s4 mask */
        bool in_suspend;

        /* record the last mm index written through WREG32 */
        unsigned long last_mm_index;
        bool in_gpu_reset;
        struct mutex lock_reset;
        struct amdgpu_doorbell_index doorbell_index;

        int asic_reset_res;
        struct work_struct xgmi_reset_work;
};

static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
{
        return container_of(bdev, struct amdgpu_device, mman.bdev);
}

int amdgpu_device_init(struct amdgpu_device *adev,
                struct drm_device *ddev,
                struct pci_dev *pdev,
                uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);

int emu_soc_asic_init(struct amdgpu_device *adev);

/*
 * Registers read & write functions.
 */

#define AMDGPU_REGS_IDX (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)

#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))

#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask) \
        do { \
                uint32_t tmp_ = RREG32(reg); \
                tmp_ &= (mask); \
                tmp_ |= ((val) & ~(mask)); \
                WREG32(reg, tmp_); \
        } while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask) \
        do { \
                uint32_t tmp_ = RREG32_PLL(reg); \
                tmp_ &= (mask); \
                tmp_ |= ((val) & ~(mask)); \
                WREG32_PLL(reg, tmp_); \
        } while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val) \
        (((orig_val) & ~REG_FIELD_MASK(reg, field)) | \
         (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field) \
        (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD(reg, field, val) \
        WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD_OFFSET(reg, offset, field, val) \
        WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
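
/* Usage sketch for the field helpers above; the register and field names are
 * only examples taken from the generated register headers:
 *
 *        u32 tmp = RREG32(mmCP_RB0_CNTL);
 *        tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
 *        WREG32(mmCP_RB0_CNTL, tmp);
 *
 * This expands to a read-modify-write built from CP_RB0_CNTL__RB_BUFSZ_MASK
 * and CP_RB0_CNTL__RB_BUFSZ__SHIFT; WREG32_FIELD(CP_RB0_CNTL, RB_BUFSZ, val)
 * collapses the same sequence into a single statement.
 */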

/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))

/* Common functions */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                struct amdgpu_job* job);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);

void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                u64 num_vis_bytes);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                const u32 *registers,
                const u32 array_size);

bool amdgpu_device_is_px(struct drm_device *dev);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif

#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
void *amdgpu_atpx_get_dhandle(void);
#else
static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
                struct drm_file *file_priv);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
                unsigned long arg);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
        u32 clock;

        int n_32khz;
        int cts_32khz;

        int n_44_1khz;
        int cts_44_1khz;

        int n_48khz;
        int cts_48khz;

};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);

/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
                u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);

void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
                struct amdgpu_dm_backlight_caps *caps);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                uint64_t addr, struct amdgpu_bo **bo,
                struct amdgpu_bo_va_mapping **mapping);

#if defined(CONFIG_DRM_AMD_DC)
int amdgpu_dm_display_resume(struct amdgpu_device *adev);
#else
static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
#endif

#include "amdgpu_object.h"
#endif