/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>

#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
#include "kgd_pp_interface.h"

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_dc;
extern int amdgpu_dc_log;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern uint amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
extern int amdgpu_ngg;
extern int amdgpu_prim_buf_per_se;
extern int amdgpu_pos_buf_per_se;
extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif

#define AMDGPU_DEFAULT_GTT_SIZE_MB	3072ULL	/* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
#define AMDGPU_MAX_USEC_TIMEOUT	100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE	16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS	32
#define AMDGPUFB_CONN_LIMIT	4
#define AMDGPU_BIOS_NUM_SCRATCH	16

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES	2

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA	0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX	(1 << 0)
#define AMDGPU_RESET_COMPUTE	(1 << 1)
#define AMDGPU_RESET_DMA	(1 << 2)
#define AMDGPU_RESET_CP		(1 << 3)
#define AMDGPU_RESET_GRBM	(1 << 4)
#define AMDGPU_RESET_DMA1	(1 << 5)
#define AMDGPU_RESET_RLC	(1 << 6)
#define AMDGPU_RESET_SEM	(1 << 7)
#define AMDGPU_RESET_IH		(1 << 8)
#define AMDGPU_RESET_VMC	(1 << 9)
#define AMDGPU_RESET_MC		(1 << 10)
#define AMDGPU_RESET_DISPLAY	(1 << 11)
#define AMDGPU_RESET_UVD	(1 << 12)
#define AMDGPU_RESET_VCE	(1 << 13)
#define AMDGPU_RESET_VCE1	(1 << 14)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE		0x00000000L
#define AMDGPU_GFX_SAFE_MODE		0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE	0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE	0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE	0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH	128
#define CIK_CURSOR_HEIGHT	128

/* GPU RESET flags */
#define AMDGPU_RESET_INFO_VRAM_LOST	(1 << 0)
#define AMDGPU_RESET_INFO_FULLRESET	(1 << 1)

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

enum amdgpu_kiq_irq {
	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
	AMDGPU_CP_KIQ_IRQ_LAST
};

int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state);
int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type);

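/*
 * Minimal usage sketch (illustrative only, not part of this header):
 * gating the GFX block's clocks from IP-independent code, assuming the
 * AMD_IP_BLOCK_TYPE_* and AMD_CG_STATE_* enumerators from amd_shared.h:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev,
 *						   AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_GATE);
 *	if (r)
 *		DRM_ERROR("failed to gate GFX clocks: %d\n", r);
 */
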
#define AMDGPU_MAX_IP_NUM 16

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
	bool hang;
};

struct amdgpu_ip_block_version {
	const enum amd_ip_block_type type;
	const u32 major;
	const u32 minor;
	const u32 rev;
	const struct amd_ip_funcs *funcs;
};

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
};

int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor);

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type);

int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version);

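/*
 * Illustrative sketch of how an ASIC init path might register an IP block;
 * the version numbers and "example_*" symbols here are hypothetical, the
 * real tables live in the per-ASIC files (soc15.c, vi.c, ...):
 *
 *	static const struct amdgpu_ip_block_version example_common_ip_block = {
 *		.type = AMD_IP_BLOCK_TYPE_COMMON,
 *		.major = 1,
 *		.minor = 0,
 *		.rev = 0,
 *		.funcs = &example_common_ip_funcs,
 *	};
 *
 *	r = amdgpu_device_ip_block_add(adev, &example_common_ip_block);
 */
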
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);

	/* maximum number of PTEs/PDEs in a single operation */
	uint32_t set_max_nums_pte_pde;

	/* number of dw to reserve per operation */
	unsigned set_pte_pde_num_dw;

	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint64_t flags); /* access flags */
	/* enable/disable PRT support */
	void (*set_prt)(struct amdgpu_device *adev, bool enable);
	/* set pte flags per asic */
	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
				     uint32_t flags);
	/* get the pde for a given mc addr */
	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
			   u64 *dst, u64 *flags);
	uint32_t (*get_invalidate_req)(unsigned int vmid);
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	bool (*prescreen_iv)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page *page;
	dma_addr_t addr;
};

/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * GEM.
 */

#define AMDGPU_GEM_DOMAIN_MAX	0x3
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, like the
 * indirect buffers or semaphores, which both have their own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at the
 * end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for each
 * sub object until we reach object_offset + object_size >= alloc_size;
 * that object then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
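
/* Worked example of the end-of-buffer check above: with total_size = 4096,
 * last_object_offset = 3072 and last_object_size = 512, the free tail is
 * 4096 - (3072 + 512) = 512 bytes, so any alloc_size <= 512 fits at the
 * end without waiting on a fence.
 */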

#define AMDGPU_SA_NUM_FENCE_LISTS	32

struct amdgpu_sa_manager {
	wait_queue_head_t wq;
	struct amdgpu_bo *bo;
	struct list_head *hole;
	struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head olist;
	unsigned size;
	uint64_t gpu_addr;
	void *cpu_ptr;
	uint32_t domain;
	uint32_t align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head olist;
	struct list_head flist;
	struct amdgpu_sa_manager *manager;
	unsigned soffset;
	unsigned eoffset;
	struct dma_fence *fence;
};

/*
 * GEM objects.
 */
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * VMHUB structures, functions & helpers
 */
struct amdgpu_vmhub {
	uint32_t ctx0_ptb_addr_lo32;
	uint32_t ctx0_ptb_addr_hi32;
	uint32_t vm_inv_eng0_req;
	uint32_t vm_inv_eng0_ack;
	uint32_t vm_context0_cntl;
	uint32_t vm_l2_pro_fault_status;
	uint32_t vm_l2_pro_fault_cntl;
};

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t aper_size;
	resource_size_t aper_base;
	resource_size_t agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64 mc_vram_size;
	u64 visible_vram_size;
	u64 gart_size;
	u64 gart_start;
	u64 gart_end;
	u64 vram_start;
	u64 vram_end;
	unsigned vram_width;
	u64 real_vram_size;
	int vram_mtrr;
	u64 mc_mask;
	const struct firmware *fw;	/* MC firmware */
	uint32_t fw_version;
	struct amdgpu_irq_src vm_fault;
	uint32_t vram_type;
	uint32_t srbm_soft_reset;
	bool prt_warning;
	uint64_t stolen_size;
	/* apertures */
	u64 shared_aperture_start;
	u64 shared_aperture_end;
	u64 private_aperture_start;
	u64 private_aperture_end;
	/* protects concurrent invalidation */
	spinlock_t invalidate_lock;
	bool translate_further;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ = 0x000,
	AMDGPU_DOORBELL_HIQ = 0x001,
	AMDGPU_DOORBELL_DIQ = 0x002,
	AMDGPU_DOORBELL_MEC_RING0 = 0x010,
	AMDGPU_DOORBELL_MEC_RING1 = 0x011,
	AMDGPU_DOORBELL_MEC_RING2 = 0x012,
	AMDGPU_DOORBELL_MEC_RING3 = 0x013,
	AMDGPU_DOORBELL_MEC_RING4 = 0x014,
	AMDGPU_DOORBELL_MEC_RING5 = 0x015,
	AMDGPU_DOORBELL_MEC_RING6 = 0x016,
	AMDGPU_DOORBELL_MEC_RING7 = 0x017,
	AMDGPU_DOORBELL_GFX_RING0 = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
	AMDGPU_DOORBELL_IH = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
	AMDGPU_DOORBELL_INVALID = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t base;
	resource_size_t size;
	u32 __iomem *ptr;
	u32 num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
};

/*
 * 64-bit doorbells; assignments are QWORD indices, occupying 2KB of
 * doorbell space.
 */
typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
{
	/*
	 * All compute-related doorbells (KIQ, HIQ, DIQ, traditional compute
	 * queues, user queues) should be located in a contiguous range so that
	 * programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover it.
	 * Compute-related doorbells are allocated from 0x00 to 0x8a.
	 */

	/* kernel scheduling */
	AMDGPU_DOORBELL64_KIQ = 0x00,

	/* HSA interface queue and debug queue */
	AMDGPU_DOORBELL64_HIQ = 0x01,
	AMDGPU_DOORBELL64_DIQ = 0x02,

	/* Compute engines */
	AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
	AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
	AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
	AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
	AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
	AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
	AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
	AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,

	/* User queue doorbell range (128 doorbells) */
	AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
	AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,

	/* Graphics engine */
	AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,

	/*
	 * Other graphics doorbells can be allocated here: from 0x8c to 0xef.
	 * Graphics voltage island aperture 1;
	 * the default non-graphics QWORD index range is 0xF0 - 0xFF inclusive.
	 */

	/* sDMA engines */
	AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
	AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,

	/* Interrupt handler */
	AMDGPU_DOORBELL64_IH = 0xF4,		/* For legacy interrupt ring buffer */
	AMDGPU_DOORBELL64_IH_RING1 = 0xF5,	/* For page migration request log */
	AMDGPU_DOORBELL64_IH_RING2 = 0xF6,	/* For page migration translation/invalidation log */

	/* VCN engines use 32-bit doorbells */
	AMDGPU_DOORBELL64_VCN0_1 = 0xF8,	/* lower 32 bits for VCN ring 0, upper 32 bits for VCN ring 1 */
	AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
	AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
	AMDGPU_DOORBELL64_VCN6_7 = 0xFB,

	/* The UVD/VCE assignments below overlap the VCN ones, as the engines
	 * are mutually exclusive. VCE doorbells are 32-bit, so two VCE rings
	 * share one QWORD.
	 */
	AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
	AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
	AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
	AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,

	AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
	AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
	AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
	AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,

	AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
	AMDGPU_DOORBELL64_INVALID = 0xFFFF
} AMDGPU_DOORBELL64_ASSIGNMENT;
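
/* Since the 64-bit assignments above are QWORD indices, a doorbell's byte
 * offset within the doorbell BAR is simply index * 8; e.g. the first sDMA
 * engine's doorbell (0xF0) lives at byte offset 0xF0 * 8 = 0x780.
 */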

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct delayed_work flip_work;
	struct work_struct unpin_work;
	struct amdgpu_device *adev;
	int crtc_id;
	u32 target_vblank;
	uint64_t base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo *old_abo;
	struct dma_fence *excl;
	unsigned shared_count;
	struct dma_fence **shared;
	struct dma_fence_cb cb;
	bool async;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo *sa_bo;
	uint32_t length_dw;
	uint64_t gpu_addr;
	uint32_t *ptr;
	uint32_t flags;
};

extern const struct drm_sched_backend_ops amdgpu_sched_ops;

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);

void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct drm_sched_entity *entity, void *owner,
		      struct dma_fence **f);

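/*
 * Typical job lifecycle sketch (illustrative; error handling elided, and
 * "ring", "entity" and "owner" are assumed to be set up elsewhere):
 *
 *	struct amdgpu_job *job;
 *	struct dma_fence *f;
 *
 *	r = amdgpu_job_alloc_with_ib(adev, 1024, &job);
 *	// ... fill job->ibs[0] with packets and set length_dw ...
 *	r = amdgpu_job_submit(job, ring, &entity, owner, &f);
 *	// f signals once the hardware has executed the IB
 */
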
/*
 * Queue manager
 */
struct amdgpu_queue_mapper {
	int hw_ip;
	struct mutex lock;
	/* protected by lock */
	struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
};

struct amdgpu_queue_mgr {
	struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
};

int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
			 struct amdgpu_queue_mgr *mgr,
			 u32 hw_ip, u32 instance, u32 ring,
			 struct amdgpu_ring **out_ring);

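/*
 * Sketch of how a CS path resolves a user-visible (hw_ip, instance, ring)
 * triple to a hardware ring (illustrative; "ctx" is an amdgpu_ctx):
 *
 *	struct amdgpu_ring *ring;
 *
 *	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
 *				 AMDGPU_HW_IP_GFX, 0, 0, &ring);
 */
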
/*
 * context related structures
 */

struct amdgpu_ctx_ring {
	uint64_t sequence;
	struct dma_fence **fences;
	struct drm_sched_entity entity;
};

struct amdgpu_ctx {
	struct kref refcount;
	struct amdgpu_device *adev;
	struct amdgpu_queue_mgr queue_mgr;
	unsigned reset_counter;
	unsigned reset_counter_query;
	uint32_t vram_lost_counter;
	spinlock_t ring_lock;
	struct dma_fence **fences;
	struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
	bool preamble_presented;
	enum drm_sched_priority init_priority;
	enum drm_sched_priority override_priority;
	struct mutex lock;
	atomic_t guilty;
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device *adev;
	struct mutex lock;
	/* protected by lock */
	struct idr ctx_handles;
};

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq);
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);


/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm vm;
	struct amdgpu_bo_va *prt_va;
	struct amdgpu_bo_va *csa_va;
	struct mutex bo_list_lock;
	struct idr bo_list_handles;
	struct amdgpu_ctx_mgr ctx_mgr;
};

/*
 * residency list
 */
struct amdgpu_bo_list_entry {
	struct amdgpu_bo *robj;
	struct ttm_validate_buffer tv;
	struct amdgpu_bo_va *bo_va;
	uint32_t priority;
	struct page **user_pages;
	int user_invalidated;
};

struct amdgpu_bo_list {
	struct mutex lock;
	struct rcu_head rhead;
	struct kref refcount;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	unsigned first_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc_funcs {
	void (*enter_safe_mode)(struct amdgpu_device *adev);
	void (*exit_safe_mode)(struct amdgpu_device *adev);
};

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo *save_restore_obj;
	uint64_t save_restore_gpu_addr;
	volatile uint32_t *sr_ptr;
	const u32 *reg_list;
	u32 reg_list_size;
	/* for clear state */
	struct amdgpu_bo *clear_state_obj;
	uint64_t clear_state_gpu_addr;
	volatile uint32_t *cs_ptr;
	const struct cs_section_def *cs_data;
	u32 clear_state_size;
	/* for cp tables */
	struct amdgpu_bo *cp_table_obj;
	uint64_t cp_table_gpu_addr;
	volatile uint32_t *cp_table_ptr;
	u32 cp_table_size;

	/* safe mode for updating CG/PG state */
	bool in_safe_mode;
	const struct amdgpu_rlc_funcs *funcs;

	/* for firmware data */
	u32 save_and_restore_offset;
	u32 clear_state_descriptor_offset;
	u32 avail_scratch_ram_locations;
	u32 reg_restore_list_size;
	u32 reg_list_format_start;
	u32 reg_list_format_separate_start;
	u32 starting_offsets_start;
	u32 reg_list_format_size_bytes;
	u32 reg_list_size_bytes;

	u32 *register_list_format;
	u32 *register_restore;
};

#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES

struct amdgpu_mec {
	struct amdgpu_bo *hpd_eop_obj;
	u64 hpd_eop_gpu_addr;
	struct amdgpu_bo *mec_fw_obj;
	u64 mec_fw_gpu_addr;
	u32 num_mec;
	u32 num_pipe_per_mec;
	u32 num_queue_per_pipe;
	void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];

	/* These are the resources for which amdgpu takes ownership */
	DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};

struct amdgpu_kiq {
	u64 eop_gpu_addr;
	struct amdgpu_bo *eop_obj;
	spinlock_t ring_lock;
	struct amdgpu_ring ring;
	struct amdgpu_irq_src irq;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned num_reg;
	uint32_t reg_base;
	uint32_t free_mask;
};

/*
 * GFX configurations
 */
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2

struct amdgpu_rb_config {
	uint32_t rb_backend_disable;
	uint32_t user_rb_backend_disable;
	uint32_t raster_config;
	uint32_t raster_config_1;
};

struct gb_addr_config {
	uint16_t pipe_interleave_size;
	uint8_t num_pipes;
	uint8_t max_compress_frags;
	uint8_t num_banks;
	uint8_t num_se;
	uint8_t num_rb_per_se;
};

struct amdgpu_gfx_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;
	unsigned num_rbs;
	unsigned gs_vgt_table_depth;
	unsigned gs_prim_buffer_depth;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];

	struct gb_addr_config gb_addr_config_fields;
	struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];

	/* gfx configuration features */
	uint32_t double_offchip_lds_buf;
};

struct amdgpu_cu_info {
	uint32_t simd_per_cu;
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;
	uint32_t lds_size;

	/* total active CU number */
	uint32_t number;
	uint32_t ao_cu_mask;
	uint32_t ao_cu_bitmap[4][4];
	uint32_t bitmap[4][4];
};

struct amdgpu_gfx_funcs {
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
	void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
	void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
	void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
};

struct amdgpu_ngg_buf {
	struct amdgpu_bo *bo;
	uint64_t gpu_addr;
	uint32_t size;
	uint32_t bo_size;
};

enum {
	NGG_PRIM = 0,
	NGG_POS,
	NGG_CNTL,
	NGG_PARAM,
	NGG_BUF_MAX
};

struct amdgpu_ngg {
	struct amdgpu_ngg_buf buf[NGG_BUF_MAX];
	uint32_t gds_reserve_addr;
	uint32_t gds_reserve_size;
	bool init;
};

struct amdgpu_gfx {
	struct mutex gpu_clock_mutex;
	struct amdgpu_gfx_config config;
	struct amdgpu_rlc rlc;
	struct amdgpu_mec mec;
	struct amdgpu_kiq kiq;
	struct amdgpu_scratch scratch;
	const struct firmware *me_fw;	/* ME firmware */
	uint32_t me_fw_version;
	const struct firmware *pfp_fw;	/* PFP firmware */
	uint32_t pfp_fw_version;
	const struct firmware *ce_fw;	/* CE firmware */
	uint32_t ce_fw_version;
	const struct firmware *rlc_fw;	/* RLC firmware */
	uint32_t rlc_fw_version;
	const struct firmware *mec_fw;	/* MEC firmware */
	uint32_t mec_fw_version;
	const struct firmware *mec2_fw;	/* MEC2 firmware */
	uint32_t mec2_fw_version;
	uint32_t me_feature_version;
	uint32_t ce_feature_version;
	uint32_t pfp_feature_version;
	uint32_t rlc_feature_version;
	uint32_t mec_feature_version;
	uint32_t mec2_feature_version;
	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned num_gfx_rings;
	struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned num_compute_rings;
	struct amdgpu_irq_src eop_irq;
	struct amdgpu_irq_src priv_reg_irq;
	struct amdgpu_irq_src priv_inst_irq;
	/* gfx status */
	uint32_t gfx_current_status;
	/* ce ram size */
	unsigned ce_ram_size;
	struct amdgpu_cu_info cu_info;
	const struct amdgpu_gfx_funcs *funcs;

	/* reset mask */
	uint32_t grbm_soft_reset;
	uint32_t srbm_soft_reset;
	/* s3/s4 mask */
	bool in_suspend;
	/* NGG */
	struct amdgpu_ngg ngg;

	/* pipe reservation */
	struct mutex pipe_reserve_mutex;
	DECLARE_BITMAP(pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	void *kdata;
};

struct amdgpu_cs_parser {
	struct amdgpu_device *adev;
	struct drm_file *filp;
	struct amdgpu_ctx *ctx;

	/* chunks */
	unsigned nchunks;
	struct amdgpu_cs_chunk *chunks;

	/* scheduler job object */
	struct amdgpu_job *job;

	/* buffer objects */
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_list *bo_list;
	struct amdgpu_mn *mn;
	struct amdgpu_bo_list_entry vm_pd;
	struct list_head validated;
	struct dma_fence *fence;
	uint64_t bytes_moved_threshold;
	uint64_t bytes_moved_vis_threshold;
	uint64_t bytes_moved;
	uint64_t bytes_moved_vis;
	struct amdgpu_bo_list_entry *evictable;

	/* user fence */
	struct amdgpu_bo_list_entry uf_entry;

	unsigned num_post_dep_syncobjs;
	struct drm_syncobj **post_dep_syncobjs;
};

#define AMDGPU_PREAMBLE_IB_PRESENT	(1 << 0) /* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST	(1 << 1) /* bit set means preamble IB is first presented in belonging context */
#define AMDGPU_HAVE_CTX_SWITCH	(1 << 2) /* bit set means context switch occurred */

struct amdgpu_job {
	struct drm_sched_job base;
	struct amdgpu_device *adev;
	struct amdgpu_vm *vm;
	struct amdgpu_ring *ring;
	struct amdgpu_sync sync;
	struct amdgpu_sync sched_sync;
	struct amdgpu_ib *ibs;
	struct dma_fence *fence;	/* the hw fence */
	uint32_t preamble_status;
	uint32_t num_ibs;
	void *owner;
	uint64_t fence_ctx;	/* the fence_context this job uses */
	bool vm_needs_flush;
	unsigned vmid;
	uint64_t vm_pd_addr;
	uint32_t gds_base, gds_size;
	uint32_t gws_base, gws_size;
	uint32_t oa_base, oa_size;
	uint32_t vram_lost_counter;

	/* user fence handling */
	uint64_t uf_addr;
	uint64_t uf_sequence;
};
#define to_amdgpu_job(sched_job) \
		container_of((sched_job), struct amdgpu_job, base)

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 128	/* Reserve at most 128 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
	u32 num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);

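/*
 * Writeback slot usage sketch (illustrative): reserve a slot, let the GPU
 * write status into it via adev->wb.gpu_addr, read the value back through
 * the CPU mapping, and release the slot when done.
 *
 *	u32 wb;
 *
 *	r = amdgpu_device_wb_get(adev, &wb);
 *	if (!r) {
 *		u32 val = le32_to_cpu(adev->wb.wb[wb]);
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */
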
void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/*
 * SDMA
 */
struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware *fw;
	uint32_t fw_version;
	uint32_t feature_version;

	struct amdgpu_ring ring;
	bool burst_nop;
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
#ifdef CONFIG_DRM_AMDGPU_SI
	//SI DMA has a different trap irq number for the second engine
	struct amdgpu_irq_src trap_irq_1;
#endif
	struct amdgpu_irq_src trap_irq;
	struct amdgpu_irq_src illegal_inst_irq;
	int num_instances;
	uint32_t srbm_soft_reset;
};

/*
 * Firmware
 */
enum amdgpu_firmware_load_type {
	AMDGPU_FW_LOAD_DIRECT = 0,
	AMDGPU_FW_LOAD_SMU,
	AMDGPU_FW_LOAD_PSP,
};

struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	enum amdgpu_firmware_load_type load_type;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
	unsigned int max_ucodes;
	/* from vega10 onward, firmware is loaded by psp instead of smu */
	const struct amdgpu_psp_funcs *funcs;
	struct amdgpu_bo *rbuf;
	struct mutex mutex;

	/* gpu info firmware data pointer */
	const struct firmware *gpu_info_fw;

	void *fw_buf_ptr;
	uint64_t fw_buf_mc;
};

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);


/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);


/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

/*
 * amdgpu smumgr
 */
struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	void *priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool grbm_indexed;
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* static power management */
	int (*get_pcie_lanes)(struct amdgpu_device *adev);
	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
	/* get config memsize register */
	u32 (*get_config_memsize)(struct amdgpu_device *adev);
};

/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo *robj;
	volatile uint32_t *ptr;
	u64 gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * Firmware VRAM reservation
 */
struct amdgpu_fw_vram_usage {
	u64 start_offset;
	u64 size;
	struct amdgpu_bo *reserved_bo;
	void *va;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);


/*
 * amdgpu nbio functions
 */
struct nbio_hdp_flush_reg {
	u32 ref_and_mask_cp0;
	u32 ref_and_mask_cp1;
	u32 ref_and_mask_cp2;
	u32 ref_and_mask_cp3;
	u32 ref_and_mask_cp4;
	u32 ref_and_mask_cp5;
	u32 ref_and_mask_cp6;
	u32 ref_and_mask_cp7;
	u32 ref_and_mask_cp8;
	u32 ref_and_mask_cp9;
	u32 ref_and_mask_sdma0;
	u32 ref_and_mask_sdma1;
};

struct amdgpu_nbio_funcs {
	const struct nbio_hdp_flush_reg *hdp_flush_reg;
	u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
	u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
	u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
	u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
	u32 (*get_rev_id)(struct amdgpu_device *adev);
	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
	void (*hdp_flush)(struct amdgpu_device *adev);
	u32 (*get_memsize)(struct amdgpu_device *adev);
	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
				    bool use_doorbell, int doorbell_index);
	void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
					 bool enable);
	void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
						  bool enable);
	void (*ih_doorbell_range)(struct amdgpu_device *adev,
				  bool use_doorbell, int doorbell_index);
	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
						 bool enable);
	void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
						bool enable);
	void (*get_clockgating_state)(struct amdgpu_device *adev,
				      u32 *flags);
	void (*ih_control)(struct amdgpu_device *adev);
	void (*init_registers)(struct amdgpu_device *adev);
	void (*detect_hw_virt)(struct amdgpu_device *adev);
};

/* Define the HW IP blocks used by the driver; add more if necessary */
enum amd_hw_ip_block_type {
	GC_HWIP = 1,
	HDP_HWIP,
	SDMA0_HWIP,
	SDMA1_HWIP,
	MMHUB_HWIP,
	ATHUB_HWIP,
	NBIO_HWIP,
	MP0_HWIP,
	UVD_HWIP,
	VCN_HWIP = UVD_HWIP,
	VCE_HWIP,
	DF_HWIP,
	DCE_HWIP,
	OSSSYS_HWIP,
	SMUIO_HWIP,
	PWR_HWIP,
	NBIF_HWIP,
	MAX_HWIP
};

#define HWIP_MAX_INSTANCE 6

struct amd_powerplay {
	struct cgs_device *cgs_device;
	void *pp_handle;
	const struct amd_ip_funcs *ip_funcs;
	const struct amd_pm_funcs *pp_funcs;
};

#define AMDGPU_RESET_MAGIC_NUM 64
struct amdgpu_device {
	struct device *dev;
	struct drm_device *ddev;
	struct pci_dev *pdev;

#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp acp;
#endif

	/* ASIC */
	enum amd_asic_type asic_type;
	uint32_t family;
	uint32_t rev_id;
	uint32_t external_rev_id;
	unsigned long flags;
	int usec_timeout;
	const struct amdgpu_asic_funcs *asic_funcs;
	bool shutdown;
	bool need_dma32;
	bool accel_working;
	struct work_struct reset_work;
	struct notifier_block acpi_nb;
	struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
	struct amdgpu_atif atif;
	struct amdgpu_atcs atcs;
	struct mutex srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex grbm_idx_mutex;
	struct dev_pm_domain vga_pm_domain;
	bool have_disp_power_ref;

	/* BIOS */
	bool is_atom_fw;
	uint8_t *bios;
	uint32_t bios_size;
	struct amdgpu_bo *stolen_vga_memory;
	uint32_t bios_scratch_reg_offset;
	uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t rmmio_base;
	resource_size_t rmmio_size;
	void __iomem *rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t mmio_idx_lock;
	/* protects concurrent SMC based register access */
	spinlock_t smc_idx_lock;
	amdgpu_rreg_t smc_rreg;
	amdgpu_wreg_t smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t pcie_idx_lock;
	amdgpu_rreg_t pcie_rreg;
	amdgpu_wreg_t pcie_wreg;
	amdgpu_rreg_t pciep_rreg;
	amdgpu_wreg_t pciep_wreg;
	/* protects concurrent UVD register access */
	spinlock_t uvd_ctx_idx_lock;
	amdgpu_rreg_t uvd_ctx_rreg;
	amdgpu_wreg_t uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t didt_idx_lock;
	amdgpu_rreg_t didt_rreg;
	amdgpu_wreg_t didt_wreg;
	/* protects concurrent gc_cac register access */
	spinlock_t gc_cac_idx_lock;
	amdgpu_rreg_t gc_cac_rreg;
	amdgpu_wreg_t gc_cac_wreg;
	/* protects concurrent se_cac register access */
	spinlock_t se_cac_idx_lock;
	amdgpu_rreg_t se_cac_rreg;
	amdgpu_wreg_t se_cac_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t audio_endpt_idx_lock;
	amdgpu_block_rreg_t audio_endpt_rreg;
	amdgpu_block_wreg_t audio_endpt_wreg;
	void __iomem *rio_mem;
	resource_size_t rio_mem_size;
	struct amdgpu_doorbell doorbell;

	/* clock/pll info */
	struct amdgpu_clock clock;

	/* MC */
	struct amdgpu_mc mc;
	struct amdgpu_gart gart;
	struct amdgpu_dummy_page dummy_page;
	struct amdgpu_vm_manager vm_manager;
	struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];

	/* memory management */
	struct amdgpu_mman mman;
	struct amdgpu_vram_scratch vram_scratch;
	struct amdgpu_wb wb;
	atomic64_t num_bytes_moved;
	atomic64_t num_evictions;
	atomic64_t num_vram_cpu_page_faults;
	atomic_t gpu_reset_counter;
	atomic_t vram_lost_counter;

	/* data for buffer migration throttling */
	struct {
		spinlock_t lock;
		s64 last_update_us;
		s64 accum_us; /* accumulated microseconds */
		s64 accum_us_vis; /* for visible VRAM */
		u32 log2_max_MBps;
	} mm_stats;

	/* display */
	bool enable_virtual_display;
	struct amdgpu_mode_info mode_info;
	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
	struct work_struct hotplug_work;
	struct amdgpu_irq_src crtc_irq;
	struct amdgpu_irq_src pageflip_irq;
	struct amdgpu_irq_src hpd_irq;

	/* rings */
	u64 fence_context;
	unsigned num_rings;
	struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
	bool ib_pool_ready;
	struct amdgpu_sa_manager ring_tmp_bo;

	/* interrupts */
	struct amdgpu_irq irq;

	/* powerplay */
	struct amd_powerplay powerplay;
	bool pp_force_state_enabled;

	/* dpm */
	struct amdgpu_pm pm;
	u32 cg_flags;
	u32 pg_flags;

	/* amdgpu smumgr */
	struct amdgpu_smumgr smu;

	/* gfx */
	struct amdgpu_gfx gfx;

	/* sdma */
	struct amdgpu_sdma sdma;

	/* uvd */
	struct amdgpu_uvd uvd;

	/* vce */
	struct amdgpu_vce vce;

	/* vcn */
	struct amdgpu_vcn vcn;

	/* firmwares */
	struct amdgpu_firmware firmware;

	/* PSP */
	struct psp_context psp;

	/* GDS */
	struct amdgpu_gds gds;

	/* display related functionality */
	struct amdgpu_display_manager dm;

	struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
	int num_ip_blocks;
	struct mutex mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	u64 vram_pin_size;
	u64 invisible_pin_size;
	u64 gart_pin_size;

	/* amdkfd interface */
	struct kfd_dev *kfd;

	/* soc15 register offset based on ip, instance and segment */
	uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];

	const struct amdgpu_nbio_funcs *nbio_funcs;

	/* delayed work_func for deferring clockgating during resume */
	struct delayed_work late_init_work;

	struct amdgpu_virt virt;
	/* firmware VRAM reservation */
	struct amdgpu_fw_vram_usage fw_vram_usage;

	/* link all shadow bo */
	struct list_head shadow_list;
	struct mutex shadow_list_lock;
	/* keep an lru list of rings by HW IP */
	struct list_head ring_lru_list;
	spinlock_t ring_lru_list_lock;

	/* record whether a hw reset was performed */
	bool has_hw_reset;
	u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];

	/* record the last mm index written through WREG32 */
	unsigned long last_mm_index;
	bool in_gpu_reset;
	struct mutex lock_reset;
};

static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
{
	return container_of(bdev, struct amdgpu_device, mman.bdev);
}

int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);

bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);

/*
 * Registers read & write functions.
 */

#define AMDGPU_REGS_IDX  (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)

#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
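/*
 * Read-modify-write example (hypothetical register mmFOO with bit
 * FOO_ENABLE): WREG32_P() preserves the bits selected by "mask" and takes
 * the remaining bits from "val", so the two helpers above behave as:
 *
 *	WREG32_OR(mmFOO, FOO_ENABLE);	// reg |= FOO_ENABLE
 *	WREG32_AND(mmFOO, ~FOO_ENABLE);	// reg &= ~FOO_ENABLE
 */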
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
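
/*
 * Field macro sketch for a hypothetical register FOO with field BAR (the
 * generated register headers provide FOO__BAR__SHIFT and FOO__BAR_MASK):
 *
 *	u32 tmp = RREG32(mmFOO);
 *	u32 bar = REG_GET_FIELD(tmp, FOO, BAR);
 *	tmp = REG_SET_FIELD(tmp, FOO, BAR, bar + 1);
 *	WREG32(mmFOO, tmp);
 *
 * WREG32_FIELD(FOO, BAR, v) performs the same read-modify-write in a
 * single statement.
 */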

/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
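
/* Example: with adev->bios[0x18] = 0x34 and adev->bios[0x19] = 0x12,
 * RBIOS16(0x18) assembles the little-endian value 0x1234.
 */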

/*
 * Look up the SDMA instance that owns @ring; returns NULL if @ring is not
 * an SDMA ring.
 */
static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (&adev->sdma.instance[i].ring == ring)
			return &adev->sdma.instance[i];

	return NULL;
}
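/*
 * Typical (illustrative) use: code holding only the ring pointer can
 * recover per-instance SDMA state, e.g. the loaded firmware version:
 *
 *	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 *
 *	if (sdma)
 *		fw_version = sdma->fw_version;
 */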

/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_gart_get_vm_pde(adev, level, dst, flags) (adev)->gart.gart_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev), (flags))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
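/*
 * The wrappers above are plain function-pointer dispatch; each IP block
 * installs its own funcs table during init.  For example (illustrative),
 *
 *	amdgpu_ring_test_ring(ring);
 *
 * expands to
 *
 *	(ring)->funcs->test_ring(ring);
 *
 * so the same call site serves GFX, SDMA, UVD and other ring types.
 */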

/* Common functions */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job, bool force);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);

void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_mc *mc, u64 base);
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_mc *mc);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size);

bool amdgpu_device_is_px(struct drm_device *dev);

/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
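/*
 * The stubs above let callers probe for ATPX unconditionally rather than
 * wrapping every call site in #ifdef CONFIG_VGA_SWITCHEROO, e.g.
 * (illustrative):
 *
 *	if (amdgpu_has_atpx() && amdgpu_is_atpx_hybrid())
 *		... hybrid-graphics specific setup ...
 */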

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;
};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
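/*
 * amdgpu_afmt_acr() returns the HDMI audio N/CTS pairs for a TMDS clock in
 * kHz; per the HDMI spec the pair for each sample rate fs must satisfy
 * 128 * fs = f_TMDS * N / CTS.  A minimal (illustrative) use:
 *
 *	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(74250);	// 74.25 MHz
 *	n = acr.n_48khz;
 *	cts = acr.cts_48khz;
 */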

/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **mapping);

#if defined(CONFIG_DRM_AMD_DC)
int amdgpu_dm_display_resume(struct amdgpu_device *adev);
#else
static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
#endif

#include "amdgpu_object.h"
#endif