Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next

First radeon and amdgpu pull request for 4.6. Highlights:
- ACP support for APUs with I2S audio
- CS ioctl optimizations
- GPU scheduler optimizations
- GPUVM optimizations
- Initial GPU reset support (not enabled yet)
- New powerplay sysfs interface for manually selecting clocks (a usage sketch follows this list)
- Powerplay fixes
- Virtualization fixes
- Removal of hw semaphore support
- Lots of other misc fixes and cleanups
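
The sysfs side of the new powerplay interface is not part of the hunks shown below (only the backing amdgpu_dpm_* macros appear in amdgpu.h). As a rough userspace sketch, assuming the interface exposes a per-level clock list under pp_dpm_sclk and that manual mode is selected through power_dpm_force_performance_level (both names are assumptions here, as is card0), forcing a shader-clock level could look like this:

    /* Illustrative only: the sysfs file names are assumed, not taken
     * from the diff in this pull. */
    #include <stdio.h>

    int main(void)
    {
    	const char *base = "/sys/class/drm/card0/device";
    	char path[256];
    	char line[128];
    	FILE *f;

    	/* switch DPM to manual so a specific clock level can be forced */
    	snprintf(path, sizeof(path), "%s/power_dpm_force_performance_level", base);
    	f = fopen(path, "w");
    	if (!f)
    		return 1;
    	fputs("manual", f);
    	fclose(f);

    	/* list the available sclk levels */
    	snprintf(path, sizeof(path), "%s/pp_dpm_sclk", base);
    	f = fopen(path, "r");
    	if (!f)
    		return 1;
    	while (fgets(line, sizeof(line), f))
    		fputs(line, stdout);
    	fclose(f);

    	/* force the lowest level (index 0) */
    	f = fopen(path, "w");
    	if (!f)
    		return 1;
    	fputs("0", f);
    	fclose(f);
    	return 0;
    }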

* 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux: (118 commits)
drm/amdgpu: Don't call interval_tree_remove in amdgpu_mn_destroy
drm/amdgpu: Fix race condition in amdgpu_mn_unregister
drm/amdgpu: cleanup gem init/finit
drm/amdgpu: rework GEM info printing
drm/amdgpu: print the GPU offset as well in gem_info
drm/amdgpu: optionally print the pin count in gem_info as well
drm/amdgpu: print the BO size only once in amdgpu_gem_info
drm/amdgpu: print pid as integer
drm/amdgpu: remove page flip work queue v3
drm/amdgpu: stop blocking for page filp fences
drm/amdgpu: stop calling amdgpu_gpu_reset from the flip code
drm/amdgpu: remove fence reset detection leftovers
drm/amdgpu: Fix race condition in MMU notifier release
drm/radeon: Fix WARN_ON if DRM_DP_AUX_CHARDEV is enabled
drm/amdgpu/vi: move uvd tiling config setup into uvd code
drm/amdgpu/vi: move sdma tiling config setup into sdma code
drm/amdgpu/cik: move uvd tiling config setup into uvd code
drm/amdgpu/cik: move sdma tiling config setup into sdma code
drm/amdgpu/gfx7: rework gpu_init()
drm/amdgpu/gfx: clean up harvest configuration (v2)
...

+5499 -4378
+2
drivers/gpu/drm/Kconfig
···
  source "drivers/gpu/drm/amd/amdgpu/Kconfig"
  source "drivers/gpu/drm/amd/powerplay/Kconfig"

+ source "drivers/gpu/drm/amd/acp/Kconfig"
+
  source "drivers/gpu/drm/nouveau/Kconfig"

  config DRM_I810
+11
drivers/gpu/drm/amd/acp/Kconfig
···
+ menu "ACP Configuration"
+
+ config DRM_AMD_ACP
+ 	bool "Enable ACP IP support"
+ 	default y
+ 	select MFD_CORE
+ 	select PM_GENERIC_DOMAINS if PM
+ 	help
+ 	  Choose this option to enable ACP IP support for AMD SOCs.
+
+ endmenu
+8
drivers/gpu/drm/amd/acp/Makefile
···
+ #
+ # Makefile for the ACP, which is a sub-component
+ # of AMDSOC/AMDGPU drm driver.
+ # It provides the HW control for ACP related functionalities.
+
+ subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
+
+ AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
+50
drivers/gpu/drm/amd/acp/acp_hw.c
··· 1 + /* 2 + * Copyright 2015 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + 24 + #include <linux/mm.h> 25 + #include <linux/slab.h> 26 + #include <linux/device.h> 27 + #include <linux/delay.h> 28 + #include <linux/errno.h> 29 + 30 + #include "acp_gfx_if.h" 31 + 32 + #define ACP_MODE_I2S 0 33 + #define ACP_MODE_AZ 1 34 + 35 + #define mmACP_AZALIA_I2S_SELECT 0x51d4 36 + 37 + int amd_acp_hw_init(void *cgs_device, 38 + unsigned acp_version_major, unsigned acp_version_minor) 39 + { 40 + unsigned int acp_mode = ACP_MODE_I2S; 41 + 42 + if ((acp_version_major == 2) && (acp_version_minor == 2)) 43 + acp_mode = cgs_read_register(cgs_device, 44 + mmACP_AZALIA_I2S_SELECT); 45 + 46 + if (acp_mode != ACP_MODE_I2S) 47 + return -ENODEV; 48 + 49 + return 0; 50 + }
+34
drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
··· 1 + /* 2 + * Copyright 2015 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + 24 + #ifndef _ACP_GFX_IF_H 25 + #define _ACP_GFX_IF_H 26 + 27 + #include <linux/types.h> 28 + #include "cgs_linux.h" 29 + #include "cgs_common.h" 30 + 31 + int amd_acp_hw_init(void *cgs_device, 32 + unsigned acp_version_major, unsigned acp_version_minor); 33 + 34 + #endif /* _ACP_GFX_IF_H */
+14 -3
drivers/gpu/drm/amd/amdgpu/Makefile
···
  	-I$(FULL_AMD_PATH)/include \
  	-I$(FULL_AMD_PATH)/amdgpu \
  	-I$(FULL_AMD_PATH)/scheduler \
- 	-I$(FULL_AMD_PATH)/powerplay/inc
+ 	-I$(FULL_AMD_PATH)/powerplay/inc \
+ 	-I$(FULL_AMD_PATH)/acp/include

  amdgpu-y := amdgpu_drv.o

···
  	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
  	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
  	amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
- 	atombios_encoders.o amdgpu_semaphore.o amdgpu_sa.o atombios_i2c.o \
+ 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
  	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
  	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o

···
  amdgpu-y += \
  	../scheduler/gpu_scheduler.o \
  	../scheduler/sched_fence.o \
- 	amdgpu_sched.o
+ 	amdgpu_job.o
+
+ # ACP componet
+ ifneq ($(CONFIG_DRM_AMD_ACP),)
+ amdgpu-y += amdgpu_acp.o
+
+ AMDACPPATH := ../acp
+ include $(FULL_AMD_PATH)/acp/Makefile
+
+ amdgpu-y += $(AMD_ACP_FILES)
+ endif

  amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
  amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
+119 -121
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 53 53 #include "amdgpu_ucode.h" 54 54 #include "amdgpu_gds.h" 55 55 #include "amd_powerplay.h" 56 + #include "amdgpu_acp.h" 56 57 57 58 #include "gpu_scheduler.h" 58 59 ··· 75 74 extern int amdgpu_smc_load_fw; 76 75 extern int amdgpu_aspm; 77 76 extern int amdgpu_runtime_pm; 78 - extern int amdgpu_hard_reset; 79 77 extern unsigned amdgpu_ip_block_mask; 80 78 extern int amdgpu_bapm; 81 79 extern int amdgpu_deep_color; ··· 82 82 extern int amdgpu_vm_block_size; 83 83 extern int amdgpu_vm_fault_stop; 84 84 extern int amdgpu_vm_debug; 85 - extern int amdgpu_enable_scheduler; 86 85 extern int amdgpu_sched_jobs; 87 86 extern int amdgpu_sched_hw_submission; 88 - extern int amdgpu_enable_semaphores; 89 87 extern int amdgpu_powerplay; 90 88 91 89 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 ··· 103 105 104 106 /* max number of IP instances */ 105 107 #define AMDGPU_MAX_SDMA_INSTANCES 2 106 - 107 - /* number of hw syncs before falling back on blocking */ 108 - #define AMDGPU_NUM_SYNCS 4 109 108 110 109 /* hardcode that limit for now */ 111 110 #define AMDGPU_VA_RESERVED_SIZE (8 << 20) ··· 184 189 struct amdgpu_ib; 185 190 struct amdgpu_vm; 186 191 struct amdgpu_ring; 187 - struct amdgpu_semaphore; 188 192 struct amdgpu_cs_parser; 189 193 struct amdgpu_job; 190 194 struct amdgpu_irq_src; ··· 281 287 unsigned count); 282 288 /* write pte one entry at a time with addr mapping */ 283 289 void (*write_pte)(struct amdgpu_ib *ib, 284 - uint64_t pe, 290 + const dma_addr_t *pages_addr, uint64_t pe, 285 291 uint64_t addr, unsigned count, 286 292 uint32_t incr, uint32_t flags); 287 293 /* for linear pte/pde updates without addr mapping */ ··· 289 295 uint64_t pe, 290 296 uint64_t addr, unsigned count, 291 297 uint32_t incr, uint32_t flags); 292 - /* pad the indirect buffer to the necessary number of dw */ 293 - void (*pad_ib)(struct amdgpu_ib *ib); 294 298 }; 295 299 296 300 /* provided by the gmc block */ ··· 326 334 struct amdgpu_ib *ib); 327 335 void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, 328 336 uint64_t seq, unsigned flags); 329 - bool (*emit_semaphore)(struct amdgpu_ring *ring, 330 - struct amdgpu_semaphore *semaphore, 331 - bool emit_wait); 332 337 void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, 333 338 uint64_t pd_addr); 334 339 void (*emit_hdp_flush)(struct amdgpu_ring *ring); ··· 338 349 int (*test_ib)(struct amdgpu_ring *ring); 339 350 /* insert NOP packets */ 340 351 void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); 352 + /* pad the indirect buffer to the necessary number of dw */ 353 + void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); 341 354 }; 342 355 343 356 /* ··· 385 394 uint64_t gpu_addr; 386 395 volatile uint32_t *cpu_addr; 387 396 /* sync_seq is protected by ring emission lock */ 388 - uint64_t sync_seq[AMDGPU_MAX_RINGS]; 397 + uint64_t sync_seq; 389 398 atomic64_t last_seq; 390 399 bool initialized; 391 400 struct amdgpu_irq_src *irq_src; ··· 438 447 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); 439 448 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); 440 449 441 - bool amdgpu_fence_need_sync(struct amdgpu_fence *fence, 442 - struct amdgpu_ring *ring); 443 - void amdgpu_fence_note_sync(struct amdgpu_fence *fence, 444 - struct amdgpu_ring *ring); 445 - 446 450 /* 447 451 * TTM. 
448 452 */ ··· 456 470 /* buffer handling */ 457 471 const struct amdgpu_buffer_funcs *buffer_funcs; 458 472 struct amdgpu_ring *buffer_funcs_ring; 473 + /* Scheduler entity for buffer moves */ 474 + struct amd_sched_entity entity; 459 475 }; 460 476 461 477 int amdgpu_copy_buffer(struct amdgpu_ring *ring, ··· 472 484 struct amdgpu_bo *robj; 473 485 struct ttm_validate_buffer tv; 474 486 struct amdgpu_bo_va *bo_va; 475 - unsigned prefered_domains; 476 - unsigned allowed_domains; 477 487 uint32_t priority; 478 488 }; 479 489 ··· 508 522 /* Protected by gem.mutex */ 509 523 struct list_head list; 510 524 /* Protected by tbo.reserved */ 511 - u32 initial_domain; 525 + u32 prefered_domains; 526 + u32 allowed_domains; 512 527 struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; 513 528 struct ttm_placement placement; 514 529 struct ttm_buffer_object tbo; ··· 531 544 struct amdgpu_bo *parent; 532 545 533 546 struct ttm_bo_kmap_obj dma_buf_vmap; 534 - pid_t pid; 535 547 struct amdgpu_mn *mn; 536 548 struct list_head mn_list; 537 549 }; ··· 607 621 /* 608 622 * GEM objects. 609 623 */ 610 - struct amdgpu_gem { 611 - struct mutex mutex; 612 - struct list_head objects; 613 - }; 614 - 615 - int amdgpu_gem_init(struct amdgpu_device *adev); 616 - void amdgpu_gem_fini(struct amdgpu_device *adev); 624 + void amdgpu_gem_force_release(struct amdgpu_device *adev); 617 625 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, 618 626 int alignment, u32 initial_domain, 619 627 u64 flags, bool kernel, ··· 619 639 int amdgpu_mode_dumb_mmap(struct drm_file *filp, 620 640 struct drm_device *dev, 621 641 uint32_t handle, uint64_t *offset_p); 622 - 623 - /* 624 - * Semaphores. 625 - */ 626 - struct amdgpu_semaphore { 627 - struct amdgpu_sa_bo *sa_bo; 628 - signed waiters; 629 - uint64_t gpu_addr; 630 - }; 631 - 632 - int amdgpu_semaphore_create(struct amdgpu_device *adev, 633 - struct amdgpu_semaphore **semaphore); 634 - bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring, 635 - struct amdgpu_semaphore *semaphore); 636 - bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring, 637 - struct amdgpu_semaphore *semaphore); 638 - void amdgpu_semaphore_free(struct amdgpu_device *adev, 639 - struct amdgpu_semaphore **semaphore, 640 - struct fence *fence); 641 - 642 642 /* 643 643 * Synchronization 644 644 */ 645 645 struct amdgpu_sync { 646 - struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS]; 647 - struct fence *sync_to[AMDGPU_MAX_RINGS]; 648 646 DECLARE_HASHTABLE(fences, 4); 649 647 struct fence *last_vm_update; 650 648 }; ··· 634 676 struct amdgpu_sync *sync, 635 677 struct reservation_object *resv, 636 678 void *owner); 637 - int amdgpu_sync_rings(struct amdgpu_sync *sync, 638 - struct amdgpu_ring *ring); 639 679 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); 640 680 int amdgpu_sync_wait(struct amdgpu_sync *sync); 641 - void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync, 642 - struct fence *fence); 681 + void amdgpu_sync_free(struct amdgpu_sync *sync); 643 682 644 683 /* 645 684 * GART structures, functions & helpers ··· 754 799 struct fence *excl; 755 800 unsigned shared_count; 756 801 struct fence **shared; 802 + struct fence_cb cb; 757 803 }; 758 804 759 805 ··· 767 811 uint32_t length_dw; 768 812 uint64_t gpu_addr; 769 813 uint32_t *ptr; 770 - struct amdgpu_ring *ring; 771 814 struct amdgpu_fence *fence; 772 815 struct amdgpu_user_fence *user; 816 + bool grabbed_vmid; 773 817 struct amdgpu_vm *vm; 774 818 struct amdgpu_ctx *ctx; 775 - 
struct amdgpu_sync sync; 776 819 uint32_t gds_base, gds_size; 777 820 uint32_t gws_base, gws_size; 778 821 uint32_t oa_base, oa_size; ··· 790 835 791 836 extern struct amd_sched_backend_ops amdgpu_sched_ops; 792 837 793 - int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, 794 - struct amdgpu_ring *ring, 795 - struct amdgpu_ib *ibs, 796 - unsigned num_ibs, 797 - int (*free_job)(struct amdgpu_job *), 798 - void *owner, 799 - struct fence **fence); 838 + int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, 839 + struct amdgpu_job **job); 840 + int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, 841 + struct amdgpu_job **job); 842 + void amdgpu_job_free(struct amdgpu_job *job); 843 + int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, 844 + struct amd_sched_entity *entity, void *owner, 845 + struct fence **f); 800 846 801 847 struct amdgpu_ring { 802 848 struct amdgpu_device *adev; ··· 806 850 struct amd_gpu_scheduler sched; 807 851 808 852 spinlock_t fence_lock; 809 - struct mutex *ring_lock; 810 853 struct amdgpu_bo *ring_obj; 811 854 volatile uint32_t *ring; 812 855 unsigned rptr_offs; ··· 814 859 unsigned wptr; 815 860 unsigned wptr_old; 816 861 unsigned ring_size; 817 - unsigned ring_free_dw; 862 + unsigned max_dw; 818 863 int count_dw; 819 864 uint64_t gpu_addr; 820 865 uint32_t align_mask; ··· 822 867 bool ready; 823 868 u32 nop; 824 869 u32 idx; 825 - u64 last_semaphore_signal_addr; 826 - u64 last_semaphore_wait_addr; 827 870 u32 me; 828 871 u32 pipe; 829 872 u32 queue; ··· 834 881 struct amdgpu_ctx *current_ctx; 835 882 enum amdgpu_ring_type type; 836 883 char name[16]; 837 - bool is_pte_ring; 838 884 }; 839 885 840 886 /* ··· 884 932 }; 885 933 886 934 struct amdgpu_vm { 935 + /* tree of virtual addresses mapped */ 936 + spinlock_t it_lock; 887 937 struct rb_root va; 888 938 889 939 /* protecting invalidated */ ··· 910 956 911 957 /* for id and flush management per ring */ 912 958 struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; 913 - /* for interval tree */ 914 - spinlock_t it_lock; 959 + 915 960 /* protecting freed */ 916 961 spinlock_t freed_lock; 962 + 963 + /* Scheduler entity for page table updates */ 964 + struct amd_sched_entity entity; 965 + }; 966 + 967 + struct amdgpu_vm_manager_id { 968 + struct list_head list; 969 + struct fence *active; 970 + atomic_long_t owner; 917 971 }; 918 972 919 973 struct amdgpu_vm_manager { 920 - struct { 921 - struct fence *active; 922 - atomic_long_t owner; 923 - } ids[AMDGPU_NUM_VM]; 974 + /* Handling of VMIDs */ 975 + struct mutex lock; 976 + unsigned num_ids; 977 + struct list_head ids_lru; 978 + struct amdgpu_vm_manager_id ids[AMDGPU_NUM_VM]; 924 979 925 980 uint32_t max_pfn; 926 - /* number of VMIDs */ 927 - unsigned nvm; 928 981 /* vram base address for page table entry */ 929 982 u64 vram_base_offset; 930 983 /* is vm enabled? 
*/ 931 984 bool enabled; 932 985 /* vm pte handling */ 933 986 const struct amdgpu_vm_pte_funcs *vm_pte_funcs; 934 - struct amdgpu_ring *vm_pte_funcs_ring; 987 + struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; 988 + unsigned vm_pte_num_rings; 989 + atomic_t vm_pte_next_ring; 935 990 }; 936 991 992 + void amdgpu_vm_manager_init(struct amdgpu_device *adev); 937 993 void amdgpu_vm_manager_fini(struct amdgpu_device *adev); 938 994 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); 939 995 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); ··· 954 990 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, 955 991 struct amdgpu_vm *vm); 956 992 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 957 - struct amdgpu_sync *sync); 993 + struct amdgpu_sync *sync, struct fence *fence); 958 994 void amdgpu_vm_flush(struct amdgpu_ring *ring, 959 995 struct amdgpu_vm *vm, 960 996 struct fence *updates); 961 - void amdgpu_vm_fence(struct amdgpu_device *adev, 962 - struct amdgpu_vm *vm, 963 - struct fence *fence); 964 - uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); 997 + uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); 965 998 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, 966 999 struct amdgpu_vm *vm); 967 1000 int amdgpu_vm_clear_freed(struct amdgpu_device *adev, ··· 984 1023 uint64_t addr); 985 1024 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, 986 1025 struct amdgpu_bo_va *bo_va); 987 - int amdgpu_vm_free_job(struct amdgpu_job *job); 988 1026 989 1027 /* 990 1028 * context related structures ··· 1010 1050 /* protected by lock */ 1011 1051 struct idr ctx_handles; 1012 1052 }; 1013 - 1014 - int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, 1015 - struct amdgpu_ctx *ctx); 1016 - void amdgpu_ctx_fini(struct amdgpu_ctx *ctx); 1017 1053 1018 1054 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); 1019 1055 int amdgpu_ctx_put(struct amdgpu_ctx *ctx); ··· 1052 1096 1053 1097 struct amdgpu_bo_list * 1054 1098 amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id); 1099 + void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, 1100 + struct list_head *validated); 1055 1101 void amdgpu_bo_list_put(struct amdgpu_bo_list *list); 1056 1102 void amdgpu_bo_list_free(struct amdgpu_bo_list *list); 1057 1103 ··· 1127 1169 unsigned multi_gpu_tile_size; 1128 1170 unsigned mc_arb_ramcfg; 1129 1171 unsigned gb_addr_config; 1172 + unsigned num_rbs; 1130 1173 1131 1174 uint32_t tile_mode_array[32]; 1132 1175 uint32_t macrotile_mode_array[16]; ··· 1170 1211 unsigned ce_ram_size; 1171 1212 }; 1172 1213 1173 - int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, 1214 + int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, 1174 1215 unsigned size, struct amdgpu_ib *ib); 1175 1216 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib); 1176 - int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, 1177 - struct amdgpu_ib *ib, void *owner); 1217 + int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, 1218 + struct amdgpu_ib *ib, void *owner, 1219 + struct fence *last_vm_update, 1220 + struct fence **f); 1178 1221 int amdgpu_ib_pool_init(struct amdgpu_device *adev); 1179 1222 void amdgpu_ib_pool_fini(struct amdgpu_device *adev); 1180 1223 int amdgpu_ib_ring_tests(struct amdgpu_device *adev); 1181 - /* Ring access between begin & end cannot sleep */ 1182 - void 
amdgpu_ring_free_size(struct amdgpu_ring *ring); 1183 1224 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); 1184 - int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw); 1185 1225 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); 1226 + void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); 1186 1227 void amdgpu_ring_commit(struct amdgpu_ring *ring); 1187 - void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring); 1188 1228 void amdgpu_ring_undo(struct amdgpu_ring *ring); 1189 - void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring); 1190 1229 unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, 1191 1230 uint32_t **data); 1192 1231 int amdgpu_ring_restore(struct amdgpu_ring *ring, ··· 1203 1246 uint32_t chunk_id; 1204 1247 uint32_t length_dw; 1205 1248 uint32_t *kdata; 1206 - void __user *user_ptr; 1207 1249 }; 1208 1250 1209 1251 struct amdgpu_cs_parser { 1210 1252 struct amdgpu_device *adev; 1211 1253 struct drm_file *filp; 1212 1254 struct amdgpu_ctx *ctx; 1213 - struct amdgpu_bo_list *bo_list; 1255 + 1214 1256 /* chunks */ 1215 1257 unsigned nchunks; 1216 1258 struct amdgpu_cs_chunk *chunks; 1217 - /* relocations */ 1259 + 1260 + /* scheduler job object */ 1261 + struct amdgpu_job *job; 1262 + 1263 + /* buffer objects */ 1264 + struct ww_acquire_ctx ticket; 1265 + struct amdgpu_bo_list *bo_list; 1218 1266 struct amdgpu_bo_list_entry vm_pd; 1219 - struct list_head validated; 1220 - struct fence *fence; 1221 - 1222 - struct amdgpu_ib *ibs; 1223 - uint32_t num_ibs; 1224 - 1225 - struct ww_acquire_ctx ticket; 1267 + struct list_head validated; 1268 + struct fence *fence; 1269 + uint64_t bytes_moved_threshold; 1270 + uint64_t bytes_moved; 1226 1271 1227 1272 /* user fence */ 1228 - struct amdgpu_user_fence uf; 1229 1273 struct amdgpu_bo_list_entry uf_entry; 1230 1274 }; 1231 1275 1232 1276 struct amdgpu_job { 1233 1277 struct amd_sched_job base; 1234 1278 struct amdgpu_device *adev; 1279 + struct amdgpu_ring *ring; 1280 + struct amdgpu_sync sync; 1235 1281 struct amdgpu_ib *ibs; 1236 1282 uint32_t num_ibs; 1237 1283 void *owner; 1238 1284 struct amdgpu_user_fence uf; 1239 - int (*free_job)(struct amdgpu_job *job); 1240 1285 }; 1241 1286 #define to_amdgpu_job(sched_job) \ 1242 1287 container_of((sched_job), struct amdgpu_job, base) 1243 1288 1244 - static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) 1289 + static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, 1290 + uint32_t ib_idx, int idx) 1245 1291 { 1246 - return p->ibs[ib_idx].ptr[idx]; 1292 + return p->job->ibs[ib_idx].ptr[idx]; 1293 + } 1294 + 1295 + static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p, 1296 + uint32_t ib_idx, int idx, 1297 + uint32_t value) 1298 + { 1299 + p->job->ibs[ib_idx].ptr[idx] = value; 1247 1300 } 1248 1301 1249 1302 /* ··· 1505 1538 AMDGPU_DPM_FORCED_LEVEL_AUTO = 0, 1506 1539 AMDGPU_DPM_FORCED_LEVEL_LOW = 1, 1507 1540 AMDGPU_DPM_FORCED_LEVEL_HIGH = 2, 1541 + AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3, 1508 1542 }; 1509 1543 1510 1544 struct amdgpu_vce_state { ··· 1635 1667 struct amdgpu_ring ring; 1636 1668 struct amdgpu_irq_src irq; 1637 1669 bool address_64_bit; 1670 + struct amd_sched_entity entity; 1638 1671 }; 1639 1672 1640 1673 /* ··· 1660 1691 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; 1661 1692 struct amdgpu_irq_src irq; 1662 1693 unsigned harvest_config; 1694 + struct amd_sched_entity entity; 1663 1695 }; 1664 1696 1665 1697 /* ··· 1895 1925 1896 1926 1897 1927 /* 1928 
+ * CGS 1929 + */ 1930 + void *amdgpu_cgs_create_device(struct amdgpu_device *adev); 1931 + void amdgpu_cgs_destroy_device(void *cgs_device); 1932 + 1933 + 1934 + /* GPU virtualization */ 1935 + struct amdgpu_virtualization { 1936 + bool supports_sr_iov; 1937 + }; 1938 + 1939 + /* 1898 1940 * Core structure, functions and helpers. 1899 1941 */ 1900 1942 typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t); ··· 1925 1943 struct device *dev; 1926 1944 struct drm_device *ddev; 1927 1945 struct pci_dev *pdev; 1946 + 1947 + #ifdef CONFIG_DRM_AMD_ACP 1948 + struct amdgpu_acp acp; 1949 + #endif 1928 1950 1929 1951 /* ASIC */ 1930 1952 enum amd_asic_type asic_type; ··· 2006 2020 2007 2021 /* memory management */ 2008 2022 struct amdgpu_mman mman; 2009 - struct amdgpu_gem gem; 2010 2023 struct amdgpu_vram_scratch vram_scratch; 2011 2024 struct amdgpu_wb wb; 2012 2025 atomic64_t vram_usage; ··· 2023 2038 2024 2039 /* rings */ 2025 2040 unsigned fence_context; 2026 - struct mutex ring_lock; 2027 2041 unsigned num_rings; 2028 2042 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; 2029 2043 bool ib_pool_ready; ··· 2034 2050 /* powerplay */ 2035 2051 struct amd_powerplay powerplay; 2036 2052 bool pp_enabled; 2053 + bool pp_force_state_enabled; 2037 2054 2038 2055 /* dpm */ 2039 2056 struct amdgpu_pm pm; ··· 2076 2091 /* amdkfd interface */ 2077 2092 struct kfd_dev *kfd; 2078 2093 2079 - /* kernel conext for IB submission */ 2080 - struct amdgpu_ctx kernel_ctx; 2094 + struct amdgpu_virtualization virtualization; 2081 2095 }; 2082 2096 2083 2097 bool amdgpu_device_is_px(struct drm_device *dev); ··· 2181 2197 ring->ring[ring->wptr++] = v; 2182 2198 ring->wptr &= ring->ptr_mask; 2183 2199 ring->count_dw--; 2184 - ring->ring_free_dw--; 2185 2200 } 2186 2201 2187 2202 static inline struct amdgpu_sdma_instance * ··· 2216 2233 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) 2217 2234 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) 2218 2235 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) 2219 - #define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags))) 2236 + #define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags))) 2220 2237 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) 2221 - #define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib))) 2222 2238 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) 2223 2239 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) 2224 2240 #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) ··· 2227 2245 #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib)) 2228 2246 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) 2229 2247 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) 2230 - #define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait)) 2231 2248 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) 
(r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) 2232 2249 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) 2250 + #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) 2233 2251 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) 2234 2252 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) 2235 2253 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) ··· 2321 2339 #define amdgpu_dpm_get_performance_level(adev) \ 2322 2340 (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) 2323 2341 2342 + #define amdgpu_dpm_get_pp_num_states(adev, data) \ 2343 + (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) 2344 + 2345 + #define amdgpu_dpm_get_pp_table(adev, table) \ 2346 + (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table) 2347 + 2348 + #define amdgpu_dpm_set_pp_table(adev, buf, size) \ 2349 + (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size) 2350 + 2351 + #define amdgpu_dpm_print_clock_levels(adev, type, buf) \ 2352 + (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf) 2353 + 2354 + #define amdgpu_dpm_force_clock_level(adev, type, level) \ 2355 + (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) 2356 + 2324 2357 #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ 2325 2358 (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) 2326 2359 ··· 2346 2349 void amdgpu_pci_config_reset(struct amdgpu_device *adev); 2347 2350 bool amdgpu_card_posted(struct amdgpu_device *adev); 2348 2351 void amdgpu_update_display_priority(struct amdgpu_device *adev); 2349 - bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); 2350 2352 2351 2353 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); 2352 2354 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, ··· 2355 2359 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); 2356 2360 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, 2357 2361 uint32_t flags); 2358 - bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); 2362 + struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); 2363 + bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, 2364 + unsigned long end); 2359 2365 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); 2360 2366 uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, 2361 2367 struct ttm_mem_reg *mem);
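
The amdgpu.h changes above also replace amdgpu_sched_ib_submit_kernel_helper() with the new amdgpu_job_alloc/submit API. A minimal sketch of a kernel-internal caller, built only from the prototypes and struct fields declared above (the helper name, the 64-dword sizing and the NOP payload are made up for illustration):

    #include "amdgpu.h"	/* amdgpu_job_*, struct amdgpu_ib, struct amdgpu_ring */

    static int example_submit_nop_ib(struct amdgpu_device *adev,
    				 struct amdgpu_ring *ring,
    				 struct amd_sched_entity *entity,
    				 struct fence **fence)
    {
    	struct amdgpu_job *job;
    	struct amdgpu_ib *ib;
    	int r;

    	/* allocate a job with a single IB sized for 64 dwords */
    	r = amdgpu_job_alloc_with_ib(adev, 64 * 4, &job);
    	if (r)
    		return r;

    	ib = &job->ibs[0];
    	ib->ptr[0] = ring->nop;	/* fill in real packets here */
    	ib->length_dw = 1;

    	/* hand the job to the GPU scheduler for the chosen ring/entity */
    	r = amdgpu_job_submit(job, ring, entity, NULL, fence);
    	if (r)
    		amdgpu_job_free(job);

    	return r;
    }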
+502
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
··· 1 + /* 2 + * Copyright 2015 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * Authors: AMD 23 + * 24 + */ 25 + 26 + #include <linux/irqdomain.h> 27 + #include <linux/pm_domain.h> 28 + #include <linux/platform_device.h> 29 + #include <sound/designware_i2s.h> 30 + #include <sound/pcm.h> 31 + 32 + #include "amdgpu.h" 33 + #include "atom.h" 34 + #include "amdgpu_acp.h" 35 + 36 + #include "acp_gfx_if.h" 37 + 38 + #define ACP_TILE_ON_MASK 0x03 39 + #define ACP_TILE_OFF_MASK 0x02 40 + #define ACP_TILE_ON_RETAIN_REG_MASK 0x1f 41 + #define ACP_TILE_OFF_RETAIN_REG_MASK 0x20 42 + 43 + #define ACP_TILE_P1_MASK 0x3e 44 + #define ACP_TILE_P2_MASK 0x3d 45 + #define ACP_TILE_DSP0_MASK 0x3b 46 + #define ACP_TILE_DSP1_MASK 0x37 47 + 48 + #define ACP_TILE_DSP2_MASK 0x2f 49 + 50 + #define ACP_DMA_REGS_END 0x146c0 51 + #define ACP_I2S_PLAY_REGS_START 0x14840 52 + #define ACP_I2S_PLAY_REGS_END 0x148b4 53 + #define ACP_I2S_CAP_REGS_START 0x148b8 54 + #define ACP_I2S_CAP_REGS_END 0x1496c 55 + 56 + #define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac 57 + #define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8 58 + #define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c 59 + #define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68 60 + 61 + #define mmACP_PGFSM_RETAIN_REG 0x51c9 62 + #define mmACP_PGFSM_CONFIG_REG 0x51ca 63 + #define mmACP_PGFSM_READ_REG_0 0x51cc 64 + 65 + #define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8 66 + #define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9 67 + #define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa 68 + #define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb 69 + 70 + #define ACP_TIMEOUT_LOOP 0x000000FF 71 + #define ACP_DEVS 3 72 + #define ACP_SRC_ID 162 73 + 74 + enum { 75 + ACP_TILE_P1 = 0, 76 + ACP_TILE_P2, 77 + ACP_TILE_DSP0, 78 + ACP_TILE_DSP1, 79 + ACP_TILE_DSP2, 80 + }; 81 + 82 + static int acp_sw_init(void *handle) 83 + { 84 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 85 + 86 + adev->acp.parent = adev->dev; 87 + 88 + adev->acp.cgs_device = 89 + amdgpu_cgs_create_device(adev); 90 + if (!adev->acp.cgs_device) 91 + return -EINVAL; 92 + 93 + return 0; 94 + } 95 + 96 + static int acp_sw_fini(void *handle) 97 + { 98 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 99 + 100 + if (adev->acp.cgs_device) 101 + amdgpu_cgs_destroy_device(adev->acp.cgs_device); 102 + 103 + return 0; 104 + } 105 + 106 + /* power off a tile/block within ACP */ 107 + static int acp_suspend_tile(void *cgs_dev, int tile) 108 + { 109 + u32 val = 0; 
110 + u32 count = 0; 111 + 112 + if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) { 113 + pr_err("Invalid ACP tile : %d to suspend\n", tile); 114 + return -1; 115 + } 116 + 117 + val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile); 118 + val &= ACP_TILE_ON_MASK; 119 + 120 + if (val == 0x0) { 121 + val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG); 122 + val = val | (1 << tile); 123 + cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val); 124 + cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG, 125 + 0x500 + tile); 126 + 127 + count = ACP_TIMEOUT_LOOP; 128 + while (true) { 129 + val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 130 + + tile); 131 + val = val & ACP_TILE_ON_MASK; 132 + if (val == ACP_TILE_OFF_MASK) 133 + break; 134 + if (--count == 0) { 135 + pr_err("Timeout reading ACP PGFSM status\n"); 136 + return -ETIMEDOUT; 137 + } 138 + udelay(100); 139 + } 140 + 141 + val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG); 142 + 143 + val |= ACP_TILE_OFF_RETAIN_REG_MASK; 144 + cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val); 145 + } 146 + return 0; 147 + } 148 + 149 + /* power on a tile/block within ACP */ 150 + static int acp_resume_tile(void *cgs_dev, int tile) 151 + { 152 + u32 val = 0; 153 + u32 count = 0; 154 + 155 + if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) { 156 + pr_err("Invalid ACP tile to resume\n"); 157 + return -1; 158 + } 159 + 160 + val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile); 161 + val = val & ACP_TILE_ON_MASK; 162 + 163 + if (val != 0x0) { 164 + cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG, 165 + 0x600 + tile); 166 + count = ACP_TIMEOUT_LOOP; 167 + while (true) { 168 + val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 169 + + tile); 170 + val = val & ACP_TILE_ON_MASK; 171 + if (val == 0x0) 172 + break; 173 + if (--count == 0) { 174 + pr_err("Timeout reading ACP PGFSM status\n"); 175 + return -ETIMEDOUT; 176 + } 177 + udelay(100); 178 + } 179 + val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG); 180 + if (tile == ACP_TILE_P1) 181 + val = val & (ACP_TILE_P1_MASK); 182 + else if (tile == ACP_TILE_P2) 183 + val = val & (ACP_TILE_P2_MASK); 184 + 185 + cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val); 186 + } 187 + return 0; 188 + } 189 + 190 + struct acp_pm_domain { 191 + void *cgs_dev; 192 + struct generic_pm_domain gpd; 193 + }; 194 + 195 + static int acp_poweroff(struct generic_pm_domain *genpd) 196 + { 197 + int i, ret; 198 + struct acp_pm_domain *apd; 199 + 200 + apd = container_of(genpd, struct acp_pm_domain, gpd); 201 + if (apd != NULL) { 202 + /* Donot return abruptly if any of power tile fails to suspend. 
203 + * Log it and continue powering off other tile 204 + */ 205 + for (i = 4; i >= 0 ; i--) { 206 + ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i); 207 + if (ret) 208 + pr_err("ACP tile %d tile suspend failed\n", i); 209 + } 210 + } 211 + return 0; 212 + } 213 + 214 + static int acp_poweron(struct generic_pm_domain *genpd) 215 + { 216 + int i, ret; 217 + struct acp_pm_domain *apd; 218 + 219 + apd = container_of(genpd, struct acp_pm_domain, gpd); 220 + if (apd != NULL) { 221 + for (i = 0; i < 2; i++) { 222 + ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i); 223 + if (ret) { 224 + pr_err("ACP tile %d resume failed\n", i); 225 + break; 226 + } 227 + } 228 + 229 + /* Disable DSPs which are not going to be used */ 230 + for (i = 0; i < 3; i++) { 231 + ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i); 232 + /* Continue suspending other DSP, even if one fails */ 233 + if (ret) 234 + pr_err("ACP DSP %d suspend failed\n", i); 235 + } 236 + } 237 + return 0; 238 + } 239 + 240 + static struct device *get_mfd_cell_dev(const char *device_name, int r) 241 + { 242 + char auto_dev_name[25]; 243 + char buf[8]; 244 + struct device *dev; 245 + 246 + sprintf(buf, ".%d.auto", r); 247 + strcpy(auto_dev_name, device_name); 248 + strcat(auto_dev_name, buf); 249 + dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name); 250 + dev_info(dev, "device %s added to pm domain\n", auto_dev_name); 251 + 252 + return dev; 253 + } 254 + 255 + /** 256 + * acp_hw_init - start and test ACP block 257 + * 258 + * @adev: amdgpu_device pointer 259 + * 260 + */ 261 + static int acp_hw_init(void *handle) 262 + { 263 + int r, i; 264 + uint64_t acp_base; 265 + struct device *dev; 266 + struct i2s_platform_data *i2s_pdata; 267 + 268 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 269 + 270 + const struct amdgpu_ip_block_version *ip_version = 271 + amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP); 272 + 273 + if (!ip_version) 274 + return -EINVAL; 275 + 276 + r = amd_acp_hw_init(adev->acp.cgs_device, 277 + ip_version->major, ip_version->minor); 278 + /* -ENODEV means board uses AZ rather than ACP */ 279 + if (r == -ENODEV) 280 + return 0; 281 + else if (r) 282 + return r; 283 + 284 + r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO, 285 + 0x5289, 0, &acp_base); 286 + if (r == -ENODEV) 287 + return 0; 288 + else if (r) 289 + return r; 290 + 291 + adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); 292 + if (adev->acp.acp_genpd == NULL) 293 + return -ENOMEM; 294 + 295 + adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; 296 + adev->acp.acp_genpd->gpd.power_off = acp_poweroff; 297 + adev->acp.acp_genpd->gpd.power_on = acp_poweron; 298 + 299 + 300 + adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; 301 + 302 + pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); 303 + 304 + adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS, 305 + GFP_KERNEL); 306 + 307 + if (adev->acp.acp_cell == NULL) 308 + return -ENOMEM; 309 + 310 + adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL); 311 + 312 + if (adev->acp.acp_res == NULL) { 313 + kfree(adev->acp.acp_cell); 314 + return -ENOMEM; 315 + } 316 + 317 + i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL); 318 + if (i2s_pdata == NULL) { 319 + kfree(adev->acp.acp_res); 320 + kfree(adev->acp.acp_cell); 321 + return -ENOMEM; 322 + } 323 + 324 + i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; 325 + i2s_pdata[0].cap = DWC_I2S_PLAY; 326 + i2s_pdata[0].snd_rates = 
SNDRV_PCM_RATE_8000_96000; 327 + i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET; 328 + i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET; 329 + 330 + i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | 331 + DW_I2S_QUIRK_COMP_PARAM1; 332 + i2s_pdata[1].cap = DWC_I2S_RECORD; 333 + i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000; 334 + i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; 335 + i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET; 336 + 337 + adev->acp.acp_res[0].name = "acp2x_dma"; 338 + adev->acp.acp_res[0].flags = IORESOURCE_MEM; 339 + adev->acp.acp_res[0].start = acp_base; 340 + adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END; 341 + 342 + adev->acp.acp_res[1].name = "acp2x_dw_i2s_play"; 343 + adev->acp.acp_res[1].flags = IORESOURCE_MEM; 344 + adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START; 345 + adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END; 346 + 347 + adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap"; 348 + adev->acp.acp_res[2].flags = IORESOURCE_MEM; 349 + adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START; 350 + adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END; 351 + 352 + adev->acp.acp_res[3].name = "acp2x_dma_irq"; 353 + adev->acp.acp_res[3].flags = IORESOURCE_IRQ; 354 + adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162); 355 + adev->acp.acp_res[3].end = adev->acp.acp_res[3].start; 356 + 357 + adev->acp.acp_cell[0].name = "acp_audio_dma"; 358 + adev->acp.acp_cell[0].num_resources = 4; 359 + adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; 360 + 361 + adev->acp.acp_cell[1].name = "designware-i2s"; 362 + adev->acp.acp_cell[1].num_resources = 1; 363 + adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1]; 364 + adev->acp.acp_cell[1].platform_data = &i2s_pdata[0]; 365 + adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data); 366 + 367 + adev->acp.acp_cell[2].name = "designware-i2s"; 368 + adev->acp.acp_cell[2].num_resources = 1; 369 + adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2]; 370 + adev->acp.acp_cell[2].platform_data = &i2s_pdata[1]; 371 + adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data); 372 + 373 + r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 374 + ACP_DEVS); 375 + if (r) 376 + return r; 377 + 378 + for (i = 0; i < ACP_DEVS ; i++) { 379 + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); 380 + r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); 381 + if (r) { 382 + dev_err(dev, "Failed to add dev to genpd\n"); 383 + return r; 384 + } 385 + } 386 + 387 + return 0; 388 + } 389 + 390 + /** 391 + * acp_hw_fini - stop the hardware block 392 + * 393 + * @adev: amdgpu_device pointer 394 + * 395 + */ 396 + static int acp_hw_fini(void *handle) 397 + { 398 + int i, ret; 399 + struct device *dev; 400 + 401 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 402 + 403 + for (i = 0; i < ACP_DEVS ; i++) { 404 + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); 405 + ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); 406 + /* If removal fails, dont giveup and try rest */ 407 + if (ret) 408 + dev_err(dev, "remove dev from genpd failed\n"); 409 + } 410 + 411 + mfd_remove_devices(adev->acp.parent); 412 + kfree(adev->acp.acp_res); 413 + kfree(adev->acp.acp_genpd); 414 + kfree(adev->acp.acp_cell); 415 + 416 + return 0; 417 + } 418 + 419 + static int acp_suspend(void *handle) 420 + { 421 + return 0; 422 + } 423 + 424 + static int acp_resume(void *handle) 425 + { 426 + 
int i, ret; 427 + struct acp_pm_domain *apd; 428 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 429 + 430 + /* SMU block will power on ACP irrespective of ACP runtime status. 431 + * Power off explicitly based on genpd ACP runtime status so that ACP 432 + * hw and ACP-genpd status are in sync. 433 + * 'suspend_power_off' represents "Power status before system suspend" 434 + */ 435 + if (adev->acp.acp_genpd->gpd.suspend_power_off == true) { 436 + apd = container_of(&adev->acp.acp_genpd->gpd, 437 + struct acp_pm_domain, gpd); 438 + 439 + for (i = 4; i >= 0 ; i--) { 440 + ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i); 441 + if (ret) 442 + pr_err("ACP tile %d tile suspend failed\n", i); 443 + } 444 + } 445 + return 0; 446 + } 447 + 448 + static int acp_early_init(void *handle) 449 + { 450 + return 0; 451 + } 452 + 453 + static bool acp_is_idle(void *handle) 454 + { 455 + return true; 456 + } 457 + 458 + static int acp_wait_for_idle(void *handle) 459 + { 460 + return 0; 461 + } 462 + 463 + static int acp_soft_reset(void *handle) 464 + { 465 + return 0; 466 + } 467 + 468 + static void acp_print_status(void *handle) 469 + { 470 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 471 + 472 + dev_info(adev->dev, "ACP STATUS\n"); 473 + } 474 + 475 + static int acp_set_clockgating_state(void *handle, 476 + enum amd_clockgating_state state) 477 + { 478 + return 0; 479 + } 480 + 481 + static int acp_set_powergating_state(void *handle, 482 + enum amd_powergating_state state) 483 + { 484 + return 0; 485 + } 486 + 487 + const struct amd_ip_funcs acp_ip_funcs = { 488 + .early_init = acp_early_init, 489 + .late_init = NULL, 490 + .sw_init = acp_sw_init, 491 + .sw_fini = acp_sw_fini, 492 + .hw_init = acp_hw_init, 493 + .hw_fini = acp_hw_fini, 494 + .suspend = acp_suspend, 495 + .resume = acp_resume, 496 + .is_idle = acp_is_idle, 497 + .wait_for_idle = acp_wait_for_idle, 498 + .soft_reset = acp_soft_reset, 499 + .print_status = acp_print_status, 500 + .set_clockgating_state = acp_set_clockgating_state, 501 + .set_powergating_state = acp_set_powergating_state, 502 + };
+42
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
··· 1 + /* 2 + * Copyright 2015 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * Authors: AMD 23 + * 24 + */ 25 + 26 + #ifndef __AMDGPU_ACP_H__ 27 + #define __AMDGPU_ACP_H__ 28 + 29 + #include <linux/mfd/core.h> 30 + 31 + struct amdgpu_acp { 32 + struct device *parent; 33 + void *cgs_device; 34 + struct amd_acp_private *private; 35 + struct mfd_cell *acp_cell; 36 + struct resource *acp_res; 37 + struct acp_pm_domain *acp_genpd; 38 + }; 39 + 40 + extern const struct amd_ip_funcs acp_ip_funcs; 41 + 42 + #endif /* __AMDGPU_ACP_H__ */
+13
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
···
  	return -EINVAL;
  }

+ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev)
+ {
+ 	int index = GetIndexIntoMasterTable(DATA, GPUVirtualizationInfo);
+ 	u8 frev, crev;
+ 	u16 data_offset, size;
+
+ 	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+ 					  &frev, &crev, &data_offset))
+ 		return true;
+
+ 	return false;
+ }
+
  void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
  {
  	uint32_t bios_6_scratch;
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
···
  				      u8 module_index,
  				      struct atom_mc_reg_table *reg_table);

+ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
+
  void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
  void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
  void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
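
A plausible consumer of this new query, assuming it runs somewhere during early init (the function name below is hypothetical; only amdgpu_atombios_has_gpu_virtualization_table() and the supports_sr_iov field added in amdgpu.h come from this pull):

    #include "amdgpu.h"
    #include "amdgpu_atombios.h"

    static void example_detect_virtualization(struct amdgpu_device *adev)
    {
    	/* presence of a GPUVirtualizationInfo table in the VBIOS is taken
    	 * here as the SR-IOV indicator */
    	adev->virtualization.supports_sr_iov =
    		amdgpu_atombios_has_gpu_virtualization_table(adev);

    	if (adev->virtualization.supports_sr_iov)
    		dev_info(adev->dev, "GPU virtualization (SR-IOV) supported\n");
    }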
+50 -13
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
··· 32 32 #include "amdgpu.h" 33 33 #include "amdgpu_trace.h" 34 34 35 + #define AMDGPU_BO_LIST_MAX_PRIORITY 32u 36 + #define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1) 37 + 35 38 static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv, 36 39 struct amdgpu_bo_list **result, 37 40 int *id) ··· 93 90 94 91 bool has_userptr = false; 95 92 unsigned i; 93 + int r; 96 94 97 95 array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry)); 98 96 if (!array) ··· 103 99 for (i = 0; i < num_entries; ++i) { 104 100 struct amdgpu_bo_list_entry *entry = &array[i]; 105 101 struct drm_gem_object *gobj; 102 + struct mm_struct *usermm; 106 103 107 104 gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle); 108 - if (!gobj) 105 + if (!gobj) { 106 + r = -ENOENT; 109 107 goto error_free; 108 + } 110 109 111 110 entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); 112 111 drm_gem_object_unreference_unlocked(gobj); 113 - entry->priority = info[i].bo_priority; 114 - entry->prefered_domains = entry->robj->initial_domain; 115 - entry->allowed_domains = entry->prefered_domains; 116 - if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) 117 - entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; 118 - if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) { 112 + entry->priority = min(info[i].bo_priority, 113 + AMDGPU_BO_LIST_MAX_PRIORITY); 114 + usermm = amdgpu_ttm_tt_get_usermm(entry->robj->tbo.ttm); 115 + if (usermm) { 116 + if (usermm != current->mm) { 117 + r = -EPERM; 118 + goto error_free; 119 + } 119 120 has_userptr = true; 120 - entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT; 121 - entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; 122 121 } 123 122 entry->tv.bo = &entry->robj->tbo; 124 123 entry->tv.shared = true; 125 124 126 - if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) 125 + if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) 127 126 gds_obj = entry->robj; 128 - if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS) 127 + if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS) 129 128 gws_obj = entry->robj; 130 - if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA) 129 + if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA) 131 130 oa_obj = entry->robj; 132 131 133 132 trace_amdgpu_bo_list_set(list, entry->robj); ··· 152 145 153 146 error_free: 154 147 drm_free_large(array); 155 - return -ENOENT; 148 + return r; 156 149 } 157 150 158 151 struct amdgpu_bo_list * ··· 166 159 mutex_lock(&result->lock); 167 160 mutex_unlock(&fpriv->bo_list_lock); 168 161 return result; 162 + } 163 + 164 + void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, 165 + struct list_head *validated) 166 + { 167 + /* This is based on the bucket sort with O(n) time complexity. 168 + * An item with priority "i" is added to bucket[i]. The lists are then 169 + * concatenated in descending order. 170 + */ 171 + struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS]; 172 + unsigned i; 173 + 174 + for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++) 175 + INIT_LIST_HEAD(&bucket[i]); 176 + 177 + /* Since buffers which appear sooner in the relocation list are 178 + * likely to be used more often than buffers which appear later 179 + * in the list, the sort mustn't change the ordering of buffers 180 + * with the same priority, i.e. it must be stable. 
181 + */ 182 + for (i = 0; i < list->num_entries; i++) { 183 + unsigned priority = list->array[i].priority; 184 + 185 + list_add_tail(&list->array[i].tv.head, 186 + &bucket[priority]); 187 + } 188 + 189 + /* Connect the sorted buckets in the output list. */ 190 + for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++) 191 + list_splice(&bucket[i], validated); 169 192 } 170 193 171 194 void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
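
amdgpu_bo_list_get_list() above keeps the stable O(n) bucket sort that previously lived in amdgpu_cs.c: one pass appends each entry to the tail of its priority bucket, then the buckets are concatenated in descending priority order. The same idea as a self-contained userspace C sketch, using a simplified entry type (nothing below is kernel API):

    #include <stdio.h>

    #define MAX_PRIORITY	32u
    #define NUM_BUCKETS	(MAX_PRIORITY + 1)

    struct entry {
    	unsigned priority;
    	int id;
    	struct entry *next;
    };

    static struct entry *sort_by_priority(struct entry *entries, unsigned n)
    {
    	struct entry *head[NUM_BUCKETS] = { 0 };
    	struct entry *tail[NUM_BUCKETS] = { 0 };
    	struct entry *out = NULL, **out_tail = &out;
    	unsigned i, p;

    	/* one pass: append each entry at the tail of its priority bucket;
    	 * appending preserves the original order of equal-priority entries,
    	 * which is the stability the in-kernel comment insists on */
    	for (i = 0; i < n; i++) {
    		p = entries[i].priority;
    		if (p > MAX_PRIORITY)
    			p = MAX_PRIORITY;
    		entries[i].next = NULL;
    		if (tail[p])
    			tail[p]->next = &entries[i];
    		else
    			head[p] = &entries[i];
    		tail[p] = &entries[i];
    	}

    	/* concatenate buckets from highest to lowest priority */
    	for (p = NUM_BUCKETS; p-- > 0;) {
    		if (!head[p])
    			continue;
    		*out_tail = head[p];
    		out_tail = &tail[p]->next;
    	}
    	return out;
    }

    int main(void)
    {
    	struct entry e[] = { { 2, 0 }, { 0, 1 }, { 2, 2 }, { 1, 3 } };
    	struct entry *cur;

    	for (cur = sort_by_priority(e, 4); cur; cur = cur->next)
    		printf("id %d (priority %u)\n", cur->id, cur->priority);
    	return 0;
    }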
+156 -229
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 30 30 #include "amdgpu.h" 31 31 #include "amdgpu_trace.h" 32 32 33 - #define AMDGPU_CS_MAX_PRIORITY 32u 34 - #define AMDGPU_CS_NUM_BUCKETS (AMDGPU_CS_MAX_PRIORITY + 1) 35 - 36 - /* This is based on the bucket sort with O(n) time complexity. 37 - * An item with priority "i" is added to bucket[i]. The lists are then 38 - * concatenated in descending order. 39 - */ 40 - struct amdgpu_cs_buckets { 41 - struct list_head bucket[AMDGPU_CS_NUM_BUCKETS]; 42 - }; 43 - 44 - static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b) 45 - { 46 - unsigned i; 47 - 48 - for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) 49 - INIT_LIST_HEAD(&b->bucket[i]); 50 - } 51 - 52 - static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b, 53 - struct list_head *item, unsigned priority) 54 - { 55 - /* Since buffers which appear sooner in the relocation list are 56 - * likely to be used more often than buffers which appear later 57 - * in the list, the sort mustn't change the ordering of buffers 58 - * with the same priority, i.e. it must be stable. 59 - */ 60 - list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]); 61 - } 62 - 63 - static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b, 64 - struct list_head *out_list) 65 - { 66 - unsigned i; 67 - 68 - /* Connect the sorted buckets in the output list. */ 69 - for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) { 70 - list_splice(&b->bucket[i], out_list); 71 - } 72 - } 73 - 74 33 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, 75 34 u32 ip_instance, u32 ring, 76 35 struct amdgpu_ring **out_ring) ··· 87 128 } 88 129 89 130 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, 131 + struct amdgpu_user_fence *uf, 90 132 struct drm_amdgpu_cs_chunk_fence *fence_data) 91 133 { 92 134 struct drm_gem_object *gobj; ··· 99 139 if (gobj == NULL) 100 140 return -EINVAL; 101 141 102 - p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); 103 - p->uf.offset = fence_data->offset; 142 + uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); 143 + uf->offset = fence_data->offset; 104 144 105 - if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) { 145 + if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) { 106 146 drm_gem_object_unreference_unlocked(gobj); 107 147 return -EINVAL; 108 148 } 109 149 110 - p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo); 111 - p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT; 112 - p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT; 150 + p->uf_entry.robj = amdgpu_bo_ref(uf->bo); 113 151 p->uf_entry.priority = 0; 114 152 p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; 115 153 p->uf_entry.tv.shared = true; ··· 118 160 119 161 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) 120 162 { 163 + struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 121 164 union drm_amdgpu_cs *cs = data; 122 165 uint64_t *chunk_array_user; 123 166 uint64_t *chunk_array; 124 - struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 125 - unsigned size; 167 + struct amdgpu_user_fence uf = {}; 168 + unsigned size, num_ibs = 0; 126 169 int i; 127 170 int ret; 128 171 ··· 140 181 goto free_chunk; 141 182 } 142 183 143 - p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); 144 - 145 184 /* get chunks */ 146 - INIT_LIST_HEAD(&p->validated); 147 185 chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks); 148 186 if (copy_from_user(chunk_array, chunk_array_user, 149 187 sizeof(uint64_t)*cs->in.num_chunks)) { 150 188 ret = -EFAULT; 151 - goto put_bo_list; 189 + goto put_ctx; 152 190 } 153 191 154 192 p->nchunks = 
cs->in.num_chunks; ··· 153 197 GFP_KERNEL); 154 198 if (!p->chunks) { 155 199 ret = -ENOMEM; 156 - goto put_bo_list; 200 + goto put_ctx; 157 201 } 158 202 159 203 for (i = 0; i < p->nchunks; i++) { ··· 173 217 174 218 size = p->chunks[i].length_dw; 175 219 cdata = (void __user *)(unsigned long)user_chunk.chunk_data; 176 - p->chunks[i].user_ptr = cdata; 177 220 178 221 p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); 179 222 if (p->chunks[i].kdata == NULL) { ··· 188 233 189 234 switch (p->chunks[i].chunk_id) { 190 235 case AMDGPU_CHUNK_ID_IB: 191 - p->num_ibs++; 236 + ++num_ibs; 192 237 break; 193 238 194 239 case AMDGPU_CHUNK_ID_FENCE: ··· 198 243 goto free_partial_kdata; 199 244 } 200 245 201 - ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata); 246 + ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata); 202 247 if (ret) 203 248 goto free_partial_kdata; 204 249 ··· 213 258 } 214 259 } 215 260 216 - 217 - p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL); 218 - if (!p->ibs) { 219 - ret = -ENOMEM; 261 + ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job); 262 + if (ret) 220 263 goto free_all_kdata; 221 - } 264 + 265 + p->job->uf = uf; 222 266 223 267 kfree(chunk_array); 224 268 return 0; ··· 228 274 for (; i >= 0; i--) 229 275 drm_free_large(p->chunks[i].kdata); 230 276 kfree(p->chunks); 231 - put_bo_list: 232 - if (p->bo_list) 233 - amdgpu_bo_list_put(p->bo_list); 277 + put_ctx: 234 278 amdgpu_ctx_put(p->ctx); 235 279 free_chunk: 236 280 kfree(chunk_array); ··· 288 336 return max(bytes_moved_threshold, 1024*1024ull); 289 337 } 290 338 291 - int amdgpu_cs_list_validate(struct amdgpu_device *adev, 292 - struct amdgpu_vm *vm, 339 + int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, 293 340 struct list_head *validated) 294 341 { 295 342 struct amdgpu_bo_list_entry *lobj; 296 - struct amdgpu_bo *bo; 297 - u64 bytes_moved = 0, initial_bytes_moved; 298 - u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev); 343 + u64 initial_bytes_moved; 299 344 int r; 300 345 301 346 list_for_each_entry(lobj, validated, tv.head) { 302 - bo = lobj->robj; 303 - if (!bo->pin_count) { 304 - u32 domain = lobj->prefered_domains; 305 - u32 current_domain = 306 - amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); 347 + struct amdgpu_bo *bo = lobj->robj; 348 + struct mm_struct *usermm; 349 + uint32_t domain; 307 350 308 - /* Check if this buffer will be moved and don't move it 309 - * if we have moved too many buffers for this IB already. 310 - * 311 - * Note that this allows moving at least one buffer of 312 - * any size, because it doesn't take the current "bo" 313 - * into account. We don't want to disallow buffer moves 314 - * completely. 315 - */ 316 - if ((lobj->allowed_domains & current_domain) != 0 && 317 - (domain & current_domain) == 0 && /* will be moved */ 318 - bytes_moved > bytes_moved_threshold) { 319 - /* don't move it */ 320 - domain = current_domain; 351 + usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); 352 + if (usermm && usermm != current->mm) 353 + return -EPERM; 354 + 355 + if (bo->pin_count) 356 + continue; 357 + 358 + /* Avoid moving this one if we have moved too many buffers 359 + * for this IB already. 360 + * 361 + * Note that this allows moving at least one buffer of 362 + * any size, because it doesn't take the current "bo" 363 + * into account. We don't want to disallow buffer moves 364 + * completely. 
365 + */ 366 + if (p->bytes_moved <= p->bytes_moved_threshold) 367 + domain = bo->prefered_domains; 368 + else 369 + domain = bo->allowed_domains; 370 + 371 + retry: 372 + amdgpu_ttm_placement_from_domain(bo, domain); 373 + initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); 374 + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 375 + p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - 376 + initial_bytes_moved; 377 + 378 + if (unlikely(r)) { 379 + if (r != -ERESTARTSYS && domain != bo->allowed_domains) { 380 + domain = bo->allowed_domains; 381 + goto retry; 321 382 } 322 - 323 - retry: 324 - amdgpu_ttm_placement_from_domain(bo, domain); 325 - initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); 326 - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 327 - bytes_moved += atomic64_read(&adev->num_bytes_moved) - 328 - initial_bytes_moved; 329 - 330 - if (unlikely(r)) { 331 - if (r != -ERESTARTSYS && domain != lobj->allowed_domains) { 332 - domain = lobj->allowed_domains; 333 - goto retry; 334 - } 335 - return r; 336 - } 383 + return r; 337 384 } 338 - lobj->bo_va = amdgpu_vm_bo_find(vm, bo); 339 385 } 340 386 return 0; 341 387 } 342 388 343 - static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) 389 + static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, 390 + union drm_amdgpu_cs *cs) 344 391 { 345 392 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 346 - struct amdgpu_cs_buckets buckets; 347 393 struct list_head duplicates; 348 394 bool need_mmap_lock = false; 349 - int i, r; 395 + int r; 350 396 397 + INIT_LIST_HEAD(&p->validated); 398 + 399 + p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); 351 400 if (p->bo_list) { 352 401 need_mmap_lock = p->bo_list->has_userptr; 353 - amdgpu_cs_buckets_init(&buckets); 354 - for (i = 0; i < p->bo_list->num_entries; i++) 355 - amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head, 356 - p->bo_list->array[i].priority); 357 - 358 - amdgpu_cs_buckets_get_list(&buckets, &p->validated); 402 + amdgpu_bo_list_get_list(p->bo_list, &p->validated); 359 403 } 360 404 361 405 INIT_LIST_HEAD(&duplicates); 362 406 amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); 363 407 364 - if (p->uf.bo) 408 + if (p->job->uf.bo) 365 409 list_add(&p->uf_entry.tv.head, &p->validated); 366 410 367 411 if (need_mmap_lock) ··· 369 421 370 422 amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); 371 423 372 - r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates); 424 + p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); 425 + p->bytes_moved = 0; 426 + 427 + r = amdgpu_cs_list_validate(p, &duplicates); 373 428 if (r) 374 429 goto error_validate; 375 430 376 - r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); 431 + r = amdgpu_cs_list_validate(p, &p->validated); 432 + if (r) 433 + goto error_validate; 434 + 435 + if (p->bo_list) { 436 + struct amdgpu_vm *vm = &fpriv->vm; 437 + unsigned i; 438 + 439 + for (i = 0; i < p->bo_list->num_entries; i++) { 440 + struct amdgpu_bo *bo = p->bo_list->array[i].robj; 441 + 442 + p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo); 443 + } 444 + } 377 445 378 446 error_validate: 379 447 if (r) { ··· 411 447 412 448 list_for_each_entry(e, &p->validated, tv.head) { 413 449 struct reservation_object *resv = e->robj->tbo.resv; 414 - r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp); 450 + r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp); 415 451 416 452 if (r) 417 453 return r; ··· 474 510 for (i = 
0; i < parser->nchunks; i++) 475 511 drm_free_large(parser->chunks[i].kdata); 476 512 kfree(parser->chunks); 477 - if (parser->ibs) 478 - for (i = 0; i < parser->num_ibs; i++) 479 - amdgpu_ib_free(parser->adev, &parser->ibs[i]); 480 - kfree(parser->ibs); 481 - amdgpu_bo_unref(&parser->uf.bo); 513 + if (parser->job) 514 + amdgpu_job_free(parser->job); 482 515 amdgpu_bo_unref(&parser->uf_entry.robj); 483 516 } 484 517 ··· 491 530 if (r) 492 531 return r; 493 532 494 - r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence); 533 + r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence); 495 534 if (r) 496 535 return r; 497 536 ··· 517 556 return r; 518 557 519 558 f = bo_va->last_pt_update; 520 - r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f); 559 + r = amdgpu_sync_fence(adev, &p->job->sync, f); 521 560 if (r) 522 561 return r; 523 562 } 524 563 525 564 } 526 565 527 - r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync); 566 + r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync); 528 567 529 568 if (amdgpu_vm_debug && p->bo_list) { 530 569 /* Invalidate all BOs to test for userspace bugs */ ··· 542 581 } 543 582 544 583 static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, 545 - struct amdgpu_cs_parser *parser) 584 + struct amdgpu_cs_parser *p) 546 585 { 547 - struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; 586 + struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 548 587 struct amdgpu_vm *vm = &fpriv->vm; 549 - struct amdgpu_ring *ring; 588 + struct amdgpu_ring *ring = p->job->ring; 550 589 int i, r; 551 590 552 - if (parser->num_ibs == 0) 553 - return 0; 554 - 555 591 /* Only for UVD/VCE VM emulation */ 556 - for (i = 0; i < parser->num_ibs; i++) { 557 - ring = parser->ibs[i].ring; 558 - if (ring->funcs->parse_cs) { 559 - r = amdgpu_ring_parse_cs(ring, parser, i); 592 + if (ring->funcs->parse_cs) { 593 + for (i = 0; i < p->job->num_ibs; i++) { 594 + r = amdgpu_ring_parse_cs(ring, p, i); 560 595 if (r) 561 596 return r; 562 597 } 563 598 } 564 599 565 - r = amdgpu_bo_vm_update_pte(parser, vm); 600 + r = amdgpu_bo_vm_update_pte(p, vm); 566 601 if (!r) 567 - amdgpu_cs_sync_rings(parser); 602 + amdgpu_cs_sync_rings(p); 568 603 569 604 return r; 570 605 } ··· 583 626 int i, j; 584 627 int r; 585 628 586 - for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) { 629 + for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) { 587 630 struct amdgpu_cs_chunk *chunk; 588 631 struct amdgpu_ib *ib; 589 632 struct drm_amdgpu_cs_chunk_ib *chunk_ib; 590 633 struct amdgpu_ring *ring; 591 634 592 635 chunk = &parser->chunks[i]; 593 - ib = &parser->ibs[j]; 636 + ib = &parser->job->ibs[j]; 594 637 chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata; 595 638 596 639 if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) ··· 601 644 &ring); 602 645 if (r) 603 646 return r; 647 + 648 + if (parser->job->ring && parser->job->ring != ring) 649 + return -EINVAL; 650 + 651 + parser->job->ring = ring; 604 652 605 653 if (ring->funcs->parse_cs) { 606 654 struct amdgpu_bo_va_mapping *m; ··· 635 673 offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE; 636 674 kptr += chunk_ib->va_start - offset; 637 675 638 - r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib); 676 + r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib); 639 677 if (r) { 640 678 DRM_ERROR("Failed to get ib !\n"); 641 679 return r; ··· 644 682 memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); 645 683 amdgpu_bo_kunmap(aobj); 646 684 } else { 647 - r = amdgpu_ib_get(ring, vm, 0, ib); 685 
+ r = amdgpu_ib_get(adev, vm, 0, ib); 648 686 if (r) { 649 687 DRM_ERROR("Failed to get ib !\n"); 650 688 return r; ··· 659 697 j++; 660 698 } 661 699 662 - if (!parser->num_ibs) 663 - return 0; 664 - 665 700 /* add GDS resources to first IB */ 666 701 if (parser->bo_list) { 667 702 struct amdgpu_bo *gds = parser->bo_list->gds_obj; 668 703 struct amdgpu_bo *gws = parser->bo_list->gws_obj; 669 704 struct amdgpu_bo *oa = parser->bo_list->oa_obj; 670 - struct amdgpu_ib *ib = &parser->ibs[0]; 705 + struct amdgpu_ib *ib = &parser->job->ibs[0]; 671 706 672 707 if (gds) { 673 708 ib->gds_base = amdgpu_bo_gpu_offset(gds); ··· 680 721 } 681 722 } 682 723 /* wrap the last IB with user fence */ 683 - if (parser->uf.bo) { 684 - struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1]; 724 + if (parser->job->uf.bo) { 725 + struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1]; 685 726 686 727 /* UVD & VCE fw doesn't support user fences */ 687 - if (ib->ring->type == AMDGPU_RING_TYPE_UVD || 688 - ib->ring->type == AMDGPU_RING_TYPE_VCE) 728 + if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD || 729 + parser->job->ring->type == AMDGPU_RING_TYPE_VCE) 689 730 return -EINVAL; 690 731 691 - ib->user = &parser->uf; 732 + ib->user = &parser->job->uf; 692 733 } 693 734 694 735 return 0; ··· 698 739 struct amdgpu_cs_parser *p) 699 740 { 700 741 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 701 - struct amdgpu_ib *ib; 702 742 int i, j, r; 703 743 704 - if (!p->num_ibs) 705 - return 0; 706 - 707 - /* Add dependencies to first IB */ 708 - ib = &p->ibs[0]; 709 744 for (i = 0; i < p->nchunks; ++i) { 710 745 struct drm_amdgpu_cs_chunk_dep *deps; 711 746 struct amdgpu_cs_chunk *chunk; ··· 737 784 return r; 738 785 739 786 } else if (fence) { 740 - r = amdgpu_sync_fence(adev, &ib->sync, fence); 787 + r = amdgpu_sync_fence(adev, &p->job->sync, 788 + fence); 741 789 fence_put(fence); 742 790 amdgpu_ctx_put(ctx); 743 791 if (r) ··· 750 796 return 0; 751 797 } 752 798 753 - static int amdgpu_cs_free_job(struct amdgpu_job *job) 799 + static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, 800 + union drm_amdgpu_cs *cs) 754 801 { 755 - int i; 756 - if (job->ibs) 757 - for (i = 0; i < job->num_ibs; i++) 758 - amdgpu_ib_free(job->adev, &job->ibs[i]); 759 - kfree(job->ibs); 760 - if (job->uf.bo) 761 - amdgpu_bo_unref(&job->uf.bo); 802 + struct amdgpu_ring *ring = p->job->ring; 803 + struct amd_sched_fence *fence; 804 + struct amdgpu_job *job; 805 + 806 + job = p->job; 807 + p->job = NULL; 808 + 809 + job->base.sched = &ring->sched; 810 + job->base.s_entity = &p->ctx->rings[ring->idx].entity; 811 + job->owner = p->filp; 812 + 813 + fence = amd_sched_fence_create(job->base.s_entity, p->filp); 814 + if (!fence) { 815 + amdgpu_job_free(job); 816 + return -ENOMEM; 817 + } 818 + 819 + job->base.s_fence = fence; 820 + p->fence = fence_get(&fence->base); 821 + 822 + cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, 823 + &fence->base); 824 + job->ibs[job->num_ibs - 1].sequence = cs->out.handle; 825 + 826 + trace_amdgpu_cs_ioctl(job); 827 + amd_sched_entity_push_job(&job->base); 828 + 762 829 return 0; 763 830 } 764 831 ··· 804 829 r = amdgpu_cs_handle_lockup(adev, r); 805 830 return r; 806 831 } 807 - r = amdgpu_cs_parser_relocs(&parser); 832 + r = amdgpu_cs_parser_bos(&parser, data); 808 833 if (r == -ENOMEM) 809 834 DRM_ERROR("Not enough memory for command submission!\n"); 810 835 else if (r && r != -ERESTARTSYS) ··· 823 848 if (r) 824 849 goto out; 825 850 826 - for (i = 0; i < parser.num_ibs; i++) 851 + for (i = 
0; i < parser.job->num_ibs; i++) 827 852 trace_amdgpu_cs(&parser, i); 828 853 829 854 r = amdgpu_cs_ib_vm_chunk(adev, &parser); 830 855 if (r) 831 856 goto out; 832 857 833 - if (amdgpu_enable_scheduler && parser.num_ibs) { 834 - struct amdgpu_ring * ring = parser.ibs->ring; 835 - struct amd_sched_fence *fence; 836 - struct amdgpu_job *job; 837 - 838 - job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); 839 - if (!job) { 840 - r = -ENOMEM; 841 - goto out; 842 - } 843 - 844 - job->base.sched = &ring->sched; 845 - job->base.s_entity = &parser.ctx->rings[ring->idx].entity; 846 - job->adev = parser.adev; 847 - job->owner = parser.filp; 848 - job->free_job = amdgpu_cs_free_job; 849 - 850 - job->ibs = parser.ibs; 851 - job->num_ibs = parser.num_ibs; 852 - parser.ibs = NULL; 853 - parser.num_ibs = 0; 854 - 855 - if (job->ibs[job->num_ibs - 1].user) { 856 - job->uf = parser.uf; 857 - job->ibs[job->num_ibs - 1].user = &job->uf; 858 - parser.uf.bo = NULL; 859 - } 860 - 861 - fence = amd_sched_fence_create(job->base.s_entity, 862 - parser.filp); 863 - if (!fence) { 864 - r = -ENOMEM; 865 - amdgpu_cs_free_job(job); 866 - kfree(job); 867 - goto out; 868 - } 869 - job->base.s_fence = fence; 870 - parser.fence = fence_get(&fence->base); 871 - 872 - cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring, 873 - &fence->base); 874 - job->ibs[job->num_ibs - 1].sequence = cs->out.handle; 875 - 876 - trace_amdgpu_cs_ioctl(job); 877 - amd_sched_entity_push_job(&job->base); 878 - 879 - } else { 880 - struct amdgpu_fence *fence; 881 - 882 - r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs, 883 - parser.filp); 884 - fence = parser.ibs[parser.num_ibs - 1].fence; 885 - parser.fence = fence_get(&fence->base); 886 - cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; 887 - } 858 + r = amdgpu_cs_submit(&parser, cs); 888 859 889 860 out: 890 861 amdgpu_cs_parser_fini(&parser, r, reserved_buffers); ··· 901 980 amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, 902 981 uint64_t addr, struct amdgpu_bo **bo) 903 982 { 904 - struct amdgpu_bo_list_entry *reloc; 905 983 struct amdgpu_bo_va_mapping *mapping; 984 + unsigned i; 985 + 986 + if (!parser->bo_list) 987 + return NULL; 906 988 907 989 addr /= AMDGPU_GPU_PAGE_SIZE; 908 990 909 - list_for_each_entry(reloc, &parser->validated, tv.head) { 910 - if (!reloc->bo_va) 991 + for (i = 0; i < parser->bo_list->num_entries; i++) { 992 + struct amdgpu_bo_list_entry *lobj; 993 + 994 + lobj = &parser->bo_list->array[i]; 995 + if (!lobj->bo_va) 911 996 continue; 912 997 913 - list_for_each_entry(mapping, &reloc->bo_va->valids, list) { 998 + list_for_each_entry(mapping, &lobj->bo_va->valids, list) { 914 999 if (mapping->it.start > addr || 915 1000 addr > mapping->it.last) 916 1001 continue; 917 1002 918 - *bo = reloc->bo_va->bo; 1003 + *bo = lobj->bo_va->bo; 919 1004 return mapping; 920 1005 } 921 1006 922 - list_for_each_entry(mapping, &reloc->bo_va->invalids, list) { 1007 + list_for_each_entry(mapping, &lobj->bo_va->invalids, list) { 923 1008 if (mapping->it.start > addr || 924 1009 addr > mapping->it.last) 925 1010 continue; 926 1011 927 - *bo = reloc->bo_va->bo; 1012 + *bo = lobj->bo_va->bo; 928 1013 return mapping; 929 1014 } 930 1015 }
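The reworked amdgpu_cs_list_validate() above throttles buffer moves with two new parser fields, bytes_moved and bytes_moved_threshold, rather than recomputing the budget inside the loop. A minimal sketch of the per-BO placement decision it makes (pick_domain() is a hypothetical helper, not part of the patch, and it omits the retry that falls back to allowed_domains when validation fails):

/* Sketch only -- relies on the struct definitions from amdgpu.h.
 * Stay with the preferred placement while the per-submission byte
 * budget from amdgpu_cs_get_threshold_for_moves() is not exhausted;
 * otherwise accept any allowed placement so the BO is not migrated.
 */
static u32 pick_domain(struct amdgpu_cs_parser *p, struct amdgpu_bo *bo)
{
	if (p->bytes_moved <= p->bytes_moved_threshold)
		return bo->prefered_domains;	/* under budget: moving is fine */

	return bo->allowed_domains;		/* over budget: avoid forcing a move */
}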
+38 -47
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
··· 25 25 #include <drm/drmP.h> 26 26 #include "amdgpu.h" 27 27 28 - int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, 29 - struct amdgpu_ctx *ctx) 28 + static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) 30 29 { 31 30 unsigned i, j; 32 31 int r; ··· 34 35 ctx->adev = adev; 35 36 kref_init(&ctx->refcount); 36 37 spin_lock_init(&ctx->ring_lock); 37 - ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs * 38 - AMDGPU_MAX_RINGS, GFP_KERNEL); 38 + ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS, 39 + sizeof(struct fence*), GFP_KERNEL); 39 40 if (!ctx->fences) 40 41 return -ENOMEM; 41 42 42 43 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 43 44 ctx->rings[i].sequence = 1; 44 - ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) * 45 - amdgpu_sched_jobs * i; 45 + ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i]; 46 46 } 47 - if (amdgpu_enable_scheduler) { 48 - /* create context entity for each ring */ 49 - for (i = 0; i < adev->num_rings; i++) { 50 - struct amd_sched_rq *rq; 51 - if (pri >= AMD_SCHED_MAX_PRIORITY) { 52 - kfree(ctx->fences); 53 - return -EINVAL; 54 - } 55 - rq = &adev->rings[i]->sched.sched_rq[pri]; 56 - r = amd_sched_entity_init(&adev->rings[i]->sched, 57 - &ctx->rings[i].entity, 58 - rq, amdgpu_sched_jobs); 59 - if (r) 60 - break; 61 - } 47 + /* create context entity for each ring */ 48 + for (i = 0; i < adev->num_rings; i++) { 49 + struct amdgpu_ring *ring = adev->rings[i]; 50 + struct amd_sched_rq *rq; 62 51 63 - if (i < adev->num_rings) { 64 - for (j = 0; j < i; j++) 65 - amd_sched_entity_fini(&adev->rings[j]->sched, 66 - &ctx->rings[j].entity); 67 - kfree(ctx->fences); 68 - return r; 69 - } 52 + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; 53 + r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity, 54 + rq, amdgpu_sched_jobs); 55 + if (r) 56 + break; 57 + } 58 + 59 + if (i < adev->num_rings) { 60 + for (j = 0; j < i; j++) 61 + amd_sched_entity_fini(&adev->rings[j]->sched, 62 + &ctx->rings[j].entity); 63 + kfree(ctx->fences); 64 + return r; 70 65 } 71 66 return 0; 72 67 } 73 68 74 - void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) 69 + static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) 75 70 { 76 71 struct amdgpu_device *adev = ctx->adev; 77 72 unsigned i, j; ··· 78 85 fence_put(ctx->rings[i].fences[j]); 79 86 kfree(ctx->fences); 80 87 81 - if (amdgpu_enable_scheduler) { 82 - for (i = 0; i < adev->num_rings; i++) 83 - amd_sched_entity_fini(&adev->rings[i]->sched, 84 - &ctx->rings[i].entity); 85 - } 88 + for (i = 0; i < adev->num_rings; i++) 89 + amd_sched_entity_fini(&adev->rings[i]->sched, 90 + &ctx->rings[i].entity); 86 91 } 87 92 88 93 static int amdgpu_ctx_alloc(struct amdgpu_device *adev, ··· 103 112 return r; 104 113 } 105 114 *id = (uint32_t)r; 106 - r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx); 115 + r = amdgpu_ctx_init(adev, ctx); 107 116 if (r) { 108 117 idr_remove(&mgr->ctx_handles, *id); 109 118 *id = 0; ··· 191 200 id = args->in.ctx_id; 192 201 193 202 switch (args->in.op) { 194 - case AMDGPU_CTX_OP_ALLOC_CTX: 195 - r = amdgpu_ctx_alloc(adev, fpriv, &id); 196 - args->out.alloc.ctx_id = id; 197 - break; 198 - case AMDGPU_CTX_OP_FREE_CTX: 199 - r = amdgpu_ctx_free(fpriv, id); 200 - break; 201 - case AMDGPU_CTX_OP_QUERY_STATE: 202 - r = amdgpu_ctx_query(adev, fpriv, id, &args->out); 203 - break; 204 - default: 205 - return -EINVAL; 203 + case AMDGPU_CTX_OP_ALLOC_CTX: 204 + r = amdgpu_ctx_alloc(adev, fpriv, &id); 205 + args->out.alloc.ctx_id 
= id; 206 + break; 207 + case AMDGPU_CTX_OP_FREE_CTX: 208 + r = amdgpu_ctx_free(fpriv, id); 209 + break; 210 + case AMDGPU_CTX_OP_QUERY_STATE: 211 + r = amdgpu_ctx_query(adev, fpriv, id, &args->out); 212 + break; 213 + default: 214 + return -EINVAL; 206 215 } 207 216 208 217 return r;
+11 -41
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 636 636 } 637 637 638 638 /** 639 - * amdgpu_boot_test_post_card - check and possibly initialize the hw 640 - * 641 - * @adev: amdgpu_device pointer 642 - * 643 - * Check if the asic is initialized and if not, attempt to initialize 644 - * it (all asics). 645 - * Returns true if initialized or false if not. 646 - */ 647 - bool amdgpu_boot_test_post_card(struct amdgpu_device *adev) 648 - { 649 - if (amdgpu_card_posted(adev)) 650 - return true; 651 - 652 - if (adev->bios) { 653 - DRM_INFO("GPU not posted. posting now...\n"); 654 - if (adev->is_atom_bios) 655 - amdgpu_atom_asic_init(adev->mode_info.atom_context); 656 - return true; 657 - } else { 658 - dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); 659 - return false; 660 - } 661 - } 662 - 663 - /** 664 639 * amdgpu_dummy_page_init - init dummy page used by the driver 665 640 * 666 641 * @adev: amdgpu_device pointer ··· 933 958 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", 934 959 amdgpu_sched_jobs); 935 960 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); 936 - } 937 - /* vramlimit must be a power of two */ 938 - if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) { 939 - dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n", 940 - amdgpu_vram_limit); 941 - amdgpu_vram_limit = 0; 942 961 } 943 962 944 963 if (amdgpu_gart_size != -1) { ··· 1403 1434 adev->mman.buffer_funcs = NULL; 1404 1435 adev->mman.buffer_funcs_ring = NULL; 1405 1436 adev->vm_manager.vm_pte_funcs = NULL; 1406 - adev->vm_manager.vm_pte_funcs_ring = NULL; 1437 + adev->vm_manager.vm_pte_num_rings = 0; 1407 1438 adev->gart.gart_funcs = NULL; 1408 1439 adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); 1409 1440 ··· 1424 1455 1425 1456 /* mutex initialization are all done here so we 1426 1457 * can recall function without having locking issues */ 1427 - mutex_init(&adev->ring_lock); 1458 + mutex_init(&adev->vm_manager.lock); 1428 1459 atomic_set(&adev->irq.ih.lock, 0); 1429 - mutex_init(&adev->gem.mutex); 1430 1460 mutex_init(&adev->pm.mutex); 1431 1461 mutex_init(&adev->gfx.gpu_clock_mutex); 1432 1462 mutex_init(&adev->srbm_mutex); ··· 1499 1531 return r; 1500 1532 } 1501 1533 1534 + /* See if the asic supports SR-IOV */ 1535 + adev->virtualization.supports_sr_iov = 1536 + amdgpu_atombios_has_gpu_virtualization_table(adev); 1537 + 1502 1538 /* Post card if necessary */ 1503 - if (!amdgpu_card_posted(adev)) { 1539 + if (!amdgpu_card_posted(adev) || 1540 + adev->virtualization.supports_sr_iov) { 1504 1541 if (!adev->bios) { 1505 1542 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); 1506 1543 return -EINVAL; ··· 1550 1577 return r; 1551 1578 } 1552 1579 1553 - r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx); 1554 - if (r) { 1555 - dev_err(adev->dev, "failed to create kernel context (%d).\n", r); 1556 - return r; 1557 - } 1558 1580 r = amdgpu_ib_ring_tests(adev); 1559 1581 if (r) 1560 1582 DRM_ERROR("ib ring test failed (%d).\n", r); ··· 1613 1645 adev->shutdown = true; 1614 1646 /* evict vram memory */ 1615 1647 amdgpu_bo_evict_vram(adev); 1616 - amdgpu_ctx_fini(&adev->kernel_ctx); 1617 1648 amdgpu_ib_pool_fini(adev); 1618 1649 amdgpu_fence_driver_fini(adev); 1619 1650 amdgpu_fbdev_fini(adev); ··· 1856 1889 1857 1890 retry: 1858 1891 r = amdgpu_asic_reset(adev); 1892 + /* post card */ 1893 + amdgpu_atom_asic_init(adev->mode_info.atom_context); 1894 + 1859 1895 if (!r) { 1860 1896 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); 1861 1897 r = amdgpu_resume(adev);
+27 -26
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 35 35 #include <drm/drm_crtc_helper.h> 36 36 #include <drm/drm_edid.h> 37 37 38 - static void amdgpu_flip_wait_fence(struct amdgpu_device *adev, 39 - struct fence **f) 38 + static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb) 40 39 { 41 - struct amdgpu_fence *fence; 42 - long r; 40 + struct amdgpu_flip_work *work = 41 + container_of(cb, struct amdgpu_flip_work, cb); 43 42 44 - if (*f == NULL) 45 - return; 43 + fence_put(f); 44 + schedule_work(&work->flip_work); 45 + } 46 46 47 - fence = to_amdgpu_fence(*f); 48 - if (fence) { 49 - r = fence_wait(&fence->base, false); 50 - if (r == -EDEADLK) 51 - r = amdgpu_gpu_reset(adev); 52 - } else 53 - r = fence_wait(*f, false); 47 + static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, 48 + struct fence **f) 49 + { 50 + struct fence *fence= *f; 54 51 55 - if (r) 56 - DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r); 52 + if (fence == NULL) 53 + return false; 57 54 58 - /* We continue with the page flip even if we failed to wait on 59 - * the fence, otherwise the DRM core and userspace will be 60 - * confused about which BO the CRTC is scanning out 61 - */ 62 - fence_put(*f); 63 55 *f = NULL; 56 + 57 + if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) 58 + return true; 59 + 60 + fence_put(*f); 61 + return false; 64 62 } 65 63 66 64 static void amdgpu_flip_work_func(struct work_struct *__work) ··· 74 76 int vpos, hpos, stat, min_udelay; 75 77 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; 76 78 77 - amdgpu_flip_wait_fence(adev, &work->excl); 79 + if (amdgpu_flip_handle_fence(work, &work->excl)) 80 + return; 81 + 78 82 for (i = 0; i < work->shared_count; ++i) 79 - amdgpu_flip_wait_fence(adev, &work->shared[i]); 83 + if (amdgpu_flip_handle_fence(work, &work->shared[i])) 84 + return; 80 85 81 86 /* We borrow the event spin lock for protecting flip_status */ 82 87 spin_lock_irqsave(&crtc->dev->event_lock, flags); ··· 119 118 spin_lock_irqsave(&crtc->dev->event_lock, flags); 120 119 }; 121 120 122 - /* do the flip (mmio) */ 123 - adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); 124 121 /* set the flip status */ 125 122 amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 126 - 127 123 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 124 + 125 + /* Do the flip (mmio) */ 126 + adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); 128 127 } 129 128 130 129 /* ··· 243 242 /* update crtc fb */ 244 243 crtc->primary->fb = fb; 245 244 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 246 - queue_work(amdgpu_crtc->pflip_queue, &work->flip_work); 245 + amdgpu_flip_work_func(&work->flip_work); 247 246 return 0; 248 247 249 248 vblank_cleanup:
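One detail worth flagging in amdgpu_flip_handle_fence() above: by the time the already-signaled branch runs, *f has been set to NULL, so fence_put(*f) puts a NULL pointer (harmless, since fence_put() ignores NULL, but the reference held in the local fence variable then looks unreleased). Presumably fence_put(fence) was intended; a sketch of the helper under that assumption:

/* Sketch only -- same flow as amdgpu_flip_handle_fence() in the diff,
 * with the final put applied to the local reference rather than the
 * already-cleared *f (an assumption about the intended behaviour).
 */
static bool flip_handle_fence_sketch(struct amdgpu_flip_work *work,
				     struct fence **f)
{
	struct fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	/* Callback armed: amdgpu_flip_callback() drops the reference and
	 * re-schedules the flip work once the fence signals. */
	if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
		return true;

	/* Fence already signaled: drop our reference and continue inline. */
	fence_put(fence);
	return false;
}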
-12
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 69 69 int amdgpu_smc_load_fw = 1; 70 70 int amdgpu_aspm = -1; 71 71 int amdgpu_runtime_pm = -1; 72 - int amdgpu_hard_reset = 0; 73 72 unsigned amdgpu_ip_block_mask = 0xffffffff; 74 73 int amdgpu_bapm = -1; 75 74 int amdgpu_deep_color = 0; ··· 77 78 int amdgpu_vm_fault_stop = 0; 78 79 int amdgpu_vm_debug = 0; 79 80 int amdgpu_exp_hw_support = 0; 80 - int amdgpu_enable_scheduler = 1; 81 81 int amdgpu_sched_jobs = 32; 82 82 int amdgpu_sched_hw_submission = 2; 83 - int amdgpu_enable_semaphores = 0; 84 83 int amdgpu_powerplay = -1; 85 84 86 85 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); ··· 123 126 MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)"); 124 127 module_param_named(runpm, amdgpu_runtime_pm, int, 0444); 125 128 126 - MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); 127 - module_param_named(hard_reset, amdgpu_hard_reset, int, 0444); 128 - 129 129 MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))"); 130 130 module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444); 131 131 ··· 147 153 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); 148 154 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); 149 155 150 - MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)"); 151 - module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444); 152 - 153 156 MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)"); 154 157 module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); 155 158 156 159 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); 157 160 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); 158 - 159 - MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))"); 160 - module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644); 161 161 162 162 #ifdef CONFIG_DRM_AMD_POWERPLAY 163 163 MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
+53 -120
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
··· 107 107 if ((*fence) == NULL) { 108 108 return -ENOMEM; 109 109 } 110 - (*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx]; 110 + (*fence)->seq = ++ring->fence_drv.sync_seq; 111 111 (*fence)->ring = ring; 112 112 (*fence)->owner = owner; 113 113 fence_init(&(*fence)->base, &amdgpu_fence_ops, ··· 171 171 */ 172 172 last_seq = atomic64_read(&ring->fence_drv.last_seq); 173 173 do { 174 - last_emitted = ring->fence_drv.sync_seq[ring->idx]; 174 + last_emitted = ring->fence_drv.sync_seq; 175 175 seq = amdgpu_fence_read(ring); 176 176 seq |= last_seq & 0xffffffff00000000LL; 177 177 if (seq < last_seq) { ··· 260 260 } 261 261 262 262 /* 263 - * amdgpu_ring_wait_seq_timeout - wait for seq of the specific ring to signal 263 + * amdgpu_ring_wait_seq - wait for seq of the specific ring to signal 264 264 * @ring: ring to wait on for the seq number 265 265 * @seq: seq number wait for 266 266 * 267 267 * return value: 268 268 * 0: seq signaled, and gpu not hang 269 - * -EDEADL: GPU hang detected 270 269 * -EINVAL: some paramter is not valid 271 270 */ 272 271 static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq) 273 272 { 274 - bool signaled = false; 275 - 276 273 BUG_ON(!ring); 277 - if (seq > ring->fence_drv.sync_seq[ring->idx]) 274 + if (seq > ring->fence_drv.sync_seq) 278 275 return -EINVAL; 279 276 280 277 if (atomic64_read(&ring->fence_drv.last_seq) >= seq) 281 278 return 0; 282 279 283 280 amdgpu_fence_schedule_fallback(ring); 284 - wait_event(ring->fence_drv.fence_queue, ( 285 - (signaled = amdgpu_fence_seq_signaled(ring, seq)))); 281 + wait_event(ring->fence_drv.fence_queue, 282 + amdgpu_fence_seq_signaled(ring, seq)); 286 283 287 - if (signaled) 288 - return 0; 289 - else 290 - return -EDEADLK; 284 + return 0; 291 285 } 292 286 293 287 /** ··· 298 304 { 299 305 uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL; 300 306 301 - if (seq >= ring->fence_drv.sync_seq[ring->idx]) 307 + if (seq >= ring->fence_drv.sync_seq) 302 308 return -ENOENT; 303 309 304 310 return amdgpu_fence_ring_wait_seq(ring, seq); ··· 316 322 */ 317 323 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) 318 324 { 319 - uint64_t seq = ring->fence_drv.sync_seq[ring->idx]; 325 + uint64_t seq = ring->fence_drv.sync_seq; 320 326 321 327 if (!seq) 322 328 return 0; ··· 341 347 * but it's ok to report slightly wrong fence count here. 342 348 */ 343 349 amdgpu_fence_process(ring); 344 - emitted = ring->fence_drv.sync_seq[ring->idx] 350 + emitted = ring->fence_drv.sync_seq 345 351 - atomic64_read(&ring->fence_drv.last_seq); 346 352 /* to avoid 32bits warp around */ 347 353 if (emitted > 0x10000000) 348 354 emitted = 0x10000000; 349 355 350 356 return (unsigned)emitted; 351 - } 352 - 353 - /** 354 - * amdgpu_fence_need_sync - do we need a semaphore 355 - * 356 - * @fence: amdgpu fence object 357 - * @dst_ring: which ring to check against 358 - * 359 - * Check if the fence needs to be synced against another ring 360 - * (all asics). If so, we need to emit a semaphore. 361 - * Returns true if we need to sync with another ring, false if 362 - * not. 
363 - */ 364 - bool amdgpu_fence_need_sync(struct amdgpu_fence *fence, 365 - struct amdgpu_ring *dst_ring) 366 - { 367 - struct amdgpu_fence_driver *fdrv; 368 - 369 - if (!fence) 370 - return false; 371 - 372 - if (fence->ring == dst_ring) 373 - return false; 374 - 375 - /* we are protected by the ring mutex */ 376 - fdrv = &dst_ring->fence_drv; 377 - if (fence->seq <= fdrv->sync_seq[fence->ring->idx]) 378 - return false; 379 - 380 - return true; 381 - } 382 - 383 - /** 384 - * amdgpu_fence_note_sync - record the sync point 385 - * 386 - * @fence: amdgpu fence object 387 - * @dst_ring: which ring to check against 388 - * 389 - * Note the sequence number at which point the fence will 390 - * be synced with the requested ring (all asics). 391 - */ 392 - void amdgpu_fence_note_sync(struct amdgpu_fence *fence, 393 - struct amdgpu_ring *dst_ring) 394 - { 395 - struct amdgpu_fence_driver *dst, *src; 396 - unsigned i; 397 - 398 - if (!fence) 399 - return; 400 - 401 - if (fence->ring == dst_ring) 402 - return; 403 - 404 - /* we are protected by the ring mutex */ 405 - src = &fence->ring->fence_drv; 406 - dst = &dst_ring->fence_drv; 407 - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 408 - if (i == dst_ring->idx) 409 - continue; 410 - 411 - dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]); 412 - } 413 357 } 414 358 415 359 /** ··· 403 471 */ 404 472 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) 405 473 { 406 - int i, r; 474 + long timeout; 475 + int r; 407 476 408 477 ring->fence_drv.cpu_addr = NULL; 409 478 ring->fence_drv.gpu_addr = 0; 410 - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 411 - ring->fence_drv.sync_seq[i] = 0; 412 - 479 + ring->fence_drv.sync_seq = 0; 413 480 atomic64_set(&ring->fence_drv.last_seq, 0); 414 481 ring->fence_drv.initialized = false; 415 482 ··· 417 486 418 487 init_waitqueue_head(&ring->fence_drv.fence_queue); 419 488 420 - if (amdgpu_enable_scheduler) { 421 - long timeout = msecs_to_jiffies(amdgpu_lockup_timeout); 422 - if (timeout == 0) { 423 - /* 424 - * FIXME: 425 - * Delayed workqueue cannot use it directly, 426 - * so the scheduler will not use delayed workqueue if 427 - * MAX_SCHEDULE_TIMEOUT is set. 428 - * Currently keep it simple and silly. 429 - */ 430 - timeout = MAX_SCHEDULE_TIMEOUT; 431 - } 432 - r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, 433 - amdgpu_sched_hw_submission, 434 - timeout, ring->name); 435 - if (r) { 436 - DRM_ERROR("Failed to create scheduler on ring %s.\n", 437 - ring->name); 438 - return r; 439 - } 489 + timeout = msecs_to_jiffies(amdgpu_lockup_timeout); 490 + if (timeout == 0) { 491 + /* 492 + * FIXME: 493 + * Delayed workqueue cannot use it directly, 494 + * so the scheduler will not use delayed workqueue if 495 + * MAX_SCHEDULE_TIMEOUT is set. 496 + * Currently keep it simple and silly. 
497 + */ 498 + timeout = MAX_SCHEDULE_TIMEOUT; 499 + } 500 + r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, 501 + amdgpu_sched_hw_submission, 502 + timeout, ring->name); 503 + if (r) { 504 + DRM_ERROR("Failed to create scheduler on ring %s.\n", 505 + ring->name); 506 + return r; 440 507 } 441 508 442 509 return 0; ··· 481 552 482 553 if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) 483 554 kmem_cache_destroy(amdgpu_fence_slab); 484 - mutex_lock(&adev->ring_lock); 485 555 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 486 556 struct amdgpu_ring *ring = adev->rings[i]; 487 557 ··· 498 570 del_timer_sync(&ring->fence_drv.fallback_timer); 499 571 ring->fence_drv.initialized = false; 500 572 } 501 - mutex_unlock(&adev->ring_lock); 502 573 } 503 574 504 575 /** ··· 512 585 { 513 586 int i, r; 514 587 515 - mutex_lock(&adev->ring_lock); 516 588 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 517 589 struct amdgpu_ring *ring = adev->rings[i]; 518 590 if (!ring || !ring->fence_drv.initialized) ··· 528 602 amdgpu_irq_put(adev, ring->fence_drv.irq_src, 529 603 ring->fence_drv.irq_type); 530 604 } 531 - mutex_unlock(&adev->ring_lock); 532 605 } 533 606 534 607 /** ··· 546 621 { 547 622 int i; 548 623 549 - mutex_lock(&adev->ring_lock); 550 624 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 551 625 struct amdgpu_ring *ring = adev->rings[i]; 552 626 if (!ring || !ring->fence_drv.initialized) ··· 555 631 amdgpu_irq_get(adev, ring->fence_drv.irq_src, 556 632 ring->fence_drv.irq_type); 557 633 } 558 - mutex_unlock(&adev->ring_lock); 559 634 } 560 635 561 636 /** ··· 574 651 if (!ring || !ring->fence_drv.initialized) 575 652 continue; 576 653 577 - amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]); 654 + amdgpu_fence_write(ring, ring->fence_drv.sync_seq); 578 655 } 579 656 } 580 657 ··· 704 781 struct drm_info_node *node = (struct drm_info_node *)m->private; 705 782 struct drm_device *dev = node->minor->dev; 706 783 struct amdgpu_device *adev = dev->dev_private; 707 - int i, j; 784 + int i; 708 785 709 786 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 710 787 struct amdgpu_ring *ring = adev->rings[i]; ··· 717 794 seq_printf(m, "Last signaled fence 0x%016llx\n", 718 795 (unsigned long long)atomic64_read(&ring->fence_drv.last_seq)); 719 796 seq_printf(m, "Last emitted 0x%016llx\n", 720 - ring->fence_drv.sync_seq[i]); 721 - 722 - for (j = 0; j < AMDGPU_MAX_RINGS; ++j) { 723 - struct amdgpu_ring *other = adev->rings[j]; 724 - if (i != j && other && other->fence_drv.initialized && 725 - ring->fence_drv.sync_seq[j]) 726 - seq_printf(m, "Last sync to ring %d 0x%016llx\n", 727 - j, ring->fence_drv.sync_seq[j]); 728 - } 797 + ring->fence_drv.sync_seq); 729 798 } 799 + return 0; 800 + } 801 + 802 + /** 803 + * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset 804 + * 805 + * Manually trigger a gpu reset at the next fence wait. 
806 + */ 807 + static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data) 808 + { 809 + struct drm_info_node *node = (struct drm_info_node *) m->private; 810 + struct drm_device *dev = node->minor->dev; 811 + struct amdgpu_device *adev = dev->dev_private; 812 + 813 + seq_printf(m, "gpu reset\n"); 814 + amdgpu_gpu_reset(adev); 815 + 730 816 return 0; 731 817 } 732 818 733 819 static struct drm_info_list amdgpu_debugfs_fence_list[] = { 734 820 {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL}, 821 + {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL} 735 822 }; 736 823 #endif 737 824 738 825 int amdgpu_debugfs_fence_init(struct amdgpu_device *adev) 739 826 { 740 827 #if defined(CONFIG_DEBUG_FS) 741 - return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1); 828 + return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2); 742 829 #else 743 830 return 0; 744 831 #endif
+92 -43
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 83 83 return r; 84 84 } 85 85 *obj = &robj->gem_base; 86 - robj->pid = task_pid_nr(current); 87 - 88 - mutex_lock(&adev->gem.mutex); 89 - list_add_tail(&robj->list, &adev->gem.objects); 90 - mutex_unlock(&adev->gem.mutex); 91 86 92 87 return 0; 93 88 } 94 89 95 - int amdgpu_gem_init(struct amdgpu_device *adev) 90 + void amdgpu_gem_force_release(struct amdgpu_device *adev) 96 91 { 97 - INIT_LIST_HEAD(&adev->gem.objects); 98 - return 0; 99 - } 92 + struct drm_device *ddev = adev->ddev; 93 + struct drm_file *file; 100 94 101 - void amdgpu_gem_fini(struct amdgpu_device *adev) 102 - { 103 - amdgpu_bo_force_delete(adev); 95 + mutex_lock(&ddev->struct_mutex); 96 + 97 + list_for_each_entry(file, &ddev->filelist, lhead) { 98 + struct drm_gem_object *gobj; 99 + int handle; 100 + 101 + WARN_ONCE(1, "Still active user space clients!\n"); 102 + spin_lock(&file->table_lock); 103 + idr_for_each_entry(&file->object_idr, gobj, handle) { 104 + WARN_ONCE(1, "And also active allocations!\n"); 105 + drm_gem_object_unreference(gobj); 106 + } 107 + idr_destroy(&file->object_idr); 108 + spin_unlock(&file->table_lock); 109 + } 110 + 111 + mutex_unlock(&ddev->struct_mutex); 104 112 } 105 113 106 114 /* ··· 260 252 goto handle_lockup; 261 253 262 254 bo = gem_to_amdgpu_bo(gobj); 255 + bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT; 256 + bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; 263 257 r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); 264 258 if (r) 265 259 goto release_object; ··· 318 308 return -ENOENT; 319 309 } 320 310 robj = gem_to_amdgpu_bo(gobj); 321 - if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) || 311 + if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || 322 312 (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { 323 313 drm_gem_object_unreference_unlocked(gobj); 324 314 return -EPERM; ··· 638 628 639 629 info.bo_size = robj->gem_base.size; 640 630 info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; 641 - info.domains = robj->initial_domain; 631 + info.domains = robj->prefered_domains; 642 632 info.domain_flags = robj->flags; 643 633 amdgpu_bo_unreserve(robj); 644 634 if (copy_to_user(out, &info, sizeof(info))) ··· 646 636 break; 647 637 } 648 638 case AMDGPU_GEM_OP_SET_PLACEMENT: 649 - if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) { 639 + if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) { 650 640 r = -EPERM; 651 641 amdgpu_bo_unreserve(robj); 652 642 break; 653 643 } 654 - robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM | 655 - AMDGPU_GEM_DOMAIN_GTT | 656 - AMDGPU_GEM_DOMAIN_CPU); 644 + robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | 645 + AMDGPU_GEM_DOMAIN_GTT | 646 + AMDGPU_GEM_DOMAIN_CPU); 647 + robj->allowed_domains = robj->prefered_domains; 648 + if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) 649 + robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; 650 + 657 651 amdgpu_bo_unreserve(robj); 658 652 break; 659 653 default: ··· 702 688 } 703 689 704 690 #if defined(CONFIG_DEBUG_FS) 691 + static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) 692 + { 693 + struct drm_gem_object *gobj = ptr; 694 + struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); 695 + struct seq_file *m = data; 696 + 697 + unsigned domain; 698 + const char *placement; 699 + unsigned pin_count; 700 + 701 + domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); 702 + switch (domain) { 703 + case AMDGPU_GEM_DOMAIN_VRAM: 704 + placement = "VRAM"; 705 + break; 706 + case AMDGPU_GEM_DOMAIN_GTT: 707 + placement = " GTT"; 708 + break; 709 + case AMDGPU_GEM_DOMAIN_CPU: 710 + 
default: 711 + placement = " CPU"; 712 + break; 713 + } 714 + seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx", 715 + id, amdgpu_bo_size(bo), placement, 716 + amdgpu_bo_gpu_offset(bo)); 717 + 718 + pin_count = ACCESS_ONCE(bo->pin_count); 719 + if (pin_count) 720 + seq_printf(m, " pin count %d", pin_count); 721 + seq_printf(m, "\n"); 722 + 723 + return 0; 724 + } 725 + 705 726 static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data) 706 727 { 707 728 struct drm_info_node *node = (struct drm_info_node *)m->private; 708 729 struct drm_device *dev = node->minor->dev; 709 - struct amdgpu_device *adev = dev->dev_private; 710 - struct amdgpu_bo *rbo; 711 - unsigned i = 0; 730 + struct drm_file *file; 731 + int r; 712 732 713 - mutex_lock(&adev->gem.mutex); 714 - list_for_each_entry(rbo, &adev->gem.objects, list) { 715 - unsigned domain; 716 - const char *placement; 733 + r = mutex_lock_interruptible(&dev->struct_mutex); 734 + if (r) 735 + return r; 717 736 718 - domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type); 719 - switch (domain) { 720 - case AMDGPU_GEM_DOMAIN_VRAM: 721 - placement = "VRAM"; 722 - break; 723 - case AMDGPU_GEM_DOMAIN_GTT: 724 - placement = " GTT"; 725 - break; 726 - case AMDGPU_GEM_DOMAIN_CPU: 727 - default: 728 - placement = " CPU"; 729 - break; 730 - } 731 - seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n", 732 - i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20, 733 - placement, (unsigned long)rbo->pid); 734 - i++; 737 + list_for_each_entry(file, &dev->filelist, lhead) { 738 + struct task_struct *task; 739 + 740 + /* 741 + * Although we have a valid reference on file->pid, that does 742 + * not guarantee that the task_struct who called get_pid() is 743 + * still alive (e.g. get_pid(current) => fork() => exit()). 744 + * Therefore, we need to protect this ->comm access using RCU. 745 + */ 746 + rcu_read_lock(); 747 + task = pid_task(file->pid, PIDTYPE_PID); 748 + seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid), 749 + task ? task->comm : "<unknown>"); 750 + rcu_read_unlock(); 751 + 752 + spin_lock(&file->table_lock); 753 + idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m); 754 + spin_unlock(&file->table_lock); 735 755 } 736 - mutex_unlock(&adev->gem.mutex); 756 + 757 + mutex_unlock(&dev->struct_mutex); 737 758 return 0; 738 759 } 739 760
+20 -42
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 55 55 * suballocator. 56 56 * Returns 0 on success, error on failure. 57 57 */ 58 - int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, 58 + int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, 59 59 unsigned size, struct amdgpu_ib *ib) 60 60 { 61 - struct amdgpu_device *adev = ring->adev; 62 61 int r; 63 62 64 63 if (size) { ··· 74 75 ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); 75 76 } 76 77 77 - amdgpu_sync_create(&ib->sync); 78 - 79 - ib->ring = ring; 80 78 ib->vm = vm; 81 79 82 80 return 0; ··· 89 93 */ 90 94 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib) 91 95 { 92 - amdgpu_sync_free(adev, &ib->sync, &ib->fence->base); 93 96 amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base); 94 97 if (ib->fence) 95 98 fence_put(&ib->fence->base); ··· 101 106 * @num_ibs: number of IBs to schedule 102 107 * @ibs: IB objects to schedule 103 108 * @owner: owner for creating the fences 109 + * @f: fence created during this submission 104 110 * 105 111 * Schedule an IB on the associated ring (all asics). 106 112 * Returns 0 on success, error on failure. ··· 116 120 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior 117 121 * to SI there was just a DE IB. 118 122 */ 119 - int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, 120 - struct amdgpu_ib *ibs, void *owner) 123 + int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, 124 + struct amdgpu_ib *ibs, void *owner, 125 + struct fence *last_vm_update, 126 + struct fence **f) 121 127 { 128 + struct amdgpu_device *adev = ring->adev; 122 129 struct amdgpu_ib *ib = &ibs[0]; 123 - struct amdgpu_ring *ring; 124 130 struct amdgpu_ctx *ctx, *old_ctx; 125 131 struct amdgpu_vm *vm; 126 132 unsigned i; ··· 131 133 if (num_ibs == 0) 132 134 return -EINVAL; 133 135 134 - ring = ibs->ring; 135 136 ctx = ibs->ctx; 136 137 vm = ibs->vm; 137 138 ··· 138 141 dev_err(adev->dev, "couldn't schedule ib\n"); 139 142 return -EINVAL; 140 143 } 141 - r = amdgpu_sync_wait(&ibs->sync); 142 - if (r) { 143 - dev_err(adev->dev, "IB sync failed (%d).\n", r); 144 - return r; 144 + 145 + if (vm && !ibs->grabbed_vmid) { 146 + dev_err(adev->dev, "VM IB without ID\n"); 147 + return -EINVAL; 145 148 } 146 - r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs); 149 + 150 + r = amdgpu_ring_alloc(ring, 256 * num_ibs); 147 151 if (r) { 148 152 dev_err(adev->dev, "scheduling IB failed (%d).\n", r); 149 153 return r; 150 154 } 151 155 152 156 if (vm) { 153 - /* grab a vm id if necessary */ 154 - r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync); 155 - if (r) { 156 - amdgpu_ring_unlock_undo(ring); 157 - return r; 158 - } 159 - } 160 - 161 - r = amdgpu_sync_rings(&ibs->sync, ring); 162 - if (r) { 163 - amdgpu_ring_unlock_undo(ring); 164 - dev_err(adev->dev, "failed to sync rings (%d)\n", r); 165 - return r; 166 - } 167 - 168 - if (vm) { 169 157 /* do context switch */ 170 - amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update); 158 + amdgpu_vm_flush(ring, vm, last_vm_update); 171 159 172 160 if (ring->funcs->emit_gds_switch) 173 161 amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, ··· 168 186 for (i = 0; i < num_ibs; ++i) { 169 187 ib = &ibs[i]; 170 188 171 - if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) { 189 + if (ib->ctx != ctx || ib->vm != vm) { 172 190 ring->current_ctx = old_ctx; 173 - amdgpu_ring_unlock_undo(ring); 191 + amdgpu_ring_undo(ring); 174 192 return -EINVAL; 175 193 } 176 194 amdgpu_ring_emit_ib(ring, ib); ··· 181 199 if (r) { 182 200 
dev_err(adev->dev, "failed to emit fence (%d)\n", r); 183 201 ring->current_ctx = old_ctx; 184 - amdgpu_ring_unlock_undo(ring); 202 + amdgpu_ring_undo(ring); 185 203 return r; 186 204 } 187 - 188 - if (!amdgpu_enable_scheduler && ib->ctx) 189 - ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring, 190 - &ib->fence->base); 191 205 192 206 /* wrap the last IB with fence */ 193 207 if (ib->user) { ··· 193 215 AMDGPU_FENCE_FLAG_64BIT); 194 216 } 195 217 196 - if (ib->vm) 197 - amdgpu_vm_fence(adev, ib->vm, &ib->fence->base); 218 + if (f) 219 + *f = fence_get(&ib->fence->base); 198 220 199 - amdgpu_ring_unlock_commit(ring); 221 + amdgpu_ring_commit(ring); 200 222 return 0; 201 223 } 202 224
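With sync and VM-ID handling moved out of amdgpu_ib_schedule() above, the function now takes the target ring and the last VM update fence explicitly and returns the submission fence through its final argument. A rough sketch of the new calling convention (schedule_ibs() is an illustrative wrapper, not part of the patch; the real caller is amdgpu_job_run() in amdgpu_job.c below):

/* Sketch only: dependencies must already be resolved (and a VM ID
 * grabbed for VM IBs) before this call; the submission fence comes
 * back as a new reference through the last argument.
 */
static int schedule_ibs(struct amdgpu_ring *ring, struct amdgpu_ib *ibs,
			unsigned num_ibs, void *owner,
			struct fence *last_vm_update)
{
	struct fence *fence = NULL;
	int r;

	r = amdgpu_ib_schedule(ring, num_ibs, ibs, owner,
			       last_vm_update, &fence);
	if (r)
		return r;

	fence_put(fence);	/* a real caller would keep this to wait on */
	return 0;
}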
+159
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 1 + /* 2 + * Copyright 2015 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * 23 + */ 24 + #include <linux/kthread.h> 25 + #include <linux/wait.h> 26 + #include <linux/sched.h> 27 + #include <drm/drmP.h> 28 + #include "amdgpu.h" 29 + #include "amdgpu_trace.h" 30 + 31 + int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, 32 + struct amdgpu_job **job) 33 + { 34 + size_t size = sizeof(struct amdgpu_job); 35 + 36 + if (num_ibs == 0) 37 + return -EINVAL; 38 + 39 + size += sizeof(struct amdgpu_ib) * num_ibs; 40 + 41 + *job = kzalloc(size, GFP_KERNEL); 42 + if (!*job) 43 + return -ENOMEM; 44 + 45 + (*job)->adev = adev; 46 + (*job)->ibs = (void *)&(*job)[1]; 47 + (*job)->num_ibs = num_ibs; 48 + 49 + amdgpu_sync_create(&(*job)->sync); 50 + 51 + return 0; 52 + } 53 + 54 + int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, 55 + struct amdgpu_job **job) 56 + { 57 + int r; 58 + 59 + r = amdgpu_job_alloc(adev, 1, job); 60 + if (r) 61 + return r; 62 + 63 + r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]); 64 + if (r) 65 + kfree(*job); 66 + 67 + return r; 68 + } 69 + 70 + void amdgpu_job_free(struct amdgpu_job *job) 71 + { 72 + unsigned i; 73 + 74 + for (i = 0; i < job->num_ibs; ++i) 75 + amdgpu_ib_free(job->adev, &job->ibs[i]); 76 + 77 + amdgpu_bo_unref(&job->uf.bo); 78 + amdgpu_sync_free(&job->sync); 79 + kfree(job); 80 + } 81 + 82 + int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, 83 + struct amd_sched_entity *entity, void *owner, 84 + struct fence **f) 85 + { 86 + job->ring = ring; 87 + job->base.sched = &ring->sched; 88 + job->base.s_entity = entity; 89 + job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); 90 + if (!job->base.s_fence) 91 + return -ENOMEM; 92 + 93 + *f = fence_get(&job->base.s_fence->base); 94 + 95 + job->owner = owner; 96 + amd_sched_entity_push_job(&job->base); 97 + 98 + return 0; 99 + } 100 + 101 + static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) 102 + { 103 + struct amdgpu_job *job = to_amdgpu_job(sched_job); 104 + struct amdgpu_vm *vm = job->ibs->vm; 105 + 106 + struct fence *fence = amdgpu_sync_get_fence(&job->sync); 107 + 108 + if (fence == NULL && vm && !job->ibs->grabbed_vmid) { 109 + struct amdgpu_ring *ring = job->ring; 110 + int r; 111 + 112 + r = amdgpu_vm_grab_id(vm, ring, &job->sync, 113 + &job->base.s_fence->base); 114 + if (r) 115 + 
DRM_ERROR("Error getting VM ID (%d)\n", r); 116 + else 117 + job->ibs->grabbed_vmid = true; 118 + 119 + fence = amdgpu_sync_get_fence(&job->sync); 120 + } 121 + 122 + return fence; 123 + } 124 + 125 + static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) 126 + { 127 + struct fence *fence = NULL; 128 + struct amdgpu_job *job; 129 + int r; 130 + 131 + if (!sched_job) { 132 + DRM_ERROR("job is null\n"); 133 + return NULL; 134 + } 135 + job = to_amdgpu_job(sched_job); 136 + 137 + r = amdgpu_sync_wait(&job->sync); 138 + if (r) { 139 + DRM_ERROR("failed to sync wait (%d)\n", r); 140 + return NULL; 141 + } 142 + 143 + trace_amdgpu_sched_run_job(job); 144 + r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->owner, 145 + job->sync.last_vm_update, &fence); 146 + if (r) { 147 + DRM_ERROR("Error scheduling IBs (%d)\n", r); 148 + goto err; 149 + } 150 + 151 + err: 152 + amdgpu_job_free(job); 153 + return fence; 154 + } 155 + 156 + struct amd_sched_backend_ops amdgpu_sched_ops = { 157 + .dependency = amdgpu_job_dependency, 158 + .run_job = amdgpu_job_run, 159 + };
+7 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 447 447 dev_info.max_memory_clock = adev->pm.default_mclk * 10; 448 448 } 449 449 dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; 450 - dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * 451 - adev->gfx.config.max_shader_engines; 450 + dev_info.num_rb_pipes = adev->gfx.config.num_rbs; 452 451 dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; 453 452 dev_info._pad = 0; 454 453 dev_info.ids_flags = 0; ··· 726 727 727 728 /* Get associated drm_crtc: */ 728 729 crtc = &adev->mode_info.crtcs[pipe]->base; 730 + if (!crtc) { 731 + /* This can occur on driver load if some component fails to 732 + * initialize completely and driver is unloaded */ 733 + DRM_ERROR("Uninitialized crtc %d\n", pipe); 734 + return -EINVAL; 735 + } 729 736 730 737 /* Helper routine in DRM core does all the work: */ 731 738 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+16 -21
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
··· 48 48 /* protected by adev->mn_lock */ 49 49 struct hlist_node node; 50 50 51 - /* objects protected by lock */ 52 - struct mutex lock; 51 + /* objects protected by mm->mmap_sem */ 53 52 struct rb_root objects; 54 53 }; 55 54 ··· 72 73 struct amdgpu_bo *bo, *next_bo; 73 74 74 75 mutex_lock(&adev->mn_lock); 75 - mutex_lock(&rmn->lock); 76 + down_write(&rmn->mm->mmap_sem); 76 77 hash_del(&rmn->node); 77 78 rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, 78 79 it.rb) { 79 - 80 - interval_tree_remove(&node->it, &rmn->objects); 81 80 list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) { 82 81 bo->mn = NULL; 83 82 list_del_init(&bo->mn_list); 84 83 } 85 84 kfree(node); 86 85 } 87 - mutex_unlock(&rmn->lock); 86 + up_write(&rmn->mm->mmap_sem); 88 87 mutex_unlock(&adev->mn_lock); 89 - mmu_notifier_unregister(&rmn->mn, rmn->mm); 88 + mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm); 90 89 kfree(rmn); 91 90 } 92 91 ··· 126 129 /* notification is exclusive, but interval is inclusive */ 127 130 end -= 1; 128 131 129 - mutex_lock(&rmn->lock); 130 - 131 132 it = interval_tree_iter_first(&rmn->objects, start, end); 132 133 while (it) { 133 134 struct amdgpu_mn_node *node; ··· 137 142 138 143 list_for_each_entry(bo, &node->bos, mn_list) { 139 144 140 - if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) 145 + if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, 146 + end)) 141 147 continue; 142 148 143 149 r = amdgpu_bo_reserve(bo, true); ··· 160 164 amdgpu_bo_unreserve(bo); 161 165 } 162 166 } 163 - 164 - mutex_unlock(&rmn->lock); 165 167 } 166 168 167 169 static const struct mmu_notifier_ops amdgpu_mn_ops = { ··· 180 186 struct amdgpu_mn *rmn; 181 187 int r; 182 188 183 - down_write(&mm->mmap_sem); 184 189 mutex_lock(&adev->mn_lock); 190 + down_write(&mm->mmap_sem); 185 191 186 192 hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm) 187 193 if (rmn->mm == mm) ··· 196 202 rmn->adev = adev; 197 203 rmn->mm = mm; 198 204 rmn->mn.ops = &amdgpu_mn_ops; 199 - mutex_init(&rmn->lock); 200 205 rmn->objects = RB_ROOT; 201 206 202 207 r = __mmu_notifier_register(&rmn->mn, mm); ··· 205 212 hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm); 206 213 207 214 release_locks: 208 - mutex_unlock(&adev->mn_lock); 209 215 up_write(&mm->mmap_sem); 216 + mutex_unlock(&adev->mn_lock); 210 217 211 218 return rmn; 212 219 213 220 free_rmn: 214 - mutex_unlock(&adev->mn_lock); 215 221 up_write(&mm->mmap_sem); 222 + mutex_unlock(&adev->mn_lock); 216 223 kfree(rmn); 217 224 218 225 return ERR_PTR(r); ··· 242 249 243 250 INIT_LIST_HEAD(&bos); 244 251 245 - mutex_lock(&rmn->lock); 252 + down_write(&rmn->mm->mmap_sem); 246 253 247 254 while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) { 248 255 kfree(node); ··· 256 263 if (!node) { 257 264 node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL); 258 265 if (!node) { 259 - mutex_unlock(&rmn->lock); 266 + up_write(&rmn->mm->mmap_sem); 260 267 return -ENOMEM; 261 268 } 262 269 } ··· 271 278 272 279 interval_tree_insert(&node->it, &rmn->objects); 273 280 274 - mutex_unlock(&rmn->lock); 281 + up_write(&rmn->mm->mmap_sem); 275 282 276 283 return 0; 277 284 } ··· 290 297 struct list_head *head; 291 298 292 299 mutex_lock(&adev->mn_lock); 300 + 293 301 rmn = bo->mn; 294 302 if (rmn == NULL) { 295 303 mutex_unlock(&adev->mn_lock); 296 304 return; 297 305 } 298 306 299 - mutex_lock(&rmn->lock); 307 + down_write(&rmn->mm->mmap_sem); 308 + 300 309 /* save the next list entry for later */ 301 310 head = bo->mn_list.next; 302 
311 ··· 312 317 kfree(node); 313 318 } 314 319 315 - mutex_unlock(&rmn->lock); 320 + up_write(&rmn->mm->mmap_sem); 316 321 mutex_unlock(&adev->mn_lock); 317 322 }
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
··· 390 390 struct drm_display_mode native_mode; 391 391 u32 pll_id; 392 392 /* page flipping */ 393 - struct workqueue_struct *pflip_queue; 394 393 struct amdgpu_flip_work *pflip_works; 395 394 enum amdgpu_flip_status pflip_status; 396 395 int deferred_flip_completion;
+10 -30
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 97 97 98 98 amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL); 99 99 100 - mutex_lock(&bo->adev->gem.mutex); 101 - list_del_init(&bo->list); 102 - mutex_unlock(&bo->adev->gem.mutex); 103 100 drm_gem_object_release(&bo->gem_base); 104 101 amdgpu_bo_unref(&bo->parent); 105 102 kfree(bo->metadata); ··· 251 254 bo->adev = adev; 252 255 INIT_LIST_HEAD(&bo->list); 253 256 INIT_LIST_HEAD(&bo->va); 254 - bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM | 255 - AMDGPU_GEM_DOMAIN_GTT | 256 - AMDGPU_GEM_DOMAIN_CPU | 257 - AMDGPU_GEM_DOMAIN_GDS | 258 - AMDGPU_GEM_DOMAIN_GWS | 259 - AMDGPU_GEM_DOMAIN_OA); 257 + bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | 258 + AMDGPU_GEM_DOMAIN_GTT | 259 + AMDGPU_GEM_DOMAIN_CPU | 260 + AMDGPU_GEM_DOMAIN_GDS | 261 + AMDGPU_GEM_DOMAIN_GWS | 262 + AMDGPU_GEM_DOMAIN_OA); 263 + bo->allowed_domains = bo->prefered_domains; 264 + if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) 265 + bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; 260 266 261 267 bo->flags = flags; 262 268 ··· 367 367 int r, i; 368 368 unsigned fpfn, lpfn; 369 369 370 - if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm)) 370 + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) 371 371 return -EPERM; 372 372 373 373 if (WARN_ON_ONCE(min_offset > max_offset)) ··· 468 468 return 0; 469 469 } 470 470 return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM); 471 - } 472 - 473 - void amdgpu_bo_force_delete(struct amdgpu_device *adev) 474 - { 475 - struct amdgpu_bo *bo, *n; 476 - 477 - if (list_empty(&adev->gem.objects)) { 478 - return; 479 - } 480 - dev_err(adev->dev, "Userspace still has active objects !\n"); 481 - list_for_each_entry_safe(bo, n, &adev->gem.objects, list) { 482 - dev_err(adev->dev, "%p %p %lu %lu force free\n", 483 - &bo->gem_base, bo, (unsigned long)bo->gem_base.size, 484 - *((unsigned long *)&bo->gem_base.refcount)); 485 - mutex_lock(&bo->adev->gem.mutex); 486 - list_del_init(&bo->list); 487 - mutex_unlock(&bo->adev->gem.mutex); 488 - /* this should unref the ttm bo */ 489 - drm_gem_object_unreference_unlocked(&bo->gem_base); 490 - } 491 471 } 492 472 493 473 int amdgpu_bo_init(struct amdgpu_device *adev)
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
··· 149 149 u64 *gpu_addr); 150 150 int amdgpu_bo_unpin(struct amdgpu_bo *bo); 151 151 int amdgpu_bo_evict_vram(struct amdgpu_device *adev); 152 - void amdgpu_bo_force_delete(struct amdgpu_device *adev); 153 152 int amdgpu_bo_init(struct amdgpu_device *adev); 154 153 void amdgpu_bo_fini(struct amdgpu_device *adev); 155 154 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
+341 -14
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 119 119 level = amdgpu_dpm_get_performance_level(adev); 120 120 return snprintf(buf, PAGE_SIZE, "%s\n", 121 121 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : 122 - (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); 122 + (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : 123 + (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : 124 + (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown"); 123 125 } else { 124 126 enum amdgpu_dpm_forced_level level; 125 127 ··· 148 146 level = AMDGPU_DPM_FORCED_LEVEL_HIGH; 149 147 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 150 148 level = AMDGPU_DPM_FORCED_LEVEL_AUTO; 149 + } else if (strncmp("manual", buf, strlen("manual")) == 0) { 150 + level = AMDGPU_DPM_FORCED_LEVEL_MANUAL; 151 151 } else { 152 152 count = -EINVAL; 153 153 goto fail; ··· 176 172 return count; 177 173 } 178 174 175 + static ssize_t amdgpu_get_pp_num_states(struct device *dev, 176 + struct device_attribute *attr, 177 + char *buf) 178 + { 179 + struct drm_device *ddev = dev_get_drvdata(dev); 180 + struct amdgpu_device *adev = ddev->dev_private; 181 + struct pp_states_info data; 182 + int i, buf_len; 183 + 184 + if (adev->pp_enabled) 185 + amdgpu_dpm_get_pp_num_states(adev, &data); 186 + 187 + buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); 188 + for (i = 0; i < data.nums; i++) 189 + buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, 190 + (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : 191 + (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : 192 + (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : 193 + (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default"); 194 + 195 + return buf_len; 196 + } 197 + 198 + static ssize_t amdgpu_get_pp_cur_state(struct device *dev, 199 + struct device_attribute *attr, 200 + char *buf) 201 + { 202 + struct drm_device *ddev = dev_get_drvdata(dev); 203 + struct amdgpu_device *adev = ddev->dev_private; 204 + struct pp_states_info data; 205 + enum amd_pm_state_type pm = 0; 206 + int i = 0; 207 + 208 + if (adev->pp_enabled) { 209 + 210 + pm = amdgpu_dpm_get_current_power_state(adev); 211 + amdgpu_dpm_get_pp_num_states(adev, &data); 212 + 213 + for (i = 0; i < data.nums; i++) { 214 + if (pm == data.states[i]) 215 + break; 216 + } 217 + 218 + if (i == data.nums) 219 + i = -EINVAL; 220 + } 221 + 222 + return snprintf(buf, PAGE_SIZE, "%d\n", i); 223 + } 224 + 225 + static ssize_t amdgpu_get_pp_force_state(struct device *dev, 226 + struct device_attribute *attr, 227 + char *buf) 228 + { 229 + struct drm_device *ddev = dev_get_drvdata(dev); 230 + struct amdgpu_device *adev = ddev->dev_private; 231 + struct pp_states_info data; 232 + enum amd_pm_state_type pm = 0; 233 + int i; 234 + 235 + if (adev->pp_force_state_enabled && adev->pp_enabled) { 236 + pm = amdgpu_dpm_get_current_power_state(adev); 237 + amdgpu_dpm_get_pp_num_states(adev, &data); 238 + 239 + for (i = 0; i < data.nums; i++) { 240 + if (pm == data.states[i]) 241 + break; 242 + } 243 + 244 + if (i == data.nums) 245 + i = -EINVAL; 246 + 247 + return snprintf(buf, PAGE_SIZE, "%d\n", i); 248 + 249 + } else 250 + return snprintf(buf, PAGE_SIZE, "\n"); 251 + } 252 + 253 + static ssize_t amdgpu_set_pp_force_state(struct device *dev, 254 + struct device_attribute *attr, 255 + const char *buf, 256 + size_t count) 257 + { 258 + struct drm_device *ddev = dev_get_drvdata(dev); 259 + struct amdgpu_device *adev = ddev->dev_private; 260 + enum amd_pm_state_type state = 0; 261 + long idx; 262 + int ret; 263 + 264 + if 
(strlen(buf) == 1) 265 + adev->pp_force_state_enabled = false; 266 + else { 267 + ret = kstrtol(buf, 0, &idx); 268 + 269 + if (ret) { 270 + count = -EINVAL; 271 + goto fail; 272 + } 273 + 274 + if (adev->pp_enabled) { 275 + struct pp_states_info data; 276 + amdgpu_dpm_get_pp_num_states(adev, &data); 277 + state = data.states[idx]; 278 + /* only set user selected power states */ 279 + if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 280 + state != POWER_STATE_TYPE_DEFAULT) { 281 + amdgpu_dpm_dispatch_task(adev, 282 + AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); 283 + adev->pp_force_state_enabled = true; 284 + } 285 + } 286 + } 287 + fail: 288 + return count; 289 + } 290 + 291 + static ssize_t amdgpu_get_pp_table(struct device *dev, 292 + struct device_attribute *attr, 293 + char *buf) 294 + { 295 + struct drm_device *ddev = dev_get_drvdata(dev); 296 + struct amdgpu_device *adev = ddev->dev_private; 297 + char *table = NULL; 298 + int size, i; 299 + 300 + if (adev->pp_enabled) 301 + size = amdgpu_dpm_get_pp_table(adev, &table); 302 + else 303 + return 0; 304 + 305 + if (size >= PAGE_SIZE) 306 + size = PAGE_SIZE - 1; 307 + 308 + for (i = 0; i < size; i++) { 309 + sprintf(buf + i, "%02x", table[i]); 310 + } 311 + sprintf(buf + i, "\n"); 312 + 313 + return size; 314 + } 315 + 316 + static ssize_t amdgpu_set_pp_table(struct device *dev, 317 + struct device_attribute *attr, 318 + const char *buf, 319 + size_t count) 320 + { 321 + struct drm_device *ddev = dev_get_drvdata(dev); 322 + struct amdgpu_device *adev = ddev->dev_private; 323 + 324 + if (adev->pp_enabled) 325 + amdgpu_dpm_set_pp_table(adev, buf, count); 326 + 327 + return count; 328 + } 329 + 330 + static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 331 + struct device_attribute *attr, 332 + char *buf) 333 + { 334 + struct drm_device *ddev = dev_get_drvdata(dev); 335 + struct amdgpu_device *adev = ddev->dev_private; 336 + ssize_t size = 0; 337 + 338 + if (adev->pp_enabled) 339 + size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); 340 + 341 + return size; 342 + } 343 + 344 + static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, 345 + struct device_attribute *attr, 346 + const char *buf, 347 + size_t count) 348 + { 349 + struct drm_device *ddev = dev_get_drvdata(dev); 350 + struct amdgpu_device *adev = ddev->dev_private; 351 + int ret; 352 + long level; 353 + 354 + ret = kstrtol(buf, 0, &level); 355 + 356 + if (ret) { 357 + count = -EINVAL; 358 + goto fail; 359 + } 360 + 361 + if (adev->pp_enabled) 362 + amdgpu_dpm_force_clock_level(adev, PP_SCLK, level); 363 + fail: 364 + return count; 365 + } 366 + 367 + static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, 368 + struct device_attribute *attr, 369 + char *buf) 370 + { 371 + struct drm_device *ddev = dev_get_drvdata(dev); 372 + struct amdgpu_device *adev = ddev->dev_private; 373 + ssize_t size = 0; 374 + 375 + if (adev->pp_enabled) 376 + size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); 377 + 378 + return size; 379 + } 380 + 381 + static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, 382 + struct device_attribute *attr, 383 + const char *buf, 384 + size_t count) 385 + { 386 + struct drm_device *ddev = dev_get_drvdata(dev); 387 + struct amdgpu_device *adev = ddev->dev_private; 388 + int ret; 389 + long level; 390 + 391 + ret = kstrtol(buf, 0, &level); 392 + 393 + if (ret) { 394 + count = -EINVAL; 395 + goto fail; 396 + } 397 + 398 + if (adev->pp_enabled) 399 + amdgpu_dpm_force_clock_level(adev, PP_MCLK, level); 400 + fail: 401 + return count; 402 + } 403 + 
404 + static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, 405 + struct device_attribute *attr, 406 + char *buf) 407 + { 408 + struct drm_device *ddev = dev_get_drvdata(dev); 409 + struct amdgpu_device *adev = ddev->dev_private; 410 + ssize_t size = 0; 411 + 412 + if (adev->pp_enabled) 413 + size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); 414 + 415 + return size; 416 + } 417 + 418 + static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, 419 + struct device_attribute *attr, 420 + const char *buf, 421 + size_t count) 422 + { 423 + struct drm_device *ddev = dev_get_drvdata(dev); 424 + struct amdgpu_device *adev = ddev->dev_private; 425 + int ret; 426 + long level; 427 + 428 + ret = kstrtol(buf, 0, &level); 429 + 430 + if (ret) { 431 + count = -EINVAL; 432 + goto fail; 433 + } 434 + 435 + if (adev->pp_enabled) 436 + amdgpu_dpm_force_clock_level(adev, PP_PCIE, level); 437 + fail: 438 + return count; 439 + } 440 + 179 441 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); 180 442 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, 181 443 amdgpu_get_dpm_forced_performance_level, 182 444 amdgpu_set_dpm_forced_performance_level); 445 + static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL); 446 + static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL); 447 + static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR, 448 + amdgpu_get_pp_force_state, 449 + amdgpu_set_pp_force_state); 450 + static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR, 451 + amdgpu_get_pp_table, 452 + amdgpu_set_pp_table); 453 + static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR, 454 + amdgpu_get_pp_dpm_sclk, 455 + amdgpu_set_pp_dpm_sclk); 456 + static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, 457 + amdgpu_get_pp_dpm_mclk, 458 + amdgpu_set_pp_dpm_mclk); 459 + static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, 460 + amdgpu_get_pp_dpm_pcie, 461 + amdgpu_set_pp_dpm_pcie); 183 462 184 463 static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 185 464 struct device_attribute *attr, ··· 910 623 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); 911 624 } 912 625 913 - mutex_lock(&adev->ring_lock); 914 - 915 626 /* update whether vce is active */ 916 627 ps->vce_active = adev->pm.dpm.vce_active; 917 628 918 629 ret = amdgpu_dpm_pre_set_power_state(adev); 919 630 if (ret) 920 - goto done; 631 + return; 921 632 922 633 /* update display watermarks based on new power state */ 923 634 amdgpu_display_bandwidth_update(adev); ··· 952 667 amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); 953 668 } 954 669 } 955 - 956 - done: 957 - mutex_unlock(&adev->ring_lock); 958 670 } 959 671 960 672 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) ··· 1052 770 DRM_ERROR("failed to create device file for dpm state\n"); 1053 771 return ret; 1054 772 } 773 + 774 + if (adev->pp_enabled) { 775 + ret = device_create_file(adev->dev, &dev_attr_pp_num_states); 776 + if (ret) { 777 + DRM_ERROR("failed to create device file pp_num_states\n"); 778 + return ret; 779 + } 780 + ret = device_create_file(adev->dev, &dev_attr_pp_cur_state); 781 + if (ret) { 782 + DRM_ERROR("failed to create device file pp_cur_state\n"); 783 + return ret; 784 + } 785 + ret = device_create_file(adev->dev, &dev_attr_pp_force_state); 786 + if (ret) { 787 + DRM_ERROR("failed to create device file pp_force_state\n"); 788 + return ret; 789 + } 790 + ret = device_create_file(adev->dev, &dev_attr_pp_table); 791 + if (ret) { 792 + 
DRM_ERROR("failed to create device file pp_table\n"); 793 + return ret; 794 + } 795 + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); 796 + if (ret) { 797 + DRM_ERROR("failed to create device file pp_dpm_sclk\n"); 798 + return ret; 799 + } 800 + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); 801 + if (ret) { 802 + DRM_ERROR("failed to create device file pp_dpm_mclk\n"); 803 + return ret; 804 + } 805 + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); 806 + if (ret) { 807 + DRM_ERROR("failed to create device file pp_dpm_pcie\n"); 808 + return ret; 809 + } 810 + } 1055 811 ret = amdgpu_debugfs_pm_init(adev); 1056 812 if (ret) { 1057 813 DRM_ERROR("Failed to register debugfs file for dpm!\n"); ··· 1107 787 hwmon_device_unregister(adev->pm.int_hwmon_dev); 1108 788 device_remove_file(adev->dev, &dev_attr_power_dpm_state); 1109 789 device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 790 + if (adev->pp_enabled) { 791 + device_remove_file(adev->dev, &dev_attr_pp_num_states); 792 + device_remove_file(adev->dev, &dev_attr_pp_cur_state); 793 + device_remove_file(adev->dev, &dev_attr_pp_force_state); 794 + device_remove_file(adev->dev, &dev_attr_pp_table); 795 + device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); 796 + device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); 797 + device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); 798 + } 1110 799 } 1111 800 1112 801 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) ··· 1131 802 int i = 0; 1132 803 1133 804 amdgpu_display_bandwidth_update(adev); 1134 - mutex_lock(&adev->ring_lock); 1135 - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1136 - struct amdgpu_ring *ring = adev->rings[i]; 1137 - if (ring && ring->ready) 1138 - amdgpu_fence_wait_empty(ring); 1139 - } 1140 - mutex_unlock(&adev->ring_lock); 805 + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 806 + struct amdgpu_ring *ring = adev->rings[i]; 807 + if (ring && ring->ready) 808 + amdgpu_fence_wait_empty(ring); 809 + } 1141 810 1142 811 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); 1143 812 } else {
+1 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
··· 73 73 if (ret) 74 74 return ERR_PTR(ret); 75 75 76 - mutex_lock(&adev->gem.mutex); 77 - list_add_tail(&bo->list, &adev->gem.objects); 78 - mutex_unlock(&adev->gem.mutex); 79 - 80 76 return &bo->gem_base; 81 77 } 82 78 ··· 117 121 { 118 122 struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); 119 123 120 - if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm)) 124 + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) 121 125 return ERR_PTR(-EPERM); 122 126 123 127 return drm_gem_prime_export(dev, gobj, flags);
+44 -130
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
··· 49 49 static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); 50 50 51 51 /** 52 - * amdgpu_ring_free_size - update the free size 53 - * 54 - * @adev: amdgpu_device pointer 55 - * @ring: amdgpu_ring structure holding ring information 56 - * 57 - * Update the free dw slots in the ring buffer (all asics). 58 - */ 59 - void amdgpu_ring_free_size(struct amdgpu_ring *ring) 60 - { 61 - uint32_t rptr = amdgpu_ring_get_rptr(ring); 62 - 63 - /* This works because ring_size is a power of 2 */ 64 - ring->ring_free_dw = rptr + (ring->ring_size / 4); 65 - ring->ring_free_dw -= ring->wptr; 66 - ring->ring_free_dw &= ring->ptr_mask; 67 - if (!ring->ring_free_dw) { 68 - /* this is an empty ring */ 69 - ring->ring_free_dw = ring->ring_size / 4; 70 - } 71 - } 72 - 73 - /** 74 52 * amdgpu_ring_alloc - allocate space on the ring buffer 75 53 * 76 54 * @adev: amdgpu_device pointer ··· 60 82 */ 61 83 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) 62 84 { 63 - int r; 64 - 65 - /* make sure we aren't trying to allocate more space than there is on the ring */ 66 - if (ndw > (ring->ring_size / 4)) 67 - return -ENOMEM; 68 85 /* Align requested size with padding so unlock_commit can 69 86 * pad safely */ 70 - amdgpu_ring_free_size(ring); 71 87 ndw = (ndw + ring->align_mask) & ~ring->align_mask; 72 - while (ndw > (ring->ring_free_dw - 1)) { 73 - amdgpu_ring_free_size(ring); 74 - if (ndw < ring->ring_free_dw) { 75 - break; 76 - } 77 - r = amdgpu_fence_wait_next(ring); 78 - if (r) 79 - return r; 80 - } 88 + 89 + /* Make sure we aren't trying to allocate more space 90 + * than the maximum for one submission 91 + */ 92 + if (WARN_ON_ONCE(ndw > ring->max_dw)) 93 + return -ENOMEM; 94 + 81 95 ring->count_dw = ndw; 82 96 ring->wptr_old = ring->wptr; 83 - return 0; 84 - } 85 - 86 - /** 87 - * amdgpu_ring_lock - lock the ring and allocate space on it 88 - * 89 - * @adev: amdgpu_device pointer 90 - * @ring: amdgpu_ring structure holding ring information 91 - * @ndw: number of dwords to allocate in the ring buffer 92 - * 93 - * Lock the ring and allocate @ndw dwords in the ring buffer 94 - * (all asics). 95 - * Returns 0 on success, error on failure. 96 - */ 97 - int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw) 98 - { 99 - int r; 100 - 101 - mutex_lock(ring->ring_lock); 102 - r = amdgpu_ring_alloc(ring, ndw); 103 - if (r) { 104 - mutex_unlock(ring->ring_lock); 105 - return r; 106 - } 107 97 return 0; 108 98 } 109 99 ··· 88 142 89 143 for (i = 0; i < count; i++) 90 144 amdgpu_ring_write(ring, ring->nop); 145 + } 146 + 147 + /** amdgpu_ring_generic_pad_ib - pad IB with NOP packets 148 + * 149 + * @ring: amdgpu_ring structure holding ring information 150 + * @ib: IB to add NOP packets to 151 + * 152 + * This is the generic pad_ib function for rings except SDMA 153 + */ 154 + void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) 155 + { 156 + while (ib->length_dw & ring->align_mask) 157 + ib->ptr[ib->length_dw++] = ring->nop; 91 158 } 92 159 93 160 /** ··· 127 168 } 128 169 129 170 /** 130 - * amdgpu_ring_unlock_commit - tell the GPU to execute the new 131 - * commands on the ring buffer and unlock it 132 - * 133 - * @ring: amdgpu_ring structure holding ring information 134 - * 135 - * Call amdgpu_ring_commit() then unlock the ring (all asics). 
136 - */ 137 - void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring) 138 - { 139 - amdgpu_ring_commit(ring); 140 - mutex_unlock(ring->ring_lock); 141 - } 142 - 143 - /** 144 171 * amdgpu_ring_undo - reset the wptr 145 172 * 146 173 * @ring: amdgpu_ring structure holding ring information ··· 136 191 void amdgpu_ring_undo(struct amdgpu_ring *ring) 137 192 { 138 193 ring->wptr = ring->wptr_old; 139 - } 140 - 141 - /** 142 - * amdgpu_ring_unlock_undo - reset the wptr and unlock the ring 143 - * 144 - * @ring: amdgpu_ring structure holding ring information 145 - * 146 - * Call amdgpu_ring_undo() then unlock the ring (all asics). 147 - */ 148 - void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring) 149 - { 150 - amdgpu_ring_undo(ring); 151 - mutex_unlock(ring->ring_lock); 152 194 } 153 195 154 196 /** ··· 150 218 { 151 219 unsigned size, ptr, i; 152 220 153 - /* just in case lock the ring */ 154 - mutex_lock(ring->ring_lock); 155 221 *data = NULL; 156 222 157 - if (ring->ring_obj == NULL) { 158 - mutex_unlock(ring->ring_lock); 223 + if (ring->ring_obj == NULL) 159 224 return 0; 160 - } 161 225 162 226 /* it doesn't make sense to save anything if all fences are signaled */ 163 - if (!amdgpu_fence_count_emitted(ring)) { 164 - mutex_unlock(ring->ring_lock); 227 + if (!amdgpu_fence_count_emitted(ring)) 165 228 return 0; 166 - } 167 229 168 230 ptr = le32_to_cpu(*ring->next_rptr_cpu_addr); 169 231 170 232 size = ring->wptr + (ring->ring_size / 4); 171 233 size -= ptr; 172 234 size &= ring->ptr_mask; 173 - if (size == 0) { 174 - mutex_unlock(ring->ring_lock); 235 + if (size == 0) 175 236 return 0; 176 - } 177 237 178 238 /* and then save the content of the ring */ 179 239 *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); 180 - if (!*data) { 181 - mutex_unlock(ring->ring_lock); 240 + if (!*data) 182 241 return 0; 183 - } 184 242 for (i = 0; i < size; ++i) { 185 243 (*data)[i] = ring->ring[ptr++]; 186 244 ptr &= ring->ptr_mask; 187 245 } 188 246 189 - mutex_unlock(ring->ring_lock); 190 247 return size; 191 248 } 192 249 ··· 197 276 return 0; 198 277 199 278 /* restore the saved ring content */ 200 - r = amdgpu_ring_lock(ring, size); 279 + r = amdgpu_ring_alloc(ring, size); 201 280 if (r) 202 281 return r; 203 282 ··· 205 284 amdgpu_ring_write(ring, data[i]); 206 285 } 207 286 208 - amdgpu_ring_unlock_commit(ring); 287 + amdgpu_ring_commit(ring); 209 288 kfree(data); 210 289 return 0; 211 290 } ··· 273 352 return r; 274 353 } 275 354 276 - ring->ring_lock = &adev->ring_lock; 277 355 /* Align ring size */ 278 356 rb_bufsz = order_base_2(ring_size / 8); 279 357 ring_size = (1 << (rb_bufsz + 1)) * 4; ··· 309 389 } 310 390 } 311 391 ring->ptr_mask = (ring->ring_size / 4) - 1; 312 - ring->ring_free_dw = ring->ring_size / 4; 392 + ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4, 393 + amdgpu_sched_hw_submission); 313 394 314 395 if (amdgpu_debugfs_ring_init(adev, ring)) { 315 396 DRM_ERROR("Failed to register debugfs file for rings !\n"); ··· 331 410 int r; 332 411 struct amdgpu_bo *ring_obj; 333 412 334 - if (ring->ring_lock == NULL) 335 - return; 336 - 337 - mutex_lock(ring->ring_lock); 338 413 ring_obj = ring->ring_obj; 339 414 ring->ready = false; 340 415 ring->ring = NULL; 341 416 ring->ring_obj = NULL; 342 - mutex_unlock(ring->ring_lock); 343 417 344 418 amdgpu_wb_free(ring->adev, ring->fence_offs); 345 419 amdgpu_wb_free(ring->adev, ring->rptr_offs); ··· 390 474 struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset); 391 475 392 476 uint32_t rptr, wptr, rptr_next; 393 - unsigned 
count, i, j; 394 - 395 - amdgpu_ring_free_size(ring); 396 - count = (ring->ring_size / 4) - ring->ring_free_dw; 477 + unsigned i; 397 478 398 479 wptr = amdgpu_ring_get_wptr(ring); 399 - seq_printf(m, "wptr: 0x%08x [%5d]\n", 400 - wptr, wptr); 480 + seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr); 401 481 402 482 rptr = amdgpu_ring_get_rptr(ring); 403 - seq_printf(m, "rptr: 0x%08x [%5d]\n", 404 - rptr, rptr); 405 - 406 483 rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr); 484 + 485 + seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr); 407 486 408 487 seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", 409 488 ring->wptr, ring->wptr); 410 - seq_printf(m, "last semaphore signal addr : 0x%016llx\n", 411 - ring->last_semaphore_signal_addr); 412 - seq_printf(m, "last semaphore wait addr : 0x%016llx\n", 413 - ring->last_semaphore_wait_addr); 414 - seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 415 - seq_printf(m, "%u dwords in ring\n", count); 416 489 417 490 if (!ring->ready) 418 491 return 0; ··· 410 505 * packet that is the root issue 411 506 */ 412 507 i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; 413 - for (j = 0; j <= (count + 32); j++) { 508 + while (i != rptr) { 414 509 seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); 415 - if (rptr == i) 510 + if (i == rptr) 416 511 seq_puts(m, " *"); 417 - if (rptr_next == i) 512 + if (i == rptr_next) 513 + seq_puts(m, " #"); 514 + seq_puts(m, "\n"); 515 + i = (i + 1) & ring->ptr_mask; 516 + } 517 + while (i != wptr) { 518 + seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); 519 + if (i == rptr) 520 + seq_puts(m, " *"); 521 + if (i == rptr_next) 418 522 seq_puts(m, " #"); 419 523 seq_puts(m, "\n"); 420 524 i = (i + 1) & ring->ptr_mask;
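With ring_lock and the free-size bookkeeping removed above, space accounting reduces to a fixed per-submission cap: ring->max_dw is the ring size divided by amdgpu_sched_hw_submission, and amdgpu_ring_alloc() only aligns the request and checks it against that cap instead of blocking on fences. A minimal sketch of the resulting emit pattern follows, using only functions visible in this hunk plus amdgpu_ring_write(); emit_example() itself is made up for illustration and assumes it sits in a file that already includes amdgpu.h.

static int emit_example(struct amdgpu_ring *ring, uint32_t payload)
{
	int r;

	r = amdgpu_ring_alloc(ring, 4);		/* request 4 dwords, rounded up to align_mask */
	if (r)
		return r;			/* -ENOMEM only if the request exceeds ring->max_dw */

	amdgpu_ring_write(ring, ring->nop);
	amdgpu_ring_write(ring, payload);

	/* on a mid-emission error a caller would roll back with amdgpu_ring_undo(ring) */

	amdgpu_ring_commit(ring);		/* pads with NOPs and bumps the hardware wptr */
	return 0;
}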
+5 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
··· 321 321 int i, r; 322 322 signed long t; 323 323 324 - BUG_ON(align > sa_manager->align); 325 - BUG_ON(size > sa_manager->size); 324 + if (WARN_ON_ONCE(align > sa_manager->align)) 325 + return -EINVAL; 326 + 327 + if (WARN_ON_ONCE(size > sa_manager->size)) 328 + return -EINVAL; 326 329 327 330 *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL); 328 331 if ((*sa_bo) == NULL) {
-108
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
··· 1 - /* 2 - * Copyright 2015 Advanced Micro Devices, Inc. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 21 - * 22 - * 23 - */ 24 - #include <linux/kthread.h> 25 - #include <linux/wait.h> 26 - #include <linux/sched.h> 27 - #include <drm/drmP.h> 28 - #include "amdgpu.h" 29 - #include "amdgpu_trace.h" 30 - 31 - static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) 32 - { 33 - struct amdgpu_job *job = to_amdgpu_job(sched_job); 34 - return amdgpu_sync_get_fence(&job->ibs->sync); 35 - } 36 - 37 - static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) 38 - { 39 - struct amdgpu_fence *fence = NULL; 40 - struct amdgpu_job *job; 41 - int r; 42 - 43 - if (!sched_job) { 44 - DRM_ERROR("job is null\n"); 45 - return NULL; 46 - } 47 - job = to_amdgpu_job(sched_job); 48 - trace_amdgpu_sched_run_job(job); 49 - r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner); 50 - if (r) { 51 - DRM_ERROR("Error scheduling IBs (%d)\n", r); 52 - goto err; 53 - } 54 - 55 - fence = job->ibs[job->num_ibs - 1].fence; 56 - fence_get(&fence->base); 57 - 58 - err: 59 - if (job->free_job) 60 - job->free_job(job); 61 - 62 - kfree(job); 63 - return fence ? 
&fence->base : NULL; 64 - } 65 - 66 - struct amd_sched_backend_ops amdgpu_sched_ops = { 67 - .dependency = amdgpu_sched_dependency, 68 - .run_job = amdgpu_sched_run_job, 69 - }; 70 - 71 - int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, 72 - struct amdgpu_ring *ring, 73 - struct amdgpu_ib *ibs, 74 - unsigned num_ibs, 75 - int (*free_job)(struct amdgpu_job *), 76 - void *owner, 77 - struct fence **f) 78 - { 79 - int r = 0; 80 - if (amdgpu_enable_scheduler) { 81 - struct amdgpu_job *job = 82 - kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); 83 - if (!job) 84 - return -ENOMEM; 85 - job->base.sched = &ring->sched; 86 - job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; 87 - job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); 88 - if (!job->base.s_fence) { 89 - kfree(job); 90 - return -ENOMEM; 91 - } 92 - *f = fence_get(&job->base.s_fence->base); 93 - 94 - job->adev = adev; 95 - job->ibs = ibs; 96 - job->num_ibs = num_ibs; 97 - job->owner = owner; 98 - job->free_job = free_job; 99 - amd_sched_entity_push_job(&job->base); 100 - } else { 101 - r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); 102 - if (r) 103 - return r; 104 - *f = fence_get(&ibs[num_ibs - 1].fence->base); 105 - } 106 - 107 - return 0; 108 - }
-102
drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
··· 1 - /* 2 - * Copyright 2011 Christian König. 3 - * All Rights Reserved. 4 - * 5 - * Permission is hereby granted, free of charge, to any person obtaining a 6 - * copy of this software and associated documentation files (the 7 - * "Software"), to deal in the Software without restriction, including 8 - * without limitation the rights to use, copy, modify, merge, publish, 9 - * distribute, sub license, and/or sell copies of the Software, and to 10 - * permit persons to whom the Software is furnished to do so, subject to 11 - * the following conditions: 12 - * 13 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16 - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17 - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18 - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19 - * USE OR OTHER DEALINGS IN THE SOFTWARE. 20 - * 21 - * The above copyright notice and this permission notice (including the 22 - * next paragraph) shall be included in all copies or substantial portions 23 - * of the Software. 24 - * 25 - */ 26 - /* 27 - * Authors: 28 - * Christian König <deathsimple@vodafone.de> 29 - */ 30 - #include <drm/drmP.h> 31 - #include "amdgpu.h" 32 - #include "amdgpu_trace.h" 33 - 34 - int amdgpu_semaphore_create(struct amdgpu_device *adev, 35 - struct amdgpu_semaphore **semaphore) 36 - { 37 - int r; 38 - 39 - *semaphore = kmalloc(sizeof(struct amdgpu_semaphore), GFP_KERNEL); 40 - if (*semaphore == NULL) { 41 - return -ENOMEM; 42 - } 43 - r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, 44 - &(*semaphore)->sa_bo, 8, 8); 45 - if (r) { 46 - kfree(*semaphore); 47 - *semaphore = NULL; 48 - return r; 49 - } 50 - (*semaphore)->waiters = 0; 51 - (*semaphore)->gpu_addr = amdgpu_sa_bo_gpu_addr((*semaphore)->sa_bo); 52 - 53 - *((uint64_t *)amdgpu_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; 54 - 55 - return 0; 56 - } 57 - 58 - bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring, 59 - struct amdgpu_semaphore *semaphore) 60 - { 61 - trace_amdgpu_semaphore_signale(ring->idx, semaphore); 62 - 63 - if (amdgpu_ring_emit_semaphore(ring, semaphore, false)) { 64 - --semaphore->waiters; 65 - 66 - /* for debugging lockup only, used by sysfs debug files */ 67 - ring->last_semaphore_signal_addr = semaphore->gpu_addr; 68 - return true; 69 - } 70 - return false; 71 - } 72 - 73 - bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring, 74 - struct amdgpu_semaphore *semaphore) 75 - { 76 - trace_amdgpu_semaphore_wait(ring->idx, semaphore); 77 - 78 - if (amdgpu_ring_emit_semaphore(ring, semaphore, true)) { 79 - ++semaphore->waiters; 80 - 81 - /* for debugging lockup only, used by sysfs debug files */ 82 - ring->last_semaphore_wait_addr = semaphore->gpu_addr; 83 - return true; 84 - } 85 - return false; 86 - } 87 - 88 - void amdgpu_semaphore_free(struct amdgpu_device *adev, 89 - struct amdgpu_semaphore **semaphore, 90 - struct fence *fence) 91 - { 92 - if (semaphore == NULL || *semaphore == NULL) { 93 - return; 94 - } 95 - if ((*semaphore)->waiters > 0) { 96 - dev_err(adev->dev, "semaphore %p has more waiters than signalers," 97 - " hardware lockup imminent!\n", *semaphore); 98 - } 99 - amdgpu_sa_bo_free(adev, &(*semaphore)->sa_bo, fence); 100 - kfree(*semaphore); 101 - *semaphore = NULL; 102 - }
+13 -141
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
··· 46 46 */ 47 47 void amdgpu_sync_create(struct amdgpu_sync *sync) 48 48 { 49 - unsigned i; 50 - 51 - for (i = 0; i < AMDGPU_NUM_SYNCS; ++i) 52 - sync->semaphores[i] = NULL; 53 - 54 - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 55 - sync->sync_to[i] = NULL; 56 - 57 49 hash_init(sync->fences); 58 50 sync->last_vm_update = NULL; 59 51 } ··· 99 107 struct fence *f) 100 108 { 101 109 struct amdgpu_sync_entry *e; 102 - struct amdgpu_fence *fence; 103 110 104 111 if (!f) 105 112 return 0; ··· 107 116 amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) 108 117 amdgpu_sync_keep_later(&sync->last_vm_update, f); 109 118 110 - fence = to_amdgpu_fence(f); 111 - if (!fence || fence->ring->adev != adev) { 112 - hash_for_each_possible(sync->fences, e, node, f->context) { 113 - if (unlikely(e->fence->context != f->context)) 114 - continue; 119 + hash_for_each_possible(sync->fences, e, node, f->context) { 120 + if (unlikely(e->fence->context != f->context)) 121 + continue; 115 122 116 - amdgpu_sync_keep_later(&e->fence, f); 117 - return 0; 118 - } 119 - 120 - e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL); 121 - if (!e) 122 - return -ENOMEM; 123 - 124 - hash_add(sync->fences, &e->node, f->context); 125 - e->fence = fence_get(f); 123 + amdgpu_sync_keep_later(&e->fence, f); 126 124 return 0; 127 125 } 128 126 129 - amdgpu_sync_keep_later(&sync->sync_to[fence->ring->idx], f); 127 + e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL); 128 + if (!e) 129 + return -ENOMEM; 130 130 131 + hash_add(sync->fences, &e->node, f->context); 132 + e->fence = fence_get(f); 131 133 return 0; 132 134 } 133 135 ··· 137 153 } 138 154 139 155 /** 140 - * amdgpu_sync_resv - use the semaphores to sync to a reservation object 156 + * amdgpu_sync_resv - sync to a reservation object 141 157 * 142 158 * @sync: sync object to add fences from reservation object to 143 159 * @resv: reservation object with embedded fence 144 160 * @shared: true if we should only sync to the exclusive fence 145 161 * 146 - * Sync to the fence using the semaphore objects 162 + * Sync to the fence 147 163 */ 148 164 int amdgpu_sync_resv(struct amdgpu_device *adev, 149 165 struct amdgpu_sync *sync, ··· 234 250 kfree(e); 235 251 } 236 252 237 - if (amdgpu_enable_semaphores) 238 - return 0; 239 - 240 - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 241 - struct fence *fence = sync->sync_to[i]; 242 - if (!fence) 243 - continue; 244 - 245 - r = fence_wait(fence, false); 246 - if (r) 247 - return r; 248 - } 249 - 250 - return 0; 251 - } 252 - 253 - /** 254 - * amdgpu_sync_rings - sync ring to all registered fences 255 - * 256 - * @sync: sync object to use 257 - * @ring: ring that needs sync 258 - * 259 - * Ensure that all registered fences are signaled before letting 260 - * the ring continue. The caller must hold the ring lock. 
261 - */ 262 - int amdgpu_sync_rings(struct amdgpu_sync *sync, 263 - struct amdgpu_ring *ring) 264 - { 265 - struct amdgpu_device *adev = ring->adev; 266 - unsigned count = 0; 267 - int i, r; 268 - 269 - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 270 - struct amdgpu_ring *other = adev->rings[i]; 271 - struct amdgpu_semaphore *semaphore; 272 - struct amdgpu_fence *fence; 273 - 274 - if (!sync->sync_to[i]) 275 - continue; 276 - 277 - fence = to_amdgpu_fence(sync->sync_to[i]); 278 - 279 - /* check if we really need to sync */ 280 - if (!amdgpu_enable_scheduler && 281 - !amdgpu_fence_need_sync(fence, ring)) 282 - continue; 283 - 284 - /* prevent GPU deadlocks */ 285 - if (!other->ready) { 286 - dev_err(adev->dev, "Syncing to a disabled ring!"); 287 - return -EINVAL; 288 - } 289 - 290 - if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) { 291 - r = fence_wait(sync->sync_to[i], true); 292 - if (r) 293 - return r; 294 - continue; 295 - } 296 - 297 - if (count >= AMDGPU_NUM_SYNCS) { 298 - /* not enough room, wait manually */ 299 - r = fence_wait(&fence->base, false); 300 - if (r) 301 - return r; 302 - continue; 303 - } 304 - r = amdgpu_semaphore_create(adev, &semaphore); 305 - if (r) 306 - return r; 307 - 308 - sync->semaphores[count++] = semaphore; 309 - 310 - /* allocate enough space for sync command */ 311 - r = amdgpu_ring_alloc(other, 16); 312 - if (r) 313 - return r; 314 - 315 - /* emit the signal semaphore */ 316 - if (!amdgpu_semaphore_emit_signal(other, semaphore)) { 317 - /* signaling wasn't successful wait manually */ 318 - amdgpu_ring_undo(other); 319 - r = fence_wait(&fence->base, false); 320 - if (r) 321 - return r; 322 - continue; 323 - } 324 - 325 - /* we assume caller has already allocated space on waiters ring */ 326 - if (!amdgpu_semaphore_emit_wait(ring, semaphore)) { 327 - /* waiting wasn't successful wait manually */ 328 - amdgpu_ring_undo(other); 329 - r = fence_wait(&fence->base, false); 330 - if (r) 331 - return r; 332 - continue; 333 - } 334 - 335 - amdgpu_ring_commit(other); 336 - amdgpu_fence_note_sync(fence, ring); 337 - } 338 - 339 253 return 0; 340 254 } 341 255 342 256 /** 343 257 * amdgpu_sync_free - free the sync object 344 258 * 345 - * @adev: amdgpu_device pointer 346 259 * @sync: sync object to use 347 - * @fence: fence to use for the free 348 260 * 349 - * Free the sync object by freeing all semaphores in it. 261 + * Free the sync object. 350 262 */ 351 - void amdgpu_sync_free(struct amdgpu_device *adev, 352 - struct amdgpu_sync *sync, 353 - struct fence *fence) 263 + void amdgpu_sync_free(struct amdgpu_sync *sync) 354 264 { 355 265 struct amdgpu_sync_entry *e; 356 266 struct hlist_node *tmp; ··· 255 377 fence_put(e->fence); 256 378 kfree(e); 257 379 } 258 - 259 - for (i = 0; i < AMDGPU_NUM_SYNCS; ++i) 260 - amdgpu_semaphore_free(adev, &sync->semaphores[i], fence); 261 - 262 - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 263 - fence_put(sync->sync_to[i]); 264 380 265 381 fence_put(sync->last_vm_update); 266 382 }
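After this hunk an amdgpu_sync object is just a hash of fences keyed by fence context — only the latest fence per context is kept, via amdgpu_sync_keep_later() — plus last_vm_update; the per-ring sync_to[] array and the semaphore slots are gone, and amdgpu_sync_free() takes a single argument. A short sketch of the reduced life cycle, using the signatures shown in this series; sync_example() is illustrative only, and resv stands for the reservation object of whatever buffer the caller wants to serialise against.

static int sync_example(struct amdgpu_device *adev,
			struct reservation_object *resv)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);

	/* collect the fences currently attached to the buffer */
	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r)
		goto out;

	r = amdgpu_sync_wait(&sync);	/* CPU wait on every collected fence */
out:
	amdgpu_sync_free(&sync);
	return r;
}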
-237
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
··· 238 238 amdgpu_do_test_moves(adev); 239 239 } 240 240 241 - static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev, 242 - struct amdgpu_ring *ring, 243 - struct fence **fence) 244 - { 245 - uint32_t handle = ring->idx ^ 0xdeafbeef; 246 - int r; 247 - 248 - if (ring == &adev->uvd.ring) { 249 - r = amdgpu_uvd_get_create_msg(ring, handle, NULL); 250 - if (r) { 251 - DRM_ERROR("Failed to get dummy create msg\n"); 252 - return r; 253 - } 254 - 255 - r = amdgpu_uvd_get_destroy_msg(ring, handle, fence); 256 - if (r) { 257 - DRM_ERROR("Failed to get dummy destroy msg\n"); 258 - return r; 259 - } 260 - 261 - } else if (ring == &adev->vce.ring[0] || 262 - ring == &adev->vce.ring[1]) { 263 - r = amdgpu_vce_get_create_msg(ring, handle, NULL); 264 - if (r) { 265 - DRM_ERROR("Failed to get dummy create msg\n"); 266 - return r; 267 - } 268 - 269 - r = amdgpu_vce_get_destroy_msg(ring, handle, fence); 270 - if (r) { 271 - DRM_ERROR("Failed to get dummy destroy msg\n"); 272 - return r; 273 - } 274 - } else { 275 - struct amdgpu_fence *a_fence = NULL; 276 - r = amdgpu_ring_lock(ring, 64); 277 - if (r) { 278 - DRM_ERROR("Failed to lock ring A %d\n", ring->idx); 279 - return r; 280 - } 281 - amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence); 282 - amdgpu_ring_unlock_commit(ring); 283 - *fence = &a_fence->base; 284 - } 285 - return 0; 286 - } 287 - 288 241 void amdgpu_test_ring_sync(struct amdgpu_device *adev, 289 242 struct amdgpu_ring *ringA, 290 243 struct amdgpu_ring *ringB) 291 244 { 292 - struct fence *fence1 = NULL, *fence2 = NULL; 293 - struct amdgpu_semaphore *semaphore = NULL; 294 - int r; 295 - 296 - r = amdgpu_semaphore_create(adev, &semaphore); 297 - if (r) { 298 - DRM_ERROR("Failed to create semaphore\n"); 299 - goto out_cleanup; 300 - } 301 - 302 - r = amdgpu_ring_lock(ringA, 64); 303 - if (r) { 304 - DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); 305 - goto out_cleanup; 306 - } 307 - amdgpu_semaphore_emit_wait(ringA, semaphore); 308 - amdgpu_ring_unlock_commit(ringA); 309 - 310 - r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1); 311 - if (r) 312 - goto out_cleanup; 313 - 314 - r = amdgpu_ring_lock(ringA, 64); 315 - if (r) { 316 - DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); 317 - goto out_cleanup; 318 - } 319 - amdgpu_semaphore_emit_wait(ringA, semaphore); 320 - amdgpu_ring_unlock_commit(ringA); 321 - 322 - r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2); 323 - if (r) 324 - goto out_cleanup; 325 - 326 - mdelay(1000); 327 - 328 - if (fence_is_signaled(fence1)) { 329 - DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n"); 330 - goto out_cleanup; 331 - } 332 - 333 - r = amdgpu_ring_lock(ringB, 64); 334 - if (r) { 335 - DRM_ERROR("Failed to lock ring B %p\n", ringB); 336 - goto out_cleanup; 337 - } 338 - amdgpu_semaphore_emit_signal(ringB, semaphore); 339 - amdgpu_ring_unlock_commit(ringB); 340 - 341 - r = fence_wait(fence1, false); 342 - if (r) { 343 - DRM_ERROR("Failed to wait for sync fence 1\n"); 344 - goto out_cleanup; 345 - } 346 - 347 - mdelay(1000); 348 - 349 - if (fence_is_signaled(fence2)) { 350 - DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n"); 351 - goto out_cleanup; 352 - } 353 - 354 - r = amdgpu_ring_lock(ringB, 64); 355 - if (r) { 356 - DRM_ERROR("Failed to lock ring B %p\n", ringB); 357 - goto out_cleanup; 358 - } 359 - amdgpu_semaphore_emit_signal(ringB, semaphore); 360 - amdgpu_ring_unlock_commit(ringB); 361 - 362 - r = fence_wait(fence2, false); 363 - if (r) { 364 - DRM_ERROR("Failed 
to wait for sync fence 1\n"); 365 - goto out_cleanup; 366 - } 367 - 368 - out_cleanup: 369 - amdgpu_semaphore_free(adev, &semaphore, NULL); 370 - 371 - if (fence1) 372 - fence_put(fence1); 373 - 374 - if (fence2) 375 - fence_put(fence2); 376 - 377 - if (r) 378 - printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); 379 245 } 380 246 381 247 static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, ··· 249 383 struct amdgpu_ring *ringB, 250 384 struct amdgpu_ring *ringC) 251 385 { 252 - struct fence *fenceA = NULL, *fenceB = NULL; 253 - struct amdgpu_semaphore *semaphore = NULL; 254 - bool sigA, sigB; 255 - int i, r; 256 - 257 - r = amdgpu_semaphore_create(adev, &semaphore); 258 - if (r) { 259 - DRM_ERROR("Failed to create semaphore\n"); 260 - goto out_cleanup; 261 - } 262 - 263 - r = amdgpu_ring_lock(ringA, 64); 264 - if (r) { 265 - DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); 266 - goto out_cleanup; 267 - } 268 - amdgpu_semaphore_emit_wait(ringA, semaphore); 269 - amdgpu_ring_unlock_commit(ringA); 270 - 271 - r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA); 272 - if (r) 273 - goto out_cleanup; 274 - 275 - r = amdgpu_ring_lock(ringB, 64); 276 - if (r) { 277 - DRM_ERROR("Failed to lock ring B %d\n", ringB->idx); 278 - goto out_cleanup; 279 - } 280 - amdgpu_semaphore_emit_wait(ringB, semaphore); 281 - amdgpu_ring_unlock_commit(ringB); 282 - r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB); 283 - if (r) 284 - goto out_cleanup; 285 - 286 - mdelay(1000); 287 - 288 - if (fence_is_signaled(fenceA)) { 289 - DRM_ERROR("Fence A signaled without waiting for semaphore.\n"); 290 - goto out_cleanup; 291 - } 292 - if (fence_is_signaled(fenceB)) { 293 - DRM_ERROR("Fence B signaled without waiting for semaphore.\n"); 294 - goto out_cleanup; 295 - } 296 - 297 - r = amdgpu_ring_lock(ringC, 64); 298 - if (r) { 299 - DRM_ERROR("Failed to lock ring B %p\n", ringC); 300 - goto out_cleanup; 301 - } 302 - amdgpu_semaphore_emit_signal(ringC, semaphore); 303 - amdgpu_ring_unlock_commit(ringC); 304 - 305 - for (i = 0; i < 30; ++i) { 306 - mdelay(100); 307 - sigA = fence_is_signaled(fenceA); 308 - sigB = fence_is_signaled(fenceB); 309 - if (sigA || sigB) 310 - break; 311 - } 312 - 313 - if (!sigA && !sigB) { 314 - DRM_ERROR("Neither fence A nor B has been signaled\n"); 315 - goto out_cleanup; 316 - } else if (sigA && sigB) { 317 - DRM_ERROR("Both fence A and B has been signaled\n"); 318 - goto out_cleanup; 319 - } 320 - 321 - DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B'); 322 - 323 - r = amdgpu_ring_lock(ringC, 64); 324 - if (r) { 325 - DRM_ERROR("Failed to lock ring B %p\n", ringC); 326 - goto out_cleanup; 327 - } 328 - amdgpu_semaphore_emit_signal(ringC, semaphore); 329 - amdgpu_ring_unlock_commit(ringC); 330 - 331 - mdelay(1000); 332 - 333 - r = fence_wait(fenceA, false); 334 - if (r) { 335 - DRM_ERROR("Failed to wait for sync fence A\n"); 336 - goto out_cleanup; 337 - } 338 - r = fence_wait(fenceB, false); 339 - if (r) { 340 - DRM_ERROR("Failed to wait for sync fence B\n"); 341 - goto out_cleanup; 342 - } 343 - 344 - out_cleanup: 345 - amdgpu_semaphore_free(adev, &semaphore, NULL); 346 - 347 - if (fenceA) 348 - fence_put(fenceA); 349 - 350 - if (fenceB) 351 - fence_put(fenceB); 352 - 353 - if (r) 354 - printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); 355 386 } 356 387 357 388 static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA,
+11 -44
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
··· 38 38 39 39 TP_fast_assign( 40 40 __entry->bo_list = p->bo_list; 41 - __entry->ring = p->ibs[i].ring->idx; 42 - __entry->dw = p->ibs[i].length_dw; 41 + __entry->ring = p->job->ring->idx; 42 + __entry->dw = p->job->ibs[i].length_dw; 43 43 __entry->fences = amdgpu_fence_count_emitted( 44 - p->ibs[i].ring); 44 + p->job->ring); 45 45 ), 46 46 TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", 47 47 __entry->bo_list, __entry->ring, __entry->dw, ··· 65 65 __entry->sched_job = &job->base; 66 66 __entry->ib = job->ibs; 67 67 __entry->fence = &job->base.s_fence->base; 68 - __entry->ring_name = job->ibs[0].ring->name; 68 + __entry->ring_name = job->ring->name; 69 69 __entry->num_ibs = job->num_ibs; 70 70 ), 71 71 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", ··· 90 90 __entry->sched_job = &job->base; 91 91 __entry->ib = job->ibs; 92 92 __entry->fence = &job->base.s_fence->base; 93 - __entry->ring_name = job->ibs[0].ring->name; 93 + __entry->ring_name = job->ring->name; 94 94 __entry->num_ibs = job->num_ibs; 95 95 ), 96 96 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", ··· 100 100 101 101 102 102 TRACE_EVENT(amdgpu_vm_grab_id, 103 - TP_PROTO(unsigned vmid, int ring), 104 - TP_ARGS(vmid, ring), 103 + TP_PROTO(struct amdgpu_vm *vm, unsigned vmid, int ring), 104 + TP_ARGS(vm, vmid, ring), 105 105 TP_STRUCT__entry( 106 + __field(struct amdgpu_vm *, vm) 106 107 __field(u32, vmid) 107 108 __field(u32, ring) 108 109 ), 109 110 110 111 TP_fast_assign( 112 + __entry->vm = vm; 111 113 __entry->vmid = vmid; 112 114 __entry->ring = ring; 113 115 ), 114 - TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring) 116 + TP_printk("vm=%p, id=%u, ring=%u", __entry->vm, __entry->vmid, 117 + __entry->ring) 115 118 ); 116 119 117 120 TRACE_EVENT(amdgpu_vm_bo_map, ··· 248 245 __entry->bo = bo; 249 246 ), 250 247 TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) 251 - ); 252 - 253 - DECLARE_EVENT_CLASS(amdgpu_semaphore_request, 254 - 255 - TP_PROTO(int ring, struct amdgpu_semaphore *sem), 256 - 257 - TP_ARGS(ring, sem), 258 - 259 - TP_STRUCT__entry( 260 - __field(int, ring) 261 - __field(signed, waiters) 262 - __field(uint64_t, gpu_addr) 263 - ), 264 - 265 - TP_fast_assign( 266 - __entry->ring = ring; 267 - __entry->waiters = sem->waiters; 268 - __entry->gpu_addr = sem->gpu_addr; 269 - ), 270 - 271 - TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring, 272 - __entry->waiters, __entry->gpu_addr) 273 - ); 274 - 275 - DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_signale, 276 - 277 - TP_PROTO(int ring, struct amdgpu_semaphore *sem), 278 - 279 - TP_ARGS(ring, sem) 280 - ); 281 - 282 - DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_wait, 283 - 284 - TP_PROTO(int ring, struct amdgpu_semaphore *sem), 285 - 286 - TP_ARGS(ring, sem) 287 248 ); 288 249 289 250 #endif
+50 -31
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 77 77 static int amdgpu_ttm_global_init(struct amdgpu_device *adev) 78 78 { 79 79 struct drm_global_reference *global_ref; 80 + struct amdgpu_ring *ring; 81 + struct amd_sched_rq *rq; 80 82 int r; 81 83 82 84 adev->mman.mem_global_referenced = false; ··· 108 106 return r; 109 107 } 110 108 109 + ring = adev->mman.buffer_funcs_ring; 110 + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; 111 + r = amd_sched_entity_init(&ring->sched, &adev->mman.entity, 112 + rq, amdgpu_sched_jobs); 113 + if (r != 0) { 114 + DRM_ERROR("Failed setting up TTM BO move run queue.\n"); 115 + drm_global_item_unref(&adev->mman.mem_global_ref); 116 + drm_global_item_unref(&adev->mman.bo_global_ref.ref); 117 + return r; 118 + } 119 + 111 120 adev->mman.mem_global_referenced = true; 121 + 112 122 return 0; 113 123 } 114 124 115 125 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) 116 126 { 117 127 if (adev->mman.mem_global_referenced) { 128 + amd_sched_entity_fini(adev->mman.entity.sched, 129 + &adev->mman.entity); 118 130 drm_global_item_unref(&adev->mman.bo_global_ref.ref); 119 131 drm_global_item_unref(&adev->mman.mem_global_ref); 120 132 adev->mman.mem_global_referenced = false; ··· 515 499 enum dma_data_direction direction = write ? 516 500 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 517 501 518 - if (current->mm != gtt->usermm) 519 - return -EPERM; 520 - 521 502 if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { 522 503 /* check that we only pin down anonymous memory 523 504 to prevent problems with writeback */ ··· 786 773 return 0; 787 774 } 788 775 789 - bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm) 776 + struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) 790 777 { 791 778 struct amdgpu_ttm_tt *gtt = (void *)ttm; 792 779 793 780 if (gtt == NULL) 781 + return NULL; 782 + 783 + return gtt->usermm; 784 + } 785 + 786 + bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, 787 + unsigned long end) 788 + { 789 + struct amdgpu_ttm_tt *gtt = (void *)ttm; 790 + unsigned long size; 791 + 792 + if (gtt == NULL) 794 793 return false; 795 794 796 - return !!gtt->userptr; 795 + if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr) 796 + return false; 797 + 798 + size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; 799 + if (gtt->userptr > end || gtt->userptr + size <= start) 800 + return false; 801 + 802 + return true; 797 803 } 798 804 799 805 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) ··· 1028 996 struct fence **fence) 1029 997 { 1030 998 struct amdgpu_device *adev = ring->adev; 999 + struct amdgpu_job *job; 1000 + 1031 1001 uint32_t max_bytes; 1032 1002 unsigned num_loops, num_dw; 1033 - struct amdgpu_ib *ib; 1034 1003 unsigned i; 1035 1004 int r; 1036 1005 ··· 1043 1010 while (num_dw & 0x7) 1044 1011 num_dw++; 1045 1012 1046 - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); 1047 - if (!ib) 1048 - return -ENOMEM; 1049 - 1050 - r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib); 1051 - if (r) { 1052 - kfree(ib); 1013 + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); 1014 + if (r) 1053 1015 return r; 1054 - } 1055 - 1056 - ib->length_dw = 0; 1057 1016 1058 1017 if (resv) { 1059 - r = amdgpu_sync_resv(adev, &ib->sync, resv, 1018 + r = amdgpu_sync_resv(adev, &job->sync, resv, 1060 1019 AMDGPU_FENCE_OWNER_UNDEFINED); 1061 1020 if (r) { 1062 1021 DRM_ERROR("sync failed (%d).\n", r); ··· 1059 1034 for (i = 0; i < num_loops; i++) { 1060 1035 uint32_t cur_size_in_bytes = min(byte_count, max_bytes); 1061 1036 1062 - amdgpu_emit_copy_buffer(adev, ib, src_offset, 
dst_offset, 1063 - cur_size_in_bytes); 1037 + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset, 1038 + dst_offset, cur_size_in_bytes); 1064 1039 1065 1040 src_offset += cur_size_in_bytes; 1066 1041 dst_offset += cur_size_in_bytes; 1067 1042 byte_count -= cur_size_in_bytes; 1068 1043 } 1069 1044 1070 - amdgpu_vm_pad_ib(adev, ib); 1071 - WARN_ON(ib->length_dw > num_dw); 1072 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, 1073 - &amdgpu_vm_free_job, 1074 - AMDGPU_FENCE_OWNER_UNDEFINED, 1075 - fence); 1045 + amdgpu_ring_pad_ib(ring, &job->ibs[0]); 1046 + WARN_ON(job->ibs[0].length_dw > num_dw); 1047 + r = amdgpu_job_submit(job, ring, &adev->mman.entity, 1048 + AMDGPU_FENCE_OWNER_UNDEFINED, fence); 1076 1049 if (r) 1077 1050 goto error_free; 1078 1051 1079 - if (!amdgpu_enable_scheduler) { 1080 - amdgpu_ib_free(adev, ib); 1081 - kfree(ib); 1082 - } 1083 1052 return 0; 1053 + 1084 1054 error_free: 1085 - amdgpu_ib_free(adev, ib); 1086 - kfree(ib); 1055 + amdgpu_job_free(job); 1087 1056 return r; 1088 1057 } 1089 1058
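The copy path above doubles as the template for kernel-internal submissions now that amdgpu_sched_ib_submit_kernel_helper() is gone (see the deleted amdgpu_sched.c earlier in this diff): allocate a job with an embedded IB, fill and pad the IB, then push the job to a scheduler entity, freeing it manually only on the error path. A condensed sketch of that pattern follows; submit_example() and its "fill the IB" step are placeholders, and entity would be one set up with amd_sched_entity_init() as done for adev->mman.entity above.

static int submit_example(struct amdgpu_device *adev, struct amdgpu_ring *ring,
			  struct amd_sched_entity *entity, struct fence **fence)
{
	struct amdgpu_job *job;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);	/* job with one 64-byte IB */
	if (r)
		return r;

	/* fill job->ibs[0].ptr[] and set job->ibs[0].length_dw here */

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, ring, entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto err_free;

	return 0;	/* on success the scheduler owns and later frees the job */

err_free:
	amdgpu_job_free(job);
	return r;
}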
+52 -46
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 91 91 92 92 int amdgpu_uvd_sw_init(struct amdgpu_device *adev) 93 93 { 94 + struct amdgpu_ring *ring; 95 + struct amd_sched_rq *rq; 94 96 unsigned long bo_size; 95 97 const char *fw_name; 96 98 const struct common_firmware_header *hdr; ··· 193 191 194 192 amdgpu_bo_unreserve(adev->uvd.vcpu_bo); 195 193 194 + ring = &adev->uvd.ring; 195 + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; 196 + r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity, 197 + rq, amdgpu_sched_jobs); 198 + if (r != 0) { 199 + DRM_ERROR("Failed setting up UVD run queue.\n"); 200 + return r; 201 + } 202 + 196 203 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { 197 204 atomic_set(&adev->uvd.handles[i], 0); 198 205 adev->uvd.filp[i] = NULL; ··· 220 209 221 210 if (adev->uvd.vcpu_bo == NULL) 222 211 return 0; 212 + 213 + amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); 223 214 224 215 r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); 225 216 if (!r) { ··· 254 241 255 242 amdgpu_uvd_note_usage(adev); 256 243 257 - r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence); 244 + r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence); 258 245 if (r) { 259 246 DRM_ERROR("Error destroying UVD (%d)!\n", r); 260 247 continue; ··· 308 295 309 296 amdgpu_uvd_note_usage(adev); 310 297 311 - r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence); 298 + r = amdgpu_uvd_get_destroy_msg(ring, handle, 299 + false, &fence); 312 300 if (r) { 313 301 DRM_ERROR("Error destroying UVD (%d)!\n", r); 314 302 continue; ··· 630 616 { 631 617 struct amdgpu_bo_va_mapping *mapping; 632 618 struct amdgpu_bo *bo; 633 - struct amdgpu_ib *ib; 634 619 uint32_t cmd, lo, hi; 635 620 uint64_t start, end; 636 621 uint64_t addr; ··· 651 638 addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE; 652 639 start += addr; 653 640 654 - ib = &ctx->parser->ibs[ctx->ib_idx]; 655 - ib->ptr[ctx->data0] = start & 0xFFFFFFFF; 656 - ib->ptr[ctx->data1] = start >> 32; 641 + amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0, 642 + lower_32_bits(start)); 643 + amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1, 644 + upper_32_bits(start)); 657 645 658 646 cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1; 659 647 if (cmd < 0x4) { ··· 716 702 static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx, 717 703 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx)) 718 704 { 719 - struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx]; 705 + struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx]; 720 706 int i, r; 721 707 722 708 ctx->idx++; ··· 762 748 static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx, 763 749 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx)) 764 750 { 765 - struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx]; 751 + struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx]; 766 752 int r; 767 753 768 754 for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) { ··· 804 790 [0x00000003] = 2048, 805 791 [0x00000004] = 0xFFFFFFFF, 806 792 }; 807 - struct amdgpu_ib *ib = &parser->ibs[ib_idx]; 793 + struct amdgpu_ib *ib = &parser->job->ibs[ib_idx]; 808 794 int r; 809 795 810 796 if (ib->length_dw % 16) { ··· 837 823 return 0; 838 824 } 839 825 840 - static int amdgpu_uvd_free_job( 841 - struct amdgpu_job *job) 842 - { 843 - amdgpu_ib_free(job->adev, job->ibs); 844 - kfree(job->ibs); 845 - return 0; 846 - } 847 - 848 - static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, 849 - struct amdgpu_bo *bo, 850 - struct fence **fence) 826 + static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, 
827 + bool direct, struct fence **fence) 851 828 { 852 829 struct ttm_validate_buffer tv; 853 830 struct ww_acquire_ctx ticket; 854 831 struct list_head head; 855 - struct amdgpu_ib *ib = NULL; 832 + struct amdgpu_job *job; 833 + struct amdgpu_ib *ib; 856 834 struct fence *f = NULL; 857 835 struct amdgpu_device *adev = ring->adev; 858 836 uint64_t addr; ··· 868 862 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 869 863 if (r) 870 864 goto err; 871 - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); 872 - if (!ib) { 873 - r = -ENOMEM; 874 - goto err; 875 - } 876 - r = amdgpu_ib_get(ring, NULL, 64, ib); 877 - if (r) 878 - goto err1; 879 865 866 + r = amdgpu_job_alloc_with_ib(adev, 64, &job); 867 + if (r) 868 + goto err; 869 + 870 + ib = &job->ibs[0]; 880 871 addr = amdgpu_bo_gpu_offset(bo); 881 872 ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0); 882 873 ib->ptr[1] = addr; ··· 885 882 ib->ptr[i] = PACKET2(0); 886 883 ib->length_dw = 16; 887 884 888 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, 889 - &amdgpu_uvd_free_job, 890 - AMDGPU_FENCE_OWNER_UNDEFINED, 891 - &f); 892 - if (r) 893 - goto err2; 885 + if (direct) { 886 + r = amdgpu_ib_schedule(ring, 1, ib, 887 + AMDGPU_FENCE_OWNER_UNDEFINED, NULL, &f); 888 + if (r) 889 + goto err_free; 890 + 891 + amdgpu_job_free(job); 892 + } else { 893 + r = amdgpu_job_submit(job, ring, &adev->uvd.entity, 894 + AMDGPU_FENCE_OWNER_UNDEFINED, &f); 895 + if (r) 896 + goto err_free; 897 + } 894 898 895 899 ttm_eu_fence_buffer_objects(&ticket, &head, f); 896 900 ··· 905 895 *fence = fence_get(f); 906 896 amdgpu_bo_unref(&bo); 907 897 fence_put(f); 908 - if (amdgpu_enable_scheduler) 909 - return 0; 910 898 911 - amdgpu_ib_free(ring->adev, ib); 912 - kfree(ib); 913 899 return 0; 914 - err2: 915 - amdgpu_ib_free(ring->adev, ib); 916 - err1: 917 - kfree(ib); 900 + 901 + err_free: 902 + amdgpu_job_free(job); 903 + 918 904 err: 919 905 ttm_eu_backoff_reservation(&ticket, &head); 920 906 return r; ··· 965 959 amdgpu_bo_kunmap(bo); 966 960 amdgpu_bo_unreserve(bo); 967 961 968 - return amdgpu_uvd_send_msg(ring, bo, fence); 962 + return amdgpu_uvd_send_msg(ring, bo, true, fence); 969 963 } 970 964 971 965 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 972 - struct fence **fence) 966 + bool direct, struct fence **fence) 973 967 { 974 968 struct amdgpu_device *adev = ring->adev; 975 969 struct amdgpu_bo *bo; ··· 1007 1001 amdgpu_bo_kunmap(bo); 1008 1002 amdgpu_bo_unreserve(bo); 1009 1003 1010 - return amdgpu_uvd_send_msg(ring, bo, fence); 1004 + return amdgpu_uvd_send_msg(ring, bo, direct, fence); 1011 1005 } 1012 1006 1013 1007 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
··· 31 31 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, 32 32 struct fence **fence); 33 33 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 34 - struct fence **fence); 34 + bool direct, struct fence **fence); 35 35 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, 36 36 struct drm_file *filp); 37 37 int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
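With the extra bool in the prototype, callers of amdgpu_uvd_get_destroy_msg() now choose the submission mode explicitly. In this diff the handle cleanup loops in amdgpu_uvd.c queue the destroy message through the entity, while the create-message path keeps submitting directly; both call sites appear in the hunks above:

r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence);   /* cleanup: scheduled */

return amdgpu_uvd_send_msg(ring, bo, true, fence);             /* create msg: direct */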
+59 -80
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 74 74 */ 75 75 int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) 76 76 { 77 + struct amdgpu_ring *ring; 78 + struct amd_sched_rq *rq; 77 79 const char *fw_name; 78 80 const struct common_firmware_header *hdr; 79 81 unsigned ucode_version, version_major, version_minor, binary_id; ··· 172 170 return r; 173 171 } 174 172 173 + 174 + ring = &adev->vce.ring[0]; 175 + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; 176 + r = amd_sched_entity_init(&ring->sched, &adev->vce.entity, 177 + rq, amdgpu_sched_jobs); 178 + if (r != 0) { 179 + DRM_ERROR("Failed setting up VCE run queue.\n"); 180 + return r; 181 + } 182 + 175 183 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { 176 184 atomic_set(&adev->vce.handles[i], 0); 177 185 adev->vce.filp[i] = NULL; ··· 201 189 { 202 190 if (adev->vce.vcpu_bo == NULL) 203 191 return 0; 192 + 193 + amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity); 204 194 205 195 amdgpu_bo_unref(&adev->vce.vcpu_bo); 206 196 ··· 351 337 352 338 amdgpu_vce_note_usage(adev); 353 339 354 - r = amdgpu_vce_get_destroy_msg(ring, handle, NULL); 340 + r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL); 355 341 if (r) 356 342 DRM_ERROR("Error destroying VCE handle (%d)!\n", r); 357 343 358 344 adev->vce.filp[i] = NULL; 359 345 atomic_set(&adev->vce.handles[i], 0); 360 346 } 361 - } 362 - 363 - static int amdgpu_vce_free_job( 364 - struct amdgpu_job *job) 365 - { 366 - amdgpu_ib_free(job->adev, job->ibs); 367 - kfree(job->ibs); 368 - return 0; 369 347 } 370 348 371 349 /** ··· 374 368 struct fence **fence) 375 369 { 376 370 const unsigned ib_size_dw = 1024; 377 - struct amdgpu_ib *ib = NULL; 371 + struct amdgpu_job *job; 372 + struct amdgpu_ib *ib; 378 373 struct fence *f = NULL; 379 - struct amdgpu_device *adev = ring->adev; 380 374 uint64_t dummy; 381 375 int i, r; 382 376 383 - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); 384 - if (!ib) 385 - return -ENOMEM; 386 - r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib); 387 - if (r) { 388 - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 389 - kfree(ib); 377 + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); 378 + if (r) 390 379 return r; 391 - } 380 + 381 + ib = &job->ibs[0]; 392 382 393 383 dummy = ib->gpu_addr + 1024; 394 384 ··· 425 423 for (i = ib->length_dw; i < ib_size_dw; ++i) 426 424 ib->ptr[i] = 0x0; 427 425 428 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, 429 - &amdgpu_vce_free_job, 430 - AMDGPU_FENCE_OWNER_UNDEFINED, 431 - &f); 426 + r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED, 427 + NULL, &f); 432 428 if (r) 433 429 goto err; 430 + 431 + amdgpu_job_free(job); 434 432 if (fence) 435 433 *fence = fence_get(f); 436 434 fence_put(f); 437 - if (amdgpu_enable_scheduler) 438 - return 0; 435 + return 0; 436 + 439 437 err: 440 - amdgpu_ib_free(adev, ib); 441 - kfree(ib); 438 + amdgpu_job_free(job); 442 439 return r; 443 440 } 444 441 ··· 452 451 * Close up a stream for HW test or if userspace failed to do so 453 452 */ 454 453 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 455 - struct fence **fence) 454 + bool direct, struct fence **fence) 456 455 { 457 456 const unsigned ib_size_dw = 1024; 458 - struct amdgpu_ib *ib = NULL; 457 + struct amdgpu_job *job; 458 + struct amdgpu_ib *ib; 459 459 struct fence *f = NULL; 460 - struct amdgpu_device *adev = ring->adev; 461 460 uint64_t dummy; 462 461 int i, r; 463 462 464 - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); 465 - if (!ib) 466 - return -ENOMEM; 
467 - 468 - r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib); 469 - if (r) { 470 - kfree(ib); 471 - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 463 + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); 464 + if (r) 472 465 return r; 473 - } 474 466 467 + ib = &job->ibs[0]; 475 468 dummy = ib->gpu_addr + 1024; 476 469 477 470 /* stitch together an VCE destroy msg */ ··· 485 490 486 491 for (i = ib->length_dw; i < ib_size_dw; ++i) 487 492 ib->ptr[i] = 0x0; 488 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, 489 - &amdgpu_vce_free_job, 490 - AMDGPU_FENCE_OWNER_UNDEFINED, 491 - &f); 492 - if (r) 493 - goto err; 493 + 494 + if (direct) { 495 + r = amdgpu_ib_schedule(ring, 1, ib, 496 + AMDGPU_FENCE_OWNER_UNDEFINED, 497 + NULL, &f); 498 + if (r) 499 + goto err; 500 + 501 + amdgpu_job_free(job); 502 + } else { 503 + r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity, 504 + AMDGPU_FENCE_OWNER_UNDEFINED, &f); 505 + if (r) 506 + goto err; 507 + } 508 + 494 509 if (fence) 495 510 *fence = fence_get(f); 496 511 fence_put(f); 497 - if (amdgpu_enable_scheduler) 498 - return 0; 512 + return 0; 513 + 499 514 err: 500 - amdgpu_ib_free(adev, ib); 501 - kfree(ib); 515 + amdgpu_job_free(job); 502 516 return r; 503 517 } 504 518 ··· 525 521 int lo, int hi, unsigned size, uint32_t index) 526 522 { 527 523 struct amdgpu_bo_va_mapping *mapping; 528 - struct amdgpu_ib *ib = &p->ibs[ib_idx]; 529 524 struct amdgpu_bo *bo; 530 525 uint64_t addr; 531 526 ··· 553 550 addr += amdgpu_bo_gpu_offset(bo); 554 551 addr -= ((uint64_t)size) * ((uint64_t)index); 555 552 556 - ib->ptr[lo] = addr & 0xFFFFFFFF; 557 - ib->ptr[hi] = addr >> 32; 553 + amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr)); 554 + amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr)); 558 555 559 556 return 0; 560 557 } ··· 609 606 */ 610 607 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) 611 608 { 612 - struct amdgpu_ib *ib = &p->ibs[ib_idx]; 609 + struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; 613 610 unsigned fb_idx = 0, bs_idx = 0; 614 611 int session_idx = -1; 615 612 bool destroyed = false; ··· 746 743 } 747 744 748 745 /** 749 - * amdgpu_vce_ring_emit_semaphore - emit a semaphore command 750 - * 751 - * @ring: engine to use 752 - * @semaphore: address of semaphore 753 - * @emit_wait: true=emit wait, false=emit signal 754 - * 755 - */ 756 - bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring, 757 - struct amdgpu_semaphore *semaphore, 758 - bool emit_wait) 759 - { 760 - uint64_t addr = semaphore->gpu_addr; 761 - 762 - amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE); 763 - amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); 764 - amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); 765 - amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 
1 : 0)); 766 - if (!emit_wait) 767 - amdgpu_ring_write(ring, VCE_CMD_END); 768 - 769 - return true; 770 - } 771 - 772 - /** 773 746 * amdgpu_vce_ring_emit_ib - execute indirect buffer 774 747 * 775 748 * @ring: engine to use ··· 793 814 unsigned i; 794 815 int r; 795 816 796 - r = amdgpu_ring_lock(ring, 16); 817 + r = amdgpu_ring_alloc(ring, 16); 797 818 if (r) { 798 819 DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n", 799 820 ring->idx, r); 800 821 return r; 801 822 } 802 823 amdgpu_ring_write(ring, VCE_CMD_END); 803 - amdgpu_ring_unlock_commit(ring); 824 + amdgpu_ring_commit(ring); 804 825 805 826 for (i = 0; i < adev->usec_timeout; i++) { 806 827 if (amdgpu_ring_get_rptr(ring) != rptr) ··· 841 862 goto error; 842 863 } 843 864 844 - r = amdgpu_vce_get_destroy_msg(ring, 1, &fence); 865 + r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence); 845 866 if (r) { 846 867 DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); 847 868 goto error;
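Besides repeating the UVD-style entity and direct/scheduled submission rework for VCE, this file also drops its semaphore emit callback entirely and switches the ring test to the renamed ring helpers. A minimal sketch of the renamed pattern, as the test above now uses it (error path shortened):

r = amdgpu_ring_alloc(ring, 16);        /* was amdgpu_ring_lock(ring, 16) */
if (r)
        return r;
amdgpu_ring_write(ring, VCE_CMD_END);
amdgpu_ring_commit(ring);               /* was amdgpu_ring_unlock_commit(ring) */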
+1 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
··· 31 31 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, 32 32 struct fence **fence); 33 33 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 34 - struct fence **fence); 34 + bool direct, struct fence **fence); 35 35 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); 36 36 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); 37 - bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring, 38 - struct amdgpu_semaphore *semaphore, 39 - bool emit_wait); 40 37 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); 41 38 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, 42 39 unsigned flags);
+324 -287
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 55 55 * 56 56 * @adev: amdgpu_device pointer 57 57 * 58 - * Calculate the number of page directory entries (cayman+). 58 + * Calculate the number of page directory entries. 59 59 */ 60 60 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev) 61 61 { ··· 67 67 * 68 68 * @adev: amdgpu_device pointer 69 69 * 70 - * Calculate the size of the page directory in bytes (cayman+). 70 + * Calculate the size of the page directory in bytes. 71 71 */ 72 72 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) 73 73 { ··· 89 89 struct amdgpu_bo_list_entry *entry) 90 90 { 91 91 entry->robj = vm->page_directory; 92 - entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; 93 - entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; 94 92 entry->priority = 0; 95 93 entry->tv.bo = &vm->page_directory->tbo; 96 94 entry->tv.shared = true; ··· 152 154 * @vm: vm to allocate id for 153 155 * @ring: ring we want to submit job to 154 156 * @sync: sync object where we add dependencies 157 + * @fence: fence protecting ID from reuse 155 158 * 156 159 * Allocate an id for the vm, adding fences to the sync obj as necessary. 157 - * 158 - * Global mutex must be locked! 159 160 */ 160 161 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 161 - struct amdgpu_sync *sync) 162 + struct amdgpu_sync *sync, struct fence *fence) 162 163 { 163 - struct fence *best[AMDGPU_MAX_RINGS] = {}; 164 164 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; 165 165 struct amdgpu_device *adev = ring->adev; 166 + struct amdgpu_vm_manager_id *id; 167 + int r; 166 168 167 - unsigned choices[2] = {}; 168 - unsigned i; 169 + mutex_lock(&adev->vm_manager.lock); 169 170 170 171 /* check if the id is still valid */ 171 172 if (vm_id->id) { 172 - unsigned id = vm_id->id; 173 173 long owner; 174 174 175 - owner = atomic_long_read(&adev->vm_manager.ids[id].owner); 175 + id = &adev->vm_manager.ids[vm_id->id]; 176 + owner = atomic_long_read(&id->owner); 176 177 if (owner == (long)vm) { 177 - trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); 178 + list_move_tail(&id->list, &adev->vm_manager.ids_lru); 179 + trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx); 180 + 181 + fence_put(id->active); 182 + id->active = fence_get(fence); 183 + 184 + mutex_unlock(&adev->vm_manager.lock); 178 185 return 0; 179 186 } 180 187 } ··· 187 184 /* we definately need to flush */ 188 185 vm_id->pd_gpu_addr = ~0ll; 189 186 190 - /* skip over VMID 0, since it is the system VM */ 191 - for (i = 1; i < adev->vm_manager.nvm; ++i) { 192 - struct fence *fence = adev->vm_manager.ids[i].active; 193 - struct amdgpu_ring *fring; 187 + id = list_first_entry(&adev->vm_manager.ids_lru, 188 + struct amdgpu_vm_manager_id, 189 + list); 190 + list_move_tail(&id->list, &adev->vm_manager.ids_lru); 191 + atomic_long_set(&id->owner, (long)vm); 194 192 195 - if (fence == NULL) { 196 - /* found a free one */ 197 - vm_id->id = i; 198 - trace_amdgpu_vm_grab_id(i, ring->idx); 199 - return 0; 200 - } 193 + vm_id->id = id - adev->vm_manager.ids; 194 + trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx); 201 195 202 - fring = amdgpu_ring_from_fence(fence); 203 - if (best[fring->idx] == NULL || 204 - fence_is_later(best[fring->idx], fence)) { 205 - best[fring->idx] = fence; 206 - choices[fring == ring ? 
0 : 1] = i; 207 - } 196 + r = amdgpu_sync_fence(ring->adev, sync, id->active); 197 + 198 + if (!r) { 199 + fence_put(id->active); 200 + id->active = fence_get(fence); 208 201 } 209 202 210 - for (i = 0; i < 2; ++i) { 211 - if (choices[i]) { 212 - struct fence *fence; 213 - 214 - fence = adev->vm_manager.ids[choices[i]].active; 215 - vm_id->id = choices[i]; 216 - 217 - trace_amdgpu_vm_grab_id(choices[i], ring->idx); 218 - return amdgpu_sync_fence(ring->adev, sync, fence); 219 - } 220 - } 221 - 222 - /* should never happen */ 223 - BUG(); 224 - return -EINVAL; 203 + mutex_unlock(&adev->vm_manager.lock); 204 + return r; 225 205 } 226 206 227 207 /** ··· 214 228 * @vm: vm we want to flush 215 229 * @updates: last vm update that we waited for 216 230 * 217 - * Flush the vm (cayman+). 218 - * 219 - * Global and local mutex must be locked! 231 + * Flush the vm. 220 232 */ 221 233 void amdgpu_vm_flush(struct amdgpu_ring *ring, 222 234 struct amdgpu_vm *vm, ··· 244 260 } 245 261 246 262 /** 247 - * amdgpu_vm_fence - remember fence for vm 248 - * 249 - * @adev: amdgpu_device pointer 250 - * @vm: vm we want to fence 251 - * @fence: fence to remember 252 - * 253 - * Fence the vm (cayman+). 254 - * Set the fence used to protect page table and id. 255 - * 256 - * Global and local mutex must be locked! 257 - */ 258 - void amdgpu_vm_fence(struct amdgpu_device *adev, 259 - struct amdgpu_vm *vm, 260 - struct fence *fence) 261 - { 262 - struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence); 263 - unsigned vm_id = vm->ids[ring->idx].id; 264 - 265 - fence_put(adev->vm_manager.ids[vm_id].active); 266 - adev->vm_manager.ids[vm_id].active = fence_get(fence); 267 - atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm); 268 - } 269 - 270 - /** 271 263 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo 272 264 * 273 265 * @vm: requested vm 274 266 * @bo: requested buffer object 275 267 * 276 - * Find @bo inside the requested vm (cayman+). 268 + * Find @bo inside the requested vm. 277 269 * Search inside the @bos vm list for the requested vm 278 270 * Returns the found bo_va or NULL if none is found 279 271 * ··· 272 312 * amdgpu_vm_update_pages - helper to call the right asic function 273 313 * 274 314 * @adev: amdgpu_device pointer 315 + * @gtt: GART instance to use for mapping 316 + * @gtt_flags: GTT hw access flags 275 317 * @ib: indirect buffer to fill with commands 276 318 * @pe: addr of the page entry 277 319 * @addr: dst addr to write into pe 278 320 * @count: number of page entries to update 279 321 * @incr: increase next addr by incr bytes 280 322 * @flags: hw access flags 281 - * @gtt_flags: GTT hw access flags 282 323 * 283 324 * Traces the parameters and calls the right asic functions 284 325 * to setup the page table using the DMA. 
285 326 */ 286 327 static void amdgpu_vm_update_pages(struct amdgpu_device *adev, 328 + struct amdgpu_gart *gtt, 329 + uint32_t gtt_flags, 287 330 struct amdgpu_ib *ib, 288 331 uint64_t pe, uint64_t addr, 289 332 unsigned count, uint32_t incr, 290 - uint32_t flags, uint32_t gtt_flags) 333 + uint32_t flags) 291 334 { 292 335 trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); 293 336 294 - if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) { 295 - uint64_t src = adev->gart.table_addr + (addr >> 12) * 8; 337 + if ((gtt == &adev->gart) && (flags == gtt_flags)) { 338 + uint64_t src = gtt->table_addr + (addr >> 12) * 8; 296 339 amdgpu_vm_copy_pte(adev, ib, pe, src, count); 297 340 298 - } else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) { 299 - amdgpu_vm_write_pte(adev, ib, pe, addr, 300 - count, incr, flags); 341 + } else if (gtt) { 342 + dma_addr_t *pages_addr = gtt->pages_addr; 343 + amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr, 344 + count, incr, flags); 345 + 346 + } else if (count < 3) { 347 + amdgpu_vm_write_pte(adev, ib, NULL, pe, addr, 348 + count, incr, flags); 301 349 302 350 } else { 303 351 amdgpu_vm_set_pte_pde(adev, ib, pe, addr, 304 352 count, incr, flags); 305 353 } 306 - } 307 - 308 - int amdgpu_vm_free_job(struct amdgpu_job *job) 309 - { 310 - int i; 311 - for (i = 0; i < job->num_ibs; i++) 312 - amdgpu_ib_free(job->adev, &job->ibs[i]); 313 - kfree(job->ibs); 314 - return 0; 315 354 } 316 355 317 356 /** ··· 322 363 * need to reserve bo first before calling it. 323 364 */ 324 365 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, 366 + struct amdgpu_vm *vm, 325 367 struct amdgpu_bo *bo) 326 368 { 327 - struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; 369 + struct amdgpu_ring *ring; 328 370 struct fence *fence = NULL; 329 - struct amdgpu_ib *ib; 371 + struct amdgpu_job *job; 330 372 unsigned entries; 331 373 uint64_t addr; 332 374 int r; 375 + 376 + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); 333 377 334 378 r = reservation_object_reserve_shared(bo->tbo.resv); 335 379 if (r) ··· 345 383 addr = amdgpu_bo_gpu_offset(bo); 346 384 entries = amdgpu_bo_size(bo) / 8; 347 385 348 - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); 349 - if (!ib) 386 + r = amdgpu_job_alloc_with_ib(adev, 64, &job); 387 + if (r) 350 388 goto error; 351 389 352 - r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); 390 + amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries, 391 + 0, 0); 392 + amdgpu_ring_pad_ib(ring, &job->ibs[0]); 393 + 394 + WARN_ON(job->ibs[0].length_dw > 64); 395 + r = amdgpu_job_submit(job, ring, &vm->entity, 396 + AMDGPU_FENCE_OWNER_VM, &fence); 353 397 if (r) 354 398 goto error_free; 355 399 356 - ib->length_dw = 0; 357 - 358 - amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0); 359 - amdgpu_vm_pad_ib(adev, ib); 360 - WARN_ON(ib->length_dw > 64); 361 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, 362 - &amdgpu_vm_free_job, 363 - AMDGPU_FENCE_OWNER_VM, 364 - &fence); 365 - if (!r) 366 - amdgpu_bo_fence(bo, fence, true); 400 + amdgpu_bo_fence(bo, fence, true); 367 401 fence_put(fence); 368 - if (amdgpu_enable_scheduler) 369 - return 0; 402 + return 0; 370 403 371 404 error_free: 372 - amdgpu_ib_free(adev, ib); 373 - kfree(ib); 405 + amdgpu_job_free(job); 374 406 375 407 error: 376 408 return r; 377 409 } 378 410 379 411 /** 380 - * amdgpu_vm_map_gart - get the physical address of a gart page 412 + * amdgpu_vm_map_gart - Resolve gart mapping of addr 381 413 * 382 - * @adev: 
amdgpu_device pointer 414 + * @pages_addr: optional DMA address to use for lookup 383 415 * @addr: the unmapped addr 384 416 * 385 417 * Look up the physical address of the page that the pte resolves 386 - * to (cayman+). 387 - * Returns the physical address of the page. 418 + * to and return the pointer for the page table entry. 388 419 */ 389 - uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr) 420 + uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) 390 421 { 391 422 uint64_t result; 392 423 393 - /* page table offset */ 394 - result = adev->gart.pages_addr[addr >> PAGE_SHIFT]; 424 + if (pages_addr) { 425 + /* page table offset */ 426 + result = pages_addr[addr >> PAGE_SHIFT]; 395 427 396 - /* in case cpu page size != gpu page size*/ 397 - result |= addr & (~PAGE_MASK); 428 + /* in case cpu page size != gpu page size*/ 429 + result |= addr & (~PAGE_MASK); 430 + 431 + } else { 432 + /* No mapping required */ 433 + result = addr; 434 + } 435 + 436 + result &= 0xFFFFFFFFFFFFF000ULL; 398 437 399 438 return result; 400 439 } ··· 409 446 * @end: end of GPU address range 410 447 * 411 448 * Allocates new page tables if necessary 412 - * and updates the page directory (cayman+). 449 + * and updates the page directory. 413 450 * Returns 0 for success, error for failure. 414 - * 415 - * Global and local mutex must be locked! 416 451 */ 417 452 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, 418 453 struct amdgpu_vm *vm) 419 454 { 420 - struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; 455 + struct amdgpu_ring *ring; 421 456 struct amdgpu_bo *pd = vm->page_directory; 422 457 uint64_t pd_addr = amdgpu_bo_gpu_offset(pd); 423 458 uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; 424 459 uint64_t last_pde = ~0, last_pt = ~0; 425 460 unsigned count = 0, pt_idx, ndw; 461 + struct amdgpu_job *job; 426 462 struct amdgpu_ib *ib; 427 463 struct fence *fence = NULL; 428 464 429 465 int r; 466 + 467 + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); 430 468 431 469 /* padding, etc. 
*/ 432 470 ndw = 64; ··· 435 471 /* assume the worst case */ 436 472 ndw += vm->max_pde_used * 6; 437 473 438 - /* update too big for an IB */ 439 - if (ndw > 0xfffff) 440 - return -ENOMEM; 441 - 442 - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); 443 - if (!ib) 444 - return -ENOMEM; 445 - 446 - r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); 447 - if (r) { 448 - kfree(ib); 474 + r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); 475 + if (r) 449 476 return r; 450 - } 451 - ib->length_dw = 0; 477 + 478 + ib = &job->ibs[0]; 452 479 453 480 /* walk over the address space and update the page directory */ 454 481 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { ··· 459 504 ((last_pt + incr * count) != pt)) { 460 505 461 506 if (count) { 462 - amdgpu_vm_update_pages(adev, ib, last_pde, 463 - last_pt, count, incr, 464 - AMDGPU_PTE_VALID, 0); 507 + amdgpu_vm_update_pages(adev, NULL, 0, ib, 508 + last_pde, last_pt, 509 + count, incr, 510 + AMDGPU_PTE_VALID); 465 511 } 466 512 467 513 count = 1; ··· 474 518 } 475 519 476 520 if (count) 477 - amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count, 478 - incr, AMDGPU_PTE_VALID, 0); 521 + amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt, 522 + count, incr, AMDGPU_PTE_VALID); 479 523 480 524 if (ib->length_dw != 0) { 481 - amdgpu_vm_pad_ib(adev, ib); 482 - amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); 525 + amdgpu_ring_pad_ib(ring, ib); 526 + amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, 527 + AMDGPU_FENCE_OWNER_VM); 483 528 WARN_ON(ib->length_dw > ndw); 484 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, 485 - &amdgpu_vm_free_job, 486 - AMDGPU_FENCE_OWNER_VM, 487 - &fence); 529 + r = amdgpu_job_submit(job, ring, &vm->entity, 530 + AMDGPU_FENCE_OWNER_VM, &fence); 488 531 if (r) 489 532 goto error_free; 490 533 ··· 491 536 fence_put(vm->page_directory_fence); 492 537 vm->page_directory_fence = fence_get(fence); 493 538 fence_put(fence); 494 - } 495 539 496 - if (!amdgpu_enable_scheduler || ib->length_dw == 0) { 497 - amdgpu_ib_free(adev, ib); 498 - kfree(ib); 540 + } else { 541 + amdgpu_job_free(job); 499 542 } 500 543 501 544 return 0; 502 545 503 546 error_free: 504 - amdgpu_ib_free(adev, ib); 505 - kfree(ib); 547 + amdgpu_job_free(job); 506 548 return r; 507 549 } 508 550 ··· 507 555 * amdgpu_vm_frag_ptes - add fragment information to PTEs 508 556 * 509 557 * @adev: amdgpu_device pointer 558 + * @gtt: GART instance to use for mapping 559 + * @gtt_flags: GTT hw mapping flags 510 560 * @ib: IB for the update 511 561 * @pe_start: first PTE to handle 512 562 * @pe_end: last PTE to handle 513 563 * @addr: addr those PTEs should point to 514 564 * @flags: hw mapping flags 515 - * @gtt_flags: GTT hw mapping flags 516 - * 517 - * Global and local mutex must be locked! 
518 565 */ 519 566 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, 567 + struct amdgpu_gart *gtt, 568 + uint32_t gtt_flags, 520 569 struct amdgpu_ib *ib, 521 570 uint64_t pe_start, uint64_t pe_end, 522 - uint64_t addr, uint32_t flags, 523 - uint32_t gtt_flags) 571 + uint64_t addr, uint32_t flags) 524 572 { 525 573 /** 526 574 * The MC L1 TLB supports variable sized pages, based on a fragment ··· 550 598 551 599 unsigned count; 552 600 601 + /* Abort early if there isn't anything to do */ 602 + if (pe_start == pe_end) 603 + return; 604 + 553 605 /* system pages are non continuously */ 554 - if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) || 555 - (frag_start >= frag_end)) { 606 + if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { 556 607 557 608 count = (pe_end - pe_start) / 8; 558 - amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, 559 - AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); 609 + amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start, 610 + addr, count, AMDGPU_GPU_PAGE_SIZE, 611 + flags); 560 612 return; 561 613 } 562 614 563 615 /* handle the 4K area at the beginning */ 564 616 if (pe_start != frag_start) { 565 617 count = (frag_start - pe_start) / 8; 566 - amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, 567 - AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); 618 + amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr, 619 + count, AMDGPU_GPU_PAGE_SIZE, flags); 568 620 addr += AMDGPU_GPU_PAGE_SIZE * count; 569 621 } 570 622 571 623 /* handle the area in the middle */ 572 624 count = (frag_end - frag_start) / 8; 573 - amdgpu_vm_update_pages(adev, ib, frag_start, addr, count, 574 - AMDGPU_GPU_PAGE_SIZE, flags | frag_flags, 575 - gtt_flags); 625 + amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count, 626 + AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); 576 627 577 628 /* handle the 4K area at the end */ 578 629 if (frag_end != pe_end) { 579 630 addr += AMDGPU_GPU_PAGE_SIZE * count; 580 631 count = (pe_end - frag_end) / 8; 581 - amdgpu_vm_update_pages(adev, ib, frag_end, addr, count, 582 - AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); 632 + amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr, 633 + count, AMDGPU_GPU_PAGE_SIZE, flags); 583 634 } 584 635 } 585 636 ··· 590 635 * amdgpu_vm_update_ptes - make sure that page tables are valid 591 636 * 592 637 * @adev: amdgpu_device pointer 638 + * @gtt: GART instance to use for mapping 639 + * @gtt_flags: GTT hw mapping flags 593 640 * @vm: requested vm 594 641 * @start: start of GPU address range 595 642 * @end: end of GPU address range 596 643 * @dst: destination address to map to 597 644 * @flags: mapping flags 598 645 * 599 - * Update the page tables in the range @start - @end (cayman+). 600 - * 601 - * Global and local mutex must be locked! 646 + * Update the page tables in the range @start - @end. 
602 647 */ 603 - static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, 604 - struct amdgpu_vm *vm, 605 - struct amdgpu_ib *ib, 606 - uint64_t start, uint64_t end, 607 - uint64_t dst, uint32_t flags, 608 - uint32_t gtt_flags) 648 + static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, 649 + struct amdgpu_gart *gtt, 650 + uint32_t gtt_flags, 651 + struct amdgpu_vm *vm, 652 + struct amdgpu_ib *ib, 653 + uint64_t start, uint64_t end, 654 + uint64_t dst, uint32_t flags) 609 655 { 610 - uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; 611 - uint64_t last_pte = ~0, last_dst = ~0; 612 - void *owner = AMDGPU_FENCE_OWNER_VM; 613 - unsigned count = 0; 614 - uint64_t addr; 656 + const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; 615 657 616 - /* sync to everything on unmapping */ 617 - if (!(flags & AMDGPU_PTE_VALID)) 618 - owner = AMDGPU_FENCE_OWNER_UNDEFINED; 658 + uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0; 659 + uint64_t addr; 619 660 620 661 /* walk over the address space and update the page tables */ 621 662 for (addr = start; addr < end; ) { 622 663 uint64_t pt_idx = addr >> amdgpu_vm_block_size; 623 664 struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj; 624 665 unsigned nptes; 625 - uint64_t pte; 626 - int r; 627 - 628 - amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner); 629 - r = reservation_object_reserve_shared(pt->tbo.resv); 630 - if (r) 631 - return r; 666 + uint64_t pe_start; 632 667 633 668 if ((addr & ~mask) == (end & ~mask)) 634 669 nptes = end - addr; 635 670 else 636 671 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); 637 672 638 - pte = amdgpu_bo_gpu_offset(pt); 639 - pte += (addr & mask) * 8; 673 + pe_start = amdgpu_bo_gpu_offset(pt); 674 + pe_start += (addr & mask) * 8; 640 675 641 - if ((last_pte + 8 * count) != pte) { 676 + if (last_pe_end != pe_start) { 642 677 643 - if (count) { 644 - amdgpu_vm_frag_ptes(adev, ib, last_pte, 645 - last_pte + 8 * count, 646 - last_dst, flags, 647 - gtt_flags); 648 - } 678 + amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib, 679 + last_pe_start, last_pe_end, 680 + last_dst, flags); 649 681 650 - count = nptes; 651 - last_pte = pte; 682 + last_pe_start = pe_start; 683 + last_pe_end = pe_start + 8 * nptes; 652 684 last_dst = dst; 653 685 } else { 654 - count += nptes; 686 + last_pe_end += 8 * nptes; 655 687 } 656 688 657 689 addr += nptes; 658 690 dst += nptes * AMDGPU_GPU_PAGE_SIZE; 659 691 } 660 692 661 - if (count) { 662 - amdgpu_vm_frag_ptes(adev, ib, last_pte, 663 - last_pte + 8 * count, 664 - last_dst, flags, gtt_flags); 665 - } 666 - 667 - return 0; 693 + amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib, 694 + last_pe_start, last_pe_end, 695 + last_dst, flags); 668 696 } 669 697 670 698 /** 671 699 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table 672 700 * 673 701 * @adev: amdgpu_device pointer 674 - * @vm: requested vm 675 - * @mapping: mapped range and flags to use for the update 676 - * @addr: addr to set the area to 702 + * @gtt: GART instance to use for mapping 677 703 * @gtt_flags: flags as they are used for GTT 704 + * @vm: requested vm 705 + * @start: start of mapped range 706 + * @last: last mapped entry 707 + * @flags: flags for the entries 708 + * @addr: addr to set the area to 678 709 * @fence: optional resulting fence 679 710 * 680 - * Fill in the page table entries for @mapping. 711 + * Fill in the page table entries between @start and @last. 681 712 * Returns 0 for success, -EINVAL for failure. 682 - * 683 - * Object have to be reserved and mutex must be locked! 
684 713 */ 685 714 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, 715 + struct amdgpu_gart *gtt, 716 + uint32_t gtt_flags, 686 717 struct amdgpu_vm *vm, 687 - struct amdgpu_bo_va_mapping *mapping, 688 - uint64_t addr, uint32_t gtt_flags, 718 + uint64_t start, uint64_t last, 719 + uint32_t flags, uint64_t addr, 689 720 struct fence **fence) 690 721 { 691 - struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; 722 + struct amdgpu_ring *ring; 723 + void *owner = AMDGPU_FENCE_OWNER_VM; 692 724 unsigned nptes, ncmds, ndw; 693 - uint32_t flags = gtt_flags; 725 + struct amdgpu_job *job; 694 726 struct amdgpu_ib *ib; 695 727 struct fence *f = NULL; 696 728 int r; 697 729 698 - /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here 699 - * but in case of something, we filter the flags in first place 700 - */ 701 - if (!(mapping->flags & AMDGPU_PTE_READABLE)) 702 - flags &= ~AMDGPU_PTE_READABLE; 703 - if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) 704 - flags &= ~AMDGPU_PTE_WRITEABLE; 730 + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); 705 731 706 - trace_amdgpu_vm_bo_update(mapping); 732 + /* sync to everything on unmapping */ 733 + if (!(flags & AMDGPU_PTE_VALID)) 734 + owner = AMDGPU_FENCE_OWNER_UNDEFINED; 707 735 708 - nptes = mapping->it.last - mapping->it.start + 1; 736 + nptes = last - start + 1; 709 737 710 738 /* 711 739 * reserve space for one command every (1 << BLOCK_SIZE) ··· 699 761 /* padding, etc. */ 700 762 ndw = 64; 701 763 702 - if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) { 764 + if ((gtt == &adev->gart) && (flags == gtt_flags)) { 703 765 /* only copy commands needed */ 704 766 ndw += ncmds * 7; 705 767 706 - } else if (flags & AMDGPU_PTE_SYSTEM) { 768 + } else if (gtt) { 707 769 /* header for write data commands */ 708 770 ndw += ncmds * 4; 709 771 ··· 718 780 ndw += 2 * 10; 719 781 } 720 782 721 - /* update too big for an IB */ 722 - if (ndw > 0xfffff) 723 - return -ENOMEM; 724 - 725 - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); 726 - if (!ib) 727 - return -ENOMEM; 728 - 729 - r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); 730 - if (r) { 731 - kfree(ib); 783 + r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); 784 + if (r) 732 785 return r; 733 - } 734 786 735 - ib->length_dw = 0; 787 + ib = &job->ibs[0]; 736 788 737 - r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start, 738 - mapping->it.last + 1, addr + mapping->offset, 739 - flags, gtt_flags); 789 + r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, 790 + owner); 791 + if (r) 792 + goto error_free; 740 793 741 - if (r) { 742 - amdgpu_ib_free(adev, ib); 743 - kfree(ib); 744 - return r; 745 - } 794 + r = reservation_object_reserve_shared(vm->page_directory->tbo.resv); 795 + if (r) 796 + goto error_free; 746 797 747 - amdgpu_vm_pad_ib(adev, ib); 798 + amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1, 799 + addr, flags); 800 + 801 + amdgpu_ring_pad_ib(ring, ib); 748 802 WARN_ON(ib->length_dw > ndw); 749 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, 750 - &amdgpu_vm_free_job, 751 - AMDGPU_FENCE_OWNER_VM, 752 - &f); 803 + r = amdgpu_job_submit(job, ring, &vm->entity, 804 + AMDGPU_FENCE_OWNER_VM, &f); 753 805 if (r) 754 806 goto error_free; 755 807 ··· 749 821 *fence = fence_get(f); 750 822 } 751 823 fence_put(f); 752 - if (!amdgpu_enable_scheduler) { 753 - amdgpu_ib_free(adev, ib); 754 - kfree(ib); 755 - } 756 824 return 0; 757 825 758 826 error_free: 759 - amdgpu_ib_free(adev, ib); 760 - 
kfree(ib); 827 + amdgpu_job_free(job); 761 828 return r; 829 + } 830 + 831 + /** 832 + * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks 833 + * 834 + * @adev: amdgpu_device pointer 835 + * @gtt: GART instance to use for mapping 836 + * @vm: requested vm 837 + * @mapping: mapped range and flags to use for the update 838 + * @addr: addr to set the area to 839 + * @gtt_flags: flags as they are used for GTT 840 + * @fence: optional resulting fence 841 + * 842 + * Split the mapping into smaller chunks so that each update fits 843 + * into a SDMA IB. 844 + * Returns 0 for success, -EINVAL for failure. 845 + */ 846 + static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, 847 + struct amdgpu_gart *gtt, 848 + uint32_t gtt_flags, 849 + struct amdgpu_vm *vm, 850 + struct amdgpu_bo_va_mapping *mapping, 851 + uint64_t addr, struct fence **fence) 852 + { 853 + const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE; 854 + 855 + uint64_t start = mapping->it.start; 856 + uint32_t flags = gtt_flags; 857 + int r; 858 + 859 + /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here 860 + * but in case of something, we filter the flags in first place 861 + */ 862 + if (!(mapping->flags & AMDGPU_PTE_READABLE)) 863 + flags &= ~AMDGPU_PTE_READABLE; 864 + if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) 865 + flags &= ~AMDGPU_PTE_WRITEABLE; 866 + 867 + trace_amdgpu_vm_bo_update(mapping); 868 + 869 + addr += mapping->offset; 870 + 871 + if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags))) 872 + return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm, 873 + start, mapping->it.last, 874 + flags, addr, fence); 875 + 876 + while (start != mapping->it.last + 1) { 877 + uint64_t last; 878 + 879 + last = min((uint64_t)mapping->it.last, start + max_size); 880 + r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm, 881 + start, last, flags, addr, 882 + fence); 883 + if (r) 884 + return r; 885 + 886 + start = last + 1; 887 + addr += max_size; 888 + } 889 + 890 + return 0; 762 891 } 763 892 764 893 /** ··· 836 851 { 837 852 struct amdgpu_vm *vm = bo_va->vm; 838 853 struct amdgpu_bo_va_mapping *mapping; 854 + struct amdgpu_gart *gtt = NULL; 839 855 uint32_t flags; 840 856 uint64_t addr; 841 857 int r; 842 858 843 859 if (mem) { 844 860 addr = (u64)mem->start << PAGE_SHIFT; 845 - if (mem->mem_type != TTM_PL_TT) 861 + switch (mem->mem_type) { 862 + case TTM_PL_TT: 863 + gtt = &bo_va->bo->adev->gart; 864 + break; 865 + 866 + case TTM_PL_VRAM: 846 867 addr += adev->vm_manager.vram_base_offset; 868 + break; 869 + 870 + default: 871 + break; 872 + } 847 873 } else { 848 874 addr = 0; 849 875 } ··· 867 871 spin_unlock(&vm->status_lock); 868 872 869 873 list_for_each_entry(mapping, &bo_va->invalids, list) { 870 - r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr, 871 - flags, &bo_va->last_pt_update); 874 + r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr, 875 + &bo_va->last_pt_update); 872 876 if (r) 873 877 return r; 874 878 } ··· 914 918 struct amdgpu_bo_va_mapping, list); 915 919 list_del(&mapping->list); 916 920 spin_unlock(&vm->freed_lock); 917 - r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); 921 + r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping, 922 + 0, NULL); 918 923 kfree(mapping); 919 924 if (r) 920 925 return r; ··· 973 976 * @vm: requested vm 974 977 * @bo: amdgpu buffer object 975 978 * 976 - * Add @bo into the requested vm (cayman+). 979 + * Add @bo into the requested vm. 
977 980 * Add @bo to the list of bos associated with the vm 978 981 * Returns newly added bo_va or NULL for failure 979 982 * ··· 1114 1117 */ 1115 1118 pt->parent = amdgpu_bo_ref(vm->page_directory); 1116 1119 1117 - r = amdgpu_vm_clear_bo(adev, pt); 1120 + r = amdgpu_vm_clear_bo(adev, vm, pt); 1118 1121 if (r) { 1119 1122 amdgpu_bo_unref(&pt); 1120 1123 goto error_free; 1121 1124 } 1122 1125 1123 1126 entry->robj = pt; 1124 - entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; 1125 - entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; 1126 1127 entry->priority = 0; 1127 1128 entry->tv.bo = &entry->robj->tbo; 1128 1129 entry->tv.shared = true; ··· 1205 1210 * @adev: amdgpu_device pointer 1206 1211 * @bo_va: requested bo_va 1207 1212 * 1208 - * Remove @bo_va->bo from the requested vm (cayman+). 1213 + * Remove @bo_va->bo from the requested vm. 1209 1214 * 1210 1215 * Object have to be reserved! 1211 1216 */ ··· 1250 1255 * @vm: requested vm 1251 1256 * @bo: amdgpu buffer object 1252 1257 * 1253 - * Mark @bo as invalid (cayman+). 1258 + * Mark @bo as invalid. 1254 1259 */ 1255 1260 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 1256 1261 struct amdgpu_bo *bo) ··· 1271 1276 * @adev: amdgpu_device pointer 1272 1277 * @vm: requested vm 1273 1278 * 1274 - * Init @vm fields (cayman+). 1279 + * Init @vm fields. 1275 1280 */ 1276 1281 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) 1277 1282 { 1278 1283 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, 1279 1284 AMDGPU_VM_PTE_COUNT * 8); 1280 1285 unsigned pd_size, pd_entries; 1286 + unsigned ring_instance; 1287 + struct amdgpu_ring *ring; 1288 + struct amd_sched_rq *rq; 1281 1289 int i, r; 1282 1290 1283 1291 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { ··· 1304 1306 return -ENOMEM; 1305 1307 } 1306 1308 1309 + /* create scheduler entity for page table updates */ 1310 + 1311 + ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring); 1312 + ring_instance %= adev->vm_manager.vm_pte_num_rings; 1313 + ring = adev->vm_manager.vm_pte_rings[ring_instance]; 1314 + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; 1315 + r = amd_sched_entity_init(&ring->sched, &vm->entity, 1316 + rq, amdgpu_sched_jobs); 1317 + if (r) 1318 + return r; 1319 + 1307 1320 vm->page_directory_fence = NULL; 1308 1321 1309 1322 r = amdgpu_bo_create(adev, pd_size, align, true, ··· 1322 1313 AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 1323 1314 NULL, NULL, &vm->page_directory); 1324 1315 if (r) 1325 - return r; 1316 + goto error_free_sched_entity; 1317 + 1326 1318 r = amdgpu_bo_reserve(vm->page_directory, false); 1327 - if (r) { 1328 - amdgpu_bo_unref(&vm->page_directory); 1329 - vm->page_directory = NULL; 1330 - return r; 1331 - } 1332 - r = amdgpu_vm_clear_bo(adev, vm->page_directory); 1319 + if (r) 1320 + goto error_free_page_directory; 1321 + 1322 + r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory); 1333 1323 amdgpu_bo_unreserve(vm->page_directory); 1334 - if (r) { 1335 - amdgpu_bo_unref(&vm->page_directory); 1336 - vm->page_directory = NULL; 1337 - return r; 1338 - } 1324 + if (r) 1325 + goto error_free_page_directory; 1339 1326 1340 1327 return 0; 1328 + 1329 + error_free_page_directory: 1330 + amdgpu_bo_unref(&vm->page_directory); 1331 + vm->page_directory = NULL; 1332 + 1333 + error_free_sched_entity: 1334 + amd_sched_entity_fini(&ring->sched, &vm->entity); 1335 + 1336 + return r; 1341 1337 } 1342 1338 1343 1339 /** ··· 1351 1337 * @adev: amdgpu_device pointer 1352 1338 * @vm: requested vm 1353 1339 * 1354 - * Tear down @vm (cayman+). 
1340 + * Tear down @vm. 1355 1341 * Unbind the VM and remove all bos from the vm bo list 1356 1342 */ 1357 1343 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 1358 1344 { 1359 1345 struct amdgpu_bo_va_mapping *mapping, *tmp; 1360 1346 int i; 1347 + 1348 + amd_sched_entity_fini(vm->entity.sched, &vm->entity); 1361 1349 1362 1350 if (!RB_EMPTY_ROOT(&vm->va)) { 1363 1351 dev_err(adev->dev, "still active bo inside vm\n"); ··· 1388 1372 fence_put(vm->ids[i].flushed_updates); 1389 1373 } 1390 1374 1375 + } 1376 + 1377 + /** 1378 + * amdgpu_vm_manager_init - init the VM manager 1379 + * 1380 + * @adev: amdgpu_device pointer 1381 + * 1382 + * Initialize the VM manager structures 1383 + */ 1384 + void amdgpu_vm_manager_init(struct amdgpu_device *adev) 1385 + { 1386 + unsigned i; 1387 + 1388 + INIT_LIST_HEAD(&adev->vm_manager.ids_lru); 1389 + 1390 + /* skip over VMID 0, since it is the system VM */ 1391 + for (i = 1; i < adev->vm_manager.num_ids; ++i) 1392 + list_add_tail(&adev->vm_manager.ids[i].list, 1393 + &adev->vm_manager.ids_lru); 1394 + 1395 + atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); 1391 1396 } 1392 1397 1393 1398 /**
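The most visible behavioural change in the amdgpu_vm.c hunks is VMID handling: instead of scanning every ID for a free one and falling back to the "least bad" fence, the manager now keeps all IDs on an LRU list. A VM first checks whether the ID it used last time still names it as owner; otherwise it takes the least-recently-used ID from the front of the list, moves it to the back and claims it (the kernel code additionally syncs against, and then replaces, the previous owner's fence, which is left out below). A self-contained userspace sketch of just that allocation policy, with invented names and a fixed ID count:

#include <stdio.h>

#define NUM_IDS 8                       /* ID 0 stays reserved for the system VM */

struct vmid {
        int id;
        const void *owner;
        struct vmid *prev, *next;
};

static struct vmid ids[NUM_IDS];
static struct vmid lru = { .prev = &lru, .next = &lru };  /* list head, front = least recent */

static void lru_move_tail(struct vmid *e)
{
        e->prev->next = e->next;        /* unlink ... */
        e->next->prev = e->prev;
        e->prev = lru.prev;             /* ... and reinsert at the tail */
        e->next = &lru;
        lru.prev->next = e;
        lru.prev = e;
}

static void init_ids(void)
{
        int i;

        for (i = 1; i < NUM_IDS; i++) {
                ids[i].id = i;
                ids[i].prev = ids[i].next = &ids[i];
                lru_move_tail(&ids[i]);
        }
}

/* @cached: the ID this vm used last time, or 0 if none */
static int grab_id(const void *vm, int cached)
{
        struct vmid *e;

        if (cached && ids[cached].owner == vm) {
                lru_move_tail(&ids[cached]);    /* still ours: no flush, just refresh LRU */
                return cached;
        }
        e = lru.next;                           /* least recently used ID */
        e->owner = vm;                          /* new owner, a full flush would be needed */
        lru_move_tail(e);
        return e->id;
}

int main(void)
{
        int vm_a, vm_b;

        init_ids();
        printf("A gets id %d\n", grab_id(&vm_a, 0));
        printf("B gets id %d\n", grab_id(&vm_b, 0));
        printf("A reuses id %d\n", grab_id(&vm_a, 1));
        return 0;
}

Keeping the list in LRU order means the ID that gets recycled is always the one that has gone unused longest.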
+4 -304
drivers/gpu/drm/amd/amdgpu/cik.c
··· 1059 1059 return -EINVAL; 1060 1060 } 1061 1061 1062 - static void cik_print_gpu_status_regs(struct amdgpu_device *adev) 1063 - { 1064 - dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", 1065 - RREG32(mmGRBM_STATUS)); 1066 - dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", 1067 - RREG32(mmGRBM_STATUS2)); 1068 - dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", 1069 - RREG32(mmGRBM_STATUS_SE0)); 1070 - dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", 1071 - RREG32(mmGRBM_STATUS_SE1)); 1072 - dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", 1073 - RREG32(mmGRBM_STATUS_SE2)); 1074 - dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", 1075 - RREG32(mmGRBM_STATUS_SE3)); 1076 - dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", 1077 - RREG32(mmSRBM_STATUS)); 1078 - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", 1079 - RREG32(mmSRBM_STATUS2)); 1080 - dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n", 1081 - RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET)); 1082 - dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n", 1083 - RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET)); 1084 - dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); 1085 - dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", 1086 - RREG32(mmCP_STALLED_STAT1)); 1087 - dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", 1088 - RREG32(mmCP_STALLED_STAT2)); 1089 - dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", 1090 - RREG32(mmCP_STALLED_STAT3)); 1091 - dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", 1092 - RREG32(mmCP_CPF_BUSY_STAT)); 1093 - dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", 1094 - RREG32(mmCP_CPF_STALLED_STAT1)); 1095 - dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); 1096 - dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); 1097 - dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", 1098 - RREG32(mmCP_CPC_STALLED_STAT1)); 1099 - dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); 1100 - } 1101 - 1102 - /** 1103 - * cik_gpu_check_soft_reset - check which blocks are busy 1104 - * 1105 - * @adev: amdgpu_device pointer 1106 - * 1107 - * Check which blocks are busy and return the relevant reset 1108 - * mask to be used by cik_gpu_soft_reset(). 1109 - * Returns a mask of the blocks to be reset. 
1110 - */ 1111 - u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev) 1112 - { 1113 - u32 reset_mask = 0; 1114 - u32 tmp; 1115 - 1116 - /* GRBM_STATUS */ 1117 - tmp = RREG32(mmGRBM_STATUS); 1118 - if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | 1119 - GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | 1120 - GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | 1121 - GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | 1122 - GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | 1123 - GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) 1124 - reset_mask |= AMDGPU_RESET_GFX; 1125 - 1126 - if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) 1127 - reset_mask |= AMDGPU_RESET_CP; 1128 - 1129 - /* GRBM_STATUS2 */ 1130 - tmp = RREG32(mmGRBM_STATUS2); 1131 - if (tmp & GRBM_STATUS2__RLC_BUSY_MASK) 1132 - reset_mask |= AMDGPU_RESET_RLC; 1133 - 1134 - /* SDMA0_STATUS_REG */ 1135 - tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET); 1136 - if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) 1137 - reset_mask |= AMDGPU_RESET_DMA; 1138 - 1139 - /* SDMA1_STATUS_REG */ 1140 - tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET); 1141 - if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) 1142 - reset_mask |= AMDGPU_RESET_DMA1; 1143 - 1144 - /* SRBM_STATUS2 */ 1145 - tmp = RREG32(mmSRBM_STATUS2); 1146 - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) 1147 - reset_mask |= AMDGPU_RESET_DMA; 1148 - 1149 - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) 1150 - reset_mask |= AMDGPU_RESET_DMA1; 1151 - 1152 - /* SRBM_STATUS */ 1153 - tmp = RREG32(mmSRBM_STATUS); 1154 - 1155 - if (tmp & SRBM_STATUS__IH_BUSY_MASK) 1156 - reset_mask |= AMDGPU_RESET_IH; 1157 - 1158 - if (tmp & SRBM_STATUS__SEM_BUSY_MASK) 1159 - reset_mask |= AMDGPU_RESET_SEM; 1160 - 1161 - if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK) 1162 - reset_mask |= AMDGPU_RESET_GRBM; 1163 - 1164 - if (tmp & SRBM_STATUS__VMC_BUSY_MASK) 1165 - reset_mask |= AMDGPU_RESET_VMC; 1166 - 1167 - if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | 1168 - SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) 1169 - reset_mask |= AMDGPU_RESET_MC; 1170 - 1171 - if (amdgpu_display_is_display_hung(adev)) 1172 - reset_mask |= AMDGPU_RESET_DISPLAY; 1173 - 1174 - /* Skip MC reset as it's mostly likely not hung, just busy */ 1175 - if (reset_mask & AMDGPU_RESET_MC) { 1176 - DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); 1177 - reset_mask &= ~AMDGPU_RESET_MC; 1178 - } 1179 - 1180 - return reset_mask; 1181 - } 1182 - 1183 - /** 1184 - * cik_gpu_soft_reset - soft reset GPU 1185 - * 1186 - * @adev: amdgpu_device pointer 1187 - * @reset_mask: mask of which blocks to reset 1188 - * 1189 - * Soft reset the blocks specified in @reset_mask. 
1190 - */ 1191 - static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) 1192 - { 1193 - struct amdgpu_mode_mc_save save; 1194 - u32 grbm_soft_reset = 0, srbm_soft_reset = 0; 1195 - u32 tmp; 1196 - 1197 - if (reset_mask == 0) 1198 - return; 1199 - 1200 - dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask); 1201 - 1202 - cik_print_gpu_status_regs(adev); 1203 - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1204 - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); 1205 - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1206 - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); 1207 - 1208 - /* disable CG/PG */ 1209 - 1210 - /* stop the rlc */ 1211 - gfx_v7_0_rlc_stop(adev); 1212 - 1213 - /* Disable GFX parsing/prefetching */ 1214 - WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); 1215 - 1216 - /* Disable MEC parsing/prefetching */ 1217 - WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); 1218 - 1219 - if (reset_mask & AMDGPU_RESET_DMA) { 1220 - /* sdma0 */ 1221 - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); 1222 - tmp |= SDMA0_F32_CNTL__HALT_MASK; 1223 - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); 1224 - } 1225 - if (reset_mask & AMDGPU_RESET_DMA1) { 1226 - /* sdma1 */ 1227 - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); 1228 - tmp |= SDMA0_F32_CNTL__HALT_MASK; 1229 - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); 1230 - } 1231 - 1232 - gmc_v7_0_mc_stop(adev, &save); 1233 - if (amdgpu_asic_wait_for_mc_idle(adev)) { 1234 - dev_warn(adev->dev, "Wait for MC idle timedout !\n"); 1235 - } 1236 - 1237 - if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) 1238 - grbm_soft_reset = GRBM_SOFT_RESET__SOFT_RESET_CP_MASK | 1239 - GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK; 1240 - 1241 - if (reset_mask & AMDGPU_RESET_CP) { 1242 - grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK; 1243 - 1244 - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; 1245 - } 1246 - 1247 - if (reset_mask & AMDGPU_RESET_DMA) 1248 - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; 1249 - 1250 - if (reset_mask & AMDGPU_RESET_DMA1) 1251 - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; 1252 - 1253 - if (reset_mask & AMDGPU_RESET_DISPLAY) 1254 - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; 1255 - 1256 - if (reset_mask & AMDGPU_RESET_RLC) 1257 - grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; 1258 - 1259 - if (reset_mask & AMDGPU_RESET_SEM) 1260 - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SEM_MASK; 1261 - 1262 - if (reset_mask & AMDGPU_RESET_IH) 1263 - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK; 1264 - 1265 - if (reset_mask & AMDGPU_RESET_GRBM) 1266 - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; 1267 - 1268 - if (reset_mask & AMDGPU_RESET_VMC) 1269 - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK; 1270 - 1271 - if (!(adev->flags & AMD_IS_APU)) { 1272 - if (reset_mask & AMDGPU_RESET_MC) 1273 - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK; 1274 - } 1275 - 1276 - if (grbm_soft_reset) { 1277 - tmp = RREG32(mmGRBM_SOFT_RESET); 1278 - tmp |= grbm_soft_reset; 1279 - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); 1280 - WREG32(mmGRBM_SOFT_RESET, tmp); 1281 - tmp = RREG32(mmGRBM_SOFT_RESET); 1282 - 1283 - udelay(50); 1284 - 1285 - tmp &= ~grbm_soft_reset; 1286 - WREG32(mmGRBM_SOFT_RESET, tmp); 1287 - tmp = RREG32(mmGRBM_SOFT_RESET); 
1288 - } 1289 - 1290 - if (srbm_soft_reset) { 1291 - tmp = RREG32(mmSRBM_SOFT_RESET); 1292 - tmp |= srbm_soft_reset; 1293 - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 1294 - WREG32(mmSRBM_SOFT_RESET, tmp); 1295 - tmp = RREG32(mmSRBM_SOFT_RESET); 1296 - 1297 - udelay(50); 1298 - 1299 - tmp &= ~srbm_soft_reset; 1300 - WREG32(mmSRBM_SOFT_RESET, tmp); 1301 - tmp = RREG32(mmSRBM_SOFT_RESET); 1302 - } 1303 - 1304 - /* Wait a little for things to settle down */ 1305 - udelay(50); 1306 - 1307 - gmc_v7_0_mc_resume(adev, &save); 1308 - udelay(50); 1309 - 1310 - cik_print_gpu_status_regs(adev); 1311 - } 1312 - 1313 1062 struct kv_reset_save_regs { 1314 1063 u32 gmcon_reng_execute; 1315 1064 u32 gmcon_misc; ··· 1154 1405 1155 1406 static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) 1156 1407 { 1157 - struct amdgpu_mode_mc_save save; 1158 1408 struct kv_reset_save_regs kv_save = { 0 }; 1159 - u32 tmp, i; 1409 + u32 i; 1160 1410 1161 1411 dev_info(adev->dev, "GPU pci config reset\n"); 1162 - 1163 - /* disable dpm? */ 1164 - 1165 - /* disable cg/pg */ 1166 - 1167 - /* Disable GFX parsing/prefetching */ 1168 - WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | 1169 - CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); 1170 - 1171 - /* Disable MEC parsing/prefetching */ 1172 - WREG32(mmCP_MEC_CNTL, 1173 - CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); 1174 - 1175 - /* sdma0 */ 1176 - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); 1177 - tmp |= SDMA0_F32_CNTL__HALT_MASK; 1178 - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); 1179 - /* sdma1 */ 1180 - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); 1181 - tmp |= SDMA0_F32_CNTL__HALT_MASK; 1182 - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); 1183 - /* XXX other engines? */ 1184 - 1185 - /* halt the rlc, disable cp internal ints */ 1186 - gfx_v7_0_rlc_stop(adev); 1187 - 1188 - udelay(50); 1189 - 1190 - /* disable mem access */ 1191 - gmc_v7_0_mc_stop(adev, &save); 1192 - if (amdgpu_asic_wait_for_mc_idle(adev)) { 1193 - dev_warn(adev->dev, "Wait for MC idle timed out !\n"); 1194 - } 1195 1412 1196 1413 if (adev->flags & AMD_IS_APU) 1197 1414 kv_save_regs_for_reset(adev, &kv_save); ··· 1204 1489 */ 1205 1490 static int cik_asic_reset(struct amdgpu_device *adev) 1206 1491 { 1207 - u32 reset_mask; 1492 + cik_set_bios_scratch_engine_hung(adev, true); 1208 1493 1209 - reset_mask = amdgpu_cik_gpu_check_soft_reset(adev); 1494 + cik_gpu_pci_config_reset(adev); 1210 1495 1211 - if (reset_mask) 1212 - cik_set_bios_scratch_engine_hung(adev, true); 1213 - 1214 - /* try soft reset */ 1215 - cik_gpu_soft_reset(adev, reset_mask); 1216 - 1217 - reset_mask = amdgpu_cik_gpu_check_soft_reset(adev); 1218 - 1219 - /* try pci config reset */ 1220 - if (reset_mask && amdgpu_hard_reset) 1221 - cik_gpu_pci_config_reset(adev); 1222 - 1223 - reset_mask = amdgpu_cik_gpu_check_soft_reset(adev); 1224 - 1225 - if (!reset_mask) 1226 - cik_set_bios_scratch_engine_hung(adev, false); 1496 + cik_set_bios_scratch_engine_hung(adev, false); 1227 1497 1228 1498 return 0; 1229 1499 }
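With the soft-reset machinery above removed, the CIK reset path becomes very small. Reassembled from the surviving and added lines of this hunk, cik_asic_reset() now reads:

static int cik_asic_reset(struct amdgpu_device *adev)
{
        cik_set_bios_scratch_engine_hung(adev, true);

        cik_gpu_pci_config_reset(adev);

        cik_set_bios_scratch_engine_hung(adev, false);

        return 0;
}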
+22 -45
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
··· 295 295 } 296 296 297 297 /** 298 - * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring 299 - * 300 - * @ring: amdgpu_ring structure holding ring information 301 - * @semaphore: amdgpu semaphore object 302 - * @emit_wait: wait or signal semaphore 303 - * 304 - * Add a DMA semaphore packet to the ring wait on or signal 305 - * other rings (CIK). 306 - */ 307 - static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring, 308 - struct amdgpu_semaphore *semaphore, 309 - bool emit_wait) 310 - { 311 - u64 addr = semaphore->gpu_addr; 312 - u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S; 313 - 314 - amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits)); 315 - amdgpu_ring_write(ring, addr & 0xfffffff8); 316 - amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 317 - 318 - return true; 319 - } 320 - 321 - /** 322 298 * cik_sdma_gfx_stop - stop the gfx async dma engines 323 299 * 324 300 * @adev: amdgpu_device pointer ··· 392 416 } 393 417 cik_srbm_select(adev, 0, 0, 0, 0); 394 418 mutex_unlock(&adev->srbm_mutex); 419 + 420 + WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i], 421 + adev->gfx.config.gb_addr_config & 0x70); 395 422 396 423 WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0); 397 424 WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); ··· 563 584 tmp = 0xCAFEDEAD; 564 585 adev->wb.wb[index] = cpu_to_le32(tmp); 565 586 566 - r = amdgpu_ring_lock(ring, 5); 587 + r = amdgpu_ring_alloc(ring, 5); 567 588 if (r) { 568 589 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); 569 590 amdgpu_wb_free(adev, index); ··· 574 595 amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); 575 596 amdgpu_ring_write(ring, 1); /* number of DWs to follow */ 576 597 amdgpu_ring_write(ring, 0xDEADBEEF); 577 - amdgpu_ring_unlock_commit(ring); 598 + amdgpu_ring_commit(ring); 578 599 579 600 for (i = 0; i < adev->usec_timeout; i++) { 580 601 tmp = le32_to_cpu(adev->wb.wb[index]); ··· 624 645 tmp = 0xCAFEDEAD; 625 646 adev->wb.wb[index] = cpu_to_le32(tmp); 626 647 memset(&ib, 0, sizeof(ib)); 627 - r = amdgpu_ib_get(ring, NULL, 256, &ib); 648 + r = amdgpu_ib_get(adev, NULL, 256, &ib); 628 649 if (r) { 629 650 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 630 651 goto err0; ··· 636 657 ib.ptr[3] = 1; 637 658 ib.ptr[4] = 0xDEADBEEF; 638 659 ib.length_dw = 5; 639 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, 640 - AMDGPU_FENCE_OWNER_UNDEFINED, 641 - &f); 660 + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, 661 + NULL, &f); 642 662 if (r) 643 663 goto err1; 644 664 ··· 716 738 * Update PTEs by writing them manually using sDMA (CIK). 
717 739 */ 718 740 static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, 719 - uint64_t pe, 741 + const dma_addr_t *pages_addr, uint64_t pe, 720 742 uint64_t addr, unsigned count, 721 743 uint32_t incr, uint32_t flags) 722 744 { ··· 735 757 ib->ptr[ib->length_dw++] = upper_32_bits(pe); 736 758 ib->ptr[ib->length_dw++] = ndw; 737 759 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 738 - if (flags & AMDGPU_PTE_SYSTEM) { 739 - value = amdgpu_vm_map_gart(ib->ring->adev, addr); 740 - value &= 0xFFFFFFFFFFFFF000ULL; 741 - } else if (flags & AMDGPU_PTE_VALID) { 742 - value = addr; 743 - } else { 744 - value = 0; 745 - } 760 + value = amdgpu_vm_map_gart(pages_addr, addr); 746 761 addr += incr; 747 762 value |= flags; 748 763 ib->ptr[ib->length_dw++] = value; ··· 798 827 * @ib: indirect buffer to fill with padding 799 828 * 800 829 */ 801 - static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib) 830 + static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) 802 831 { 803 - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring); 832 + struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); 804 833 u32 pad_count; 805 834 int i; 806 835 ··· 1068 1097 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); 1069 1098 dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", 1070 1099 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); 1100 + dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n", 1101 + i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i])); 1071 1102 mutex_lock(&adev->srbm_mutex); 1072 1103 for (j = 0; j < 16; j++) { 1073 1104 cik_srbm_select(adev, 0, 0, 0, j); ··· 1270 1297 .parse_cs = NULL, 1271 1298 .emit_ib = cik_sdma_ring_emit_ib, 1272 1299 .emit_fence = cik_sdma_ring_emit_fence, 1273 - .emit_semaphore = cik_sdma_ring_emit_semaphore, 1274 1300 .emit_vm_flush = cik_sdma_ring_emit_vm_flush, 1275 1301 .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush, 1276 1302 .test_ring = cik_sdma_ring_test_ring, 1277 1303 .test_ib = cik_sdma_ring_test_ib, 1278 1304 .insert_nop = cik_sdma_ring_insert_nop, 1305 + .pad_ib = cik_sdma_ring_pad_ib, 1279 1306 }; 1280 1307 1281 1308 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) ··· 1372 1399 .copy_pte = cik_sdma_vm_copy_pte, 1373 1400 .write_pte = cik_sdma_vm_write_pte, 1374 1401 .set_pte_pde = cik_sdma_vm_set_pte_pde, 1375 - .pad_ib = cik_sdma_vm_pad_ib, 1376 1402 }; 1377 1403 1378 1404 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) 1379 1405 { 1406 + unsigned i; 1407 + 1380 1408 if (adev->vm_manager.vm_pte_funcs == NULL) { 1381 1409 adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; 1382 - adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring; 1383 - adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; 1410 + for (i = 0; i < adev->sdma.num_instances; i++) 1411 + adev->vm_manager.vm_pte_rings[i] = 1412 + &adev->sdma.instance[i].ring; 1413 + 1414 + adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; 1384 1415 } 1385 1416 }
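cik_sdma_vm_write_pte() no longer inspects the SYSTEM/VALID flag bits itself; it hands the optional pages_addr array straight to amdgpu_vm_map_gart(), whose new form (see the amdgpu_vm.c hunk above) makes the GART-lookup-or-identity decision and applies the final alignment. A standalone sketch of that lookup, using plain uint64_t in place of dma_addr_t and an invented test table:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

/* pages_addr == NULL means the address needs no GART lookup (e.g. VRAM). */
static uint64_t map_gart(const uint64_t *pages_addr, uint64_t addr)
{
        uint64_t result;

        if (pages_addr) {
                result = pages_addr[addr >> PAGE_SHIFT];  /* page table offset */
                result |= addr & ~PAGE_MASK;              /* cpu page size != gpu page size */
        } else {
                result = addr;                            /* no mapping required */
        }

        /* the PTE address field is 4K aligned; the caller ORs the flag bits in */
        return result & 0xFFFFFFFFFFFFF000ULL;
}

int main(void)
{
        uint64_t dma_pages[2] = { 0x80000000ULL, 0xc0000000ULL };

        printf("%#llx\n", (unsigned long long)map_gart(dma_pages, 0x1abc));
        printf("%#llx\n", (unsigned long long)map_gart(NULL, 0xdead0000ULL));
        return 0;
}

The companion change registering every SDMA instance in vm_pte_rings[] pairs with the round-robin entity placement added to amdgpu_vm_init() in the amdgpu_vm.c hunk above.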
+1 -3
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 2670 2670 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2671 2671 2672 2672 drm_crtc_cleanup(crtc); 2673 - destroy_workqueue(amdgpu_crtc->pflip_queue); 2674 2673 kfree(amdgpu_crtc); 2675 2674 } 2676 2675 ··· 2889 2890 2890 2891 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); 2891 2892 amdgpu_crtc->crtc_id = index; 2892 - amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); 2893 2893 adev->mode_info.crtcs[index] = amdgpu_crtc; 2894 2894 2895 2895 amdgpu_crtc->max_cursor_width = 128; ··· 3364 3366 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3365 3367 3366 3368 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3367 - queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); 3369 + schedule_work(&works->unpin_work); 3368 3370 3369 3371 return 0; 3370 3372 }
+1 -3
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 2661 2661 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2662 2662 2663 2663 drm_crtc_cleanup(crtc); 2664 - destroy_workqueue(amdgpu_crtc->pflip_queue); 2665 2664 kfree(amdgpu_crtc); 2666 2665 } 2667 2666 ··· 2880 2881 2881 2882 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); 2882 2883 amdgpu_crtc->crtc_id = index; 2883 - amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); 2884 2884 adev->mode_info.crtcs[index] = amdgpu_crtc; 2885 2885 2886 2886 amdgpu_crtc->max_cursor_width = 128; ··· 3359 3361 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3360 3362 3361 3363 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3362 - queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); 3364 + schedule_work(&works->unpin_work); 3363 3365 3364 3366 return 0; 3365 3367 }
+1 -3
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
··· 2582 2582 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2583 2583 2584 2584 drm_crtc_cleanup(crtc); 2585 - destroy_workqueue(amdgpu_crtc->pflip_queue); 2586 2585 kfree(amdgpu_crtc); 2587 2586 } 2588 2587 ··· 2808 2809 2809 2810 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); 2810 2811 amdgpu_crtc->crtc_id = index; 2811 - amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); 2812 2812 adev->mode_info.crtcs[index] = amdgpu_crtc; 2813 2813 2814 2814 amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH; ··· 3373 3375 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3374 3376 3375 3377 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3376 - queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); 3378 + schedule_work(&works->unpin_work); 3377 3379 3378 3380 return 0; 3379 3381 }
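The dce_v10_0.c, dce_v11_0.c and dce_v8_0.c hunks above all make the same change: the dedicated per-CRTC "amdgpu-pageflip-queue" workqueue is removed and the unpin work is handed to the system workqueue via schedule_work(). The work item is still set up elsewhere in the flip path, so the change reduces to the standard kernel deferred-work idiom sketched below; the struct and function names here are illustrative, not the driver's own.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Minimal sketch of the generic deferred-work pattern the DCE code now uses. */
struct flip_unpin_work {
	struct work_struct unpin_work;
	/* pinned buffer object, fences, ... */
};

static void flip_unpin_fn(struct work_struct *w)
{
	struct flip_unpin_work *work =
		container_of(w, struct flip_unpin_work, unpin_work);

	/* unpin the old framebuffer BO here, then free the work item */
	kfree(work);
}

/* setup:      INIT_WORK(&work->unpin_work, flip_unpin_fn); */
/* completion: schedule_work(&work->unpin_work);            */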
+6
drivers/gpu/drm/amd/amdgpu/fiji_smc.c
··· 272 272 if (!adev->pm.fw) 273 273 return -EINVAL; 274 274 275 + /* Skip SMC ucode loading on SR-IOV capable boards. 276 + * vbios does this for us in asic_init in that case. 277 + */ 278 + if (adev->virtualization.supports_sr_iov) 279 + return 0; 280 + 275 281 hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; 276 282 amdgpu_ucode_print_smc_hdr(&hdr->header); 277 283
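The fiji_smc.c hunk above short-circuits SMC microcode upload when the board is SR-IOV capable, because the vbios has already programmed the SMC during asic_init in that case. Any other ucode-load path that needs the same behaviour can reuse the same early-return guard; a minimal sketch, where only adev->virtualization.supports_sr_iov comes from the patch and the function itself is hypothetical:

static int example_start_smc(struct amdgpu_device *adev)
{
	/* On SR-IOV capable boards the vbios already programmed the SMC
	 * during asic_init, so there is nothing for the driver to upload. */
	if (adev->virtualization.supports_sr_iov)
		return 0;

	/* normal SMC firmware validation and upload would go here */
	return 0;
}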
+754 -1181
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
··· 31 31 #include "amdgpu_ucode.h" 32 32 #include "clearstate_ci.h" 33 33 34 - #include "uvd/uvd_4_2_d.h" 35 - 36 34 #include "dce/dce_8_0_d.h" 37 35 #include "dce/dce_8_0_sh_mask.h" 38 36 ··· 1004 1006 */ 1005 1007 static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) 1006 1008 { 1007 - const u32 num_tile_mode_states = 32; 1008 - const u32 num_secondary_tile_mode_states = 16; 1009 - u32 reg_offset, gb_tile_moden, split_equal_to_row_size; 1009 + const u32 num_tile_mode_states = 1010 + ARRAY_SIZE(adev->gfx.config.tile_mode_array); 1011 + const u32 num_secondary_tile_mode_states = 1012 + ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); 1013 + u32 reg_offset, split_equal_to_row_size; 1014 + uint32_t *tile, *macrotile; 1015 + 1016 + tile = adev->gfx.config.tile_mode_array; 1017 + macrotile = adev->gfx.config.macrotile_mode_array; 1010 1018 1011 1019 switch (adev->gfx.config.mem_row_size_in_kb) { 1012 1020 case 1: ··· 1027 1023 break; 1028 1024 } 1029 1025 1026 + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 1027 + tile[reg_offset] = 0; 1028 + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 1029 + macrotile[reg_offset] = 0; 1030 + 1030 1031 switch (adev->asic_type) { 1031 1032 case CHIP_BONAIRE: 1032 - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 1033 - switch (reg_offset) { 1034 - case 0: 1035 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1036 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1037 - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1038 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1039 - break; 1040 - case 1: 1041 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1042 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1043 - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1044 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1045 - break; 1046 - case 2: 1047 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1048 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1049 - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1050 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1051 - break; 1052 - case 3: 1053 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1054 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1055 - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1056 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1057 - break; 1058 - case 4: 1059 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1060 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1061 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1062 - TILE_SPLIT(split_equal_to_row_size)); 1063 - break; 1064 - case 5: 1065 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1066 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1067 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1068 - break; 1069 - case 6: 1070 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1071 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1072 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1073 - TILE_SPLIT(split_equal_to_row_size)); 1074 - break; 1075 - case 7: 1076 - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); 1077 - break; 1033 + tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1034 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1035 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1036 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1037 + tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1038 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1039 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1040 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1041 + tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1042 + 
PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1043 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1044 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1045 + tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1046 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1047 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1048 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1049 + tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1050 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1051 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1052 + TILE_SPLIT(split_equal_to_row_size)); 1053 + tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1054 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1055 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1056 + tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1057 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1058 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1059 + TILE_SPLIT(split_equal_to_row_size)); 1060 + tile[7] = (TILE_SPLIT(split_equal_to_row_size)); 1061 + tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1062 + PIPE_CONFIG(ADDR_SURF_P4_16x16)); 1063 + tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1064 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1065 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); 1066 + tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1067 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1068 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1069 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1070 + tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1071 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1072 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1073 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1074 + tile[12] = (TILE_SPLIT(split_equal_to_row_size)); 1075 + tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1076 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1077 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1078 + tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1079 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1080 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1081 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1082 + tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1083 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1084 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1085 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1086 + tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1087 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1088 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1089 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1090 + tile[17] = (TILE_SPLIT(split_equal_to_row_size)); 1091 + tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1092 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1093 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1094 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1095 + tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1096 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1097 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1098 + tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1099 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1100 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1101 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1102 + tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1103 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1104 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1105 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1106 + tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1107 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1108 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1109 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1110 + tile[23] = (TILE_SPLIT(split_equal_to_row_size)); 1111 + tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1112 + 
PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1113 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1114 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1115 + tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1116 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1117 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1118 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1119 + tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1120 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1121 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1122 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1123 + tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1124 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1125 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); 1126 + tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1127 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1128 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1129 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1130 + tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1131 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1132 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1133 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1134 + tile[30] = (TILE_SPLIT(split_equal_to_row_size)); 1078 1135 1079 - case 8: 1080 - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1081 - PIPE_CONFIG(ADDR_SURF_P4_16x16)); 1082 - break; 1083 - case 9: 1084 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1085 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1086 - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); 1087 - break; 1088 - case 10: 1089 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1090 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1091 - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1092 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1093 - break; 1094 - case 11: 1095 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1096 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1097 - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1098 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1099 - break; 1100 - case 12: 1101 - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); 1102 - break; 1103 - case 13: 1104 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1105 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1106 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1107 - break; 1108 - case 14: 1109 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1110 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1111 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1112 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1113 - break; 1114 - case 15: 1115 - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1116 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1117 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1118 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1119 - break; 1120 - case 16: 1121 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1122 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1123 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1124 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1125 - break; 1126 - case 17: 1127 - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); 1128 - break; 1129 - case 18: 1130 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1131 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1132 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1133 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1134 - break; 1135 - case 19: 1136 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1137 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1138 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1139 - break; 1140 - case 20: 1141 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 
1142 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1143 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1144 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1145 - break; 1146 - case 21: 1147 - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1148 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1149 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1150 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1151 - break; 1152 - case 22: 1153 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1154 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1155 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1156 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1157 - break; 1158 - case 23: 1159 - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); 1160 - break; 1161 - case 24: 1162 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1163 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1164 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1165 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1166 - break; 1167 - case 25: 1168 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1169 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1170 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1171 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1172 - break; 1173 - case 26: 1174 - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1175 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1176 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1177 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1178 - break; 1179 - case 27: 1180 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1181 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1182 - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); 1183 - break; 1184 - case 28: 1185 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1186 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1187 - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1188 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1189 - break; 1190 - case 29: 1191 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1192 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1193 - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1194 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1195 - break; 1196 - case 30: 1197 - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); 1198 - break; 1199 - default: 1200 - gb_tile_moden = 0; 1201 - break; 1202 - } 1203 - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; 1204 - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); 1205 - } 1206 - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { 1207 - switch (reg_offset) { 1208 - case 0: 1209 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1210 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1211 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1212 - NUM_BANKS(ADDR_SURF_16_BANK)); 1213 - break; 1214 - case 1: 1215 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1216 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1217 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1218 - NUM_BANKS(ADDR_SURF_16_BANK)); 1219 - break; 1220 - case 2: 1221 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1222 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1223 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1224 - NUM_BANKS(ADDR_SURF_16_BANK)); 1225 - break; 1226 - case 3: 1227 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1228 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1229 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1230 - NUM_BANKS(ADDR_SURF_16_BANK)); 1231 - break; 1232 - case 4: 1233 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1234 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1235 
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1236 - NUM_BANKS(ADDR_SURF_16_BANK)); 1237 - break; 1238 - case 5: 1239 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1240 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1241 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1242 - NUM_BANKS(ADDR_SURF_8_BANK)); 1243 - break; 1244 - case 6: 1245 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1246 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1247 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1248 - NUM_BANKS(ADDR_SURF_4_BANK)); 1249 - break; 1250 - case 8: 1251 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1252 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | 1253 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1254 - NUM_BANKS(ADDR_SURF_16_BANK)); 1255 - break; 1256 - case 9: 1257 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1258 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1259 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1260 - NUM_BANKS(ADDR_SURF_16_BANK)); 1261 - break; 1262 - case 10: 1263 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1264 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1265 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1266 - NUM_BANKS(ADDR_SURF_16_BANK)); 1267 - break; 1268 - case 11: 1269 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1270 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1271 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1272 - NUM_BANKS(ADDR_SURF_16_BANK)); 1273 - break; 1274 - case 12: 1275 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1276 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1277 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1278 - NUM_BANKS(ADDR_SURF_16_BANK)); 1279 - break; 1280 - case 13: 1281 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1282 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1283 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1284 - NUM_BANKS(ADDR_SURF_8_BANK)); 1285 - break; 1286 - case 14: 1287 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1288 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1289 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1290 - NUM_BANKS(ADDR_SURF_4_BANK)); 1291 - break; 1292 - default: 1293 - gb_tile_moden = 0; 1294 - break; 1295 - } 1296 - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; 1297 - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); 1298 - } 1136 + macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1137 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1138 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1139 + NUM_BANKS(ADDR_SURF_16_BANK)); 1140 + macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1141 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1142 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1143 + NUM_BANKS(ADDR_SURF_16_BANK)); 1144 + macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1145 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1146 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1147 + NUM_BANKS(ADDR_SURF_16_BANK)); 1148 + macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1149 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1150 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1151 + NUM_BANKS(ADDR_SURF_16_BANK)); 1152 + macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1153 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1154 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1155 + NUM_BANKS(ADDR_SURF_16_BANK)); 1156 + macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1157 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1158 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1159 + NUM_BANKS(ADDR_SURF_8_BANK)); 1160 + 
macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1161 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1162 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1163 + NUM_BANKS(ADDR_SURF_4_BANK)); 1164 + macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1165 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | 1166 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1167 + NUM_BANKS(ADDR_SURF_16_BANK)); 1168 + macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1169 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1170 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1171 + NUM_BANKS(ADDR_SURF_16_BANK)); 1172 + macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1173 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1174 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1175 + NUM_BANKS(ADDR_SURF_16_BANK)); 1176 + macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1177 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1178 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1179 + NUM_BANKS(ADDR_SURF_16_BANK)); 1180 + macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1181 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1182 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1183 + NUM_BANKS(ADDR_SURF_16_BANK)); 1184 + macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1185 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1186 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1187 + NUM_BANKS(ADDR_SURF_8_BANK)); 1188 + macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1189 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1190 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1191 + NUM_BANKS(ADDR_SURF_4_BANK)); 1192 + 1193 + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 1194 + WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); 1195 + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 1196 + if (reg_offset != 7) 1197 + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); 1299 1198 break; 1300 1199 case CHIP_HAWAII: 1301 - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 1302 - switch (reg_offset) { 1303 - case 0: 1304 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1305 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1306 - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1307 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1308 - break; 1309 - case 1: 1310 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1311 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1312 - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1313 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1314 - break; 1315 - case 2: 1316 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1317 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1318 - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1319 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1320 - break; 1321 - case 3: 1322 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1323 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1324 - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1325 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1326 - break; 1327 - case 4: 1328 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1329 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1330 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1331 - TILE_SPLIT(split_equal_to_row_size)); 1332 - break; 1333 - case 5: 1334 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1335 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1336 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1337 - TILE_SPLIT(split_equal_to_row_size)); 1338 - break; 1339 - case 6: 1340 - gb_tile_moden = 
(ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1341 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1342 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1343 - TILE_SPLIT(split_equal_to_row_size)); 1344 - break; 1345 - case 7: 1346 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1347 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1348 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1349 - TILE_SPLIT(split_equal_to_row_size)); 1350 - break; 1200 + tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1201 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1202 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1203 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1204 + tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1205 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1206 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1207 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1208 + tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1209 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1210 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1211 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1212 + tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1213 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1214 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1215 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1216 + tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1217 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1218 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1219 + TILE_SPLIT(split_equal_to_row_size)); 1220 + tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1221 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1222 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1223 + TILE_SPLIT(split_equal_to_row_size)); 1224 + tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1225 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1226 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1227 + TILE_SPLIT(split_equal_to_row_size)); 1228 + tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1229 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1230 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1231 + TILE_SPLIT(split_equal_to_row_size)); 1232 + tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1233 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); 1234 + tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1235 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1236 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); 1237 + tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1238 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1239 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1240 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1241 + tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1242 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1243 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1244 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1245 + tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | 1246 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1247 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1248 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1249 + tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1250 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1251 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1252 + tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1253 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1254 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1255 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1256 + tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1257 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1258 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1259 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1260 
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1261 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1262 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1263 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1264 + tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1265 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1266 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1267 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1268 + tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1269 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1270 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1271 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1272 + tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1273 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1274 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); 1275 + tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1276 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1277 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1278 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1279 + tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1280 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1281 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1282 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1283 + tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1284 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1285 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1286 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1287 + tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1288 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1289 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1290 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1291 + tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1292 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1293 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1294 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1295 + tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1296 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1297 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1298 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1299 + tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1300 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1301 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1302 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1303 + tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1304 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1305 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); 1306 + tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1307 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1308 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1309 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1310 + tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1311 + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1312 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1313 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1314 + tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1315 + PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1316 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1317 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1351 1318 1352 - case 8: 1353 - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1354 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); 1355 - break; 1356 - case 9: 1357 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1358 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1359 - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); 1360 - break; 1361 - case 10: 1362 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1363 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1364 - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 
1365 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1366 - break; 1367 - case 11: 1368 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1369 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1370 - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1371 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1372 - break; 1373 - case 12: 1374 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | 1375 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1376 - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1377 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1378 - break; 1379 - case 13: 1380 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1381 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1382 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1383 - break; 1384 - case 14: 1385 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1386 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1387 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1388 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1389 - break; 1390 - case 15: 1391 - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1392 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1393 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1394 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1395 - break; 1396 - case 16: 1397 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1398 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1399 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1400 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1401 - break; 1402 - case 17: 1403 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1404 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1405 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1406 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1407 - break; 1408 - case 18: 1409 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1410 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1411 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1412 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1413 - break; 1414 - case 19: 1415 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1416 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1417 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); 1418 - break; 1419 - case 20: 1420 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1421 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1422 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1423 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1424 - break; 1425 - case 21: 1426 - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1427 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1428 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1429 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1430 - break; 1431 - case 22: 1432 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1433 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1434 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1435 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1436 - break; 1437 - case 23: 1438 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1439 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1440 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1441 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1442 - break; 1443 - case 24: 1444 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1445 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1446 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1447 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1448 - break; 1449 - case 25: 1450 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1451 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1452 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1453 
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1454 - break; 1455 - case 26: 1456 - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1457 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1458 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1459 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1460 - break; 1461 - case 27: 1462 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1463 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1464 - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); 1465 - break; 1466 - case 28: 1467 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1468 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1469 - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1470 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1471 - break; 1472 - case 29: 1473 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1474 - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1475 - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1476 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1477 - break; 1478 - case 30: 1479 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1480 - PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1481 - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1482 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1483 - break; 1484 - default: 1485 - gb_tile_moden = 0; 1486 - break; 1487 - } 1488 - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; 1489 - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); 1490 - } 1491 - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { 1492 - switch (reg_offset) { 1493 - case 0: 1494 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1495 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1496 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1497 - NUM_BANKS(ADDR_SURF_16_BANK)); 1498 - break; 1499 - case 1: 1500 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1501 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1502 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1503 - NUM_BANKS(ADDR_SURF_16_BANK)); 1504 - break; 1505 - case 2: 1506 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1507 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1508 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1509 - NUM_BANKS(ADDR_SURF_16_BANK)); 1510 - break; 1511 - case 3: 1512 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1513 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1514 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1515 - NUM_BANKS(ADDR_SURF_16_BANK)); 1516 - break; 1517 - case 4: 1518 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1519 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1520 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1521 - NUM_BANKS(ADDR_SURF_8_BANK)); 1522 - break; 1523 - case 5: 1524 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1525 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1526 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1527 - NUM_BANKS(ADDR_SURF_4_BANK)); 1528 - break; 1529 - case 6: 1530 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1531 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1532 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1533 - NUM_BANKS(ADDR_SURF_4_BANK)); 1534 - break; 1535 - case 8: 1536 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1537 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1538 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1539 - NUM_BANKS(ADDR_SURF_16_BANK)); 1540 - break; 1541 - case 9: 1542 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1543 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1544 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1545 - 
NUM_BANKS(ADDR_SURF_16_BANK)); 1546 - break; 1547 - case 10: 1548 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1549 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1550 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1551 - NUM_BANKS(ADDR_SURF_16_BANK)); 1552 - break; 1553 - case 11: 1554 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1555 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1556 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1557 - NUM_BANKS(ADDR_SURF_8_BANK)); 1558 - break; 1559 - case 12: 1560 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1561 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1562 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1563 - NUM_BANKS(ADDR_SURF_16_BANK)); 1564 - break; 1565 - case 13: 1566 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1567 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1568 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1569 - NUM_BANKS(ADDR_SURF_8_BANK)); 1570 - break; 1571 - case 14: 1572 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1573 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1574 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1575 - NUM_BANKS(ADDR_SURF_4_BANK)); 1576 - break; 1577 - default: 1578 - gb_tile_moden = 0; 1579 - break; 1580 - } 1581 - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; 1582 - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); 1583 - } 1319 + macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1320 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1321 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1322 + NUM_BANKS(ADDR_SURF_16_BANK)); 1323 + macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1324 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1325 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1326 + NUM_BANKS(ADDR_SURF_16_BANK)); 1327 + macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1328 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1329 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1330 + NUM_BANKS(ADDR_SURF_16_BANK)); 1331 + macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1332 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1333 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1334 + NUM_BANKS(ADDR_SURF_16_BANK)); 1335 + macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1336 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1337 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1338 + NUM_BANKS(ADDR_SURF_8_BANK)); 1339 + macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1340 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1341 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1342 + NUM_BANKS(ADDR_SURF_4_BANK)); 1343 + macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1344 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1345 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1346 + NUM_BANKS(ADDR_SURF_4_BANK)); 1347 + macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1348 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1349 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1350 + NUM_BANKS(ADDR_SURF_16_BANK)); 1351 + macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1352 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1353 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1354 + NUM_BANKS(ADDR_SURF_16_BANK)); 1355 + macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1356 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1357 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1358 + NUM_BANKS(ADDR_SURF_16_BANK)); 1359 + macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1360 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1361 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1362 + 
NUM_BANKS(ADDR_SURF_8_BANK)); 1363 + macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1364 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1365 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1366 + NUM_BANKS(ADDR_SURF_16_BANK)); 1367 + macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1368 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1369 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1370 + NUM_BANKS(ADDR_SURF_8_BANK)); 1371 + macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1372 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1373 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1374 + NUM_BANKS(ADDR_SURF_4_BANK)); 1375 + 1376 + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 1377 + WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); 1378 + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 1379 + if (reg_offset != 7) 1380 + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); 1584 1381 break; 1585 1382 case CHIP_KABINI: 1586 1383 case CHIP_KAVERI: 1587 1384 case CHIP_MULLINS: 1588 1385 default: 1589 - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 1590 - switch (reg_offset) { 1591 - case 0: 1592 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1593 - PIPE_CONFIG(ADDR_SURF_P2) | 1594 - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1595 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1596 - break; 1597 - case 1: 1598 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1599 - PIPE_CONFIG(ADDR_SURF_P2) | 1600 - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1601 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1602 - break; 1603 - case 2: 1604 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1605 - PIPE_CONFIG(ADDR_SURF_P2) | 1606 - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1607 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1608 - break; 1609 - case 3: 1610 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1611 - PIPE_CONFIG(ADDR_SURF_P2) | 1612 - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1613 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1614 - break; 1615 - case 4: 1616 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1617 - PIPE_CONFIG(ADDR_SURF_P2) | 1618 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1619 - TILE_SPLIT(split_equal_to_row_size)); 1620 - break; 1621 - case 5: 1622 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1623 - PIPE_CONFIG(ADDR_SURF_P2) | 1624 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1625 - break; 1626 - case 6: 1627 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1628 - PIPE_CONFIG(ADDR_SURF_P2) | 1629 - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1630 - TILE_SPLIT(split_equal_to_row_size)); 1631 - break; 1632 - case 7: 1633 - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); 1634 - break; 1386 + tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1387 + PIPE_CONFIG(ADDR_SURF_P2) | 1388 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1389 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1390 + tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1391 + PIPE_CONFIG(ADDR_SURF_P2) | 1392 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1393 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1394 + tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1395 + PIPE_CONFIG(ADDR_SURF_P2) | 1396 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1397 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1398 + tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1399 + PIPE_CONFIG(ADDR_SURF_P2) | 1400 + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1401 + 
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1402 + tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1403 + PIPE_CONFIG(ADDR_SURF_P2) | 1404 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1405 + TILE_SPLIT(split_equal_to_row_size)); 1406 + tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1407 + PIPE_CONFIG(ADDR_SURF_P2) | 1408 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1409 + tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1410 + PIPE_CONFIG(ADDR_SURF_P2) | 1411 + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1412 + TILE_SPLIT(split_equal_to_row_size)); 1413 + tile[7] = (TILE_SPLIT(split_equal_to_row_size)); 1414 + tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1415 + PIPE_CONFIG(ADDR_SURF_P2)); 1416 + tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1417 + PIPE_CONFIG(ADDR_SURF_P2) | 1418 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); 1419 + tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1420 + PIPE_CONFIG(ADDR_SURF_P2) | 1421 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1422 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1423 + tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1424 + PIPE_CONFIG(ADDR_SURF_P2) | 1425 + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1426 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1427 + tile[12] = (TILE_SPLIT(split_equal_to_row_size)); 1428 + tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1429 + PIPE_CONFIG(ADDR_SURF_P2) | 1430 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1431 + tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1432 + PIPE_CONFIG(ADDR_SURF_P2) | 1433 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1434 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1435 + tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1436 + PIPE_CONFIG(ADDR_SURF_P2) | 1437 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1438 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1439 + tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1440 + PIPE_CONFIG(ADDR_SURF_P2) | 1441 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1442 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1443 + tile[17] = (TILE_SPLIT(split_equal_to_row_size)); 1444 + tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1445 + PIPE_CONFIG(ADDR_SURF_P2) | 1446 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1447 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1448 + tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1449 + PIPE_CONFIG(ADDR_SURF_P2) | 1450 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); 1451 + tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1452 + PIPE_CONFIG(ADDR_SURF_P2) | 1453 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1454 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1455 + tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1456 + PIPE_CONFIG(ADDR_SURF_P2) | 1457 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1458 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1459 + tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1460 + PIPE_CONFIG(ADDR_SURF_P2) | 1461 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1462 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1463 + tile[23] = (TILE_SPLIT(split_equal_to_row_size)); 1464 + tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1465 + PIPE_CONFIG(ADDR_SURF_P2) | 1466 + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1467 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1468 + tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1469 + PIPE_CONFIG(ADDR_SURF_P2) | 1470 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1471 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1472 + tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) 
| 1473 + PIPE_CONFIG(ADDR_SURF_P2) | 1474 + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1475 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1476 + tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1477 + PIPE_CONFIG(ADDR_SURF_P2) | 1478 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); 1479 + tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1480 + PIPE_CONFIG(ADDR_SURF_P2) | 1481 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1482 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1483 + tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1484 + PIPE_CONFIG(ADDR_SURF_P2) | 1485 + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1486 + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1487 + tile[30] = (TILE_SPLIT(split_equal_to_row_size)); 1635 1488 1636 - case 8: 1637 - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1638 - PIPE_CONFIG(ADDR_SURF_P2)); 1639 - break; 1640 - case 9: 1641 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1642 - PIPE_CONFIG(ADDR_SURF_P2) | 1643 - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); 1644 - break; 1645 - case 10: 1646 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1647 - PIPE_CONFIG(ADDR_SURF_P2) | 1648 - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1649 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1650 - break; 1651 - case 11: 1652 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1653 - PIPE_CONFIG(ADDR_SURF_P2) | 1654 - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1655 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1656 - break; 1657 - case 12: 1658 - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); 1659 - break; 1660 - case 13: 1661 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1662 - PIPE_CONFIG(ADDR_SURF_P2) | 1663 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1664 - break; 1665 - case 14: 1666 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1667 - PIPE_CONFIG(ADDR_SURF_P2) | 1668 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1669 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1670 - break; 1671 - case 15: 1672 - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1673 - PIPE_CONFIG(ADDR_SURF_P2) | 1674 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1675 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1676 - break; 1677 - case 16: 1678 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1679 - PIPE_CONFIG(ADDR_SURF_P2) | 1680 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1681 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1682 - break; 1683 - case 17: 1684 - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); 1685 - break; 1686 - case 18: 1687 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1688 - PIPE_CONFIG(ADDR_SURF_P2) | 1689 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1690 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1691 - break; 1692 - case 19: 1693 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1694 - PIPE_CONFIG(ADDR_SURF_P2) | 1695 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); 1696 - break; 1697 - case 20: 1698 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1699 - PIPE_CONFIG(ADDR_SURF_P2) | 1700 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1701 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1702 - break; 1703 - case 21: 1704 - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1705 - PIPE_CONFIG(ADDR_SURF_P2) | 1706 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1707 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1708 - break; 1709 - case 22: 1710 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1711 - 
PIPE_CONFIG(ADDR_SURF_P2) | 1712 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1713 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1714 - break; 1715 - case 23: 1716 - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); 1717 - break; 1718 - case 24: 1719 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1720 - PIPE_CONFIG(ADDR_SURF_P2) | 1721 - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1722 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1723 - break; 1724 - case 25: 1725 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1726 - PIPE_CONFIG(ADDR_SURF_P2) | 1727 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1728 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1729 - break; 1730 - case 26: 1731 - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1732 - PIPE_CONFIG(ADDR_SURF_P2) | 1733 - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1734 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1735 - break; 1736 - case 27: 1737 - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1738 - PIPE_CONFIG(ADDR_SURF_P2) | 1739 - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); 1740 - break; 1741 - case 28: 1742 - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1743 - PIPE_CONFIG(ADDR_SURF_P2) | 1744 - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1745 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1746 - break; 1747 - case 29: 1748 - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1749 - PIPE_CONFIG(ADDR_SURF_P2) | 1750 - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1751 - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1752 - break; 1753 - case 30: 1754 - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); 1755 - break; 1756 - default: 1757 - gb_tile_moden = 0; 1758 - break; 1759 - } 1760 - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; 1761 - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); 1762 - } 1763 - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { 1764 - switch (reg_offset) { 1765 - case 0: 1766 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1767 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1768 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1769 - NUM_BANKS(ADDR_SURF_8_BANK)); 1770 - break; 1771 - case 1: 1772 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1773 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1774 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1775 - NUM_BANKS(ADDR_SURF_8_BANK)); 1776 - break; 1777 - case 2: 1778 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1779 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1780 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1781 - NUM_BANKS(ADDR_SURF_8_BANK)); 1782 - break; 1783 - case 3: 1784 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1785 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1786 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1787 - NUM_BANKS(ADDR_SURF_8_BANK)); 1788 - break; 1789 - case 4: 1790 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1791 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1792 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1793 - NUM_BANKS(ADDR_SURF_8_BANK)); 1794 - break; 1795 - case 5: 1796 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1797 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1798 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1799 - NUM_BANKS(ADDR_SURF_8_BANK)); 1800 - break; 1801 - case 6: 1802 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1803 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1804 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1805 - 
NUM_BANKS(ADDR_SURF_8_BANK)); 1806 - break; 1807 - case 8: 1808 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | 1809 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | 1810 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1811 - NUM_BANKS(ADDR_SURF_16_BANK)); 1812 - break; 1813 - case 9: 1814 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | 1815 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1816 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1817 - NUM_BANKS(ADDR_SURF_16_BANK)); 1818 - break; 1819 - case 10: 1820 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1821 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1822 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1823 - NUM_BANKS(ADDR_SURF_16_BANK)); 1824 - break; 1825 - case 11: 1826 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1827 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1828 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1829 - NUM_BANKS(ADDR_SURF_16_BANK)); 1830 - break; 1831 - case 12: 1832 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1833 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1834 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1835 - NUM_BANKS(ADDR_SURF_16_BANK)); 1836 - break; 1837 - case 13: 1838 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1839 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1840 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1841 - NUM_BANKS(ADDR_SURF_16_BANK)); 1842 - break; 1843 - case 14: 1844 - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1845 - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1846 - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1847 - NUM_BANKS(ADDR_SURF_8_BANK)); 1848 - break; 1849 - default: 1850 - gb_tile_moden = 0; 1851 - break; 1852 - } 1853 - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; 1854 - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); 1855 - } 1489 + macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1490 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1491 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1492 + NUM_BANKS(ADDR_SURF_8_BANK)); 1493 + macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1494 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1495 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1496 + NUM_BANKS(ADDR_SURF_8_BANK)); 1497 + macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1498 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1499 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1500 + NUM_BANKS(ADDR_SURF_8_BANK)); 1501 + macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1502 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1503 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1504 + NUM_BANKS(ADDR_SURF_8_BANK)); 1505 + macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1506 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1507 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1508 + NUM_BANKS(ADDR_SURF_8_BANK)); 1509 + macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1510 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1511 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1512 + NUM_BANKS(ADDR_SURF_8_BANK)); 1513 + macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1514 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1515 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1516 + NUM_BANKS(ADDR_SURF_8_BANK)); 1517 + macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | 1518 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | 1519 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1520 + NUM_BANKS(ADDR_SURF_16_BANK)); 1521 + macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | 1522 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1523 + 
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1524 + NUM_BANKS(ADDR_SURF_16_BANK)); 1525 + macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1526 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1527 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1528 + NUM_BANKS(ADDR_SURF_16_BANK)); 1529 + macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1530 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1531 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1532 + NUM_BANKS(ADDR_SURF_16_BANK)); 1533 + macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1534 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1535 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1536 + NUM_BANKS(ADDR_SURF_16_BANK)); 1537 + macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1538 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1539 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1540 + NUM_BANKS(ADDR_SURF_16_BANK)); 1541 + macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1542 + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1543 + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1544 + NUM_BANKS(ADDR_SURF_8_BANK)); 1545 + 1546 + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 1547 + WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); 1548 + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 1549 + if (reg_offset != 7) 1550 + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); 1856 1551 break; 1857 1552 } 1858 1553 } ··· 1596 1893 */ 1597 1894 static u32 gfx_v7_0_create_bitmask(u32 bit_width) 1598 1895 { 1599 - u32 i, mask = 0; 1600 - 1601 - for (i = 0; i < bit_width; i++) { 1602 - mask <<= 1; 1603 - mask |= 1; 1604 - } 1605 - return mask; 1896 + return (u32)((1ULL << bit_width) - 1); 1606 1897 } 1607 1898 1608 1899 /** 1609 - * gfx_v7_0_get_rb_disabled - computes the mask of disabled RBs 1900 + * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs 1610 1901 * 1611 1902 * @adev: amdgpu_device pointer 1612 - * @max_rb_num: max RBs (render backends) for the asic 1613 - * @se_num: number of SEs (shader engines) for the asic 1614 - * @sh_per_se: number of SH blocks per SE for the asic 1615 1903 * 1616 - * Calculates the bitmask of disabled RBs (CIK). 1617 - * Returns the disabled RB bitmask. 1904 + * Calculates the bitmask of enabled RBs (CIK). 1905 + * Returns the enabled RB bitmask. 
1618 1906 */ 1619 - static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev, 1620 - u32 max_rb_num_per_se, 1621 - u32 sh_per_se) 1907 + static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev) 1622 1908 { 1623 1909 u32 data, mask; 1624 1910 1625 1911 data = RREG32(mmCC_RB_BACKEND_DISABLE); 1626 - if (data & 1) 1627 - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; 1628 - else 1629 - data = 0; 1630 - 1631 1912 data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); 1632 1913 1914 + data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; 1633 1915 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; 1634 1916 1635 - mask = gfx_v7_0_create_bitmask(max_rb_num_per_se / sh_per_se); 1917 + mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se / 1918 + adev->gfx.config.max_sh_per_se); 1636 1919 1637 - return data & mask; 1920 + return (~data) & mask; 1638 1921 } 1639 1922 1640 1923 /** ··· 1629 1940 * @adev: amdgpu_device pointer 1630 1941 * @se_num: number of SEs (shader engines) for the asic 1631 1942 * @sh_per_se: number of SH blocks per SE for the asic 1632 - * @max_rb_num: max RBs (render backends) for the asic 1633 1943 * 1634 1944 * Configures per-SE/SH RB registers (CIK). 1635 1945 */ 1636 - static void gfx_v7_0_setup_rb(struct amdgpu_device *adev, 1637 - u32 se_num, u32 sh_per_se, 1638 - u32 max_rb_num_per_se) 1946 + static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) 1639 1947 { 1640 1948 int i, j; 1641 - u32 data, mask; 1642 - u32 disabled_rbs = 0; 1643 - u32 enabled_rbs = 0; 1949 + u32 data, tmp, num_rbs = 0; 1950 + u32 active_rbs = 0; 1644 1951 1645 1952 mutex_lock(&adev->grbm_idx_mutex); 1646 - for (i = 0; i < se_num; i++) { 1647 - for (j = 0; j < sh_per_se; j++) { 1953 + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1954 + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1648 1955 gfx_v7_0_select_se_sh(adev, i, j); 1649 - data = gfx_v7_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se); 1956 + data = gfx_v7_0_get_rb_active_bitmap(adev); 1650 1957 if (adev->asic_type == CHIP_HAWAII) 1651 - disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); 1958 + active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * 1959 + HAWAII_RB_BITMAP_WIDTH_PER_SH); 1652 1960 else 1653 - disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH); 1961 + active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * 1962 + CIK_RB_BITMAP_WIDTH_PER_SH); 1654 1963 } 1655 1964 } 1656 1965 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 1657 1966 mutex_unlock(&adev->grbm_idx_mutex); 1658 1967 1659 - mask = 1; 1660 - for (i = 0; i < max_rb_num_per_se * se_num; i++) { 1661 - if (!(disabled_rbs & mask)) 1662 - enabled_rbs |= mask; 1663 - mask <<= 1; 1664 - } 1665 - 1666 - adev->gfx.config.backend_enable_mask = enabled_rbs; 1667 - 1668 - mutex_lock(&adev->grbm_idx_mutex); 1669 - for (i = 0; i < se_num; i++) { 1670 - gfx_v7_0_select_se_sh(adev, i, 0xffffffff); 1671 - data = 0; 1672 - for (j = 0; j < sh_per_se; j++) { 1673 - switch (enabled_rbs & 3) { 1674 - case 0: 1675 - if (j == 0) 1676 - data |= (RASTER_CONFIG_RB_MAP_3 << 1677 - PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); 1678 - else 1679 - data |= (RASTER_CONFIG_RB_MAP_0 << 1680 - PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); 1681 - break; 1682 - case 1: 1683 - data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); 1684 - break; 1685 - case 2: 1686 - data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); 1687 - break; 1688 - case 3: 1689 - 
default: 1690 - data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); 1691 - break; 1692 - } 1693 - enabled_rbs >>= 2; 1694 - } 1695 - WREG32(mmPA_SC_RASTER_CONFIG, data); 1696 - } 1697 - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 1698 - mutex_unlock(&adev->grbm_idx_mutex); 1968 + adev->gfx.config.backend_enable_mask = active_rbs; 1969 + tmp = active_rbs; 1970 + while (tmp >>= 1) 1971 + num_rbs++; 1972 + adev->gfx.config.num_rbs = num_rbs; 1699 1973 } 1700 1974 1701 1975 /** ··· 1711 2059 */ 1712 2060 static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) 1713 2061 { 1714 - u32 gb_addr_config; 1715 - u32 mc_shared_chmap, mc_arb_ramcfg; 1716 - u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; 1717 - u32 sh_mem_cfg; 1718 - u32 tmp; 2062 + u32 tmp, sh_mem_cfg; 1719 2063 int i; 1720 - 1721 - switch (adev->asic_type) { 1722 - case CHIP_BONAIRE: 1723 - adev->gfx.config.max_shader_engines = 2; 1724 - adev->gfx.config.max_tile_pipes = 4; 1725 - adev->gfx.config.max_cu_per_sh = 7; 1726 - adev->gfx.config.max_sh_per_se = 1; 1727 - adev->gfx.config.max_backends_per_se = 2; 1728 - adev->gfx.config.max_texture_channel_caches = 4; 1729 - adev->gfx.config.max_gprs = 256; 1730 - adev->gfx.config.max_gs_threads = 32; 1731 - adev->gfx.config.max_hw_contexts = 8; 1732 - 1733 - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1734 - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1735 - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1736 - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 1737 - gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 1738 - break; 1739 - case CHIP_HAWAII: 1740 - adev->gfx.config.max_shader_engines = 4; 1741 - adev->gfx.config.max_tile_pipes = 16; 1742 - adev->gfx.config.max_cu_per_sh = 11; 1743 - adev->gfx.config.max_sh_per_se = 1; 1744 - adev->gfx.config.max_backends_per_se = 4; 1745 - adev->gfx.config.max_texture_channel_caches = 16; 1746 - adev->gfx.config.max_gprs = 256; 1747 - adev->gfx.config.max_gs_threads = 32; 1748 - adev->gfx.config.max_hw_contexts = 8; 1749 - 1750 - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1751 - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1752 - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1753 - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 1754 - gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN; 1755 - break; 1756 - case CHIP_KAVERI: 1757 - adev->gfx.config.max_shader_engines = 1; 1758 - adev->gfx.config.max_tile_pipes = 4; 1759 - if ((adev->pdev->device == 0x1304) || 1760 - (adev->pdev->device == 0x1305) || 1761 - (adev->pdev->device == 0x130C) || 1762 - (adev->pdev->device == 0x130F) || 1763 - (adev->pdev->device == 0x1310) || 1764 - (adev->pdev->device == 0x1311) || 1765 - (adev->pdev->device == 0x131C)) { 1766 - adev->gfx.config.max_cu_per_sh = 8; 1767 - adev->gfx.config.max_backends_per_se = 2; 1768 - } else if ((adev->pdev->device == 0x1309) || 1769 - (adev->pdev->device == 0x130A) || 1770 - (adev->pdev->device == 0x130D) || 1771 - (adev->pdev->device == 0x1313) || 1772 - (adev->pdev->device == 0x131D)) { 1773 - adev->gfx.config.max_cu_per_sh = 6; 1774 - adev->gfx.config.max_backends_per_se = 2; 1775 - } else if ((adev->pdev->device == 0x1306) || 1776 - (adev->pdev->device == 0x1307) || 1777 - (adev->pdev->device == 0x130B) || 1778 - (adev->pdev->device == 0x130E) || 1779 - (adev->pdev->device == 0x1315) || 1780 - (adev->pdev->device == 0x131B)) { 1781 - adev->gfx.config.max_cu_per_sh = 4; 1782 - adev->gfx.config.max_backends_per_se = 1; 1783 - } else { 1784 - 
adev->gfx.config.max_cu_per_sh = 3; 1785 - adev->gfx.config.max_backends_per_se = 1; 1786 - } 1787 - adev->gfx.config.max_sh_per_se = 1; 1788 - adev->gfx.config.max_texture_channel_caches = 4; 1789 - adev->gfx.config.max_gprs = 256; 1790 - adev->gfx.config.max_gs_threads = 16; 1791 - adev->gfx.config.max_hw_contexts = 8; 1792 - 1793 - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1794 - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1795 - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1796 - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 1797 - gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 1798 - break; 1799 - case CHIP_KABINI: 1800 - case CHIP_MULLINS: 1801 - default: 1802 - adev->gfx.config.max_shader_engines = 1; 1803 - adev->gfx.config.max_tile_pipes = 2; 1804 - adev->gfx.config.max_cu_per_sh = 2; 1805 - adev->gfx.config.max_sh_per_se = 1; 1806 - adev->gfx.config.max_backends_per_se = 1; 1807 - adev->gfx.config.max_texture_channel_caches = 2; 1808 - adev->gfx.config.max_gprs = 256; 1809 - adev->gfx.config.max_gs_threads = 16; 1810 - adev->gfx.config.max_hw_contexts = 8; 1811 - 1812 - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1813 - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1814 - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1815 - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 1816 - gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 1817 - break; 1818 - } 1819 2064 1820 2065 WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT)); 1821 2066 1822 - mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); 1823 - adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); 1824 - mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; 1825 - 1826 - adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; 1827 - adev->gfx.config.mem_max_burst_length_bytes = 256; 1828 - if (adev->flags & AMD_IS_APU) { 1829 - /* Get memory bank mapping mode. */ 1830 - tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); 1831 - dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); 1832 - dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP); 1833 - 1834 - tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING); 1835 - dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP); 1836 - dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP); 1837 - 1838 - /* Validate settings in case only one DIMM installed. */ 1839 - if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12)) 1840 - dimm00_addr_map = 0; 1841 - if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12)) 1842 - dimm01_addr_map = 0; 1843 - if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12)) 1844 - dimm10_addr_map = 0; 1845 - if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12)) 1846 - dimm11_addr_map = 0; 1847 - 1848 - /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */ 1849 - /* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. 
*/ 1850 - if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11)) 1851 - adev->gfx.config.mem_row_size_in_kb = 2; 1852 - else 1853 - adev->gfx.config.mem_row_size_in_kb = 1; 1854 - } else { 1855 - tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT; 1856 - adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; 1857 - if (adev->gfx.config.mem_row_size_in_kb > 4) 1858 - adev->gfx.config.mem_row_size_in_kb = 4; 1859 - } 1860 - /* XXX use MC settings? */ 1861 - adev->gfx.config.shader_engine_tile_size = 32; 1862 - adev->gfx.config.num_gpus = 1; 1863 - adev->gfx.config.multi_gpu_tile_size = 64; 1864 - 1865 - /* fix up row size */ 1866 - gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK; 1867 - switch (adev->gfx.config.mem_row_size_in_kb) { 1868 - case 1: 1869 - default: 1870 - gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); 1871 - break; 1872 - case 2: 1873 - gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); 1874 - break; 1875 - case 4: 1876 - gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); 1877 - break; 1878 - } 1879 - adev->gfx.config.gb_addr_config = gb_addr_config; 1880 - 1881 - WREG32(mmGB_ADDR_CONFIG, gb_addr_config); 1882 - WREG32(mmHDP_ADDR_CONFIG, gb_addr_config); 1883 - WREG32(mmDMIF_ADDR_CALC, gb_addr_config); 1884 - WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70); 1885 - WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70); 1886 - WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config); 1887 - WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); 1888 - WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); 2067 + WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 2068 + WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 2069 + WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); 1889 2070 1890 2071 gfx_v7_0_tiling_mode_table_init(adev); 1891 2072 1892 - gfx_v7_0_setup_rb(adev, adev->gfx.config.max_shader_engines, 1893 - adev->gfx.config.max_sh_per_se, 1894 - adev->gfx.config.max_backends_per_se); 2073 + gfx_v7_0_setup_rb(adev); 1895 2074 1896 2075 /* set HW defaults for 3D engine */ 1897 2076 WREG32(mmCP_MEQ_THRESHOLDS, 1898 - (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) | 1899 - (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT)); 2077 + (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) | 2078 + (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT)); 1900 2079 1901 2080 mutex_lock(&adev->grbm_idx_mutex); 1902 2081 /* ··· 1738 2255 1739 2256 /* XXX SH_MEM regs */ 1740 2257 /* where to put LDS, scratch, GPUVM in FSA64 space */ 1741 - sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, 2258 + sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, 1742 2259 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 1743 2260 1744 2261 mutex_lock(&adev->srbm_mutex); ··· 1862 2379 return r; 1863 2380 } 1864 2381 WREG32(scratch, 0xCAFEDEAD); 1865 - r = amdgpu_ring_lock(ring, 3); 2382 + r = amdgpu_ring_alloc(ring, 3); 1866 2383 if (r) { 1867 2384 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); 1868 2385 amdgpu_gfx_scratch_free(adev, scratch); ··· 1871 2388 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 1872 2389 amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); 1873 2390 amdgpu_ring_write(ring, 0xDEADBEEF); 1874 - amdgpu_ring_unlock_commit(ring); 2391 + amdgpu_ring_commit(ring); 1875 2392 1876 2393 for (i = 0; i < adev->usec_timeout; i++) { 1877 2394 tmp = 
RREG32(scratch); ··· 1999 2516 amdgpu_ring_write(ring, upper_32_bits(seq)); 2000 2517 } 2001 2518 2002 - /** 2003 - * gfx_v7_0_ring_emit_semaphore - emit a semaphore on the CP ring 2004 - * 2005 - * @ring: amdgpu ring buffer object 2006 - * @semaphore: amdgpu semaphore object 2007 - * @emit_wait: Is this a sempahore wait? 2008 - * 2009 - * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP 2010 - * from running ahead of semaphore waits. 2011 - */ 2012 - static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring, 2013 - struct amdgpu_semaphore *semaphore, 2014 - bool emit_wait) 2015 - { 2016 - uint64_t addr = semaphore->gpu_addr; 2017 - unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; 2018 - 2019 - amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 2020 - amdgpu_ring_write(ring, addr & 0xffffffff); 2021 - amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); 2022 - 2023 - if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) { 2024 - /* Prevent the PFP from running ahead of the semaphore wait */ 2025 - amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 2026 - amdgpu_ring_write(ring, 0x0); 2027 - } 2028 - 2029 - return true; 2030 - } 2031 - 2032 2519 /* 2033 2520 * IB stuff 2034 2521 */ ··· 2114 2661 } 2115 2662 WREG32(scratch, 0xCAFEDEAD); 2116 2663 memset(&ib, 0, sizeof(ib)); 2117 - r = amdgpu_ib_get(ring, NULL, 256, &ib); 2664 + r = amdgpu_ib_get(adev, NULL, 256, &ib); 2118 2665 if (r) { 2119 2666 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 2120 2667 goto err1; ··· 2124 2671 ib.ptr[2] = 0xDEADBEEF; 2125 2672 ib.length_dw = 3; 2126 2673 2127 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, 2128 - AMDGPU_FENCE_OWNER_UNDEFINED, 2129 - &f); 2674 + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, 2675 + NULL, &f); 2130 2676 if (r) 2131 2677 goto err2; 2132 2678 ··· 2294 2842 2295 2843 gfx_v7_0_cp_gfx_enable(adev, true); 2296 2844 2297 - r = amdgpu_ring_lock(ring, gfx_v7_0_get_csb_size(adev) + 8); 2845 + r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8); 2298 2846 if (r) { 2299 2847 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 2300 2848 return r; ··· 2363 2911 amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 2364 2912 amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ 2365 2913 2366 - amdgpu_ring_unlock_commit(ring); 2914 + amdgpu_ring_commit(ring); 2367 2915 2368 2916 return 0; 2369 2917 } ··· 2441 2989 2442 2990 static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 2443 2991 { 2444 - u32 rptr; 2445 - 2446 - rptr = ring->adev->wb.wb[ring->rptr_offs]; 2447 - 2448 - return rptr; 2992 + return ring->adev->wb.wb[ring->rptr_offs]; 2449 2993 } 2450 2994 2451 2995 static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 2452 2996 { 2453 2997 struct amdgpu_device *adev = ring->adev; 2454 - u32 wptr; 2455 2998 2456 - wptr = RREG32(mmCP_RB0_WPTR); 2457 - 2458 - return wptr; 2999 + return RREG32(mmCP_RB0_WPTR); 2459 3000 } 2460 3001 2461 3002 static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) ··· 2461 3016 2462 3017 static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 2463 3018 { 2464 - u32 rptr; 2465 - 2466 - rptr = ring->adev->wb.wb[ring->rptr_offs]; 2467 - 2468 - return rptr; 3019 + return ring->adev->wb.wb[ring->rptr_offs]; 2469 3020 } 2470 3021 2471 3022 static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 2472 3023 { 2473 - u32 wptr; 2474 - 2475 3024 /* XXX 
check if swapping is necessary on BE */ 2476 - wptr = ring->adev->wb.wb[ring->wptr_offs]; 2477 - 2478 - return wptr; 3025 + return ring->adev->wb.wb[ring->wptr_offs]; 2479 3026 } 2480 3027 2481 3028 static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring) ··· 2558 3121 WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++)); 2559 3122 WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0); 2560 3123 } 2561 - 2562 - return 0; 2563 - } 2564 - 2565 - /** 2566 - * gfx_v7_0_cp_compute_start - start the compute queues 2567 - * 2568 - * @adev: amdgpu_device pointer 2569 - * 2570 - * Enable the compute queues. 2571 - * Returns 0 for success, error for failure. 2572 - */ 2573 - static int gfx_v7_0_cp_compute_start(struct amdgpu_device *adev) 2574 - { 2575 - gfx_v7_0_cp_compute_enable(adev, true); 2576 3124 2577 3125 return 0; 2578 3126 } ··· 2752 3330 u32 *buf; 2753 3331 struct bonaire_mqd *mqd; 2754 3332 2755 - r = gfx_v7_0_cp_compute_start(adev); 2756 - if (r) 2757 - return r; 3333 + gfx_v7_0_cp_compute_enable(adev, true); 2758 3334 2759 3335 /* fix up chicken bits */ 2760 3336 tmp = RREG32(mmCP_CPF_DEBUG); ··· 3815 4395 } 3816 4396 } 3817 4397 3818 - static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev, 3819 - u32 se, u32 sh) 4398 + static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev) 3820 4399 { 3821 - u32 mask = 0, tmp, tmp1; 3822 - int i; 4400 + u32 data, mask; 3823 4401 3824 - gfx_v7_0_select_se_sh(adev, se, sh); 3825 - tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); 3826 - tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); 3827 - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 4402 + data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); 4403 + data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); 3828 4404 3829 - tmp &= 0xffff0000; 4405 + data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 4406 + data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 3830 4407 3831 - tmp |= tmp1; 3832 - tmp >>= 16; 4408 + mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se / 4409 + adev->gfx.config.max_sh_per_se); 3833 4410 3834 - for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) { 3835 - mask <<= 1; 3836 - mask |= 1; 3837 - } 3838 - 3839 - return (~tmp) & mask; 4411 + return (~data) & mask; 3840 4412 } 3841 4413 3842 4414 static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev) ··· 4166 4754 return 0; 4167 4755 } 4168 4756 4757 + static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) 4758 + { 4759 + u32 gb_addr_config; 4760 + u32 mc_shared_chmap, mc_arb_ramcfg; 4761 + u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; 4762 + u32 tmp; 4763 + 4764 + switch (adev->asic_type) { 4765 + case CHIP_BONAIRE: 4766 + adev->gfx.config.max_shader_engines = 2; 4767 + adev->gfx.config.max_tile_pipes = 4; 4768 + adev->gfx.config.max_cu_per_sh = 7; 4769 + adev->gfx.config.max_sh_per_se = 1; 4770 + adev->gfx.config.max_backends_per_se = 2; 4771 + adev->gfx.config.max_texture_channel_caches = 4; 4772 + adev->gfx.config.max_gprs = 256; 4773 + adev->gfx.config.max_gs_threads = 32; 4774 + adev->gfx.config.max_hw_contexts = 8; 4775 + 4776 + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 4777 + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 4778 + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 4779 + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 4780 + gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 4781 + break; 4782 + case CHIP_HAWAII: 4783 + adev->gfx.config.max_shader_engines = 4; 4784 + adev->gfx.config.max_tile_pipes = 16; 4785 + 
adev->gfx.config.max_cu_per_sh = 11; 4786 + adev->gfx.config.max_sh_per_se = 1; 4787 + adev->gfx.config.max_backends_per_se = 4; 4788 + adev->gfx.config.max_texture_channel_caches = 16; 4789 + adev->gfx.config.max_gprs = 256; 4790 + adev->gfx.config.max_gs_threads = 32; 4791 + adev->gfx.config.max_hw_contexts = 8; 4792 + 4793 + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 4794 + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 4795 + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 4796 + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 4797 + gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN; 4798 + break; 4799 + case CHIP_KAVERI: 4800 + adev->gfx.config.max_shader_engines = 1; 4801 + adev->gfx.config.max_tile_pipes = 4; 4802 + if ((adev->pdev->device == 0x1304) || 4803 + (adev->pdev->device == 0x1305) || 4804 + (adev->pdev->device == 0x130C) || 4805 + (adev->pdev->device == 0x130F) || 4806 + (adev->pdev->device == 0x1310) || 4807 + (adev->pdev->device == 0x1311) || 4808 + (adev->pdev->device == 0x131C)) { 4809 + adev->gfx.config.max_cu_per_sh = 8; 4810 + adev->gfx.config.max_backends_per_se = 2; 4811 + } else if ((adev->pdev->device == 0x1309) || 4812 + (adev->pdev->device == 0x130A) || 4813 + (adev->pdev->device == 0x130D) || 4814 + (adev->pdev->device == 0x1313) || 4815 + (adev->pdev->device == 0x131D)) { 4816 + adev->gfx.config.max_cu_per_sh = 6; 4817 + adev->gfx.config.max_backends_per_se = 2; 4818 + } else if ((adev->pdev->device == 0x1306) || 4819 + (adev->pdev->device == 0x1307) || 4820 + (adev->pdev->device == 0x130B) || 4821 + (adev->pdev->device == 0x130E) || 4822 + (adev->pdev->device == 0x1315) || 4823 + (adev->pdev->device == 0x131B)) { 4824 + adev->gfx.config.max_cu_per_sh = 4; 4825 + adev->gfx.config.max_backends_per_se = 1; 4826 + } else { 4827 + adev->gfx.config.max_cu_per_sh = 3; 4828 + adev->gfx.config.max_backends_per_se = 1; 4829 + } 4830 + adev->gfx.config.max_sh_per_se = 1; 4831 + adev->gfx.config.max_texture_channel_caches = 4; 4832 + adev->gfx.config.max_gprs = 256; 4833 + adev->gfx.config.max_gs_threads = 16; 4834 + adev->gfx.config.max_hw_contexts = 8; 4835 + 4836 + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 4837 + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 4838 + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 4839 + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 4840 + gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 4841 + break; 4842 + case CHIP_KABINI: 4843 + case CHIP_MULLINS: 4844 + default: 4845 + adev->gfx.config.max_shader_engines = 1; 4846 + adev->gfx.config.max_tile_pipes = 2; 4847 + adev->gfx.config.max_cu_per_sh = 2; 4848 + adev->gfx.config.max_sh_per_se = 1; 4849 + adev->gfx.config.max_backends_per_se = 1; 4850 + adev->gfx.config.max_texture_channel_caches = 2; 4851 + adev->gfx.config.max_gprs = 256; 4852 + adev->gfx.config.max_gs_threads = 16; 4853 + adev->gfx.config.max_hw_contexts = 8; 4854 + 4855 + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 4856 + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 4857 + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 4858 + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 4859 + gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 4860 + break; 4861 + } 4862 + 4863 + mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); 4864 + adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); 4865 + mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; 4866 + 4867 + adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; 4868 + adev->gfx.config.mem_max_burst_length_bytes = 256; 4869 + if 
(adev->flags & AMD_IS_APU) { 4870 + /* Get memory bank mapping mode. */ 4871 + tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); 4872 + dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); 4873 + dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP); 4874 + 4875 + tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING); 4876 + dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP); 4877 + dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP); 4878 + 4879 + /* Validate settings in case only one DIMM installed. */ 4880 + if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12)) 4881 + dimm00_addr_map = 0; 4882 + if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12)) 4883 + dimm01_addr_map = 0; 4884 + if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12)) 4885 + dimm10_addr_map = 0; 4886 + if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12)) 4887 + dimm11_addr_map = 0; 4888 + 4889 + /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */ 4890 + /* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */ 4891 + if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11)) 4892 + adev->gfx.config.mem_row_size_in_kb = 2; 4893 + else 4894 + adev->gfx.config.mem_row_size_in_kb = 1; 4895 + } else { 4896 + tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT; 4897 + adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; 4898 + if (adev->gfx.config.mem_row_size_in_kb > 4) 4899 + adev->gfx.config.mem_row_size_in_kb = 4; 4900 + } 4901 + /* XXX use MC settings? 
*/ 4902 + adev->gfx.config.shader_engine_tile_size = 32; 4903 + adev->gfx.config.num_gpus = 1; 4904 + adev->gfx.config.multi_gpu_tile_size = 64; 4905 + 4906 + /* fix up row size */ 4907 + gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK; 4908 + switch (adev->gfx.config.mem_row_size_in_kb) { 4909 + case 1: 4910 + default: 4911 + gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); 4912 + break; 4913 + case 2: 4914 + gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); 4915 + break; 4916 + case 4: 4917 + gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); 4918 + break; 4919 + } 4920 + adev->gfx.config.gb_addr_config = gb_addr_config; 4921 + } 4922 + 4169 4923 static int gfx_v7_0_sw_init(void *handle) 4170 4924 { 4171 4925 struct amdgpu_ring *ring; ··· 4435 4857 if (r) 4436 4858 return r; 4437 4859 4860 + adev->gfx.ce_ram_size = 0x8000; 4861 + 4862 + gfx_v7_0_gpu_early_init(adev); 4863 + 4438 4864 return r; 4439 4865 } 4440 4866 ··· 4478 4896 r = gfx_v7_0_cp_resume(adev); 4479 4897 if (r) 4480 4898 return r; 4481 - 4482 - adev->gfx.ce_ram_size = 0x8000; 4483 4899 4484 4900 return r; 4485 4901 } ··· 4595 5015 RREG32(mmHDP_ADDR_CONFIG)); 4596 5016 dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n", 4597 5017 RREG32(mmDMIF_ADDR_CALC)); 4598 - dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n", 4599 - RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET)); 4600 - dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n", 4601 - RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET)); 4602 - dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", 4603 - RREG32(mmUVD_UDEC_ADDR_CONFIG)); 4604 - dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", 4605 - RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); 4606 - dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", 4607 - RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); 4608 5018 4609 5019 dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", 4610 5020 RREG32(mmCP_MEQ_THRESHOLDS)); ··· 5137 5567 .parse_cs = NULL, 5138 5568 .emit_ib = gfx_v7_0_ring_emit_ib_gfx, 5139 5569 .emit_fence = gfx_v7_0_ring_emit_fence_gfx, 5140 - .emit_semaphore = gfx_v7_0_ring_emit_semaphore, 5141 5570 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5142 5571 .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, 5143 5572 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush, 5144 5573 .test_ring = gfx_v7_0_ring_test_ring, 5145 5574 .test_ib = gfx_v7_0_ring_test_ib, 5146 5575 .insert_nop = amdgpu_ring_insert_nop, 5576 + .pad_ib = amdgpu_ring_generic_pad_ib, 5147 5577 }; 5148 5578 5149 5579 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { ··· 5153 5583 .parse_cs = NULL, 5154 5584 .emit_ib = gfx_v7_0_ring_emit_ib_compute, 5155 5585 .emit_fence = gfx_v7_0_ring_emit_fence_compute, 5156 - .emit_semaphore = gfx_v7_0_ring_emit_semaphore, 5157 5586 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5158 5587 .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, 5159 5588 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush, 5160 5589 .test_ring = gfx_v7_0_ring_test_ring, 5161 5590 .test_ib = gfx_v7_0_ring_test_ib, 5162 5591 .insert_nop = amdgpu_ring_insert_nop, 5592 + .pad_ib = amdgpu_ring_generic_pad_ib, 5163 5593 }; 5164 5594 5165 5595 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev) ··· 5229 5659 5230 5660 5231 5661 int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, 5232 - struct amdgpu_cu_info *cu_info) 5662 + struct amdgpu_cu_info *cu_info) 5233 5663 { 5234 5664 int i, j, k, counter, active_cu_number = 0; 5235 5665 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; ··· 5243 5673 mask = 1; 5244 
5674 ao_bitmap = 0; 5245 5675 counter = 0; 5246 - bitmap = gfx_v7_0_get_cu_active_bitmap(adev, i, j); 5676 + gfx_v7_0_select_se_sh(adev, i, j); 5677 + bitmap = gfx_v7_0_get_cu_active_bitmap(adev); 5247 5678 cu_info->bitmap[i][j] = bitmap; 5248 5679 5249 - for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { 5680 + for (k = 0; k < 16; k ++) { 5250 5681 if (bitmap & mask) { 5251 5682 if (counter < 2) 5252 5683 ao_bitmap |= mask; ··· 5259 5688 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); 5260 5689 } 5261 5690 } 5691 + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 5692 + mutex_unlock(&adev->grbm_idx_mutex); 5262 5693 5263 5694 cu_info->number = active_cu_number; 5264 5695 cu_info->ao_cu_mask = ao_cu_mask; 5265 - mutex_unlock(&adev->grbm_idx_mutex); 5696 + 5266 5697 return 0; 5267 5698 }
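
The gfx_v7_0 rework above replaces the loop-built bitmask and the "disabled RB" bookkeeping with a width mask plus a single invert. Below is a minimal standalone sketch of that pattern, not the kernel code: the register values, field mask and shift are made-up stand-ins for CC_RB_BACKEND_DISABLE / GC_USER_RB_BACKEND_DISABLE, and it compiles as ordinary user-space C.

    #include <stdio.h>
    #include <stdint.h>

    /* Build a mask with the low 'bit_width' bits set, e.g. 4 -> 0xF.
     * Using 1ULL avoids undefined behaviour when bit_width == 32. */
    static uint32_t create_bitmask(uint32_t bit_width)
    {
            return (uint32_t)((1ULL << bit_width) - 1);
    }

    int main(void)
    {
            /* Pretend-register values: a set bit means "RB disabled". */
            uint32_t cc_rb_backend_disable   = 0x00010000; /* fused-off RB  */
            uint32_t user_rb_backend_disable = 0x00020000; /* user-disabled */
            uint32_t disable_mask  = 0x00ff0000;           /* BACKEND_DISABLE field */
            uint32_t disable_shift = 16;

            uint32_t rbs_per_sh = 4; /* max_backends_per_se / max_sh_per_se */
            uint32_t data, mask, active;

            data = cc_rb_backend_disable | user_rb_backend_disable;
            data = (data & disable_mask) >> disable_shift;

            mask   = create_bitmask(rbs_per_sh);
            active = ~data & mask;   /* disabled bitmap -> enabled bitmap */

            printf("active RB bitmap: 0x%x\n", (unsigned)active); /* 0xc here */
            return 0;
    }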
+54 -174
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 43 43 #include "gca/gfx_8_0_sh_mask.h" 44 44 #include "gca/gfx_8_0_enum.h" 45 45 46 - #include "uvd/uvd_5_0_d.h" 47 - #include "uvd/uvd_5_0_sh_mask.h" 48 - 49 46 #include "dce/dce_10_0_d.h" 50 47 #include "dce/dce_10_0_sh_mask.h" 51 48 ··· 649 652 return r; 650 653 } 651 654 WREG32(scratch, 0xCAFEDEAD); 652 - r = amdgpu_ring_lock(ring, 3); 655 + r = amdgpu_ring_alloc(ring, 3); 653 656 if (r) { 654 657 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", 655 658 ring->idx, r); ··· 659 662 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 660 663 amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); 661 664 amdgpu_ring_write(ring, 0xDEADBEEF); 662 - amdgpu_ring_unlock_commit(ring); 665 + amdgpu_ring_commit(ring); 663 666 664 667 for (i = 0; i < adev->usec_timeout; i++) { 665 668 tmp = RREG32(scratch); ··· 696 699 } 697 700 WREG32(scratch, 0xCAFEDEAD); 698 701 memset(&ib, 0, sizeof(ib)); 699 - r = amdgpu_ib_get(ring, NULL, 256, &ib); 702 + r = amdgpu_ib_get(adev, NULL, 256, &ib); 700 703 if (r) { 701 704 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 702 705 goto err1; ··· 706 709 ib.ptr[2] = 0xDEADBEEF; 707 710 ib.length_dw = 3; 708 711 709 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, 710 - AMDGPU_FENCE_OWNER_UNDEFINED, 711 - &f); 712 + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, 713 + NULL, &f); 712 714 if (r) 713 715 goto err2; 714 716 ··· 1167 1171 1168 1172 /* allocate an indirect buffer to put the commands in */ 1169 1173 memset(&ib, 0, sizeof(ib)); 1170 - r = amdgpu_ib_get(ring, NULL, total_size, &ib); 1174 + r = amdgpu_ib_get(adev, NULL, total_size, &ib); 1171 1175 if (r) { 1172 1176 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 1173 1177 return r; ··· 1262 1266 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); 1263 1267 1264 1268 /* shedule the ib on the ring */ 1265 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, 1266 - AMDGPU_FENCE_OWNER_UNDEFINED, 1267 - &f); 1269 + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, 1270 + NULL, &f); 1268 1271 if (r) { 1269 1272 DRM_ERROR("amdgpu: ib submit failed (%d).\n", r); 1270 1273 goto fail; ··· 2569 2574 } 2570 2575 } 2571 2576 2572 - static u32 gfx_v8_0_create_bitmask(u32 bit_width) 2573 - { 2574 - return (u32)((1ULL << bit_width) - 1); 2575 - } 2576 - 2577 2577 void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) 2578 2578 { 2579 2579 u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); ··· 2589 2599 WREG32(mmGRBM_GFX_INDEX, data); 2590 2600 } 2591 2601 2592 - static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev, 2593 - u32 max_rb_num_per_se, 2594 - u32 sh_per_se) 2602 + static u32 gfx_v8_0_create_bitmask(u32 bit_width) 2603 + { 2604 + return (u32)((1ULL << bit_width) - 1); 2605 + } 2606 + 2607 + static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev) 2595 2608 { 2596 2609 u32 data, mask; 2597 2610 2598 2611 data = RREG32(mmCC_RB_BACKEND_DISABLE); 2599 - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; 2600 - 2601 2612 data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); 2602 2613 2614 + data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; 2603 2615 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; 2604 2616 2605 - mask = gfx_v8_0_create_bitmask(max_rb_num_per_se / sh_per_se); 2617 + mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se / 2618 + adev->gfx.config.max_sh_per_se); 2606 2619 2607 - return data & mask; 2620 
+ return (~data) & mask; 2608 2621 } 2609 2622 2610 - static void gfx_v8_0_setup_rb(struct amdgpu_device *adev, 2611 - u32 se_num, u32 sh_per_se, 2612 - u32 max_rb_num_per_se) 2623 + static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) 2613 2624 { 2614 2625 int i, j; 2615 - u32 data, mask; 2616 - u32 disabled_rbs = 0; 2617 - u32 enabled_rbs = 0; 2626 + u32 data, tmp, num_rbs = 0; 2627 + u32 active_rbs = 0; 2618 2628 2619 2629 mutex_lock(&adev->grbm_idx_mutex); 2620 - for (i = 0; i < se_num; i++) { 2621 - for (j = 0; j < sh_per_se; j++) { 2630 + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 2631 + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 2622 2632 gfx_v8_0_select_se_sh(adev, i, j); 2623 - data = gfx_v8_0_get_rb_disabled(adev, 2624 - max_rb_num_per_se, sh_per_se); 2625 - disabled_rbs |= data << ((i * sh_per_se + j) * 2626 - RB_BITMAP_WIDTH_PER_SH); 2633 + data = gfx_v8_0_get_rb_active_bitmap(adev); 2634 + active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * 2635 + RB_BITMAP_WIDTH_PER_SH); 2627 2636 } 2628 2637 } 2629 2638 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 2630 2639 mutex_unlock(&adev->grbm_idx_mutex); 2631 2640 2632 - mask = 1; 2633 - for (i = 0; i < max_rb_num_per_se * se_num; i++) { 2634 - if (!(disabled_rbs & mask)) 2635 - enabled_rbs |= mask; 2636 - mask <<= 1; 2637 - } 2638 - 2639 - adev->gfx.config.backend_enable_mask = enabled_rbs; 2640 - 2641 - mutex_lock(&adev->grbm_idx_mutex); 2642 - for (i = 0; i < se_num; i++) { 2643 - gfx_v8_0_select_se_sh(adev, i, 0xffffffff); 2644 - data = RREG32(mmPA_SC_RASTER_CONFIG); 2645 - for (j = 0; j < sh_per_se; j++) { 2646 - switch (enabled_rbs & 3) { 2647 - case 0: 2648 - if (j == 0) 2649 - data |= (RASTER_CONFIG_RB_MAP_3 << 2650 - PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); 2651 - else 2652 - data |= (RASTER_CONFIG_RB_MAP_0 << 2653 - PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); 2654 - break; 2655 - case 1: 2656 - data |= (RASTER_CONFIG_RB_MAP_0 << 2657 - (i * sh_per_se + j) * 2); 2658 - break; 2659 - case 2: 2660 - data |= (RASTER_CONFIG_RB_MAP_3 << 2661 - (i * sh_per_se + j) * 2); 2662 - break; 2663 - case 3: 2664 - default: 2665 - data |= (RASTER_CONFIG_RB_MAP_2 << 2666 - (i * sh_per_se + j) * 2); 2667 - break; 2668 - } 2669 - enabled_rbs >>= 2; 2670 - } 2671 - WREG32(mmPA_SC_RASTER_CONFIG, data); 2672 - } 2673 - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 2674 - mutex_unlock(&adev->grbm_idx_mutex); 2641 + adev->gfx.config.backend_enable_mask = active_rbs; 2642 + tmp = active_rbs; 2643 + while (tmp >>= 1) 2644 + num_rbs++; 2645 + adev->gfx.config.num_rbs = num_rbs; 2675 2646 } 2676 2647 2677 2648 /** ··· 2692 2741 WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 2693 2742 WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 2694 2743 WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); 2695 - WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, 2696 - adev->gfx.config.gb_addr_config & 0x70); 2697 - WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, 2698 - adev->gfx.config.gb_addr_config & 0x70); 2699 - WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 2700 - WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 2701 - WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 2702 2744 2703 2745 gfx_v8_0_tiling_mode_table_init(adev); 2704 2746 2705 - gfx_v8_0_setup_rb(adev, adev->gfx.config.max_shader_engines, 2706 - adev->gfx.config.max_sh_per_se, 2707 - adev->gfx.config.max_backends_per_se); 2747 + 
gfx_v8_0_setup_rb(adev); 2708 2748 2709 2749 /* XXX SH_MEM regs */ 2710 2750 /* where to put LDS, scratch, GPUVM in FSA64 space */ ··· 3004 3062 3005 3063 gfx_v8_0_cp_gfx_enable(adev, true); 3006 3064 3007 - r = amdgpu_ring_lock(ring, gfx_v8_0_get_csb_size(adev) + 4); 3065 + r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4); 3008 3066 if (r) { 3009 3067 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3010 3068 return r; ··· 3068 3126 amdgpu_ring_write(ring, 0x8000); 3069 3127 amdgpu_ring_write(ring, 0x8000); 3070 3128 3071 - amdgpu_ring_unlock_commit(ring); 3129 + amdgpu_ring_commit(ring); 3072 3130 3073 3131 return 0; 3074 3132 } ··· 3166 3224 adev->gfx.compute_ring[i].ready = false; 3167 3225 } 3168 3226 udelay(50); 3169 - } 3170 - 3171 - static int gfx_v8_0_cp_compute_start(struct amdgpu_device *adev) 3172 - { 3173 - gfx_v8_0_cp_compute_enable(adev, true); 3174 - 3175 - return 0; 3176 3227 } 3177 3228 3178 3229 static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) ··· 3737 3802 WREG32(mmCP_PQ_STATUS, tmp); 3738 3803 } 3739 3804 3740 - r = gfx_v8_0_cp_compute_start(adev); 3741 - if (r) 3742 - return r; 3805 + gfx_v8_0_cp_compute_enable(adev, true); 3743 3806 3744 3807 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3745 3808 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; ··· 3949 4016 RREG32(mmHDP_ADDR_CONFIG)); 3950 4017 dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n", 3951 4018 RREG32(mmDMIF_ADDR_CALC)); 3952 - dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n", 3953 - RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET)); 3954 - dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n", 3955 - RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET)); 3956 - dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", 3957 - RREG32(mmUVD_UDEC_ADDR_CONFIG)); 3958 - dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", 3959 - RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); 3960 - dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", 3961 - RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); 3962 4019 3963 4020 dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", 3964 4021 RREG32(mmCP_MEQ_THRESHOLDS)); ··· 4685 4762 4686 4763 } 4687 4764 4688 - /** 4689 - * gfx_v8_0_ring_emit_semaphore - emit a semaphore on the CP ring 4690 - * 4691 - * @ring: amdgpu ring buffer object 4692 - * @semaphore: amdgpu semaphore object 4693 - * @emit_wait: Is this a sempahore wait? 4694 - * 4695 - * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP 4696 - * from running ahead of semaphore waits. 4697 - */ 4698 - static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring, 4699 - struct amdgpu_semaphore *semaphore, 4700 - bool emit_wait) 4701 - { 4702 - uint64_t addr = semaphore->gpu_addr; 4703 - unsigned sel = emit_wait ? 
PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; 4704 - 4705 - if (ring->adev->asic_type == CHIP_TOPAZ || 4706 - ring->adev->asic_type == CHIP_TONGA || 4707 - ring->adev->asic_type == CHIP_FIJI) 4708 - /* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */ 4709 - return false; 4710 - else { 4711 - amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 2)); 4712 - amdgpu_ring_write(ring, lower_32_bits(addr)); 4713 - amdgpu_ring_write(ring, upper_32_bits(addr)); 4714 - amdgpu_ring_write(ring, sel); 4715 - } 4716 - 4717 - if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) { 4718 - /* Prevent the PFP from running ahead of the semaphore wait */ 4719 - amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 4720 - amdgpu_ring_write(ring, 0x0); 4721 - } 4722 - 4723 - return true; 4724 - } 4725 - 4726 4765 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 4727 4766 unsigned vm_id, uint64_t pd_addr) 4728 4767 { 4729 4768 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); 4730 - uint32_t seq = ring->fence_drv.sync_seq[ring->idx]; 4769 + uint32_t seq = ring->fence_drv.sync_seq; 4731 4770 uint64_t addr = ring->fence_drv.gpu_addr; 4732 4771 4733 4772 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); ··· 5030 5145 .parse_cs = NULL, 5031 5146 .emit_ib = gfx_v8_0_ring_emit_ib_gfx, 5032 5147 .emit_fence = gfx_v8_0_ring_emit_fence_gfx, 5033 - .emit_semaphore = gfx_v8_0_ring_emit_semaphore, 5034 5148 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 5035 5149 .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, 5036 5150 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, 5037 5151 .test_ring = gfx_v8_0_ring_test_ring, 5038 5152 .test_ib = gfx_v8_0_ring_test_ib, 5039 5153 .insert_nop = amdgpu_ring_insert_nop, 5154 + .pad_ib = amdgpu_ring_generic_pad_ib, 5040 5155 }; 5041 5156 5042 5157 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { ··· 5046 5161 .parse_cs = NULL, 5047 5162 .emit_ib = gfx_v8_0_ring_emit_ib_compute, 5048 5163 .emit_fence = gfx_v8_0_ring_emit_fence_compute, 5049 - .emit_semaphore = gfx_v8_0_ring_emit_semaphore, 5050 5164 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 5051 5165 .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, 5052 5166 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, 5053 5167 .test_ring = gfx_v8_0_ring_test_ring, 5054 5168 .test_ib = gfx_v8_0_ring_test_ib, 5055 5169 .insert_nop = amdgpu_ring_insert_nop, 5170 + .pad_ib = amdgpu_ring_generic_pad_ib, 5056 5171 }; 5057 5172 5058 5173 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) ··· 5121 5236 } 5122 5237 } 5123 5238 5124 - static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev, 5125 - u32 se, u32 sh) 5239 + static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) 5126 5240 { 5127 - u32 mask = 0, tmp, tmp1; 5128 - int i; 5241 + u32 data, mask; 5129 5242 5130 - gfx_v8_0_select_se_sh(adev, se, sh); 5131 - tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); 5132 - tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); 5133 - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 5243 + data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); 5244 + data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); 5134 5245 5135 - tmp &= 0xffff0000; 5246 + data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 5247 + data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 5136 5248 5137 - tmp |= tmp1; 5138 - tmp >>= 16; 5249 + mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se / 5250 + adev->gfx.config.max_sh_per_se); 5139 5251 5140 - for (i = 0; i < 
adev->gfx.config.max_cu_per_sh; i ++) { 5141 - mask <<= 1; 5142 - mask |= 1; 5143 - } 5144 - 5145 - return (~tmp) & mask; 5252 + return (~data) & mask; 5146 5253 } 5147 5254 5148 5255 int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, 5149 - struct amdgpu_cu_info *cu_info) 5256 + struct amdgpu_cu_info *cu_info) 5150 5257 { 5151 5258 int i, j, k, counter, active_cu_number = 0; 5152 5259 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; ··· 5152 5275 mask = 1; 5153 5276 ao_bitmap = 0; 5154 5277 counter = 0; 5155 - bitmap = gfx_v8_0_get_cu_active_bitmap(adev, i, j); 5278 + gfx_v8_0_select_se_sh(adev, i, j); 5279 + bitmap = gfx_v8_0_get_cu_active_bitmap(adev); 5156 5280 cu_info->bitmap[i][j] = bitmap; 5157 5281 5158 - for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { 5282 + for (k = 0; k < 16; k ++) { 5159 5283 if (bitmap & mask) { 5160 5284 if (counter < 2) 5161 5285 ao_bitmap |= mask; ··· 5168 5290 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); 5169 5291 } 5170 5292 } 5293 + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 5294 + mutex_unlock(&adev->grbm_idx_mutex); 5171 5295 5172 5296 cu_info->number = active_cu_number; 5173 5297 cu_info->ao_cu_mask = ao_cu_mask; 5174 - mutex_unlock(&adev->grbm_idx_mutex); 5298 + 5175 5299 return 0; 5176 5300 }
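
gfx_v8_0_setup_rb() now packs each SE/SH's active-RB bitmap into one word and derives num_rbs from it. The following is a rough user-space sketch of that packing; the two-SE layout, per-SH values and RB_BITMAP_WIDTH_PER_SH are assumptions for illustration, and the set-bit count uses a plain Kernighan popcount rather than the shift loop in the patch.

    #include <stdio.h>
    #include <stdint.h>

    #define RB_BITMAP_WIDTH_PER_SH 2  /* assumed field width per SH */

    int main(void)
    {
            /* Per-SE/SH active-RB bitmaps, as gfx_v8_0_get_rb_active_bitmap()
             * would return them; the values here are made up. */
            uint32_t per_sh[2][1] = { { 0x3 }, { 0x1 } }; /* 2 SEs, 1 SH each */
            uint32_t active_rbs = 0, tmp;
            unsigned num_rbs = 0, i, j;

            for (i = 0; i < 2; i++)
                    for (j = 0; j < 1; j++)
                            active_rbs |= per_sh[i][j] <<
                                    ((i * 1 + j) * RB_BITMAP_WIDTH_PER_SH);

            /* Count enabled RBs by clearing the lowest set bit each pass. */
            tmp = active_rbs;
            while (tmp) {
                    tmp &= tmp - 1;
                    num_rbs++;
            }

            printf("backend_enable_mask=0x%x num_rbs=%u\n",
                   (unsigned)active_rbs, num_rbs);
            return 0;
    }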
+3 -6
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 694 694 * amdgpu graphics/compute will use VMIDs 1-7 695 695 * amdkfd will use VMIDs 8-15 696 696 */ 697 - adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; 697 + adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; 698 + amdgpu_vm_manager_init(adev); 698 699 699 700 /* base offset of vram pages */ 700 701 if (adev->flags & AMD_IS_APU) { ··· 927 926 int dma_bits; 928 927 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 929 928 930 - r = amdgpu_gem_init(adev); 931 - if (r) 932 - return r; 933 - 934 929 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); 935 930 if (r) 936 931 return r; ··· 1007 1010 adev->vm_manager.enabled = false; 1008 1011 } 1009 1012 gmc_v7_0_gart_fini(adev); 1010 - amdgpu_gem_fini(adev); 1013 + amdgpu_gem_force_release(adev); 1011 1014 amdgpu_bo_fini(adev); 1012 1015 1013 1016 return 0;
+9 -6
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 252 252 if (!adev->mc.fw) 253 253 return -EINVAL; 254 254 255 + /* Skip MC ucode loading on SR-IOV capable boards. 256 + * vbios does this for us in asic_init in that case. 257 + */ 258 + if (adev->virtualization.supports_sr_iov) 259 + return 0; 260 + 255 261 hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; 256 262 amdgpu_ucode_print_mc_hdr(&hdr->header); 257 263 ··· 780 774 * amdgpu graphics/compute will use VMIDs 1-7 781 775 * amdkfd will use VMIDs 8-15 782 776 */ 783 - adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; 777 + adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; 778 + amdgpu_vm_manager_init(adev); 784 779 785 780 /* base offset of vram pages */ 786 781 if (adev->flags & AMD_IS_APU) { ··· 887 880 int dma_bits; 888 881 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 889 882 890 - r = amdgpu_gem_init(adev); 891 - if (r) 892 - return r; 893 - 894 883 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); 895 884 if (r) 896 885 return r; ··· 967 964 adev->vm_manager.enabled = false; 968 965 } 969 966 gmc_v8_0_gart_fini(adev); 970 - amdgpu_gem_fini(adev); 967 + amdgpu_gem_force_release(adev); 971 968 amdgpu_bo_fini(adev); 972 969 973 970 return 0;
+6
drivers/gpu/drm/amd/amdgpu/iceland_smc.c
··· 279 279 if (!adev->pm.fw) 280 280 return -EINVAL; 281 281 282 + /* Skip SMC ucode loading on SR-IOV capable boards. 283 + * vbios does this for us in asic_init in that case. 284 + */ 285 + if (adev->virtualization.supports_sr_iov) 286 + return 0; 287 + 282 288 hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; 283 289 amdgpu_ucode_print_smc_hdr(&hdr->header); 284 290
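
Both SMC loaders (and gmc_v8_0's MC loader above) gain the same guard: on SR-IOV capable boards the vbios has already programmed the microcode, so the driver reports success and skips the upload. Below is a toy user-space model of that early return, with an invented struct standing in for amdgpu_device.

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy model only: when the board is SR-IOV capable the ucode was
     * already loaded by the vbios, so return success without touching
     * the hardware. */
    struct dev { bool supports_sr_iov; bool fw_present; };

    static int load_mc_microcode(struct dev *d)
    {
            if (!d->fw_present)
                    return -1;   /* -EINVAL in the kernel */
            if (d->supports_sr_iov)
                    return 0;    /* nothing to do, vbios handled it */

            /* ... the real register writes would go here ... */
            return 0;
    }

    int main(void)
    {
            struct dev d = { .supports_sr_iov = true, .fw_present = true };

            printf("load_mc_microcode -> %d\n", load_mc_microcode(&d));
            return 0;
    }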
+23 -47
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
··· 335 335 } 336 336 337 337 /** 338 - * sdma_v2_4_ring_emit_semaphore - emit a semaphore on the dma ring 339 - * 340 - * @ring: amdgpu_ring structure holding ring information 341 - * @semaphore: amdgpu semaphore object 342 - * @emit_wait: wait or signal semaphore 343 - * 344 - * Add a DMA semaphore packet to the ring wait on or signal 345 - * other rings (VI). 346 - */ 347 - static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring, 348 - struct amdgpu_semaphore *semaphore, 349 - bool emit_wait) 350 - { 351 - u64 addr = semaphore->gpu_addr; 352 - u32 sig = emit_wait ? 0 : 1; 353 - 354 - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) | 355 - SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig)); 356 - amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8); 357 - amdgpu_ring_write(ring, upper_32_bits(addr)); 358 - 359 - return true; 360 - } 361 - 362 - /** 363 338 * sdma_v2_4_gfx_stop - stop the gfx async dma engines 364 339 * 365 340 * @adev: amdgpu_device pointer ··· 433 458 } 434 459 vi_srbm_select(adev, 0, 0, 0, 0); 435 460 mutex_unlock(&adev->srbm_mutex); 461 + 462 + WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i], 463 + adev->gfx.config.gb_addr_config & 0x70); 436 464 437 465 WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); 438 466 ··· 614 636 tmp = 0xCAFEDEAD; 615 637 adev->wb.wb[index] = cpu_to_le32(tmp); 616 638 617 - r = amdgpu_ring_lock(ring, 5); 639 + r = amdgpu_ring_alloc(ring, 5); 618 640 if (r) { 619 641 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); 620 642 amdgpu_wb_free(adev, index); ··· 627 649 amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); 628 650 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); 629 651 amdgpu_ring_write(ring, 0xDEADBEEF); 630 - amdgpu_ring_unlock_commit(ring); 652 + amdgpu_ring_commit(ring); 631 653 632 654 for (i = 0; i < adev->usec_timeout; i++) { 633 655 tmp = le32_to_cpu(adev->wb.wb[index]); ··· 677 699 tmp = 0xCAFEDEAD; 678 700 adev->wb.wb[index] = cpu_to_le32(tmp); 679 701 memset(&ib, 0, sizeof(ib)); 680 - r = amdgpu_ib_get(ring, NULL, 256, &ib); 702 + r = amdgpu_ib_get(adev, NULL, 256, &ib); 681 703 if (r) { 682 704 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 683 705 goto err0; ··· 694 716 ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); 695 717 ib.length_dw = 8; 696 718 697 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, 698 - AMDGPU_FENCE_OWNER_UNDEFINED, 699 - &f); 719 + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, 720 + NULL, &f); 700 721 if (r) 701 722 goto err1; 702 723 ··· 774 797 * Update PTEs by writing them manually using sDMA (CIK). 
775 798 */ 776 799 static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, 777 - uint64_t pe, 800 + const dma_addr_t *pages_addr, uint64_t pe, 778 801 uint64_t addr, unsigned count, 779 802 uint32_t incr, uint32_t flags) 780 803 { ··· 793 816 ib->ptr[ib->length_dw++] = upper_32_bits(pe); 794 817 ib->ptr[ib->length_dw++] = ndw; 795 818 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 796 - if (flags & AMDGPU_PTE_SYSTEM) { 797 - value = amdgpu_vm_map_gart(ib->ring->adev, addr); 798 - value &= 0xFFFFFFFFFFFFF000ULL; 799 - } else if (flags & AMDGPU_PTE_VALID) { 800 - value = addr; 801 - } else { 802 - value = 0; 803 - } 819 + value = amdgpu_vm_map_gart(pages_addr, addr); 804 820 addr += incr; 805 821 value |= flags; 806 822 ib->ptr[ib->length_dw++] = value; ··· 851 881 } 852 882 853 883 /** 854 - * sdma_v2_4_vm_pad_ib - pad the IB to the required number of dw 884 + * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw 855 885 * 856 886 * @ib: indirect buffer to fill with padding 857 887 * 858 888 */ 859 - static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib) 889 + static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) 860 890 { 861 - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring); 891 + struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); 862 892 u32 pad_count; 863 893 int i; 864 894 ··· 1081 1111 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); 1082 1112 dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", 1083 1113 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); 1114 + dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n", 1115 + i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i])); 1084 1116 mutex_lock(&adev->srbm_mutex); 1085 1117 for (j = 0; j < 16; j++) { 1086 1118 vi_srbm_select(adev, 0, 0, 0, j); ··· 1274 1302 .parse_cs = NULL, 1275 1303 .emit_ib = sdma_v2_4_ring_emit_ib, 1276 1304 .emit_fence = sdma_v2_4_ring_emit_fence, 1277 - .emit_semaphore = sdma_v2_4_ring_emit_semaphore, 1278 1305 .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush, 1279 1306 .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush, 1280 1307 .test_ring = sdma_v2_4_ring_test_ring, 1281 1308 .test_ib = sdma_v2_4_ring_test_ib, 1282 1309 .insert_nop = sdma_v2_4_ring_insert_nop, 1310 + .pad_ib = sdma_v2_4_ring_pad_ib, 1283 1311 }; 1284 1312 1285 1313 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) ··· 1377 1405 .copy_pte = sdma_v2_4_vm_copy_pte, 1378 1406 .write_pte = sdma_v2_4_vm_write_pte, 1379 1407 .set_pte_pde = sdma_v2_4_vm_set_pte_pde, 1380 - .pad_ib = sdma_v2_4_vm_pad_ib, 1381 1408 }; 1382 1409 1383 1410 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) 1384 1411 { 1412 + unsigned i; 1413 + 1385 1414 if (adev->vm_manager.vm_pte_funcs == NULL) { 1386 1415 adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; 1387 - adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring; 1388 - adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; 1416 + for (i = 0; i < adev->sdma.num_instances; i++) 1417 + adev->vm_manager.vm_pte_rings[i] = 1418 + &adev->sdma.instance[i].ring; 1419 + 1420 + adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; 1389 1421 } 1390 1422 }
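
The PTE write path now receives the pages_addr table directly and lets amdgpu_vm_map_gart() do the system-page lookup, instead of branching on AMDGPU_PTE_SYSTEM inside each SDMA backend. The sketch below is a rough user-space model of what such a per-page lookup amounts to; it is assumed behaviour rather than the kernel implementation, using an invented map_gart() helper and made-up bus addresses.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    typedef uint64_t dma_addr_t;

    /* Rough model: translate a GPU-visible offset to a bus address by
     * indexing a per-page DMA address table and keeping the in-page
     * offset. */
    static uint64_t map_gart(const dma_addr_t *pages_addr, uint64_t addr)
    {
            uint64_t page = pages_addr[addr >> PAGE_SHIFT];

            return (page & PAGE_MASK) | (addr & ~PAGE_MASK);
    }

    int main(void)
    {
            dma_addr_t pages[2] = { 0x100000, 0x380000 }; /* made-up addresses */

            /* Offset 0x1034 falls in page 1, so this prints 0x380034. */
            printf("0x%llx\n", (unsigned long long)map_gart(pages, 0x1034));
            return 0;
    }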
+23 -48
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
··· 444 444 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); 445 445 } 446 446 447 - 448 - /** 449 - * sdma_v3_0_ring_emit_semaphore - emit a semaphore on the dma ring 450 - * 451 - * @ring: amdgpu_ring structure holding ring information 452 - * @semaphore: amdgpu semaphore object 453 - * @emit_wait: wait or signal semaphore 454 - * 455 - * Add a DMA semaphore packet to the ring wait on or signal 456 - * other rings (VI). 457 - */ 458 - static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring, 459 - struct amdgpu_semaphore *semaphore, 460 - bool emit_wait) 461 - { 462 - u64 addr = semaphore->gpu_addr; 463 - u32 sig = emit_wait ? 0 : 1; 464 - 465 - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) | 466 - SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig)); 467 - amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8); 468 - amdgpu_ring_write(ring, upper_32_bits(addr)); 469 - 470 - return true; 471 - } 472 - 473 447 /** 474 448 * sdma_v3_0_gfx_stop - stop the gfx async dma engines 475 449 * ··· 569 595 } 570 596 vi_srbm_select(adev, 0, 0, 0, 0); 571 597 mutex_unlock(&adev->srbm_mutex); 598 + 599 + WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i], 600 + adev->gfx.config.gb_addr_config & 0x70); 572 601 573 602 WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); 574 603 ··· 765 788 tmp = 0xCAFEDEAD; 766 789 adev->wb.wb[index] = cpu_to_le32(tmp); 767 790 768 - r = amdgpu_ring_lock(ring, 5); 791 + r = amdgpu_ring_alloc(ring, 5); 769 792 if (r) { 770 793 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); 771 794 amdgpu_wb_free(adev, index); ··· 778 801 amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); 779 802 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); 780 803 amdgpu_ring_write(ring, 0xDEADBEEF); 781 - amdgpu_ring_unlock_commit(ring); 804 + amdgpu_ring_commit(ring); 782 805 783 806 for (i = 0; i < adev->usec_timeout; i++) { 784 807 tmp = le32_to_cpu(adev->wb.wb[index]); ··· 828 851 tmp = 0xCAFEDEAD; 829 852 adev->wb.wb[index] = cpu_to_le32(tmp); 830 853 memset(&ib, 0, sizeof(ib)); 831 - r = amdgpu_ib_get(ring, NULL, 256, &ib); 854 + r = amdgpu_ib_get(adev, NULL, 256, &ib); 832 855 if (r) { 833 856 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 834 857 goto err0; ··· 845 868 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); 846 869 ib.length_dw = 8; 847 870 848 - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, 849 - AMDGPU_FENCE_OWNER_UNDEFINED, 850 - &f); 871 + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, 872 + NULL, &f); 851 873 if (r) 852 874 goto err1; 853 875 ··· 924 948 * Update PTEs by writing them manually using sDMA (CIK). 
925 949 */ 926 950 static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, 927 - uint64_t pe, 951 + const dma_addr_t *pages_addr, uint64_t pe, 928 952 uint64_t addr, unsigned count, 929 953 uint32_t incr, uint32_t flags) 930 954 { ··· 943 967 ib->ptr[ib->length_dw++] = upper_32_bits(pe); 944 968 ib->ptr[ib->length_dw++] = ndw; 945 969 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 946 - if (flags & AMDGPU_PTE_SYSTEM) { 947 - value = amdgpu_vm_map_gart(ib->ring->adev, addr); 948 - value &= 0xFFFFFFFFFFFFF000ULL; 949 - } else if (flags & AMDGPU_PTE_VALID) { 950 - value = addr; 951 - } else { 952 - value = 0; 953 - } 970 + value = amdgpu_vm_map_gart(pages_addr, addr); 954 971 addr += incr; 955 972 value |= flags; 956 973 ib->ptr[ib->length_dw++] = value; ··· 1001 1032 } 1002 1033 1003 1034 /** 1004 - * sdma_v3_0_vm_pad_ib - pad the IB to the required number of dw 1035 + * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw 1005 1036 * 1006 1037 * @ib: indirect buffer to fill with padding 1007 1038 * 1008 1039 */ 1009 - static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib) 1040 + static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) 1010 1041 { 1011 - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring); 1042 + struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); 1012 1043 u32 pad_count; 1013 1044 int i; 1014 1045 ··· 1244 1275 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); 1245 1276 dev_info(adev->dev, " SDMA%d_GFX_DOORBELL=0x%08X\n", 1246 1277 i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i])); 1278 + dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n", 1279 + i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i])); 1247 1280 mutex_lock(&adev->srbm_mutex); 1248 1281 for (j = 0; j < 16; j++) { 1249 1282 vi_srbm_select(adev, 0, 0, 0, j); ··· 1541 1570 .parse_cs = NULL, 1542 1571 .emit_ib = sdma_v3_0_ring_emit_ib, 1543 1572 .emit_fence = sdma_v3_0_ring_emit_fence, 1544 - .emit_semaphore = sdma_v3_0_ring_emit_semaphore, 1545 1573 .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush, 1546 1574 .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush, 1547 1575 .test_ring = sdma_v3_0_ring_test_ring, 1548 1576 .test_ib = sdma_v3_0_ring_test_ib, 1549 1577 .insert_nop = sdma_v3_0_ring_insert_nop, 1578 + .pad_ib = sdma_v3_0_ring_pad_ib, 1550 1579 }; 1551 1580 1552 1581 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) ··· 1644 1673 .copy_pte = sdma_v3_0_vm_copy_pte, 1645 1674 .write_pte = sdma_v3_0_vm_write_pte, 1646 1675 .set_pte_pde = sdma_v3_0_vm_set_pte_pde, 1647 - .pad_ib = sdma_v3_0_vm_pad_ib, 1648 1676 }; 1649 1677 1650 1678 static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) 1651 1679 { 1680 + unsigned i; 1681 + 1652 1682 if (adev->vm_manager.vm_pte_funcs == NULL) { 1653 1683 adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; 1654 - adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring; 1655 - adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; 1684 + for (i = 0; i < adev->sdma.num_instances; i++) 1685 + adev->vm_manager.vm_pte_rings[i] = 1686 + &adev->sdma.instance[i].ring; 1687 + 1688 + adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; 1656 1689 } 1657 1690 }
+6
drivers/gpu/drm/amd/amdgpu/tonga_smc.c
··· 272 272 if (!adev->pm.fw) 273 273 return -EINVAL; 274 274 275 + /* Skip SMC ucode loading on SR-IOV capable boards. 276 + * vbios does this for us in asic_init in that case. 277 + */ 278 + if (adev->virtualization.supports_sr_iov) 279 + return 0; 280 + 275 281 hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; 276 282 amdgpu_ucode_print_smc_hdr(&hdr->header); 277 283
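The tonga_smc.c hunk adds an early return so SMC ucode loading is skipped entirely on SR-IOV capable boards, where the vbios already programs the SMC during asic_init. A small self-contained sketch of that guard-clause pattern follows; struct dev_state, load_smc_ucode and both flags are hypothetical names for illustration (the real flag in the hunk is adev->virtualization.supports_sr_iov).

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical device state for this sketch. */
struct dev_state {
    bool fw_present;
    bool supports_sr_iov;
};

/* Early-return pattern from the hunk above: on SR-IOV capable boards the
 * vbios has already loaded the SMC ucode, so report success and do nothing. */
static int load_smc_ucode(const struct dev_state *dev)
{
    if (!dev->fw_present)
        return -1;

    if (dev->supports_sr_iov)
        return 0;

    printf("uploading SMC firmware\n");
    return 0;
}

int main(void)
{
    struct dev_state sriov = { .fw_present = true, .supports_sr_iov = true };
    struct dev_state bare  = { .fw_present = true, .supports_sr_iov = false };

    load_smc_ucode(&sriov);   /* skipped: nothing printed */
    load_smc_ucode(&bare);    /* uploads firmware */
    return 0;
}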
+17 -33
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
··· 164 164 goto done; 165 165 } 166 166 167 - r = amdgpu_ring_lock(ring, 10); 167 + r = amdgpu_ring_alloc(ring, 10); 168 168 if (r) { 169 169 DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); 170 170 goto done; ··· 189 189 amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); 190 190 amdgpu_ring_write(ring, 3); 191 191 192 - amdgpu_ring_unlock_commit(ring); 192 + amdgpu_ring_commit(ring); 193 193 194 194 done: 195 195 /* lower clocks again */ ··· 439 439 } 440 440 441 441 /** 442 - * uvd_v4_2_ring_emit_semaphore - emit semaphore command 443 - * 444 - * @ring: amdgpu_ring pointer 445 - * @semaphore: semaphore to emit commands for 446 - * @emit_wait: true if we should emit a wait command 447 - * 448 - * Emit a semaphore command (either wait or signal) to the UVD ring. 449 - */ 450 - static bool uvd_v4_2_ring_emit_semaphore(struct amdgpu_ring *ring, 451 - struct amdgpu_semaphore *semaphore, 452 - bool emit_wait) 453 - { 454 - uint64_t addr = semaphore->gpu_addr; 455 - 456 - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0)); 457 - amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); 458 - 459 - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0)); 460 - amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); 461 - 462 - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0)); 463 - amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); 464 - 465 - return true; 466 - } 467 - 468 - /** 469 442 * uvd_v4_2_ring_test_ring - register write test 470 443 * 471 444 * @ring: amdgpu_ring pointer ··· 453 480 int r; 454 481 455 482 WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); 456 - r = amdgpu_ring_lock(ring, 3); 483 + r = amdgpu_ring_alloc(ring, 3); 457 484 if (r) { 458 485 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", 459 486 ring->idx, r); ··· 461 488 } 462 489 amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); 463 490 amdgpu_ring_write(ring, 0xDEADBEEF); 464 - amdgpu_ring_unlock_commit(ring); 491 + amdgpu_ring_commit(ring); 465 492 for (i = 0; i < adev->usec_timeout; i++) { 466 493 tmp = RREG32(mmUVD_CONTEXT_ID); 467 494 if (tmp == 0xDEADBEEF) ··· 522 549 goto error; 523 550 } 524 551 525 - r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); 552 + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); 526 553 if (r) { 527 554 DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); 528 555 goto error; ··· 575 602 /* bits 32-39 */ 576 603 addr = (adev->uvd.gpu_addr >> 32) & 0xFF; 577 604 WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); 605 + 606 + WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 607 + WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 608 + WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 578 609 579 610 uvd_v4_2_init_cg(adev); 580 611 } ··· 781 804 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); 782 805 dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", 783 806 RREG32(mmUVD_CONTEXT_ID)); 807 + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", 808 + RREG32(mmUVD_UDEC_ADDR_CONFIG)); 809 + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", 810 + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); 811 + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", 812 + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); 813 + 784 814 } 785 815 786 816 static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev, ··· 866 882 .parse_cs = amdgpu_uvd_ring_parse_cs, 867 883 .emit_ib = uvd_v4_2_ring_emit_ib, 868 884 .emit_fence = uvd_v4_2_ring_emit_fence, 869 - .emit_semaphore = uvd_v4_2_ring_emit_semaphore, 870 885 .test_ring = 
uvd_v4_2_ring_test_ring, 871 886 .test_ib = uvd_v4_2_ring_test_ib, 872 887 .insert_nop = amdgpu_ring_insert_nop, 888 + .pad_ib = amdgpu_ring_generic_pad_ib, 873 889 }; 874 890 875 891 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
+16 -33
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
··· 160 160 goto done; 161 161 } 162 162 163 - r = amdgpu_ring_lock(ring, 10); 163 + r = amdgpu_ring_alloc(ring, 10); 164 164 if (r) { 165 165 DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); 166 166 goto done; ··· 185 185 amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); 186 186 amdgpu_ring_write(ring, 3); 187 187 188 - amdgpu_ring_unlock_commit(ring); 188 + amdgpu_ring_commit(ring); 189 189 190 190 done: 191 191 /* lower clocks again */ ··· 279 279 size = AMDGPU_UVD_HEAP_SIZE; 280 280 WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); 281 281 WREG32(mmUVD_VCPU_CACHE_SIZE2, size); 282 + 283 + WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 284 + WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 285 + WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 282 286 } 283 287 284 288 /** ··· 487 483 } 488 484 489 485 /** 490 - * uvd_v5_0_ring_emit_semaphore - emit semaphore command 491 - * 492 - * @ring: amdgpu_ring pointer 493 - * @semaphore: semaphore to emit commands for 494 - * @emit_wait: true if we should emit a wait command 495 - * 496 - * Emit a semaphore command (either wait or signal) to the UVD ring. 497 - */ 498 - static bool uvd_v5_0_ring_emit_semaphore(struct amdgpu_ring *ring, 499 - struct amdgpu_semaphore *semaphore, 500 - bool emit_wait) 501 - { 502 - uint64_t addr = semaphore->gpu_addr; 503 - 504 - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0)); 505 - amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); 506 - 507 - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0)); 508 - amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); 509 - 510 - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0)); 511 - amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); 512 - 513 - return true; 514 - } 515 - 516 - /** 517 486 * uvd_v5_0_ring_test_ring - register write test 518 487 * 519 488 * @ring: amdgpu_ring pointer ··· 501 524 int r; 502 525 503 526 WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); 504 - r = amdgpu_ring_lock(ring, 3); 527 + r = amdgpu_ring_alloc(ring, 3); 505 528 if (r) { 506 529 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", 507 530 ring->idx, r); ··· 509 532 } 510 533 amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); 511 534 amdgpu_ring_write(ring, 0xDEADBEEF); 512 - amdgpu_ring_unlock_commit(ring); 535 + amdgpu_ring_commit(ring); 513 536 for (i = 0; i < adev->usec_timeout; i++) { 514 537 tmp = RREG32(mmUVD_CONTEXT_ID); 515 538 if (tmp == 0xDEADBEEF) ··· 572 595 goto error; 573 596 } 574 597 575 - r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); 598 + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); 576 599 if (r) { 577 600 DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); 578 601 goto error; ··· 728 751 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); 729 752 dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", 730 753 RREG32(mmUVD_CONTEXT_ID)); 754 + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", 755 + RREG32(mmUVD_UDEC_ADDR_CONFIG)); 756 + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", 757 + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); 758 + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", 759 + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); 731 760 } 732 761 733 762 static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev, ··· 804 821 .parse_cs = amdgpu_uvd_ring_parse_cs, 805 822 .emit_ib = uvd_v5_0_ring_emit_ib, 806 823 .emit_fence = uvd_v5_0_ring_emit_fence, 807 - .emit_semaphore = uvd_v5_0_ring_emit_semaphore, 808 824 .test_ring = uvd_v5_0_ring_test_ring, 809 825 
.test_ib = uvd_v5_0_ring_test_ib, 810 826 .insert_nop = amdgpu_ring_insert_nop, 827 + .pad_ib = amdgpu_ring_generic_pad_ib, 811 828 }; 812 829 813 830 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
+16 -33
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
··· 157 157 goto done; 158 158 } 159 159 160 - r = amdgpu_ring_lock(ring, 10); 160 + r = amdgpu_ring_alloc(ring, 10); 161 161 if (r) { 162 162 DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); 163 163 goto done; ··· 182 182 amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); 183 183 amdgpu_ring_write(ring, 3); 184 184 185 - amdgpu_ring_unlock_commit(ring); 185 + amdgpu_ring_commit(ring); 186 186 187 187 done: 188 188 if (!r) ··· 277 277 size = AMDGPU_UVD_HEAP_SIZE; 278 278 WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); 279 279 WREG32(mmUVD_VCPU_CACHE_SIZE2, size); 280 + 281 + WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 282 + WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 283 + WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 280 284 } 281 285 282 286 static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, ··· 726 722 } 727 723 728 724 /** 729 - * uvd_v6_0_ring_emit_semaphore - emit semaphore command 730 - * 731 - * @ring: amdgpu_ring pointer 732 - * @semaphore: semaphore to emit commands for 733 - * @emit_wait: true if we should emit a wait command 734 - * 735 - * Emit a semaphore command (either wait or signal) to the UVD ring. 736 - */ 737 - static bool uvd_v6_0_ring_emit_semaphore(struct amdgpu_ring *ring, 738 - struct amdgpu_semaphore *semaphore, 739 - bool emit_wait) 740 - { 741 - uint64_t addr = semaphore->gpu_addr; 742 - 743 - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0)); 744 - amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); 745 - 746 - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0)); 747 - amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); 748 - 749 - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0)); 750 - amdgpu_ring_write(ring, 0x80 | (emit_wait ? 
1 : 0)); 751 - 752 - return true; 753 - } 754 - 755 - /** 756 725 * uvd_v6_0_ring_test_ring - register write test 757 726 * 758 727 * @ring: amdgpu_ring pointer ··· 740 763 int r; 741 764 742 765 WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); 743 - r = amdgpu_ring_lock(ring, 3); 766 + r = amdgpu_ring_alloc(ring, 3); 744 767 if (r) { 745 768 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", 746 769 ring->idx, r); ··· 748 771 } 749 772 amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); 750 773 amdgpu_ring_write(ring, 0xDEADBEEF); 751 - amdgpu_ring_unlock_commit(ring); 774 + amdgpu_ring_commit(ring); 752 775 for (i = 0; i < adev->usec_timeout; i++) { 753 776 tmp = RREG32(mmUVD_CONTEXT_ID); 754 777 if (tmp == 0xDEADBEEF) ··· 804 827 goto error; 805 828 } 806 829 807 - r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); 830 + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); 808 831 if (r) { 809 832 DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); 810 833 goto error; ··· 951 974 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); 952 975 dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", 953 976 RREG32(mmUVD_CONTEXT_ID)); 977 + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", 978 + RREG32(mmUVD_UDEC_ADDR_CONFIG)); 979 + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", 980 + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); 981 + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", 982 + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); 954 983 } 955 984 956 985 static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev, ··· 1045 1062 .parse_cs = amdgpu_uvd_ring_parse_cs, 1046 1063 .emit_ib = uvd_v6_0_ring_emit_ib, 1047 1064 .emit_fence = uvd_v6_0_ring_emit_fence, 1048 - .emit_semaphore = uvd_v6_0_ring_emit_semaphore, 1049 1065 .test_ring = uvd_v6_0_ring_test_ring, 1050 1066 .test_ib = uvd_v6_0_ring_test_ib, 1051 1067 .insert_nop = amdgpu_ring_insert_nop, 1068 + .pad_ib = amdgpu_ring_generic_pad_ib, 1052 1069 }; 1053 1070 1054 1071 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
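Across the three UVD diffs above the pattern is the same: the per-engine emit_semaphore callbacks are deleted and every ring funcs table instead gains a shared .pad_ib = amdgpu_ring_generic_pad_ib entry. The following is a minimal, self-contained C sketch of that ops-table style, with hypothetical names (struct ring_funcs, generic_pad_ib, uvd_ring_funcs) standing in for the kernel structures; it only illustrates how one generic default can be plugged into many per-engine tables.

#include <stdio.h>

struct ring;                                    /* opaque in this sketch */

/* Callback table analogous to the ring funcs tables in the diffs above:
 * the per-engine semaphore hooks go away and a shared padding helper is
 * wired into each table instead. */
struct ring_funcs {
    void (*pad_ib)(struct ring *ring, unsigned dw);
};

/* Hypothetical generic default shared by engines with no special needs. */
static void generic_pad_ib(struct ring *ring, unsigned dw)
{
    (void)ring;
    printf("padding IB with %u NOPs\n", dw);
}

static const struct ring_funcs uvd_ring_funcs = {
    .pad_ib = generic_pad_ib,                   /* one default, many engines */
};

int main(void)
{
    uvd_ring_funcs.pad_ib(NULL, 3);
    return 0;
}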
+1 -1
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
··· 639 639 .parse_cs = amdgpu_vce_ring_parse_cs, 640 640 .emit_ib = amdgpu_vce_ring_emit_ib, 641 641 .emit_fence = amdgpu_vce_ring_emit_fence, 642 - .emit_semaphore = amdgpu_vce_ring_emit_semaphore, 643 642 .test_ring = amdgpu_vce_ring_test_ring, 644 643 .test_ib = amdgpu_vce_ring_test_ib, 645 644 .insert_nop = amdgpu_ring_insert_nop, 645 + .pad_ib = amdgpu_ring_generic_pad_ib, 646 646 }; 647 647 648 648 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
+1 -1
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 759 759 .parse_cs = amdgpu_vce_ring_parse_cs, 760 760 .emit_ib = amdgpu_vce_ring_emit_ib, 761 761 .emit_fence = amdgpu_vce_ring_emit_fence, 762 - .emit_semaphore = amdgpu_vce_ring_emit_semaphore, 763 762 .test_ring = amdgpu_vce_ring_test_ring, 764 763 .test_ib = amdgpu_vce_ring_test_ib, 765 764 .insert_nop = amdgpu_ring_insert_nop, 765 + .pad_ib = amdgpu_ring_generic_pad_ib, 766 766 }; 767 767 768 768 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
+16 -381
drivers/gpu/drm/amd/amdgpu/vi.c
··· 74 74 #include "uvd_v6_0.h" 75 75 #include "vce_v3_0.h" 76 76 #include "amdgpu_powerplay.h" 77 + #if defined(CONFIG_DRM_AMD_ACP) 78 + #include "amdgpu_acp.h" 79 + #endif 77 80 78 81 /* 79 82 * Indirect registers accessor ··· 574 571 return -EINVAL; 575 572 } 576 573 577 - static void vi_print_gpu_status_regs(struct amdgpu_device *adev) 578 - { 579 - dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", 580 - RREG32(mmGRBM_STATUS)); 581 - dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", 582 - RREG32(mmGRBM_STATUS2)); 583 - dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", 584 - RREG32(mmGRBM_STATUS_SE0)); 585 - dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", 586 - RREG32(mmGRBM_STATUS_SE1)); 587 - dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", 588 - RREG32(mmGRBM_STATUS_SE2)); 589 - dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", 590 - RREG32(mmGRBM_STATUS_SE3)); 591 - dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", 592 - RREG32(mmSRBM_STATUS)); 593 - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", 594 - RREG32(mmSRBM_STATUS2)); 595 - dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n", 596 - RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET)); 597 - if (adev->sdma.num_instances > 1) { 598 - dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n", 599 - RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET)); 600 - } 601 - dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); 602 - dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", 603 - RREG32(mmCP_STALLED_STAT1)); 604 - dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", 605 - RREG32(mmCP_STALLED_STAT2)); 606 - dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", 607 - RREG32(mmCP_STALLED_STAT3)); 608 - dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", 609 - RREG32(mmCP_CPF_BUSY_STAT)); 610 - dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", 611 - RREG32(mmCP_CPF_STALLED_STAT1)); 612 - dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); 613 - dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); 614 - dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", 615 - RREG32(mmCP_CPC_STALLED_STAT1)); 616 - dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); 617 - } 618 - 619 - /** 620 - * vi_gpu_check_soft_reset - check which blocks are busy 621 - * 622 - * @adev: amdgpu_device pointer 623 - * 624 - * Check which blocks are busy and return the relevant reset 625 - * mask to be used by vi_gpu_soft_reset(). 626 - * Returns a mask of the blocks to be reset. 
627 - */ 628 - u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev) 629 - { 630 - u32 reset_mask = 0; 631 - u32 tmp; 632 - 633 - /* GRBM_STATUS */ 634 - tmp = RREG32(mmGRBM_STATUS); 635 - if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | 636 - GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | 637 - GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | 638 - GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | 639 - GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | 640 - GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) 641 - reset_mask |= AMDGPU_RESET_GFX; 642 - 643 - if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) 644 - reset_mask |= AMDGPU_RESET_CP; 645 - 646 - /* GRBM_STATUS2 */ 647 - tmp = RREG32(mmGRBM_STATUS2); 648 - if (tmp & GRBM_STATUS2__RLC_BUSY_MASK) 649 - reset_mask |= AMDGPU_RESET_RLC; 650 - 651 - if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK | 652 - GRBM_STATUS2__CPC_BUSY_MASK | 653 - GRBM_STATUS2__CPG_BUSY_MASK)) 654 - reset_mask |= AMDGPU_RESET_CP; 655 - 656 - /* SRBM_STATUS2 */ 657 - tmp = RREG32(mmSRBM_STATUS2); 658 - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) 659 - reset_mask |= AMDGPU_RESET_DMA; 660 - 661 - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) 662 - reset_mask |= AMDGPU_RESET_DMA1; 663 - 664 - /* SRBM_STATUS */ 665 - tmp = RREG32(mmSRBM_STATUS); 666 - 667 - if (tmp & SRBM_STATUS__IH_BUSY_MASK) 668 - reset_mask |= AMDGPU_RESET_IH; 669 - 670 - if (tmp & SRBM_STATUS__SEM_BUSY_MASK) 671 - reset_mask |= AMDGPU_RESET_SEM; 672 - 673 - if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK) 674 - reset_mask |= AMDGPU_RESET_GRBM; 675 - 676 - if (adev->asic_type != CHIP_TOPAZ) { 677 - if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK | 678 - SRBM_STATUS__UVD_BUSY_MASK)) 679 - reset_mask |= AMDGPU_RESET_UVD; 680 - } 681 - 682 - if (tmp & SRBM_STATUS__VMC_BUSY_MASK) 683 - reset_mask |= AMDGPU_RESET_VMC; 684 - 685 - if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | 686 - SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) 687 - reset_mask |= AMDGPU_RESET_MC; 688 - 689 - /* SDMA0_STATUS_REG */ 690 - tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET); 691 - if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) 692 - reset_mask |= AMDGPU_RESET_DMA; 693 - 694 - /* SDMA1_STATUS_REG */ 695 - if (adev->sdma.num_instances > 1) { 696 - tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET); 697 - if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) 698 - reset_mask |= AMDGPU_RESET_DMA1; 699 - } 700 - #if 0 701 - /* VCE_STATUS */ 702 - if (adev->asic_type != CHIP_TOPAZ) { 703 - tmp = RREG32(mmVCE_STATUS); 704 - if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK) 705 - reset_mask |= AMDGPU_RESET_VCE; 706 - if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK) 707 - reset_mask |= AMDGPU_RESET_VCE1; 708 - 709 - } 710 - 711 - if (adev->asic_type != CHIP_TOPAZ) { 712 - if (amdgpu_display_is_display_hung(adev)) 713 - reset_mask |= AMDGPU_RESET_DISPLAY; 714 - } 715 - #endif 716 - 717 - /* Skip MC reset as it's mostly likely not hung, just busy */ 718 - if (reset_mask & AMDGPU_RESET_MC) { 719 - DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); 720 - reset_mask &= ~AMDGPU_RESET_MC; 721 - } 722 - 723 - return reset_mask; 724 - } 725 - 726 - /** 727 - * vi_gpu_soft_reset - soft reset GPU 728 - * 729 - * @adev: amdgpu_device pointer 730 - * @reset_mask: mask of which blocks to reset 731 - * 732 - * Soft reset the blocks specified in @reset_mask. 
733 - */ 734 - static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) 735 - { 736 - struct amdgpu_mode_mc_save save; 737 - u32 grbm_soft_reset = 0, srbm_soft_reset = 0; 738 - u32 tmp; 739 - 740 - if (reset_mask == 0) 741 - return; 742 - 743 - dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask); 744 - 745 - vi_print_gpu_status_regs(adev); 746 - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 747 - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); 748 - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 749 - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); 750 - 751 - /* disable CG/PG */ 752 - 753 - /* stop the rlc */ 754 - //XXX 755 - //gfx_v8_0_rlc_stop(adev); 756 - 757 - /* Disable GFX parsing/prefetching */ 758 - tmp = RREG32(mmCP_ME_CNTL); 759 - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); 760 - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); 761 - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); 762 - WREG32(mmCP_ME_CNTL, tmp); 763 - 764 - /* Disable MEC parsing/prefetching */ 765 - tmp = RREG32(mmCP_MEC_CNTL); 766 - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); 767 - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); 768 - WREG32(mmCP_MEC_CNTL, tmp); 769 - 770 - if (reset_mask & AMDGPU_RESET_DMA) { 771 - /* sdma0 */ 772 - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); 773 - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); 774 - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); 775 - } 776 - if (reset_mask & AMDGPU_RESET_DMA1) { 777 - /* sdma1 */ 778 - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); 779 - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); 780 - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); 781 - } 782 - 783 - gmc_v8_0_mc_stop(adev, &save); 784 - if (amdgpu_asic_wait_for_mc_idle(adev)) { 785 - dev_warn(adev->dev, "Wait for MC idle timedout !\n"); 786 - } 787 - 788 - if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) { 789 - grbm_soft_reset = 790 - REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); 791 - grbm_soft_reset = 792 - REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); 793 - } 794 - 795 - if (reset_mask & AMDGPU_RESET_CP) { 796 - grbm_soft_reset = 797 - REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); 798 - srbm_soft_reset = 799 - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); 800 - } 801 - 802 - if (reset_mask & AMDGPU_RESET_DMA) 803 - srbm_soft_reset = 804 - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1); 805 - 806 - if (reset_mask & AMDGPU_RESET_DMA1) 807 - srbm_soft_reset = 808 - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1); 809 - 810 - if (reset_mask & AMDGPU_RESET_DISPLAY) 811 - srbm_soft_reset = 812 - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1); 813 - 814 - if (reset_mask & AMDGPU_RESET_RLC) 815 - grbm_soft_reset = 816 - REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 817 - 818 - if (reset_mask & AMDGPU_RESET_SEM) 819 - srbm_soft_reset = 820 - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1); 821 - 822 - if (reset_mask & AMDGPU_RESET_IH) 823 - srbm_soft_reset = 824 - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1); 825 - 826 - if (reset_mask & AMDGPU_RESET_GRBM) 827 - srbm_soft_reset = 828 - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); 829 - 830 - if (reset_mask & AMDGPU_RESET_VMC) 831 - 
srbm_soft_reset = 832 - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1); 833 - 834 - if (reset_mask & AMDGPU_RESET_UVD) 835 - srbm_soft_reset = 836 - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); 837 - 838 - if (reset_mask & AMDGPU_RESET_VCE) 839 - srbm_soft_reset = 840 - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); 841 - 842 - if (reset_mask & AMDGPU_RESET_VCE) 843 - srbm_soft_reset = 844 - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); 845 - 846 - if (!(adev->flags & AMD_IS_APU)) { 847 - if (reset_mask & AMDGPU_RESET_MC) 848 - srbm_soft_reset = 849 - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); 850 - } 851 - 852 - if (grbm_soft_reset) { 853 - tmp = RREG32(mmGRBM_SOFT_RESET); 854 - tmp |= grbm_soft_reset; 855 - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); 856 - WREG32(mmGRBM_SOFT_RESET, tmp); 857 - tmp = RREG32(mmGRBM_SOFT_RESET); 858 - 859 - udelay(50); 860 - 861 - tmp &= ~grbm_soft_reset; 862 - WREG32(mmGRBM_SOFT_RESET, tmp); 863 - tmp = RREG32(mmGRBM_SOFT_RESET); 864 - } 865 - 866 - if (srbm_soft_reset) { 867 - tmp = RREG32(mmSRBM_SOFT_RESET); 868 - tmp |= srbm_soft_reset; 869 - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 870 - WREG32(mmSRBM_SOFT_RESET, tmp); 871 - tmp = RREG32(mmSRBM_SOFT_RESET); 872 - 873 - udelay(50); 874 - 875 - tmp &= ~srbm_soft_reset; 876 - WREG32(mmSRBM_SOFT_RESET, tmp); 877 - tmp = RREG32(mmSRBM_SOFT_RESET); 878 - } 879 - 880 - /* Wait a little for things to settle down */ 881 - udelay(50); 882 - 883 - gmc_v8_0_mc_resume(adev, &save); 884 - udelay(50); 885 - 886 - vi_print_gpu_status_regs(adev); 887 - } 888 - 889 574 static void vi_gpu_pci_config_reset(struct amdgpu_device *adev) 890 575 { 891 - struct amdgpu_mode_mc_save save; 892 - u32 tmp, i; 576 + u32 i; 893 577 894 578 dev_info(adev->dev, "GPU pci config reset\n"); 895 - 896 - /* disable dpm? */ 897 - 898 - /* disable cg/pg */ 899 - 900 - /* Disable GFX parsing/prefetching */ 901 - tmp = RREG32(mmCP_ME_CNTL); 902 - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); 903 - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); 904 - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); 905 - WREG32(mmCP_ME_CNTL, tmp); 906 - 907 - /* Disable MEC parsing/prefetching */ 908 - tmp = RREG32(mmCP_MEC_CNTL); 909 - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); 910 - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); 911 - WREG32(mmCP_MEC_CNTL, tmp); 912 - 913 - /* Disable GFX parsing/prefetching */ 914 - WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | 915 - CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); 916 - 917 - /* Disable MEC parsing/prefetching */ 918 - WREG32(mmCP_MEC_CNTL, 919 - CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); 920 - 921 - /* sdma0 */ 922 - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); 923 - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); 924 - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); 925 - 926 - /* sdma1 */ 927 - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); 928 - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); 929 - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); 930 - 931 - /* XXX other engines? 
*/ 932 - 933 - /* halt the rlc, disable cp internal ints */ 934 - //XXX 935 - //gfx_v8_0_rlc_stop(adev); 936 - 937 - udelay(50); 938 - 939 - /* disable mem access */ 940 - gmc_v8_0_mc_stop(adev, &save); 941 - if (amdgpu_asic_wait_for_mc_idle(adev)) { 942 - dev_warn(adev->dev, "Wait for MC idle timed out !\n"); 943 - } 944 579 945 580 /* disable BM */ 946 581 pci_clear_master(adev->pdev); ··· 619 978 */ 620 979 static int vi_asic_reset(struct amdgpu_device *adev) 621 980 { 622 - u32 reset_mask; 981 + vi_set_bios_scratch_engine_hung(adev, true); 623 982 624 - reset_mask = vi_gpu_check_soft_reset(adev); 983 + vi_gpu_pci_config_reset(adev); 625 984 626 - if (reset_mask) 627 - vi_set_bios_scratch_engine_hung(adev, true); 628 - 629 - /* try soft reset */ 630 - vi_gpu_soft_reset(adev, reset_mask); 631 - 632 - reset_mask = vi_gpu_check_soft_reset(adev); 633 - 634 - /* try pci config reset */ 635 - if (reset_mask && amdgpu_hard_reset) 636 - vi_gpu_pci_config_reset(adev); 637 - 638 - reset_mask = vi_gpu_check_soft_reset(adev); 639 - 640 - if (!reset_mask) 641 - vi_set_bios_scratch_engine_hung(adev, false); 985 + vi_set_bios_scratch_engine_hung(adev, false); 642 986 643 987 return 0; 644 988 } ··· 973 1347 .rev = 0, 974 1348 .funcs = &vce_v3_0_ip_funcs, 975 1349 }, 1350 + #if defined(CONFIG_DRM_AMD_ACP) 1351 + { 1352 + .type = AMD_IP_BLOCK_TYPE_ACP, 1353 + .major = 2, 1354 + .minor = 2, 1355 + .rev = 0, 1356 + .funcs = &acp_ip_funcs, 1357 + }, 1358 + #endif 976 1359 }; 977 1360 978 1361 int vi_set_ip_blocks(struct amdgpu_device *adev)
+1
drivers/gpu/drm/amd/include/amd_shared.h
··· 73 73 AMD_IP_BLOCK_TYPE_SDMA, 74 74 AMD_IP_BLOCK_TYPE_UVD, 75 75 AMD_IP_BLOCK_TYPE_VCE, 76 + AMD_IP_BLOCK_TYPE_ACP, 76 77 }; 77 78 78 79 enum amd_clockgating_state {
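Together with the vi.c hunk earlier, this enum addition lets the ACP block be registered like any other IP block: vi.c appends an ACP entry to its block list, guarded by CONFIG_DRM_AMD_ACP. Below is a simplified, self-contained analogue of that versioned descriptor table; the enum values, struct fields and table contents here are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>

/* Simplified analogue of the IP block list touched above; names are
 * stand-ins for the amd_ip_block_type enum and amdgpu block tables. */
enum ip_block_type { IP_BLOCK_UVD, IP_BLOCK_VCE, IP_BLOCK_ACP };

struct ip_block {
    enum ip_block_type type;
    int major, minor;
};

static const struct ip_block vi_blocks[] = {
    { IP_BLOCK_UVD, 6, 0 },
    { IP_BLOCK_VCE, 3, 0 },
#if defined(CONFIG_DRM_AMD_ACP)                 /* same guard as the diff */
    { IP_BLOCK_ACP, 2, 2 },
#endif
};

int main(void)
{
    printf("%zu IP blocks registered\n",
           sizeof(vi_blocks) / sizeof(vi_blocks[0]));
    return 0;
}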
+1
drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h
··· 1379 1379 #define mmDC_GPIO_PAD_STRENGTH_1 0x1978 1380 1380 #define mmDC_GPIO_PAD_STRENGTH_2 0x1979 1381 1381 #define mmPHY_AUX_CNTL 0x197f 1382 + #define mmDC_GPIO_I2CPAD_MASK 0x1974 1382 1383 #define mmDC_GPIO_I2CPAD_A 0x1975 1383 1384 #define mmDC_GPIO_I2CPAD_EN 0x1976 1384 1385 #define mmDC_GPIO_I2CPAD_Y 0x1977
+1117
drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h
··· 1 + /* 2 + * DCE_8_0 Register documentation 3 + * 4 + * Copyright (C) 2016 Advanced Micro Devices, Inc. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the "Software"), 8 + * to deal in the Software without restriction, including without limitation 9 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 + * and/or sell copies of the Software, and to permit persons to whom the 11 + * Software is furnished to do so, subject to the following conditions: 12 + * 13 + * The above copyright notice and this permission notice shall be included 14 + * in all copies or substantial portions of the Software. 15 + * 16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 17 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 20 + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 21 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 22 + */ 23 + 24 + #ifndef DCE_8_0_ENUM_H 25 + #define DCE_8_0_ENUM_H 26 + 27 + typedef enum SurfaceEndian { 28 + ENDIAN_NONE = 0x0, 29 + ENDIAN_8IN16 = 0x1, 30 + ENDIAN_8IN32 = 0x2, 31 + ENDIAN_8IN64 = 0x3, 32 + } SurfaceEndian; 33 + typedef enum ArrayMode { 34 + ARRAY_LINEAR_GENERAL = 0x0, 35 + ARRAY_LINEAR_ALIGNED = 0x1, 36 + ARRAY_1D_TILED_THIN1 = 0x2, 37 + ARRAY_1D_TILED_THICK = 0x3, 38 + ARRAY_2D_TILED_THIN1 = 0x4, 39 + ARRAY_PRT_TILED_THIN1 = 0x5, 40 + ARRAY_PRT_2D_TILED_THIN1 = 0x6, 41 + ARRAY_2D_TILED_THICK = 0x7, 42 + ARRAY_2D_TILED_XTHICK = 0x8, 43 + ARRAY_PRT_TILED_THICK = 0x9, 44 + ARRAY_PRT_2D_TILED_THICK = 0xa, 45 + ARRAY_PRT_3D_TILED_THIN1 = 0xb, 46 + ARRAY_3D_TILED_THIN1 = 0xc, 47 + ARRAY_3D_TILED_THICK = 0xd, 48 + ARRAY_3D_TILED_XTHICK = 0xe, 49 + ARRAY_PRT_3D_TILED_THICK = 0xf, 50 + } ArrayMode; 51 + typedef enum PipeTiling { 52 + CONFIG_1_PIPE = 0x0, 53 + CONFIG_2_PIPE = 0x1, 54 + CONFIG_4_PIPE = 0x2, 55 + CONFIG_8_PIPE = 0x3, 56 + } PipeTiling; 57 + typedef enum BankTiling { 58 + CONFIG_4_BANK = 0x0, 59 + CONFIG_8_BANK = 0x1, 60 + } BankTiling; 61 + typedef enum GroupInterleave { 62 + CONFIG_256B_GROUP = 0x0, 63 + CONFIG_512B_GROUP = 0x1, 64 + } GroupInterleave; 65 + typedef enum RowTiling { 66 + CONFIG_1KB_ROW = 0x0, 67 + CONFIG_2KB_ROW = 0x1, 68 + CONFIG_4KB_ROW = 0x2, 69 + CONFIG_8KB_ROW = 0x3, 70 + CONFIG_1KB_ROW_OPT = 0x4, 71 + CONFIG_2KB_ROW_OPT = 0x5, 72 + CONFIG_4KB_ROW_OPT = 0x6, 73 + CONFIG_8KB_ROW_OPT = 0x7, 74 + } RowTiling; 75 + typedef enum BankSwapBytes { 76 + CONFIG_128B_SWAPS = 0x0, 77 + CONFIG_256B_SWAPS = 0x1, 78 + CONFIG_512B_SWAPS = 0x2, 79 + CONFIG_1KB_SWAPS = 0x3, 80 + } BankSwapBytes; 81 + typedef enum SampleSplitBytes { 82 + CONFIG_1KB_SPLIT = 0x0, 83 + CONFIG_2KB_SPLIT = 0x1, 84 + CONFIG_4KB_SPLIT = 0x2, 85 + CONFIG_8KB_SPLIT = 0x3, 86 + } SampleSplitBytes; 87 + typedef enum NumPipes { 88 + ADDR_CONFIG_1_PIPE = 0x0, 89 + ADDR_CONFIG_2_PIPE = 0x1, 90 + ADDR_CONFIG_4_PIPE = 0x2, 91 + ADDR_CONFIG_8_PIPE = 0x3, 92 + } NumPipes; 93 + typedef enum PipeInterleaveSize { 94 + ADDR_CONFIG_PIPE_INTERLEAVE_256B = 0x0, 95 + ADDR_CONFIG_PIPE_INTERLEAVE_512B = 0x1, 96 + } PipeInterleaveSize; 97 + typedef enum BankInterleaveSize { 98 + ADDR_CONFIG_BANK_INTERLEAVE_1 = 0x0, 99 + ADDR_CONFIG_BANK_INTERLEAVE_2 = 0x1, 100 + ADDR_CONFIG_BANK_INTERLEAVE_4 = 
0x2, 101 + ADDR_CONFIG_BANK_INTERLEAVE_8 = 0x3, 102 + } BankInterleaveSize; 103 + typedef enum NumShaderEngines { 104 + ADDR_CONFIG_1_SHADER_ENGINE = 0x0, 105 + ADDR_CONFIG_2_SHADER_ENGINE = 0x1, 106 + } NumShaderEngines; 107 + typedef enum ShaderEngineTileSize { 108 + ADDR_CONFIG_SE_TILE_16 = 0x0, 109 + ADDR_CONFIG_SE_TILE_32 = 0x1, 110 + } ShaderEngineTileSize; 111 + typedef enum NumGPUs { 112 + ADDR_CONFIG_1_GPU = 0x0, 113 + ADDR_CONFIG_2_GPU = 0x1, 114 + ADDR_CONFIG_4_GPU = 0x2, 115 + } NumGPUs; 116 + typedef enum MultiGPUTileSize { 117 + ADDR_CONFIG_GPU_TILE_16 = 0x0, 118 + ADDR_CONFIG_GPU_TILE_32 = 0x1, 119 + ADDR_CONFIG_GPU_TILE_64 = 0x2, 120 + ADDR_CONFIG_GPU_TILE_128 = 0x3, 121 + } MultiGPUTileSize; 122 + typedef enum RowSize { 123 + ADDR_CONFIG_1KB_ROW = 0x0, 124 + ADDR_CONFIG_2KB_ROW = 0x1, 125 + ADDR_CONFIG_4KB_ROW = 0x2, 126 + } RowSize; 127 + typedef enum NumLowerPipes { 128 + ADDR_CONFIG_1_LOWER_PIPES = 0x0, 129 + ADDR_CONFIG_2_LOWER_PIPES = 0x1, 130 + } NumLowerPipes; 131 + typedef enum DebugBlockId { 132 + DBG_CLIENT_BLKID_RESERVED = 0x0, 133 + DBG_CLIENT_BLKID_dbg = 0x1, 134 + DBG_CLIENT_BLKID_uvdu_0 = 0x2, 135 + DBG_CLIENT_BLKID_uvdu_1 = 0x3, 136 + DBG_CLIENT_BLKID_uvdu_2 = 0x4, 137 + DBG_CLIENT_BLKID_uvdu_3 = 0x5, 138 + DBG_CLIENT_BLKID_uvdu_4 = 0x6, 139 + DBG_CLIENT_BLKID_uvdu_5 = 0x7, 140 + DBG_CLIENT_BLKID_uvdu_6 = 0x8, 141 + DBG_CLIENT_BLKID_uvdm_0 = 0x9, 142 + DBG_CLIENT_BLKID_uvdm_1 = 0xa, 143 + DBG_CLIENT_BLKID_uvdm_2 = 0xb, 144 + DBG_CLIENT_BLKID_uvdm_3 = 0xc, 145 + DBG_CLIENT_BLKID_vcea_0 = 0xd, 146 + DBG_CLIENT_BLKID_vcea_1 = 0xe, 147 + DBG_CLIENT_BLKID_vcea_2 = 0xf, 148 + DBG_CLIENT_BLKID_vcea_3 = 0x10, 149 + DBG_CLIENT_BLKID_vcea_4 = 0x11, 150 + DBG_CLIENT_BLKID_vcea_5 = 0x12, 151 + DBG_CLIENT_BLKID_vcea_6 = 0x13, 152 + DBG_CLIENT_BLKID_vceb_0 = 0x14, 153 + DBG_CLIENT_BLKID_vceb_1 = 0x15, 154 + DBG_CLIENT_BLKID_vceb_2 = 0x16, 155 + DBG_CLIENT_BLKID_dco = 0x17, 156 + DBG_CLIENT_BLKID_xdma = 0x18, 157 + DBG_CLIENT_BLKID_smu_0 = 0x19, 158 + DBG_CLIENT_BLKID_smu_1 = 0x1a, 159 + DBG_CLIENT_BLKID_smu_2 = 0x1b, 160 + DBG_CLIENT_BLKID_gck = 0x1c, 161 + DBG_CLIENT_BLKID_tmonw0 = 0x1d, 162 + DBG_CLIENT_BLKID_tmonw1 = 0x1e, 163 + DBG_CLIENT_BLKID_grbm = 0x1f, 164 + DBG_CLIENT_BLKID_rlc = 0x20, 165 + DBG_CLIENT_BLKID_ds0 = 0x21, 166 + DBG_CLIENT_BLKID_cpg_0 = 0x22, 167 + DBG_CLIENT_BLKID_cpg_1 = 0x23, 168 + DBG_CLIENT_BLKID_cpc_0 = 0x24, 169 + DBG_CLIENT_BLKID_cpc_1 = 0x25, 170 + DBG_CLIENT_BLKID_cpf = 0x26, 171 + DBG_CLIENT_BLKID_scf0 = 0x27, 172 + DBG_CLIENT_BLKID_scf1 = 0x28, 173 + DBG_CLIENT_BLKID_scf2 = 0x29, 174 + DBG_CLIENT_BLKID_scf3 = 0x2a, 175 + DBG_CLIENT_BLKID_pc0 = 0x2b, 176 + DBG_CLIENT_BLKID_pc1 = 0x2c, 177 + DBG_CLIENT_BLKID_pc2 = 0x2d, 178 + DBG_CLIENT_BLKID_pc3 = 0x2e, 179 + DBG_CLIENT_BLKID_vgt0 = 0x2f, 180 + DBG_CLIENT_BLKID_vgt1 = 0x30, 181 + DBG_CLIENT_BLKID_vgt2 = 0x31, 182 + DBG_CLIENT_BLKID_vgt3 = 0x32, 183 + DBG_CLIENT_BLKID_sx00 = 0x33, 184 + DBG_CLIENT_BLKID_sx10 = 0x34, 185 + DBG_CLIENT_BLKID_sx20 = 0x35, 186 + DBG_CLIENT_BLKID_sx30 = 0x36, 187 + DBG_CLIENT_BLKID_cb001 = 0x37, 188 + DBG_CLIENT_BLKID_cb200 = 0x38, 189 + DBG_CLIENT_BLKID_cb201 = 0x39, 190 + DBG_CLIENT_BLKID_cbr0 = 0x3a, 191 + DBG_CLIENT_BLKID_cb000 = 0x3b, 192 + DBG_CLIENT_BLKID_cb101 = 0x3c, 193 + DBG_CLIENT_BLKID_cb300 = 0x3d, 194 + DBG_CLIENT_BLKID_cb301 = 0x3e, 195 + DBG_CLIENT_BLKID_cbr1 = 0x3f, 196 + DBG_CLIENT_BLKID_cb100 = 0x40, 197 + DBG_CLIENT_BLKID_ia0 = 0x41, 198 + DBG_CLIENT_BLKID_ia1 = 0x42, 199 + DBG_CLIENT_BLKID_bci0 = 0x43, 200 + DBG_CLIENT_BLKID_bci1 = 0x44, 
201 + DBG_CLIENT_BLKID_bci2 = 0x45, 202 + DBG_CLIENT_BLKID_bci3 = 0x46, 203 + DBG_CLIENT_BLKID_pa0 = 0x47, 204 + DBG_CLIENT_BLKID_pa1 = 0x48, 205 + DBG_CLIENT_BLKID_spim0 = 0x49, 206 + DBG_CLIENT_BLKID_spim1 = 0x4a, 207 + DBG_CLIENT_BLKID_spim2 = 0x4b, 208 + DBG_CLIENT_BLKID_spim3 = 0x4c, 209 + DBG_CLIENT_BLKID_sdma = 0x4d, 210 + DBG_CLIENT_BLKID_ih = 0x4e, 211 + DBG_CLIENT_BLKID_sem = 0x4f, 212 + DBG_CLIENT_BLKID_srbm = 0x50, 213 + DBG_CLIENT_BLKID_hdp = 0x51, 214 + DBG_CLIENT_BLKID_acp_0 = 0x52, 215 + DBG_CLIENT_BLKID_acp_1 = 0x53, 216 + DBG_CLIENT_BLKID_sam = 0x54, 217 + DBG_CLIENT_BLKID_mcc0 = 0x55, 218 + DBG_CLIENT_BLKID_mcc1 = 0x56, 219 + DBG_CLIENT_BLKID_mcc2 = 0x57, 220 + DBG_CLIENT_BLKID_mcc3 = 0x58, 221 + DBG_CLIENT_BLKID_mcd0 = 0x59, 222 + DBG_CLIENT_BLKID_mcd1 = 0x5a, 223 + DBG_CLIENT_BLKID_mcd2 = 0x5b, 224 + DBG_CLIENT_BLKID_mcd3 = 0x5c, 225 + DBG_CLIENT_BLKID_mcb = 0x5d, 226 + DBG_CLIENT_BLKID_vmc = 0x5e, 227 + DBG_CLIENT_BLKID_gmcon = 0x5f, 228 + DBG_CLIENT_BLKID_gdc_0 = 0x60, 229 + DBG_CLIENT_BLKID_gdc_1 = 0x61, 230 + DBG_CLIENT_BLKID_gdc_2 = 0x62, 231 + DBG_CLIENT_BLKID_gdc_3 = 0x63, 232 + DBG_CLIENT_BLKID_gdc_4 = 0x64, 233 + DBG_CLIENT_BLKID_gdc_5 = 0x65, 234 + DBG_CLIENT_BLKID_gdc_6 = 0x66, 235 + DBG_CLIENT_BLKID_gdc_7 = 0x67, 236 + DBG_CLIENT_BLKID_gdc_8 = 0x68, 237 + DBG_CLIENT_BLKID_gdc_9 = 0x69, 238 + DBG_CLIENT_BLKID_gdc_10 = 0x6a, 239 + DBG_CLIENT_BLKID_gdc_11 = 0x6b, 240 + DBG_CLIENT_BLKID_gdc_12 = 0x6c, 241 + DBG_CLIENT_BLKID_gdc_13 = 0x6d, 242 + DBG_CLIENT_BLKID_gdc_14 = 0x6e, 243 + DBG_CLIENT_BLKID_gdc_15 = 0x6f, 244 + DBG_CLIENT_BLKID_gdc_16 = 0x70, 245 + DBG_CLIENT_BLKID_gdc_17 = 0x71, 246 + DBG_CLIENT_BLKID_gdc_18 = 0x72, 247 + DBG_CLIENT_BLKID_gdc_19 = 0x73, 248 + DBG_CLIENT_BLKID_gdc_20 = 0x74, 249 + DBG_CLIENT_BLKID_gdc_21 = 0x75, 250 + DBG_CLIENT_BLKID_gdc_22 = 0x76, 251 + DBG_CLIENT_BLKID_wd = 0x77, 252 + DBG_CLIENT_BLKID_sdma_0 = 0x78, 253 + DBG_CLIENT_BLKID_sdma_1 = 0x79, 254 + } DebugBlockId; 255 + typedef enum DebugBlockId_OLD { 256 + DBG_BLOCK_ID_RESERVED = 0x0, 257 + DBG_BLOCK_ID_DBG = 0x1, 258 + DBG_BLOCK_ID_VMC = 0x2, 259 + DBG_BLOCK_ID_PDMA = 0x3, 260 + DBG_BLOCK_ID_CG = 0x4, 261 + DBG_BLOCK_ID_SRBM = 0x5, 262 + DBG_BLOCK_ID_GRBM = 0x6, 263 + DBG_BLOCK_ID_RLC = 0x7, 264 + DBG_BLOCK_ID_CSC = 0x8, 265 + DBG_BLOCK_ID_SEM = 0x9, 266 + DBG_BLOCK_ID_IH = 0xa, 267 + DBG_BLOCK_ID_SC = 0xb, 268 + DBG_BLOCK_ID_SQ = 0xc, 269 + DBG_BLOCK_ID_AVP = 0xd, 270 + DBG_BLOCK_ID_GMCON = 0xe, 271 + DBG_BLOCK_ID_SMU = 0xf, 272 + DBG_BLOCK_ID_DMA0 = 0x10, 273 + DBG_BLOCK_ID_DMA1 = 0x11, 274 + DBG_BLOCK_ID_SPIM = 0x12, 275 + DBG_BLOCK_ID_GDS = 0x13, 276 + DBG_BLOCK_ID_SPIS = 0x14, 277 + DBG_BLOCK_ID_UNUSED0 = 0x15, 278 + DBG_BLOCK_ID_PA0 = 0x16, 279 + DBG_BLOCK_ID_PA1 = 0x17, 280 + DBG_BLOCK_ID_CP0 = 0x18, 281 + DBG_BLOCK_ID_CP1 = 0x19, 282 + DBG_BLOCK_ID_CP2 = 0x1a, 283 + DBG_BLOCK_ID_UNUSED1 = 0x1b, 284 + DBG_BLOCK_ID_UVDU = 0x1c, 285 + DBG_BLOCK_ID_UVDM = 0x1d, 286 + DBG_BLOCK_ID_VCE = 0x1e, 287 + DBG_BLOCK_ID_UNUSED2 = 0x1f, 288 + DBG_BLOCK_ID_VGT0 = 0x20, 289 + DBG_BLOCK_ID_VGT1 = 0x21, 290 + DBG_BLOCK_ID_IA = 0x22, 291 + DBG_BLOCK_ID_UNUSED3 = 0x23, 292 + DBG_BLOCK_ID_SCT0 = 0x24, 293 + DBG_BLOCK_ID_SCT1 = 0x25, 294 + DBG_BLOCK_ID_SPM0 = 0x26, 295 + DBG_BLOCK_ID_SPM1 = 0x27, 296 + DBG_BLOCK_ID_TCAA = 0x28, 297 + DBG_BLOCK_ID_TCAB = 0x29, 298 + DBG_BLOCK_ID_TCCA = 0x2a, 299 + DBG_BLOCK_ID_TCCB = 0x2b, 300 + DBG_BLOCK_ID_MCC0 = 0x2c, 301 + DBG_BLOCK_ID_MCC1 = 0x2d, 302 + DBG_BLOCK_ID_MCC2 = 0x2e, 303 + DBG_BLOCK_ID_MCC3 = 0x2f, 304 + DBG_BLOCK_ID_SX0 = 0x30, 305 + 
DBG_BLOCK_ID_SX1 = 0x31, 306 + DBG_BLOCK_ID_SX2 = 0x32, 307 + DBG_BLOCK_ID_SX3 = 0x33, 308 + DBG_BLOCK_ID_UNUSED4 = 0x34, 309 + DBG_BLOCK_ID_UNUSED5 = 0x35, 310 + DBG_BLOCK_ID_UNUSED6 = 0x36, 311 + DBG_BLOCK_ID_UNUSED7 = 0x37, 312 + DBG_BLOCK_ID_PC0 = 0x38, 313 + DBG_BLOCK_ID_PC1 = 0x39, 314 + DBG_BLOCK_ID_UNUSED8 = 0x3a, 315 + DBG_BLOCK_ID_UNUSED9 = 0x3b, 316 + DBG_BLOCK_ID_UNUSED10 = 0x3c, 317 + DBG_BLOCK_ID_UNUSED11 = 0x3d, 318 + DBG_BLOCK_ID_MCB = 0x3e, 319 + DBG_BLOCK_ID_UNUSED12 = 0x3f, 320 + DBG_BLOCK_ID_SCB0 = 0x40, 321 + DBG_BLOCK_ID_SCB1 = 0x41, 322 + DBG_BLOCK_ID_UNUSED13 = 0x42, 323 + DBG_BLOCK_ID_UNUSED14 = 0x43, 324 + DBG_BLOCK_ID_SCF0 = 0x44, 325 + DBG_BLOCK_ID_SCF1 = 0x45, 326 + DBG_BLOCK_ID_UNUSED15 = 0x46, 327 + DBG_BLOCK_ID_UNUSED16 = 0x47, 328 + DBG_BLOCK_ID_BCI0 = 0x48, 329 + DBG_BLOCK_ID_BCI1 = 0x49, 330 + DBG_BLOCK_ID_BCI2 = 0x4a, 331 + DBG_BLOCK_ID_BCI3 = 0x4b, 332 + DBG_BLOCK_ID_UNUSED17 = 0x4c, 333 + DBG_BLOCK_ID_UNUSED18 = 0x4d, 334 + DBG_BLOCK_ID_UNUSED19 = 0x4e, 335 + DBG_BLOCK_ID_UNUSED20 = 0x4f, 336 + DBG_BLOCK_ID_CB00 = 0x50, 337 + DBG_BLOCK_ID_CB01 = 0x51, 338 + DBG_BLOCK_ID_CB02 = 0x52, 339 + DBG_BLOCK_ID_CB03 = 0x53, 340 + DBG_BLOCK_ID_CB04 = 0x54, 341 + DBG_BLOCK_ID_UNUSED21 = 0x55, 342 + DBG_BLOCK_ID_UNUSED22 = 0x56, 343 + DBG_BLOCK_ID_UNUSED23 = 0x57, 344 + DBG_BLOCK_ID_CB10 = 0x58, 345 + DBG_BLOCK_ID_CB11 = 0x59, 346 + DBG_BLOCK_ID_CB12 = 0x5a, 347 + DBG_BLOCK_ID_CB13 = 0x5b, 348 + DBG_BLOCK_ID_CB14 = 0x5c, 349 + DBG_BLOCK_ID_UNUSED24 = 0x5d, 350 + DBG_BLOCK_ID_UNUSED25 = 0x5e, 351 + DBG_BLOCK_ID_UNUSED26 = 0x5f, 352 + DBG_BLOCK_ID_TCP0 = 0x60, 353 + DBG_BLOCK_ID_TCP1 = 0x61, 354 + DBG_BLOCK_ID_TCP2 = 0x62, 355 + DBG_BLOCK_ID_TCP3 = 0x63, 356 + DBG_BLOCK_ID_TCP4 = 0x64, 357 + DBG_BLOCK_ID_TCP5 = 0x65, 358 + DBG_BLOCK_ID_TCP6 = 0x66, 359 + DBG_BLOCK_ID_TCP7 = 0x67, 360 + DBG_BLOCK_ID_TCP8 = 0x68, 361 + DBG_BLOCK_ID_TCP9 = 0x69, 362 + DBG_BLOCK_ID_TCP10 = 0x6a, 363 + DBG_BLOCK_ID_TCP11 = 0x6b, 364 + DBG_BLOCK_ID_TCP12 = 0x6c, 365 + DBG_BLOCK_ID_TCP13 = 0x6d, 366 + DBG_BLOCK_ID_TCP14 = 0x6e, 367 + DBG_BLOCK_ID_TCP15 = 0x6f, 368 + DBG_BLOCK_ID_TCP16 = 0x70, 369 + DBG_BLOCK_ID_TCP17 = 0x71, 370 + DBG_BLOCK_ID_TCP18 = 0x72, 371 + DBG_BLOCK_ID_TCP19 = 0x73, 372 + DBG_BLOCK_ID_TCP20 = 0x74, 373 + DBG_BLOCK_ID_TCP21 = 0x75, 374 + DBG_BLOCK_ID_TCP22 = 0x76, 375 + DBG_BLOCK_ID_TCP23 = 0x77, 376 + DBG_BLOCK_ID_TCP_RESERVED0 = 0x78, 377 + DBG_BLOCK_ID_TCP_RESERVED1 = 0x79, 378 + DBG_BLOCK_ID_TCP_RESERVED2 = 0x7a, 379 + DBG_BLOCK_ID_TCP_RESERVED3 = 0x7b, 380 + DBG_BLOCK_ID_TCP_RESERVED4 = 0x7c, 381 + DBG_BLOCK_ID_TCP_RESERVED5 = 0x7d, 382 + DBG_BLOCK_ID_TCP_RESERVED6 = 0x7e, 383 + DBG_BLOCK_ID_TCP_RESERVED7 = 0x7f, 384 + DBG_BLOCK_ID_DB00 = 0x80, 385 + DBG_BLOCK_ID_DB01 = 0x81, 386 + DBG_BLOCK_ID_DB02 = 0x82, 387 + DBG_BLOCK_ID_DB03 = 0x83, 388 + DBG_BLOCK_ID_DB04 = 0x84, 389 + DBG_BLOCK_ID_UNUSED27 = 0x85, 390 + DBG_BLOCK_ID_UNUSED28 = 0x86, 391 + DBG_BLOCK_ID_UNUSED29 = 0x87, 392 + DBG_BLOCK_ID_DB10 = 0x88, 393 + DBG_BLOCK_ID_DB11 = 0x89, 394 + DBG_BLOCK_ID_DB12 = 0x8a, 395 + DBG_BLOCK_ID_DB13 = 0x8b, 396 + DBG_BLOCK_ID_DB14 = 0x8c, 397 + DBG_BLOCK_ID_UNUSED30 = 0x8d, 398 + DBG_BLOCK_ID_UNUSED31 = 0x8e, 399 + DBG_BLOCK_ID_UNUSED32 = 0x8f, 400 + DBG_BLOCK_ID_TCC0 = 0x90, 401 + DBG_BLOCK_ID_TCC1 = 0x91, 402 + DBG_BLOCK_ID_TCC2 = 0x92, 403 + DBG_BLOCK_ID_TCC3 = 0x93, 404 + DBG_BLOCK_ID_TCC4 = 0x94, 405 + DBG_BLOCK_ID_TCC5 = 0x95, 406 + DBG_BLOCK_ID_TCC6 = 0x96, 407 + DBG_BLOCK_ID_TCC7 = 0x97, 408 + DBG_BLOCK_ID_SPS00 = 0x98, 409 + DBG_BLOCK_ID_SPS01 = 0x99, 410 + 
DBG_BLOCK_ID_SPS02 = 0x9a, 411 + DBG_BLOCK_ID_SPS10 = 0x9b, 412 + DBG_BLOCK_ID_SPS11 = 0x9c, 413 + DBG_BLOCK_ID_SPS12 = 0x9d, 414 + DBG_BLOCK_ID_UNUSED33 = 0x9e, 415 + DBG_BLOCK_ID_UNUSED34 = 0x9f, 416 + DBG_BLOCK_ID_TA00 = 0xa0, 417 + DBG_BLOCK_ID_TA01 = 0xa1, 418 + DBG_BLOCK_ID_TA02 = 0xa2, 419 + DBG_BLOCK_ID_TA03 = 0xa3, 420 + DBG_BLOCK_ID_TA04 = 0xa4, 421 + DBG_BLOCK_ID_TA05 = 0xa5, 422 + DBG_BLOCK_ID_TA06 = 0xa6, 423 + DBG_BLOCK_ID_TA07 = 0xa7, 424 + DBG_BLOCK_ID_TA08 = 0xa8, 425 + DBG_BLOCK_ID_TA09 = 0xa9, 426 + DBG_BLOCK_ID_TA0A = 0xaa, 427 + DBG_BLOCK_ID_TA0B = 0xab, 428 + DBG_BLOCK_ID_UNUSED35 = 0xac, 429 + DBG_BLOCK_ID_UNUSED36 = 0xad, 430 + DBG_BLOCK_ID_UNUSED37 = 0xae, 431 + DBG_BLOCK_ID_UNUSED38 = 0xaf, 432 + DBG_BLOCK_ID_TA10 = 0xb0, 433 + DBG_BLOCK_ID_TA11 = 0xb1, 434 + DBG_BLOCK_ID_TA12 = 0xb2, 435 + DBG_BLOCK_ID_TA13 = 0xb3, 436 + DBG_BLOCK_ID_TA14 = 0xb4, 437 + DBG_BLOCK_ID_TA15 = 0xb5, 438 + DBG_BLOCK_ID_TA16 = 0xb6, 439 + DBG_BLOCK_ID_TA17 = 0xb7, 440 + DBG_BLOCK_ID_TA18 = 0xb8, 441 + DBG_BLOCK_ID_TA19 = 0xb9, 442 + DBG_BLOCK_ID_TA1A = 0xba, 443 + DBG_BLOCK_ID_TA1B = 0xbb, 444 + DBG_BLOCK_ID_UNUSED39 = 0xbc, 445 + DBG_BLOCK_ID_UNUSED40 = 0xbd, 446 + DBG_BLOCK_ID_UNUSED41 = 0xbe, 447 + DBG_BLOCK_ID_UNUSED42 = 0xbf, 448 + DBG_BLOCK_ID_TD00 = 0xc0, 449 + DBG_BLOCK_ID_TD01 = 0xc1, 450 + DBG_BLOCK_ID_TD02 = 0xc2, 451 + DBG_BLOCK_ID_TD03 = 0xc3, 452 + DBG_BLOCK_ID_TD04 = 0xc4, 453 + DBG_BLOCK_ID_TD05 = 0xc5, 454 + DBG_BLOCK_ID_TD06 = 0xc6, 455 + DBG_BLOCK_ID_TD07 = 0xc7, 456 + DBG_BLOCK_ID_TD08 = 0xc8, 457 + DBG_BLOCK_ID_TD09 = 0xc9, 458 + DBG_BLOCK_ID_TD0A = 0xca, 459 + DBG_BLOCK_ID_TD0B = 0xcb, 460 + DBG_BLOCK_ID_UNUSED43 = 0xcc, 461 + DBG_BLOCK_ID_UNUSED44 = 0xcd, 462 + DBG_BLOCK_ID_UNUSED45 = 0xce, 463 + DBG_BLOCK_ID_UNUSED46 = 0xcf, 464 + DBG_BLOCK_ID_TD10 = 0xd0, 465 + DBG_BLOCK_ID_TD11 = 0xd1, 466 + DBG_BLOCK_ID_TD12 = 0xd2, 467 + DBG_BLOCK_ID_TD13 = 0xd3, 468 + DBG_BLOCK_ID_TD14 = 0xd4, 469 + DBG_BLOCK_ID_TD15 = 0xd5, 470 + DBG_BLOCK_ID_TD16 = 0xd6, 471 + DBG_BLOCK_ID_TD17 = 0xd7, 472 + DBG_BLOCK_ID_TD18 = 0xd8, 473 + DBG_BLOCK_ID_TD19 = 0xd9, 474 + DBG_BLOCK_ID_TD1A = 0xda, 475 + DBG_BLOCK_ID_TD1B = 0xdb, 476 + DBG_BLOCK_ID_UNUSED47 = 0xdc, 477 + DBG_BLOCK_ID_UNUSED48 = 0xdd, 478 + DBG_BLOCK_ID_UNUSED49 = 0xde, 479 + DBG_BLOCK_ID_UNUSED50 = 0xdf, 480 + DBG_BLOCK_ID_MCD0 = 0xe0, 481 + DBG_BLOCK_ID_MCD1 = 0xe1, 482 + DBG_BLOCK_ID_MCD2 = 0xe2, 483 + DBG_BLOCK_ID_MCD3 = 0xe3, 484 + DBG_BLOCK_ID_MCD4 = 0xe4, 485 + DBG_BLOCK_ID_MCD5 = 0xe5, 486 + DBG_BLOCK_ID_UNUSED51 = 0xe6, 487 + DBG_BLOCK_ID_UNUSED52 = 0xe7, 488 + } DebugBlockId_OLD; 489 + typedef enum DebugBlockId_BY2 { 490 + DBG_BLOCK_ID_RESERVED_BY2 = 0x0, 491 + DBG_BLOCK_ID_VMC_BY2 = 0x1, 492 + DBG_BLOCK_ID_CG_BY2 = 0x2, 493 + DBG_BLOCK_ID_GRBM_BY2 = 0x3, 494 + DBG_BLOCK_ID_CSC_BY2 = 0x4, 495 + DBG_BLOCK_ID_IH_BY2 = 0x5, 496 + DBG_BLOCK_ID_SQ_BY2 = 0x6, 497 + DBG_BLOCK_ID_GMCON_BY2 = 0x7, 498 + DBG_BLOCK_ID_DMA0_BY2 = 0x8, 499 + DBG_BLOCK_ID_SPIM_BY2 = 0x9, 500 + DBG_BLOCK_ID_SPIS_BY2 = 0xa, 501 + DBG_BLOCK_ID_PA0_BY2 = 0xb, 502 + DBG_BLOCK_ID_CP0_BY2 = 0xc, 503 + DBG_BLOCK_ID_CP2_BY2 = 0xd, 504 + DBG_BLOCK_ID_UVDU_BY2 = 0xe, 505 + DBG_BLOCK_ID_VCE_BY2 = 0xf, 506 + DBG_BLOCK_ID_VGT0_BY2 = 0x10, 507 + DBG_BLOCK_ID_IA_BY2 = 0x11, 508 + DBG_BLOCK_ID_SCT0_BY2 = 0x12, 509 + DBG_BLOCK_ID_SPM0_BY2 = 0x13, 510 + DBG_BLOCK_ID_TCAA_BY2 = 0x14, 511 + DBG_BLOCK_ID_TCCA_BY2 = 0x15, 512 + DBG_BLOCK_ID_MCC0_BY2 = 0x16, 513 + DBG_BLOCK_ID_MCC2_BY2 = 0x17, 514 + DBG_BLOCK_ID_SX0_BY2 = 0x18, 515 + DBG_BLOCK_ID_SX2_BY2 = 0x19, 516 + 
DBG_BLOCK_ID_UNUSED4_BY2 = 0x1a, 517 + DBG_BLOCK_ID_UNUSED6_BY2 = 0x1b, 518 + DBG_BLOCK_ID_PC0_BY2 = 0x1c, 519 + DBG_BLOCK_ID_UNUSED8_BY2 = 0x1d, 520 + DBG_BLOCK_ID_UNUSED10_BY2 = 0x1e, 521 + DBG_BLOCK_ID_MCB_BY2 = 0x1f, 522 + DBG_BLOCK_ID_SCB0_BY2 = 0x20, 523 + DBG_BLOCK_ID_UNUSED13_BY2 = 0x21, 524 + DBG_BLOCK_ID_SCF0_BY2 = 0x22, 525 + DBG_BLOCK_ID_UNUSED15_BY2 = 0x23, 526 + DBG_BLOCK_ID_BCI0_BY2 = 0x24, 527 + DBG_BLOCK_ID_BCI2_BY2 = 0x25, 528 + DBG_BLOCK_ID_UNUSED17_BY2 = 0x26, 529 + DBG_BLOCK_ID_UNUSED19_BY2 = 0x27, 530 + DBG_BLOCK_ID_CB00_BY2 = 0x28, 531 + DBG_BLOCK_ID_CB02_BY2 = 0x29, 532 + DBG_BLOCK_ID_CB04_BY2 = 0x2a, 533 + DBG_BLOCK_ID_UNUSED22_BY2 = 0x2b, 534 + DBG_BLOCK_ID_CB10_BY2 = 0x2c, 535 + DBG_BLOCK_ID_CB12_BY2 = 0x2d, 536 + DBG_BLOCK_ID_CB14_BY2 = 0x2e, 537 + DBG_BLOCK_ID_UNUSED25_BY2 = 0x2f, 538 + DBG_BLOCK_ID_TCP0_BY2 = 0x30, 539 + DBG_BLOCK_ID_TCP2_BY2 = 0x31, 540 + DBG_BLOCK_ID_TCP4_BY2 = 0x32, 541 + DBG_BLOCK_ID_TCP6_BY2 = 0x33, 542 + DBG_BLOCK_ID_TCP8_BY2 = 0x34, 543 + DBG_BLOCK_ID_TCP10_BY2 = 0x35, 544 + DBG_BLOCK_ID_TCP12_BY2 = 0x36, 545 + DBG_BLOCK_ID_TCP14_BY2 = 0x37, 546 + DBG_BLOCK_ID_TCP16_BY2 = 0x38, 547 + DBG_BLOCK_ID_TCP18_BY2 = 0x39, 548 + DBG_BLOCK_ID_TCP20_BY2 = 0x3a, 549 + DBG_BLOCK_ID_TCP22_BY2 = 0x3b, 550 + DBG_BLOCK_ID_TCP_RESERVED0_BY2 = 0x3c, 551 + DBG_BLOCK_ID_TCP_RESERVED2_BY2 = 0x3d, 552 + DBG_BLOCK_ID_TCP_RESERVED4_BY2 = 0x3e, 553 + DBG_BLOCK_ID_TCP_RESERVED6_BY2 = 0x3f, 554 + DBG_BLOCK_ID_DB00_BY2 = 0x40, 555 + DBG_BLOCK_ID_DB02_BY2 = 0x41, 556 + DBG_BLOCK_ID_DB04_BY2 = 0x42, 557 + DBG_BLOCK_ID_UNUSED28_BY2 = 0x43, 558 + DBG_BLOCK_ID_DB10_BY2 = 0x44, 559 + DBG_BLOCK_ID_DB12_BY2 = 0x45, 560 + DBG_BLOCK_ID_DB14_BY2 = 0x46, 561 + DBG_BLOCK_ID_UNUSED31_BY2 = 0x47, 562 + DBG_BLOCK_ID_TCC0_BY2 = 0x48, 563 + DBG_BLOCK_ID_TCC2_BY2 = 0x49, 564 + DBG_BLOCK_ID_TCC4_BY2 = 0x4a, 565 + DBG_BLOCK_ID_TCC6_BY2 = 0x4b, 566 + DBG_BLOCK_ID_SPS00_BY2 = 0x4c, 567 + DBG_BLOCK_ID_SPS02_BY2 = 0x4d, 568 + DBG_BLOCK_ID_SPS11_BY2 = 0x4e, 569 + DBG_BLOCK_ID_UNUSED33_BY2 = 0x4f, 570 + DBG_BLOCK_ID_TA00_BY2 = 0x50, 571 + DBG_BLOCK_ID_TA02_BY2 = 0x51, 572 + DBG_BLOCK_ID_TA04_BY2 = 0x52, 573 + DBG_BLOCK_ID_TA06_BY2 = 0x53, 574 + DBG_BLOCK_ID_TA08_BY2 = 0x54, 575 + DBG_BLOCK_ID_TA0A_BY2 = 0x55, 576 + DBG_BLOCK_ID_UNUSED35_BY2 = 0x56, 577 + DBG_BLOCK_ID_UNUSED37_BY2 = 0x57, 578 + DBG_BLOCK_ID_TA10_BY2 = 0x58, 579 + DBG_BLOCK_ID_TA12_BY2 = 0x59, 580 + DBG_BLOCK_ID_TA14_BY2 = 0x5a, 581 + DBG_BLOCK_ID_TA16_BY2 = 0x5b, 582 + DBG_BLOCK_ID_TA18_BY2 = 0x5c, 583 + DBG_BLOCK_ID_TA1A_BY2 = 0x5d, 584 + DBG_BLOCK_ID_UNUSED39_BY2 = 0x5e, 585 + DBG_BLOCK_ID_UNUSED41_BY2 = 0x5f, 586 + DBG_BLOCK_ID_TD00_BY2 = 0x60, 587 + DBG_BLOCK_ID_TD02_BY2 = 0x61, 588 + DBG_BLOCK_ID_TD04_BY2 = 0x62, 589 + DBG_BLOCK_ID_TD06_BY2 = 0x63, 590 + DBG_BLOCK_ID_TD08_BY2 = 0x64, 591 + DBG_BLOCK_ID_TD0A_BY2 = 0x65, 592 + DBG_BLOCK_ID_UNUSED43_BY2 = 0x66, 593 + DBG_BLOCK_ID_UNUSED45_BY2 = 0x67, 594 + DBG_BLOCK_ID_TD10_BY2 = 0x68, 595 + DBG_BLOCK_ID_TD12_BY2 = 0x69, 596 + DBG_BLOCK_ID_TD14_BY2 = 0x6a, 597 + DBG_BLOCK_ID_TD16_BY2 = 0x6b, 598 + DBG_BLOCK_ID_TD18_BY2 = 0x6c, 599 + DBG_BLOCK_ID_TD1A_BY2 = 0x6d, 600 + DBG_BLOCK_ID_UNUSED47_BY2 = 0x6e, 601 + DBG_BLOCK_ID_UNUSED49_BY2 = 0x6f, 602 + DBG_BLOCK_ID_MCD0_BY2 = 0x70, 603 + DBG_BLOCK_ID_MCD2_BY2 = 0x71, 604 + DBG_BLOCK_ID_MCD4_BY2 = 0x72, 605 + DBG_BLOCK_ID_UNUSED51_BY2 = 0x73, 606 + } DebugBlockId_BY2; 607 + typedef enum DebugBlockId_BY4 { 608 + DBG_BLOCK_ID_RESERVED_BY4 = 0x0, 609 + DBG_BLOCK_ID_CG_BY4 = 0x1, 610 + DBG_BLOCK_ID_CSC_BY4 = 0x2, 611 + 
DBG_BLOCK_ID_SQ_BY4 = 0x3, 612 + DBG_BLOCK_ID_DMA0_BY4 = 0x4, 613 + DBG_BLOCK_ID_SPIS_BY4 = 0x5, 614 + DBG_BLOCK_ID_CP0_BY4 = 0x6, 615 + DBG_BLOCK_ID_UVDU_BY4 = 0x7, 616 + DBG_BLOCK_ID_VGT0_BY4 = 0x8, 617 + DBG_BLOCK_ID_SCT0_BY4 = 0x9, 618 + DBG_BLOCK_ID_TCAA_BY4 = 0xa, 619 + DBG_BLOCK_ID_MCC0_BY4 = 0xb, 620 + DBG_BLOCK_ID_SX0_BY4 = 0xc, 621 + DBG_BLOCK_ID_UNUSED4_BY4 = 0xd, 622 + DBG_BLOCK_ID_PC0_BY4 = 0xe, 623 + DBG_BLOCK_ID_UNUSED10_BY4 = 0xf, 624 + DBG_BLOCK_ID_SCB0_BY4 = 0x10, 625 + DBG_BLOCK_ID_SCF0_BY4 = 0x11, 626 + DBG_BLOCK_ID_BCI0_BY4 = 0x12, 627 + DBG_BLOCK_ID_UNUSED17_BY4 = 0x13, 628 + DBG_BLOCK_ID_CB00_BY4 = 0x14, 629 + DBG_BLOCK_ID_CB04_BY4 = 0x15, 630 + DBG_BLOCK_ID_CB10_BY4 = 0x16, 631 + DBG_BLOCK_ID_CB14_BY4 = 0x17, 632 + DBG_BLOCK_ID_TCP0_BY4 = 0x18, 633 + DBG_BLOCK_ID_TCP4_BY4 = 0x19, 634 + DBG_BLOCK_ID_TCP8_BY4 = 0x1a, 635 + DBG_BLOCK_ID_TCP12_BY4 = 0x1b, 636 + DBG_BLOCK_ID_TCP16_BY4 = 0x1c, 637 + DBG_BLOCK_ID_TCP20_BY4 = 0x1d, 638 + DBG_BLOCK_ID_TCP_RESERVED0_BY4 = 0x1e, 639 + DBG_BLOCK_ID_TCP_RESERVED4_BY4 = 0x1f, 640 + DBG_BLOCK_ID_DB_BY4 = 0x20, 641 + DBG_BLOCK_ID_DB04_BY4 = 0x21, 642 + DBG_BLOCK_ID_DB10_BY4 = 0x22, 643 + DBG_BLOCK_ID_DB14_BY4 = 0x23, 644 + DBG_BLOCK_ID_TCC0_BY4 = 0x24, 645 + DBG_BLOCK_ID_TCC4_BY4 = 0x25, 646 + DBG_BLOCK_ID_SPS00_BY4 = 0x26, 647 + DBG_BLOCK_ID_SPS11_BY4 = 0x27, 648 + DBG_BLOCK_ID_TA00_BY4 = 0x28, 649 + DBG_BLOCK_ID_TA04_BY4 = 0x29, 650 + DBG_BLOCK_ID_TA08_BY4 = 0x2a, 651 + DBG_BLOCK_ID_UNUSED35_BY4 = 0x2b, 652 + DBG_BLOCK_ID_TA10_BY4 = 0x2c, 653 + DBG_BLOCK_ID_TA14_BY4 = 0x2d, 654 + DBG_BLOCK_ID_TA18_BY4 = 0x2e, 655 + DBG_BLOCK_ID_UNUSED39_BY4 = 0x2f, 656 + DBG_BLOCK_ID_TD00_BY4 = 0x30, 657 + DBG_BLOCK_ID_TD04_BY4 = 0x31, 658 + DBG_BLOCK_ID_TD08_BY4 = 0x32, 659 + DBG_BLOCK_ID_UNUSED43_BY4 = 0x33, 660 + DBG_BLOCK_ID_TD10_BY4 = 0x34, 661 + DBG_BLOCK_ID_TD14_BY4 = 0x35, 662 + DBG_BLOCK_ID_TD18_BY4 = 0x36, 663 + DBG_BLOCK_ID_UNUSED47_BY4 = 0x37, 664 + DBG_BLOCK_ID_MCD0_BY4 = 0x38, 665 + DBG_BLOCK_ID_MCD4_BY4 = 0x39, 666 + } DebugBlockId_BY4; 667 + typedef enum DebugBlockId_BY8 { 668 + DBG_BLOCK_ID_RESERVED_BY8 = 0x0, 669 + DBG_BLOCK_ID_CSC_BY8 = 0x1, 670 + DBG_BLOCK_ID_DMA0_BY8 = 0x2, 671 + DBG_BLOCK_ID_CP0_BY8 = 0x3, 672 + DBG_BLOCK_ID_VGT0_BY8 = 0x4, 673 + DBG_BLOCK_ID_TCAA_BY8 = 0x5, 674 + DBG_BLOCK_ID_SX0_BY8 = 0x6, 675 + DBG_BLOCK_ID_PC0_BY8 = 0x7, 676 + DBG_BLOCK_ID_SCB0_BY8 = 0x8, 677 + DBG_BLOCK_ID_BCI0_BY8 = 0x9, 678 + DBG_BLOCK_ID_CB00_BY8 = 0xa, 679 + DBG_BLOCK_ID_CB10_BY8 = 0xb, 680 + DBG_BLOCK_ID_TCP0_BY8 = 0xc, 681 + DBG_BLOCK_ID_TCP8_BY8 = 0xd, 682 + DBG_BLOCK_ID_TCP16_BY8 = 0xe, 683 + DBG_BLOCK_ID_TCP_RESERVED0_BY8 = 0xf, 684 + DBG_BLOCK_ID_DB00_BY8 = 0x10, 685 + DBG_BLOCK_ID_DB10_BY8 = 0x11, 686 + DBG_BLOCK_ID_TCC0_BY8 = 0x12, 687 + DBG_BLOCK_ID_SPS00_BY8 = 0x13, 688 + DBG_BLOCK_ID_TA00_BY8 = 0x14, 689 + DBG_BLOCK_ID_TA08_BY8 = 0x15, 690 + DBG_BLOCK_ID_TA10_BY8 = 0x16, 691 + DBG_BLOCK_ID_TA18_BY8 = 0x17, 692 + DBG_BLOCK_ID_TD00_BY8 = 0x18, 693 + DBG_BLOCK_ID_TD08_BY8 = 0x19, 694 + DBG_BLOCK_ID_TD10_BY8 = 0x1a, 695 + DBG_BLOCK_ID_TD18_BY8 = 0x1b, 696 + DBG_BLOCK_ID_MCD0_BY8 = 0x1c, 697 + } DebugBlockId_BY8; 698 + typedef enum DebugBlockId_BY16 { 699 + DBG_BLOCK_ID_RESERVED_BY16 = 0x0, 700 + DBG_BLOCK_ID_DMA0_BY16 = 0x1, 701 + DBG_BLOCK_ID_VGT0_BY16 = 0x2, 702 + DBG_BLOCK_ID_SX0_BY16 = 0x3, 703 + DBG_BLOCK_ID_SCB0_BY16 = 0x4, 704 + DBG_BLOCK_ID_CB00_BY16 = 0x5, 705 + DBG_BLOCK_ID_TCP0_BY16 = 0x6, 706 + DBG_BLOCK_ID_TCP16_BY16 = 0x7, 707 + DBG_BLOCK_ID_DB00_BY16 = 0x8, 708 + DBG_BLOCK_ID_TCC0_BY16 = 0x9, 709 + 
DBG_BLOCK_ID_TA00_BY16 = 0xa, 710 + DBG_BLOCK_ID_TA10_BY16 = 0xb, 711 + DBG_BLOCK_ID_TD00_BY16 = 0xc, 712 + DBG_BLOCK_ID_TD10_BY16 = 0xd, 713 + DBG_BLOCK_ID_MCD0_BY16 = 0xe, 714 + } DebugBlockId_BY16; 715 + typedef enum CompareRef { 716 + REF_NEVER = 0x0, 717 + REF_LESS = 0x1, 718 + REF_EQUAL = 0x2, 719 + REF_LEQUAL = 0x3, 720 + REF_GREATER = 0x4, 721 + REF_NOTEQUAL = 0x5, 722 + REF_GEQUAL = 0x6, 723 + REF_ALWAYS = 0x7, 724 + } CompareRef; 725 + typedef enum ReadSize { 726 + READ_256_BITS = 0x0, 727 + READ_512_BITS = 0x1, 728 + } ReadSize; 729 + typedef enum DepthFormat { 730 + DEPTH_INVALID = 0x0, 731 + DEPTH_16 = 0x1, 732 + DEPTH_X8_24 = 0x2, 733 + DEPTH_8_24 = 0x3, 734 + DEPTH_X8_24_FLOAT = 0x4, 735 + DEPTH_8_24_FLOAT = 0x5, 736 + DEPTH_32_FLOAT = 0x6, 737 + DEPTH_X24_8_32_FLOAT = 0x7, 738 + } DepthFormat; 739 + typedef enum ZFormat { 740 + Z_INVALID = 0x0, 741 + Z_16 = 0x1, 742 + Z_24 = 0x2, 743 + Z_32_FLOAT = 0x3, 744 + } ZFormat; 745 + typedef enum StencilFormat { 746 + STENCIL_INVALID = 0x0, 747 + STENCIL_8 = 0x1, 748 + } StencilFormat; 749 + typedef enum CmaskMode { 750 + CMASK_CLEAR_NONE = 0x0, 751 + CMASK_CLEAR_ONE = 0x1, 752 + CMASK_CLEAR_ALL = 0x2, 753 + CMASK_ANY_EXPANDED = 0x3, 754 + CMASK_ALPHA0_FRAG1 = 0x4, 755 + CMASK_ALPHA0_FRAG2 = 0x5, 756 + CMASK_ALPHA0_FRAG4 = 0x6, 757 + CMASK_ALPHA0_FRAGS = 0x7, 758 + CMASK_ALPHA1_FRAG1 = 0x8, 759 + CMASK_ALPHA1_FRAG2 = 0x9, 760 + CMASK_ALPHA1_FRAG4 = 0xa, 761 + CMASK_ALPHA1_FRAGS = 0xb, 762 + CMASK_ALPHAX_FRAG1 = 0xc, 763 + CMASK_ALPHAX_FRAG2 = 0xd, 764 + CMASK_ALPHAX_FRAG4 = 0xe, 765 + CMASK_ALPHAX_FRAGS = 0xf, 766 + } CmaskMode; 767 + typedef enum QuadExportFormat { 768 + EXPORT_UNUSED = 0x0, 769 + EXPORT_32_R = 0x1, 770 + EXPORT_32_GR = 0x2, 771 + EXPORT_32_AR = 0x3, 772 + EXPORT_FP16_ABGR = 0x4, 773 + EXPORT_UNSIGNED16_ABGR = 0x5, 774 + EXPORT_SIGNED16_ABGR = 0x6, 775 + EXPORT_32_ABGR = 0x7, 776 + } QuadExportFormat; 777 + typedef enum QuadExportFormatOld { 778 + EXPORT_4P_32BPC_ABGR = 0x0, 779 + EXPORT_4P_16BPC_ABGR = 0x1, 780 + EXPORT_4P_32BPC_GR = 0x2, 781 + EXPORT_4P_32BPC_AR = 0x3, 782 + EXPORT_2P_32BPC_ABGR = 0x4, 783 + EXPORT_8P_32BPC_R = 0x5, 784 + } QuadExportFormatOld; 785 + typedef enum ColorFormat { 786 + COLOR_INVALID = 0x0, 787 + COLOR_8 = 0x1, 788 + COLOR_16 = 0x2, 789 + COLOR_8_8 = 0x3, 790 + COLOR_32 = 0x4, 791 + COLOR_16_16 = 0x5, 792 + COLOR_10_11_11 = 0x6, 793 + COLOR_11_11_10 = 0x7, 794 + COLOR_10_10_10_2 = 0x8, 795 + COLOR_2_10_10_10 = 0x9, 796 + COLOR_8_8_8_8 = 0xa, 797 + COLOR_32_32 = 0xb, 798 + COLOR_16_16_16_16 = 0xc, 799 + COLOR_RESERVED_13 = 0xd, 800 + COLOR_32_32_32_32 = 0xe, 801 + COLOR_RESERVED_15 = 0xf, 802 + COLOR_5_6_5 = 0x10, 803 + COLOR_1_5_5_5 = 0x11, 804 + COLOR_5_5_5_1 = 0x12, 805 + COLOR_4_4_4_4 = 0x13, 806 + COLOR_8_24 = 0x14, 807 + COLOR_24_8 = 0x15, 808 + COLOR_X24_8_32_FLOAT = 0x16, 809 + COLOR_RESERVED_23 = 0x17, 810 + } ColorFormat; 811 + typedef enum SurfaceFormat { 812 + FMT_INVALID = 0x0, 813 + FMT_8 = 0x1, 814 + FMT_16 = 0x2, 815 + FMT_8_8 = 0x3, 816 + FMT_32 = 0x4, 817 + FMT_16_16 = 0x5, 818 + FMT_10_11_11 = 0x6, 819 + FMT_11_11_10 = 0x7, 820 + FMT_10_10_10_2 = 0x8, 821 + FMT_2_10_10_10 = 0x9, 822 + FMT_8_8_8_8 = 0xa, 823 + FMT_32_32 = 0xb, 824 + FMT_16_16_16_16 = 0xc, 825 + FMT_32_32_32 = 0xd, 826 + FMT_32_32_32_32 = 0xe, 827 + FMT_RESERVED_4 = 0xf, 828 + FMT_5_6_5 = 0x10, 829 + FMT_1_5_5_5 = 0x11, 830 + FMT_5_5_5_1 = 0x12, 831 + FMT_4_4_4_4 = 0x13, 832 + FMT_8_24 = 0x14, 833 + FMT_24_8 = 0x15, 834 + FMT_X24_8_32_FLOAT = 0x16, 835 + FMT_RESERVED_33 = 0x17, 836 + 
FMT_11_11_10_FLOAT = 0x18, 837 + FMT_16_FLOAT = 0x19, 838 + FMT_32_FLOAT = 0x1a, 839 + FMT_16_16_FLOAT = 0x1b, 840 + FMT_8_24_FLOAT = 0x1c, 841 + FMT_24_8_FLOAT = 0x1d, 842 + FMT_32_32_FLOAT = 0x1e, 843 + FMT_10_11_11_FLOAT = 0x1f, 844 + FMT_16_16_16_16_FLOAT = 0x20, 845 + FMT_3_3_2 = 0x21, 846 + FMT_6_5_5 = 0x22, 847 + FMT_32_32_32_32_FLOAT = 0x23, 848 + FMT_RESERVED_36 = 0x24, 849 + FMT_1 = 0x25, 850 + FMT_1_REVERSED = 0x26, 851 + FMT_GB_GR = 0x27, 852 + FMT_BG_RG = 0x28, 853 + FMT_32_AS_8 = 0x29, 854 + FMT_32_AS_8_8 = 0x2a, 855 + FMT_5_9_9_9_SHAREDEXP = 0x2b, 856 + FMT_8_8_8 = 0x2c, 857 + FMT_16_16_16 = 0x2d, 858 + FMT_16_16_16_FLOAT = 0x2e, 859 + FMT_4_4 = 0x2f, 860 + FMT_32_32_32_FLOAT = 0x30, 861 + FMT_BC1 = 0x31, 862 + FMT_BC2 = 0x32, 863 + FMT_BC3 = 0x33, 864 + FMT_BC4 = 0x34, 865 + FMT_BC5 = 0x35, 866 + FMT_BC6 = 0x36, 867 + FMT_BC7 = 0x37, 868 + FMT_32_AS_32_32_32_32 = 0x38, 869 + FMT_APC3 = 0x39, 870 + FMT_APC4 = 0x3a, 871 + FMT_APC5 = 0x3b, 872 + FMT_APC6 = 0x3c, 873 + FMT_APC7 = 0x3d, 874 + FMT_CTX1 = 0x3e, 875 + FMT_RESERVED_63 = 0x3f, 876 + } SurfaceFormat; 877 + typedef enum BUF_DATA_FORMAT { 878 + BUF_DATA_FORMAT_INVALID = 0x0, 879 + BUF_DATA_FORMAT_8 = 0x1, 880 + BUF_DATA_FORMAT_16 = 0x2, 881 + BUF_DATA_FORMAT_8_8 = 0x3, 882 + BUF_DATA_FORMAT_32 = 0x4, 883 + BUF_DATA_FORMAT_16_16 = 0x5, 884 + BUF_DATA_FORMAT_10_11_11 = 0x6, 885 + BUF_DATA_FORMAT_11_11_10 = 0x7, 886 + BUF_DATA_FORMAT_10_10_10_2 = 0x8, 887 + BUF_DATA_FORMAT_2_10_10_10 = 0x9, 888 + BUF_DATA_FORMAT_8_8_8_8 = 0xa, 889 + BUF_DATA_FORMAT_32_32 = 0xb, 890 + BUF_DATA_FORMAT_16_16_16_16 = 0xc, 891 + BUF_DATA_FORMAT_32_32_32 = 0xd, 892 + BUF_DATA_FORMAT_32_32_32_32 = 0xe, 893 + BUF_DATA_FORMAT_RESERVED_15 = 0xf, 894 + } BUF_DATA_FORMAT; 895 + typedef enum IMG_DATA_FORMAT { 896 + IMG_DATA_FORMAT_INVALID = 0x0, 897 + IMG_DATA_FORMAT_8 = 0x1, 898 + IMG_DATA_FORMAT_16 = 0x2, 899 + IMG_DATA_FORMAT_8_8 = 0x3, 900 + IMG_DATA_FORMAT_32 = 0x4, 901 + IMG_DATA_FORMAT_16_16 = 0x5, 902 + IMG_DATA_FORMAT_10_11_11 = 0x6, 903 + IMG_DATA_FORMAT_11_11_10 = 0x7, 904 + IMG_DATA_FORMAT_10_10_10_2 = 0x8, 905 + IMG_DATA_FORMAT_2_10_10_10 = 0x9, 906 + IMG_DATA_FORMAT_8_8_8_8 = 0xa, 907 + IMG_DATA_FORMAT_32_32 = 0xb, 908 + IMG_DATA_FORMAT_16_16_16_16 = 0xc, 909 + IMG_DATA_FORMAT_32_32_32 = 0xd, 910 + IMG_DATA_FORMAT_32_32_32_32 = 0xe, 911 + IMG_DATA_FORMAT_RESERVED_15 = 0xf, 912 + IMG_DATA_FORMAT_5_6_5 = 0x10, 913 + IMG_DATA_FORMAT_1_5_5_5 = 0x11, 914 + IMG_DATA_FORMAT_5_5_5_1 = 0x12, 915 + IMG_DATA_FORMAT_4_4_4_4 = 0x13, 916 + IMG_DATA_FORMAT_8_24 = 0x14, 917 + IMG_DATA_FORMAT_24_8 = 0x15, 918 + IMG_DATA_FORMAT_X24_8_32 = 0x16, 919 + IMG_DATA_FORMAT_RESERVED_23 = 0x17, 920 + IMG_DATA_FORMAT_RESERVED_24 = 0x18, 921 + IMG_DATA_FORMAT_RESERVED_25 = 0x19, 922 + IMG_DATA_FORMAT_RESERVED_26 = 0x1a, 923 + IMG_DATA_FORMAT_RESERVED_27 = 0x1b, 924 + IMG_DATA_FORMAT_RESERVED_28 = 0x1c, 925 + IMG_DATA_FORMAT_RESERVED_29 = 0x1d, 926 + IMG_DATA_FORMAT_RESERVED_30 = 0x1e, 927 + IMG_DATA_FORMAT_RESERVED_31 = 0x1f, 928 + IMG_DATA_FORMAT_GB_GR = 0x20, 929 + IMG_DATA_FORMAT_BG_RG = 0x21, 930 + IMG_DATA_FORMAT_5_9_9_9 = 0x22, 931 + IMG_DATA_FORMAT_BC1 = 0x23, 932 + IMG_DATA_FORMAT_BC2 = 0x24, 933 + IMG_DATA_FORMAT_BC3 = 0x25, 934 + IMG_DATA_FORMAT_BC4 = 0x26, 935 + IMG_DATA_FORMAT_BC5 = 0x27, 936 + IMG_DATA_FORMAT_BC6 = 0x28, 937 + IMG_DATA_FORMAT_BC7 = 0x29, 938 + IMG_DATA_FORMAT_RESERVED_42 = 0x2a, 939 + IMG_DATA_FORMAT_RESERVED_43 = 0x2b, 940 + IMG_DATA_FORMAT_FMASK8_S2_F1 = 0x2c, 941 + IMG_DATA_FORMAT_FMASK8_S4_F1 = 0x2d, 942 + 
IMG_DATA_FORMAT_FMASK8_S8_F1 = 0x2e, 943 + IMG_DATA_FORMAT_FMASK8_S2_F2 = 0x2f, 944 + IMG_DATA_FORMAT_FMASK8_S4_F2 = 0x30, 945 + IMG_DATA_FORMAT_FMASK8_S4_F4 = 0x31, 946 + IMG_DATA_FORMAT_FMASK16_S16_F1 = 0x32, 947 + IMG_DATA_FORMAT_FMASK16_S8_F2 = 0x33, 948 + IMG_DATA_FORMAT_FMASK32_S16_F2 = 0x34, 949 + IMG_DATA_FORMAT_FMASK32_S8_F4 = 0x35, 950 + IMG_DATA_FORMAT_FMASK32_S8_F8 = 0x36, 951 + IMG_DATA_FORMAT_FMASK64_S16_F4 = 0x37, 952 + IMG_DATA_FORMAT_FMASK64_S16_F8 = 0x38, 953 + IMG_DATA_FORMAT_4_4 = 0x39, 954 + IMG_DATA_FORMAT_6_5_5 = 0x3a, 955 + IMG_DATA_FORMAT_1 = 0x3b, 956 + IMG_DATA_FORMAT_1_REVERSED = 0x3c, 957 + IMG_DATA_FORMAT_32_AS_8 = 0x3d, 958 + IMG_DATA_FORMAT_32_AS_8_8 = 0x3e, 959 + IMG_DATA_FORMAT_32_AS_32_32_32_32 = 0x3f, 960 + } IMG_DATA_FORMAT; 961 + typedef enum BUF_NUM_FORMAT { 962 + BUF_NUM_FORMAT_UNORM = 0x0, 963 + BUF_NUM_FORMAT_SNORM = 0x1, 964 + BUF_NUM_FORMAT_USCALED = 0x2, 965 + BUF_NUM_FORMAT_SSCALED = 0x3, 966 + BUF_NUM_FORMAT_UINT = 0x4, 967 + BUF_NUM_FORMAT_SINT = 0x5, 968 + BUF_NUM_FORMAT_SNORM_OGL = 0x6, 969 + BUF_NUM_FORMAT_FLOAT = 0x7, 970 + } BUF_NUM_FORMAT; 971 + typedef enum IMG_NUM_FORMAT { 972 + IMG_NUM_FORMAT_UNORM = 0x0, 973 + IMG_NUM_FORMAT_SNORM = 0x1, 974 + IMG_NUM_FORMAT_USCALED = 0x2, 975 + IMG_NUM_FORMAT_SSCALED = 0x3, 976 + IMG_NUM_FORMAT_UINT = 0x4, 977 + IMG_NUM_FORMAT_SINT = 0x5, 978 + IMG_NUM_FORMAT_SNORM_OGL = 0x6, 979 + IMG_NUM_FORMAT_FLOAT = 0x7, 980 + IMG_NUM_FORMAT_RESERVED_8 = 0x8, 981 + IMG_NUM_FORMAT_SRGB = 0x9, 982 + IMG_NUM_FORMAT_UBNORM = 0xa, 983 + IMG_NUM_FORMAT_UBNORM_OGL = 0xb, 984 + IMG_NUM_FORMAT_UBINT = 0xc, 985 + IMG_NUM_FORMAT_UBSCALED = 0xd, 986 + IMG_NUM_FORMAT_RESERVED_14 = 0xe, 987 + IMG_NUM_FORMAT_RESERVED_15 = 0xf, 988 + } IMG_NUM_FORMAT; 989 + typedef enum TileType { 990 + ARRAY_COLOR_TILE = 0x0, 991 + ARRAY_DEPTH_TILE = 0x1, 992 + } TileType; 993 + typedef enum NonDispTilingOrder { 994 + ADDR_SURF_MICRO_TILING_DISPLAY = 0x0, 995 + ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1, 996 + } NonDispTilingOrder; 997 + typedef enum MicroTileMode { 998 + ADDR_SURF_DISPLAY_MICRO_TILING = 0x0, 999 + ADDR_SURF_THIN_MICRO_TILING = 0x1, 1000 + ADDR_SURF_DEPTH_MICRO_TILING = 0x2, 1001 + ADDR_SURF_ROTATED_MICRO_TILING = 0x3, 1002 + ADDR_SURF_THICK_MICRO_TILING = 0x4, 1003 + } MicroTileMode; 1004 + typedef enum TileSplit { 1005 + ADDR_SURF_TILE_SPLIT_64B = 0x0, 1006 + ADDR_SURF_TILE_SPLIT_128B = 0x1, 1007 + ADDR_SURF_TILE_SPLIT_256B = 0x2, 1008 + ADDR_SURF_TILE_SPLIT_512B = 0x3, 1009 + ADDR_SURF_TILE_SPLIT_1KB = 0x4, 1010 + ADDR_SURF_TILE_SPLIT_2KB = 0x5, 1011 + ADDR_SURF_TILE_SPLIT_4KB = 0x6, 1012 + } TileSplit; 1013 + typedef enum SampleSplit { 1014 + ADDR_SURF_SAMPLE_SPLIT_1 = 0x0, 1015 + ADDR_SURF_SAMPLE_SPLIT_2 = 0x1, 1016 + ADDR_SURF_SAMPLE_SPLIT_4 = 0x2, 1017 + ADDR_SURF_SAMPLE_SPLIT_8 = 0x3, 1018 + } SampleSplit; 1019 + typedef enum PipeConfig { 1020 + ADDR_SURF_P2 = 0x0, 1021 + ADDR_SURF_P2_RESERVED0 = 0x1, 1022 + ADDR_SURF_P2_RESERVED1 = 0x2, 1023 + ADDR_SURF_P2_RESERVED2 = 0x3, 1024 + ADDR_SURF_P4_8x16 = 0x4, 1025 + ADDR_SURF_P4_16x16 = 0x5, 1026 + ADDR_SURF_P4_16x32 = 0x6, 1027 + ADDR_SURF_P4_32x32 = 0x7, 1028 + ADDR_SURF_P8_16x16_8x16 = 0x8, 1029 + ADDR_SURF_P8_16x32_8x16 = 0x9, 1030 + ADDR_SURF_P8_32x32_8x16 = 0xa, 1031 + ADDR_SURF_P8_16x32_16x16 = 0xb, 1032 + ADDR_SURF_P8_32x32_16x16 = 0xc, 1033 + ADDR_SURF_P8_32x32_16x32 = 0xd, 1034 + ADDR_SURF_P8_32x64_32x32 = 0xe, 1035 + } PipeConfig; 1036 + typedef enum NumBanks { 1037 + ADDR_SURF_2_BANK = 0x0, 1038 + ADDR_SURF_4_BANK = 0x1, 1039 + ADDR_SURF_8_BANK = 0x2, 1040 + 
ADDR_SURF_16_BANK = 0x3, 1041 + } NumBanks; 1042 + typedef enum BankWidth { 1043 + ADDR_SURF_BANK_WIDTH_1 = 0x0, 1044 + ADDR_SURF_BANK_WIDTH_2 = 0x1, 1045 + ADDR_SURF_BANK_WIDTH_4 = 0x2, 1046 + ADDR_SURF_BANK_WIDTH_8 = 0x3, 1047 + } BankWidth; 1048 + typedef enum BankHeight { 1049 + ADDR_SURF_BANK_HEIGHT_1 = 0x0, 1050 + ADDR_SURF_BANK_HEIGHT_2 = 0x1, 1051 + ADDR_SURF_BANK_HEIGHT_4 = 0x2, 1052 + ADDR_SURF_BANK_HEIGHT_8 = 0x3, 1053 + } BankHeight; 1054 + typedef enum BankWidthHeight { 1055 + ADDR_SURF_BANK_WH_1 = 0x0, 1056 + ADDR_SURF_BANK_WH_2 = 0x1, 1057 + ADDR_SURF_BANK_WH_4 = 0x2, 1058 + ADDR_SURF_BANK_WH_8 = 0x3, 1059 + } BankWidthHeight; 1060 + typedef enum MacroTileAspect { 1061 + ADDR_SURF_MACRO_ASPECT_1 = 0x0, 1062 + ADDR_SURF_MACRO_ASPECT_2 = 0x1, 1063 + ADDR_SURF_MACRO_ASPECT_4 = 0x2, 1064 + ADDR_SURF_MACRO_ASPECT_8 = 0x3, 1065 + } MacroTileAspect; 1066 + typedef enum TCC_CACHE_POLICIES { 1067 + TCC_CACHE_POLICY_LRU = 0x0, 1068 + TCC_CACHE_POLICY_STREAM = 0x1, 1069 + TCC_CACHE_POLICY_BYPASS = 0x2, 1070 + } TCC_CACHE_POLICIES; 1071 + typedef enum PERFMON_COUNTER_MODE { 1072 + PERFMON_COUNTER_MODE_ACCUM = 0x0, 1073 + PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1, 1074 + PERFMON_COUNTER_MODE_MAX = 0x2, 1075 + PERFMON_COUNTER_MODE_DIRTY = 0x3, 1076 + PERFMON_COUNTER_MODE_SAMPLE = 0x4, 1077 + PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT = 0x5, 1078 + PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT = 0x6, 1079 + PERFMON_COUNTER_MODE_CYCLES_GE_HI = 0x7, 1080 + PERFMON_COUNTER_MODE_CYCLES_EQ_HI = 0x8, 1081 + PERFMON_COUNTER_MODE_INACTIVE_CYCLES = 0x9, 1082 + PERFMON_COUNTER_MODE_RESERVED = 0xf, 1083 + } PERFMON_COUNTER_MODE; 1084 + typedef enum PERFMON_SPM_MODE { 1085 + PERFMON_SPM_MODE_OFF = 0x0, 1086 + PERFMON_SPM_MODE_16BIT_CLAMP = 0x1, 1087 + PERFMON_SPM_MODE_16BIT_NO_CLAMP = 0x2, 1088 + PERFMON_SPM_MODE_32BIT_CLAMP = 0x3, 1089 + PERFMON_SPM_MODE_32BIT_NO_CLAMP = 0x4, 1090 + PERFMON_SPM_MODE_RESERVED_5 = 0x5, 1091 + PERFMON_SPM_MODE_RESERVED_6 = 0x6, 1092 + PERFMON_SPM_MODE_RESERVED_7 = 0x7, 1093 + PERFMON_SPM_MODE_TEST_MODE_0 = 0x8, 1094 + PERFMON_SPM_MODE_TEST_MODE_1 = 0x9, 1095 + PERFMON_SPM_MODE_TEST_MODE_2 = 0xa, 1096 + } PERFMON_SPM_MODE; 1097 + typedef enum SurfaceTiling { 1098 + ARRAY_LINEAR = 0x0, 1099 + ARRAY_TILED = 0x1, 1100 + } SurfaceTiling; 1101 + typedef enum SurfaceArray { 1102 + ARRAY_1D = 0x0, 1103 + ARRAY_2D = 0x1, 1104 + ARRAY_3D = 0x2, 1105 + ARRAY_3D_SLICE = 0x3, 1106 + } SurfaceArray; 1107 + typedef enum ColorArray { 1108 + ARRAY_2D_ALT_COLOR = 0x0, 1109 + ARRAY_2D_COLOR = 0x1, 1110 + ARRAY_3D_SLICE_COLOR = 0x3, 1111 + } ColorArray; 1112 + typedef enum DepthArray { 1113 + ARRAY_2D_ALT_DEPTH = 0x0, 1114 + ARRAY_2D_DEPTH = 0x1, 1115 + } DepthArray; 1116 + 1117 + #endif /* DCE_8_0_ENUM_H */
+12
drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
··· 4130 4130 #define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0xe 4131 4131 #define PHY_AUX_CNTL__AUX_PAD_RXSEL_MASK 0x10000 4132 4132 #define PHY_AUX_CNTL__AUX_PAD_RXSEL__SHIFT 0x10 4133 + #define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK_MASK 0x1 4134 + #define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK__SHIFT 0x0 4135 + #define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS_MASK 0x2 4136 + #define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS__SHIFT 0x1 4137 + #define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV_MASK 0x4 4138 + #define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV__SHIFT 0x2 4139 + #define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK_MASK 0x10 4140 + #define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK__SHIFT 0x4 4141 + #define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS_MASK 0x20 4142 + #define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS__SHIFT 0x5 4143 + #define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV_MASK 0x40 4144 + #define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV__SHIFT 0x6 4133 4145 #define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A_MASK 0x1 4134 4146 #define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A__SHIFT 0x0 4135 4147 #define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A_MASK 0x2
+102
drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
··· 1 + /* 2 + * Volcanic Islands IV SRC Register documentation 3 + * 4 + * Copyright (C) 2015 Advanced Micro Devices, Inc. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the "Software"), 8 + * to deal in the Software without restriction, including without limitation 9 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 + * and/or sell copies of the Software, and to permit persons to whom the 11 + * Software is furnished to do so, subject to the following conditions: 12 + * 13 + * The above copyright notice and this permission notice shall be included 14 + * in all copies or substantial portions of the Software. 15 + * 16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 17 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 20 + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 21 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 22 + */ 23 + 24 + #ifndef _IVSRCID_VISLANDS30_H_ 25 + #define _IVSRCID_VISLANDS30_H_ 26 + 27 + 28 + // IV Source IDs 29 + 30 + #define VISLANDS30_IV_SRCID_D1_V_UPDATE_INT 7 // 0x07 31 + #define VISLANDS30_IV_EXTID_D1_V_UPDATE_INT 0 32 + 33 + #define VISLANDS30_IV_SRCID_D1_GRPH_PFLIP 8 // 0x08 34 + #define VISLANDS30_IV_EXTID_D1_GRPH_PFLIP 0 35 + 36 + #define VISLANDS30_IV_SRCID_D2_V_UPDATE_INT 9 // 0x09 37 + #define VISLANDS30_IV_EXTID_D2_V_UPDATE_INT 0 38 + 39 + #define VISLANDS30_IV_SRCID_D2_GRPH_PFLIP 10 // 0x0a 40 + #define VISLANDS30_IV_EXTID_D2_GRPH_PFLIP 0 41 + 42 + #define VISLANDS30_IV_SRCID_D3_V_UPDATE_INT 11 // 0x0b 43 + #define VISLANDS30_IV_EXTID_D3_V_UPDATE_INT 0 44 + 45 + #define VISLANDS30_IV_SRCID_D3_GRPH_PFLIP 12 // 0x0c 46 + #define VISLANDS30_IV_EXTID_D3_GRPH_PFLIP 0 47 + 48 + #define VISLANDS30_IV_SRCID_D4_V_UPDATE_INT 13 // 0x0d 49 + #define VISLANDS30_IV_EXTID_D4_V_UPDATE_INT 0 50 + 51 + #define VISLANDS30_IV_SRCID_D4_GRPH_PFLIP 14 // 0x0e 52 + #define VISLANDS30_IV_EXTID_D4_GRPH_PFLIP 0 53 + 54 + #define VISLANDS30_IV_SRCID_D5_V_UPDATE_INT 15 // 0x0f 55 + #define VISLANDS30_IV_EXTID_D5_V_UPDATE_INT 0 56 + 57 + #define VISLANDS30_IV_SRCID_D5_GRPH_PFLIP 16 // 0x10 58 + #define VISLANDS30_IV_EXTID_D5_GRPH_PFLIP 0 59 + 60 + #define VISLANDS30_IV_SRCID_D6_V_UPDATE_INT 17 // 0x11 61 + #define VISLANDS30_IV_EXTID_D6_V_UPDATE_INT 0 62 + 63 + #define VISLANDS30_IV_SRCID_D6_GRPH_PFLIP 18 // 0x12 64 + #define VISLANDS30_IV_EXTID_D6_GRPH_PFLIP 0 65 + 66 + #define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A 42 // 0x2a 67 + #define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A 0 68 + 69 + #define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_B 42 // 0x2a 70 + #define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B 1 71 + 72 + #define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_C 42 // 0x2a 73 + #define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C 2 74 + 75 + #define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_D 42 // 0x2a 76 + #define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D 3 77 + 78 + #define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_E 42 // 0x2a 79 + #define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E 4 80 + 81 + #define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_F 42 // 0x2a 82 + #define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F 5 83 + 84 + #define VISLANDS30_IV_SRCID_HPD_RX_A 42 // 0x2a 85 + #define VISLANDS30_IV_EXTID_HPD_RX_A 6 86 + 87 + 
#define VISLANDS30_IV_SRCID_HPD_RX_B 42 // 0x2a 88 + #define VISLANDS30_IV_EXTID_HPD_RX_B 7 89 + 90 + #define VISLANDS30_IV_SRCID_HPD_RX_C 42 // 0x2a 91 + #define VISLANDS30_IV_EXTID_HPD_RX_C 8 92 + 93 + #define VISLANDS30_IV_SRCID_HPD_RX_D 42 // 0x2a 94 + #define VISLANDS30_IV_EXTID_HPD_RX_D 9 95 + 96 + #define VISLANDS30_IV_SRCID_HPD_RX_E 42 // 0x2a 97 + #define VISLANDS30_IV_EXTID_HPD_RX_E 10 98 + 99 + #define VISLANDS30_IV_SRCID_HPD_RX_F 42 // 0x2a 100 + #define VISLANDS30_IV_EXTID_HPD_RX_F 11 101 + 102 + #endif // _IVSRCID_VISLANDS30_H_
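Every hotplug-detect and HPD-RX interrupt in this header shares IV source ID 42 and is distinguished only by its extension ID (EXTID 0-5 for hotplug detect A-F, 6-11 for HPD RX A-F), so a handler has to branch on both fields. The sketch below is a minimal, hypothetical decoder for that convention; the helper name and handler shape are assumptions, only the numeric values come from the defines above.

#include <stdio.h>

#define IV_SRCID_HOTPLUG 42	/* shared by VISLANDS30_IV_SRCID_HOTPLUG_DETECT_* and *_HPD_RX_* */

/* Hypothetical helper: map (src_id, ext_id) to a connector index 0..5.
 * ext_id 0..5  -> hotplug detect on connector A..F
 * ext_id 6..11 -> HPD RX (short pulse) on connector A..F
 */
static int decode_hpd(unsigned src_id, unsigned ext_id, int *is_rx)
{
	if (src_id != IV_SRCID_HOTPLUG || ext_id > 11)
		return -1;		/* not a hotplug interrupt */
	*is_rx = ext_id >= 6;
	return ext_id % 6;		/* connector index: 0 = A ... 5 = F */
}

int main(void)
{
	int rx;
	int idx = decode_hpd(42, 7, &rx);	/* SRCID_HPD_RX_B / EXTID 7 */
	printf("connector %c, %s\n", 'A' + idx, rx ? "HPD RX" : "hotplug detect");
	return 0;
}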
+202 -4
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
··· 29 29 #include "pp_instance.h" 30 30 #include "power_state.h" 31 31 #include "eventmanager.h" 32 + #include "pp_debug.h" 32 33 33 34 #define PP_CHECK(handle) \ 34 35 do { \ ··· 434 433 case PP_StateUILabel_Performance: 435 434 return POWER_STATE_TYPE_PERFORMANCE; 436 435 default: 437 - return POWER_STATE_TYPE_DEFAULT; 436 + if (state->classification.flags & PP_StateClassificationFlag_Boot) 437 + return POWER_STATE_TYPE_INTERNAL_BOOT; 438 + else 439 + return POWER_STATE_TYPE_DEFAULT; 438 440 } 439 441 } 440 442 ··· 539 535 return hwmgr->hwmgr_func->get_temperature(hwmgr); 540 536 } 541 537 538 + static int pp_dpm_get_pp_num_states(void *handle, 539 + struct pp_states_info *data) 540 + { 541 + struct pp_hwmgr *hwmgr; 542 + int i; 543 + 544 + if (!handle) 545 + return -EINVAL; 546 + 547 + hwmgr = ((struct pp_instance *)handle)->hwmgr; 548 + 549 + if (hwmgr == NULL || hwmgr->ps == NULL) 550 + return -EINVAL; 551 + 552 + data->nums = hwmgr->num_ps; 553 + 554 + for (i = 0; i < hwmgr->num_ps; i++) { 555 + struct pp_power_state *state = (struct pp_power_state *) 556 + ((unsigned long)hwmgr->ps + i * hwmgr->ps_size); 557 + switch (state->classification.ui_label) { 558 + case PP_StateUILabel_Battery: 559 + data->states[i] = POWER_STATE_TYPE_BATTERY; 560 + break; 561 + case PP_StateUILabel_Balanced: 562 + data->states[i] = POWER_STATE_TYPE_BALANCED; 563 + break; 564 + case PP_StateUILabel_Performance: 565 + data->states[i] = POWER_STATE_TYPE_PERFORMANCE; 566 + break; 567 + default: 568 + if (state->classification.flags & PP_StateClassificationFlag_Boot) 569 + data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT; 570 + else 571 + data->states[i] = POWER_STATE_TYPE_DEFAULT; 572 + } 573 + } 574 + 575 + return 0; 576 + } 577 + 578 + static int pp_dpm_get_pp_table(void *handle, char **table) 579 + { 580 + struct pp_hwmgr *hwmgr; 581 + 582 + if (!handle) 583 + return -EINVAL; 584 + 585 + hwmgr = ((struct pp_instance *)handle)->hwmgr; 586 + 587 + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || 588 + hwmgr->hwmgr_func->get_pp_table == NULL) 589 + return -EINVAL; 590 + 591 + return hwmgr->hwmgr_func->get_pp_table(hwmgr, table); 592 + } 593 + 594 + static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) 595 + { 596 + struct pp_hwmgr *hwmgr; 597 + 598 + if (!handle) 599 + return -EINVAL; 600 + 601 + hwmgr = ((struct pp_instance *)handle)->hwmgr; 602 + 603 + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || 604 + hwmgr->hwmgr_func->set_pp_table == NULL) 605 + return -EINVAL; 606 + 607 + return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size); 608 + } 609 + 610 + static int pp_dpm_force_clock_level(void *handle, 611 + enum pp_clock_type type, int level) 612 + { 613 + struct pp_hwmgr *hwmgr; 614 + 615 + if (!handle) 616 + return -EINVAL; 617 + 618 + hwmgr = ((struct pp_instance *)handle)->hwmgr; 619 + 620 + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || 621 + hwmgr->hwmgr_func->force_clock_level == NULL) 622 + return -EINVAL; 623 + 624 + return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, level); 625 + } 626 + 627 + static int pp_dpm_print_clock_levels(void *handle, 628 + enum pp_clock_type type, char *buf) 629 + { 630 + struct pp_hwmgr *hwmgr; 631 + 632 + if (!handle) 633 + return -EINVAL; 634 + 635 + hwmgr = ((struct pp_instance *)handle)->hwmgr; 636 + 637 + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || 638 + hwmgr->hwmgr_func->print_clock_levels == NULL) 639 + return -EINVAL; 640 + 641 + return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); 642 + } 643 + 542 644 const 
struct amd_powerplay_funcs pp_dpm_funcs = { 543 645 .get_temperature = pp_dpm_get_temperature, 544 646 .load_firmware = pp_dpm_load_fw, ··· 662 552 .get_fan_control_mode = pp_dpm_get_fan_control_mode, 663 553 .set_fan_speed_percent = pp_dpm_set_fan_speed_percent, 664 554 .get_fan_speed_percent = pp_dpm_get_fan_speed_percent, 555 + .get_pp_num_states = pp_dpm_get_pp_num_states, 556 + .get_pp_table = pp_dpm_get_pp_table, 557 + .set_pp_table = pp_dpm_set_pp_table, 558 + .force_clock_level = pp_dpm_force_clock_level, 559 + .print_clock_levels = pp_dpm_print_clock_levels, 665 560 }; 666 561 667 562 static int amd_pp_instance_init(struct amd_pp_init *pp_init, ··· 750 635 751 636 /* export this function to DAL */ 752 637 753 - int amd_powerplay_display_configuration_change(void *handle, const void *input) 638 + int amd_powerplay_display_configuration_change(void *handle, 639 + const struct amd_pp_display_configuration *display_config) 754 640 { 755 641 struct pp_hwmgr *hwmgr; 756 - const struct amd_pp_display_configuration *display_config = input; 757 642 758 643 PP_CHECK((struct pp_instance *)handle); 759 644 ··· 765 650 } 766 651 767 652 int amd_powerplay_get_display_power_level(void *handle, 768 - struct amd_pp_dal_clock_info *output) 653 + struct amd_pp_simple_clock_info *output) 769 654 { 770 655 struct pp_hwmgr *hwmgr; 771 656 ··· 778 663 779 664 return phm_get_dal_power_level(hwmgr, output); 780 665 } 666 + 667 + int amd_powerplay_get_current_clocks(void *handle, 668 + struct amd_pp_clock_info *clocks) 669 + { 670 + struct pp_hwmgr *hwmgr; 671 + struct amd_pp_simple_clock_info simple_clocks; 672 + struct pp_clock_info hw_clocks; 673 + 674 + PP_CHECK((struct pp_instance *)handle); 675 + 676 + if (clocks == NULL) 677 + return -EINVAL; 678 + 679 + hwmgr = ((struct pp_instance *)handle)->hwmgr; 680 + 681 + phm_get_dal_power_level(hwmgr, &simple_clocks); 682 + 683 + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { 684 + if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment)) 685 + PP_ASSERT_WITH_CODE(0, "Error in PHM_GetPowerContainmentClockInfo", return -1); 686 + } else { 687 + if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_Activity)) 688 + PP_ASSERT_WITH_CODE(0, "Error in PHM_GetClockInfo", return -1); 689 + } 690 + 691 + clocks->min_engine_clock = hw_clocks.min_eng_clk; 692 + clocks->max_engine_clock = hw_clocks.max_eng_clk; 693 + clocks->min_memory_clock = hw_clocks.min_mem_clk; 694 + clocks->max_memory_clock = hw_clocks.max_mem_clk; 695 + clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth; 696 + clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth; 697 + 698 + clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; 699 + clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; 700 + 701 + clocks->max_clocks_state = simple_clocks.level; 702 + 703 + if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) { 704 + clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; 705 + clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; 706 + } 707 + 708 + return 0; 709 + 710 + } 711 + 712 + int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks) 713 + { 714 + int result = -1; 715 + 716 + struct pp_hwmgr *hwmgr; 717 + 718 + PP_CHECK((struct pp_instance *)handle); 719 + 720 + if (clocks == NULL) 721 + return -EINVAL; 722 
+ 723 + hwmgr = ((struct pp_instance *)handle)->hwmgr; 724 + 725 + result = phm_get_clock_by_type(hwmgr, type, clocks); 726 + 727 + return result; 728 + } 729 + 730 + int amd_powerplay_get_display_mode_validation_clocks(void *handle, 731 + struct amd_pp_simple_clock_info *clocks) 732 + { 733 + int result = -1; 734 + struct pp_hwmgr *hwmgr; 735 + 736 + PP_CHECK((struct pp_instance *)handle); 737 + 738 + if (clocks == NULL) 739 + return -EINVAL; 740 + 741 + hwmgr = ((struct pp_instance *)handle)->hwmgr; 742 + 743 + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) 744 + result = phm_get_max_high_clocks(hwmgr, clocks); 745 + 746 + return result; 747 + } 748 +
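Each new entry point in amd_powerplay.c follows the same defensive dispatch shape: cast the opaque handle to a pp_instance, check that the hwmgr backend and the specific hwmgr_func hook exist, and only then forward the call. Below is a minimal standalone model of that pattern; the struct names are simplified stand-ins, not the real kernel types.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for pp_instance / pp_hwmgr / pp_hwmgr_func. */
struct hwmgr_func { int (*force_clock_level)(void *hwmgr, int type, int level); };
struct hwmgr      { const struct hwmgr_func *func; };
struct instance   { struct hwmgr *hwmgr; };

/* Same guard order as pp_dpm_force_clock_level(): handle, backend, hook. */
static int dispatch_force_clock_level(void *handle, int type, int level)
{
	struct instance *inst = handle;
	struct hwmgr *hwmgr;

	if (!handle)
		return -EINVAL;
	hwmgr = inst->hwmgr;
	if (hwmgr == NULL || hwmgr->func == NULL ||
	    hwmgr->func->force_clock_level == NULL)
		return -EINVAL;		/* ASIC does not implement the hook */
	return hwmgr->func->force_clock_level(hwmgr, type, level);
}

static int fake_force(void *hwmgr, int type, int level)
{
	(void)hwmgr; (void)type;
	return level;			/* pretend backend: echo the level back */
}

int main(void)
{
	struct hwmgr_func f = { .force_clock_level = fake_force };
	struct hwmgr h = { .func = &f };
	struct instance inst = { .hwmgr = &h };

	printf("%d %d\n", dispatch_force_clock_level(&inst, 0, 2),
			  dispatch_force_clock_level(NULL, 0, 2));
	return 0;
}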
+184 -22
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
··· 715 715 unsigned long clock = 0; 716 716 unsigned long level; 717 717 unsigned long stable_pstate_sclk; 718 - struct PP_Clocks clocks; 719 718 unsigned long percentage; 720 719 721 720 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; ··· 725 726 else 726 727 cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; 727 728 728 - /*PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks);*/ 729 - clock = clocks.engineClock; 729 + clock = hwmgr->display_config.min_core_set_clock; 730 + if (clock == 0) 731 + printk(KERN_ERR "[ powerplay ] min_core_set_clock not set\n"); 730 732 731 733 if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) { 732 734 cz_hwmgr->sclk_dpm.hard_min_clk = clock; ··· 883 883 884 884 if (pnew_state->action == FORCE_HIGH) 885 885 cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); 886 - else if(pnew_state->action == CANCEL_FORCE_HIGH) 887 - cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); 888 - else 886 + else if (pnew_state->action == CANCEL_FORCE_HIGH) 887 + cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch); 888 + else 889 889 cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch); 890 890 } 891 891 return 0; ··· 1110 1110 cast_const_PhwCzPowerState(&pcurrent_ps->hardware); 1111 1111 1112 1112 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1113 - struct PP_Clocks clocks; 1113 + struct PP_Clocks clocks = {0, 0, 0, 0}; 1114 1114 bool force_high; 1115 - unsigned long num_of_active_displays = 4; 1115 + uint32_t num_of_active_displays = 0; 1116 + struct cgs_display_info info = {0}; 1116 1117 1117 1118 cz_ps->evclk = hwmgr->vce_arbiter.evclk; 1118 1119 cz_ps->ecclk = hwmgr->vce_arbiter.ecclk; ··· 1125 1124 1126 1125 cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); 1127 1126 1128 - /* to do PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks); */ 1129 - /* PECI_GetNumberOfActiveDisplays(pHwMgr->pPECI, &numOfActiveDisplays); */ 1127 + clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ? 1128 + hwmgr->display_config.min_mem_set_clock : 1129 + cz_hwmgr->sys_info.nbp_memory_clock[1]; 1130 + 1131 + cgs_get_active_displays_info(hwmgr->device, &info); 1132 + num_of_active_displays = info.display_count; 1133 + 1130 1134 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) 1131 1135 clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk; 1132 - else 1133 - clocks.memoryClock = 0; 1134 1136 1135 1137 if (clocks.memoryClock < hwmgr->gfx_arbiter.mclk) 1136 1138 clocks.memoryClock = hwmgr->gfx_arbiter.mclk; ··· 1203 1199 printk(KERN_ERR "[ powerplay ] Fail to construct set_power_state\n"); 1204 1200 return result; 1205 1201 } 1202 + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS; 1206 1203 1207 1204 result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings)); 1208 1205 if (result != 0) { ··· 1635 1630 & PWRMGT_SEPARATION_TIME_MASK) 1636 1631 << PWRMGT_SEPARATION_TIME_SHIFT; 1637 1632 1638 - data|= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0) 1633 + data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0) 1639 1634 << PWRMGT_DISABLE_CPU_CSTATES_SHIFT; 1640 1635 1641 - data|= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0) 1636 + data |= (hw_data->cc6_settings.cpu_pstate_disable ? 
0x1 : 0x0) 1642 1637 << PWRMGT_DISABLE_CPU_PSTATES_SHIFT; 1643 1638 1644 1639 PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n", ··· 1653 1648 } 1654 1649 1655 1650 1656 - static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, 1651 + static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, 1657 1652 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) 1658 - { 1653 + { 1659 1654 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 1660 1655 1661 1656 if (separation_time != ··· 1683 1678 return 0; 1684 1679 } 1685 1680 1686 - static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr, 1687 - struct amd_pp_dal_clock_info*info) 1681 + static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr, 1682 + struct amd_pp_simple_clock_info *info) 1688 1683 { 1689 1684 uint32_t i; 1690 - const struct phm_clock_voltage_dependency_table * table = 1685 + const struct phm_clock_voltage_dependency_table *table = 1691 1686 hwmgr->dyn_state.vddc_dep_on_dal_pwrl; 1692 - const struct phm_clock_and_voltage_limits* limits = 1687 + const struct phm_clock_and_voltage_limits *limits = 1693 1688 &hwmgr->dyn_state.max_clock_voltage_on_ac; 1694 1689 1695 1690 info->engine_max_clock = limits->sclk; 1696 1691 info->memory_max_clock = limits->mclk; 1697 1692 1698 1693 for (i = table->count - 1; i > 0; i--) { 1699 - 1700 1694 if (limits->vddc >= table->entries[i].v) { 1701 1695 info->level = table->entries[i].clk; 1702 1696 return 0; 1703 1697 } 1704 1698 } 1705 1699 return -EINVAL; 1700 + } 1701 + 1702 + static int cz_force_clock_level(struct pp_hwmgr *hwmgr, 1703 + enum pp_clock_type type, int level) 1704 + { 1705 + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 1706 + return -EINVAL; 1707 + 1708 + switch (type) { 1709 + case PP_SCLK: 1710 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 1711 + PPSMC_MSG_SetSclkSoftMin, 1712 + (1 << level)); 1713 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 1714 + PPSMC_MSG_SetSclkSoftMax, 1715 + (1 << level)); 1716 + break; 1717 + default: 1718 + break; 1719 + } 1720 + 1721 + return 0; 1722 + } 1723 + 1724 + static int cz_print_clock_levels(struct pp_hwmgr *hwmgr, 1725 + enum pp_clock_type type, char *buf) 1726 + { 1727 + struct phm_clock_voltage_dependency_table *sclk_table = 1728 + hwmgr->dyn_state.vddc_dependency_on_sclk; 1729 + int i, now, size = 0; 1730 + 1731 + switch (type) { 1732 + case PP_SCLK: 1733 + now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, 1734 + CGS_IND_REG__SMC, 1735 + ixTARGET_AND_CURRENT_PROFILE_INDEX), 1736 + TARGET_AND_CURRENT_PROFILE_INDEX, 1737 + CURR_SCLK_INDEX); 1738 + 1739 + for (i = 0; i < sclk_table->count; i++) 1740 + size += sprintf(buf + size, "%d: %uMhz %s\n", 1741 + i, sclk_table->entries[i].clk / 100, 1742 + (i == now) ? "*" : ""); 1743 + break; 1744 + default: 1745 + break; 1746 + } 1747 + return size; 1748 + } 1749 + 1750 + static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 1751 + PHM_PerformanceLevelDesignation designation, uint32_t index, 1752 + PHM_PerformanceLevel *level) 1753 + { 1754 + const struct cz_power_state *ps; 1755 + struct cz_hwmgr *data; 1756 + uint32_t level_index; 1757 + uint32_t i; 1758 + 1759 + if (level == NULL || hwmgr == NULL || state == NULL) 1760 + return -EINVAL; 1761 + 1762 + data = (struct cz_hwmgr *)(hwmgr->backend); 1763 + ps = cast_const_PhwCzPowerState(state); 1764 + 1765 + level_index = index > ps->level - 1 ? 
ps->level - 1 : index; 1766 + 1767 + level->coreClock = ps->levels[level_index].engineClock; 1768 + 1769 + if (designation == PHM_PerformanceLevelDesignation_PowerContainment) { 1770 + for (i = 1; i < ps->level; i++) { 1771 + if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) { 1772 + level->coreClock = ps->levels[i].engineClock; 1773 + break; 1774 + } 1775 + } 1776 + } 1777 + 1778 + if (level_index == 0) 1779 + level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1]; 1780 + else 1781 + level->memory_clock = data->sys_info.nbp_memory_clock[0]; 1782 + 1783 + level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4; 1784 + level->nonLocalMemoryFreq = 0; 1785 + level->nonLocalMemoryWidth = 0; 1786 + 1787 + return 0; 1788 + } 1789 + 1790 + static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, 1791 + const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) 1792 + { 1793 + const struct cz_power_state *ps = cast_const_PhwCzPowerState(state); 1794 + 1795 + clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex)); 1796 + clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex)); 1797 + 1798 + return 0; 1799 + } 1800 + 1801 + static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, 1802 + struct amd_pp_clocks *clocks) 1803 + { 1804 + struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend); 1805 + int i; 1806 + struct phm_clock_voltage_dependency_table *table; 1807 + 1808 + clocks->count = cz_get_max_sclk_level(hwmgr); 1809 + switch (type) { 1810 + case amd_pp_disp_clock: 1811 + for (i = 0; i < clocks->count; i++) 1812 + clocks->clock[i] = data->sys_info.display_clock[i]; 1813 + break; 1814 + case amd_pp_sys_clock: 1815 + table = hwmgr->dyn_state.vddc_dependency_on_sclk; 1816 + for (i = 0; i < clocks->count; i++) 1817 + clocks->clock[i] = table->entries[i].clk; 1818 + break; 1819 + case amd_pp_mem_clock: 1820 + clocks->count = CZ_NUM_NBPMEMORYCLOCK; 1821 + for (i = 0; i < clocks->count; i++) 1822 + clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i]; 1823 + break; 1824 + default: 1825 + return -1; 1826 + } 1827 + 1828 + return 0; 1829 + } 1830 + 1831 + static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) 1832 + { 1833 + struct phm_clock_voltage_dependency_table *table = 1834 + hwmgr->dyn_state.vddc_dependency_on_sclk; 1835 + unsigned long level; 1836 + const struct phm_clock_and_voltage_limits *limits = 1837 + &hwmgr->dyn_state.max_clock_voltage_on_ac; 1838 + 1839 + if ((NULL == table) || (table->count <= 0) || (clocks == NULL)) 1840 + return -EINVAL; 1841 + 1842 + level = cz_get_max_sclk_level(hwmgr) - 1; 1843 + 1844 + if (level < table->count) 1845 + clocks->engine_max_clock = table->entries[level].clk; 1846 + else 1847 + clocks->engine_max_clock = table->entries[table->count - 1].clk; 1848 + 1849 + clocks->memory_max_clock = limits->mclk; 1850 + 1851 + return 0; 1706 1852 } 1707 1853 1708 1854 static const struct pp_hwmgr_func cz_hwmgr_funcs = { ··· 1874 1718 .print_current_perforce_level = cz_print_current_perforce_level, 1875 1719 .set_cpu_power_state = cz_set_cpu_power_state, 1876 1720 .store_cc6_data = cz_store_cc6_data, 1877 - .get_dal_power_level= cz_get_dal_power_level, 1721 + .force_clock_level = cz_force_clock_level, 1722 + .print_clock_levels = cz_print_clock_levels, 1723 + 
.get_dal_power_level = cz_get_dal_power_level, 1724 + .get_performance_level = cz_get_performance_level, 1725 + .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks, 1726 + .get_clock_by_type = cz_get_clock_by_type, 1727 + .get_max_high_clocks = cz_get_max_high_clocks, 1878 1728 }; 1879 1729 1880 1730 int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
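On Carrizo the manual-clock hooks are only honored when the DPM level is AMD_DPM_FORCED_LEVEL_MANUAL: cz_print_clock_levels renders each SCLK entry as "index: frequencyMhz" with a "*" on the level the hardware currently reports, and cz_force_clock_level turns the chosen index into a one-hot mask sent as both the soft-minimum and soft-maximum message, pinning the engine clock to that single level. A small standalone sketch of those two conventions, with an invented clock table:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative clock table, in 10 kHz units like the dependency tables. */
	uint32_t clk[] = { 30000, 40000, 53300, 60000 };	/* 300..600 MHz */
	int now = 2, level = 3, i;
	char buf[256];
	int size = 0;

	/* print_clock_levels-style listing: "i: NMhz *" on the current level */
	for (i = 0; i < 4; i++)
		size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clk[i] / 100, (i == now) ? "*" : "");
	fputs(buf, stdout);

	/* force_clock_level-style selection: index -> one-hot enable mask */
	printf("soft min/max mask for level %d: 0x%x\n", level, 1u << level);
	return 0;
}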
+123
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
··· 5073 5073 CG_FDO_CTRL2, FDO_PWM_MODE); 5074 5074 } 5075 5075 5076 + static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table) 5077 + { 5078 + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); 5079 + 5080 + *table = (char *)&data->smc_state_table; 5081 + 5082 + return sizeof(struct SMU73_Discrete_DpmTable); 5083 + } 5084 + 5085 + static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) 5086 + { 5087 + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); 5088 + 5089 + void *table = (void *)&data->smc_state_table; 5090 + 5091 + memcpy(table, buf, size); 5092 + 5093 + return 0; 5094 + } 5095 + 5096 + static int fiji_force_clock_level(struct pp_hwmgr *hwmgr, 5097 + enum pp_clock_type type, int level) 5098 + { 5099 + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); 5100 + 5101 + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 5102 + return -EINVAL; 5103 + 5104 + switch (type) { 5105 + case PP_SCLK: 5106 + if (!data->sclk_dpm_key_disabled) 5107 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 5108 + PPSMC_MSG_SCLKDPM_SetEnabledMask, 5109 + (1 << level)); 5110 + break; 5111 + case PP_MCLK: 5112 + if (!data->mclk_dpm_key_disabled) 5113 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 5114 + PPSMC_MSG_MCLKDPM_SetEnabledMask, 5115 + (1 << level)); 5116 + break; 5117 + case PP_PCIE: 5118 + if (!data->pcie_dpm_key_disabled) 5119 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 5120 + PPSMC_MSG_PCIeDPM_ForceLevel, 5121 + (1 << level)); 5122 + break; 5123 + default: 5124 + break; 5125 + } 5126 + 5127 + return 0; 5128 + } 5129 + 5130 + static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr, 5131 + enum pp_clock_type type, char *buf) 5132 + { 5133 + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); 5134 + struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 5135 + struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 5136 + struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); 5137 + int i, now, size = 0; 5138 + uint32_t clock, pcie_speed; 5139 + 5140 + switch (type) { 5141 + case PP_SCLK: 5142 + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); 5143 + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); 5144 + 5145 + for (i = 0; i < sclk_table->count; i++) { 5146 + if (clock > sclk_table->dpm_levels[i].value) 5147 + continue; 5148 + break; 5149 + } 5150 + now = i; 5151 + 5152 + for (i = 0; i < sclk_table->count; i++) 5153 + size += sprintf(buf + size, "%d: %uMhz %s\n", 5154 + i, sclk_table->dpm_levels[i].value / 100, 5155 + (i == now) ? "*" : ""); 5156 + break; 5157 + case PP_MCLK: 5158 + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); 5159 + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); 5160 + 5161 + for (i = 0; i < mclk_table->count; i++) { 5162 + if (clock > mclk_table->dpm_levels[i].value) 5163 + continue; 5164 + break; 5165 + } 5166 + now = i; 5167 + 5168 + for (i = 0; i < mclk_table->count; i++) 5169 + size += sprintf(buf + size, "%d: %uMhz %s\n", 5170 + i, mclk_table->dpm_levels[i].value / 100, 5171 + (i == now) ? 
"*" : ""); 5172 + break; 5173 + case PP_PCIE: 5174 + pcie_speed = fiji_get_current_pcie_speed(hwmgr); 5175 + for (i = 0; i < pcie_table->count; i++) { 5176 + if (pcie_speed != pcie_table->dpm_levels[i].value) 5177 + continue; 5178 + break; 5179 + } 5180 + now = i; 5181 + 5182 + for (i = 0; i < pcie_table->count; i++) 5183 + size += sprintf(buf + size, "%d: %s %s\n", i, 5184 + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" : 5185 + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : 5186 + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", 5187 + (i == now) ? "*" : ""); 5188 + break; 5189 + default: 5190 + break; 5191 + } 5192 + return size; 5193 + } 5194 + 5076 5195 static const struct pp_hwmgr_func fiji_hwmgr_funcs = { 5077 5196 .backend_init = &fiji_hwmgr_backend_init, 5078 5197 .backend_fini = &tonga_hwmgr_backend_fini, ··· 5227 5108 .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt, 5228 5109 .set_fan_control_mode = fiji_set_fan_control_mode, 5229 5110 .get_fan_control_mode = fiji_get_fan_control_mode, 5111 + .get_pp_table = fiji_get_pp_table, 5112 + .set_pp_table = fiji_set_pp_table, 5113 + .force_clock_level = fiji_force_clock_level, 5114 + .print_clock_levels = fiji_print_clock_levels, 5230 5115 }; 5231 5116 5232 5117 int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
+90 -3
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
··· 26 26 #include "power_state.h" 27 27 #include "pp_acpi.h" 28 28 #include "amd_acpi.h" 29 - #include "amd_powerplay.h" 29 + #include "pp_debug.h" 30 30 31 31 #define PHM_FUNC_CHECK(hw) \ 32 32 do { \ ··· 313 313 } 314 314 315 315 int phm_get_dal_power_level(struct pp_hwmgr *hwmgr, 316 - struct amd_pp_dal_clock_info *info) 316 + struct amd_pp_simple_clock_info *info) 317 317 { 318 318 PHM_FUNC_CHECK(hwmgr); 319 319 320 320 if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL) 321 321 return -EINVAL; 322 - 323 322 return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info); 324 323 } 325 324 ··· 330 331 return hwmgr->hwmgr_func->set_cpu_power_state(hwmgr); 331 332 332 333 return 0; 334 + } 335 + 336 + 337 + int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 338 + PHM_PerformanceLevelDesignation designation, uint32_t index, 339 + PHM_PerformanceLevel *level) 340 + { 341 + PHM_FUNC_CHECK(hwmgr); 342 + if (hwmgr->hwmgr_func->get_performance_level == NULL) 343 + return -EINVAL; 344 + 345 + return hwmgr->hwmgr_func->get_performance_level(hwmgr, state, designation, index, level); 346 + 347 + 348 + } 349 + 350 + 351 + /** 352 + * Gets Clock Info. 353 + * 354 + * @param pHwMgr the address of the powerplay hardware manager. 355 + * @param pPowerState the address of the Power State structure. 356 + * @param pClockInfo the address of PP_ClockInfo structure where the result will be returned. 357 + * @exception PP_Result_Failed if any of the paramters is NULL, otherwise the return value from the back-end. 358 + */ 359 + int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *pclock_info, 360 + PHM_PerformanceLevelDesignation designation) 361 + { 362 + int result; 363 + PHM_PerformanceLevel performance_level; 364 + 365 + PHM_FUNC_CHECK(hwmgr); 366 + 367 + PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL); 368 + PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL); 369 + 370 + result = phm_get_performance_level(hwmgr, state, PHM_PerformanceLevelDesignation_Activity, 0, &performance_level); 371 + 372 + PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve minimum clocks.", return result); 373 + 374 + 375 + pclock_info->min_mem_clk = performance_level.memory_clock; 376 + pclock_info->min_eng_clk = performance_level.coreClock; 377 + pclock_info->min_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth; 378 + 379 + 380 + result = phm_get_performance_level(hwmgr, state, designation, 381 + (hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1), &performance_level); 382 + 383 + PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve maximum clocks.", return result); 384 + 385 + pclock_info->max_mem_clk = performance_level.memory_clock; 386 + pclock_info->max_eng_clk = performance_level.coreClock; 387 + pclock_info->max_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth; 388 + 389 + return 0; 390 + } 391 + 392 + int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) 393 + { 394 + PHM_FUNC_CHECK(hwmgr); 395 + 396 + if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL) 397 + return -EINVAL; 398 + 399 + return hwmgr->hwmgr_func->get_current_shallow_sleep_clocks(hwmgr, state, clock_info); 400 + 401 + } 402 + 403 + int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum 
amd_pp_clock_type type, struct amd_pp_clocks *clocks) 404 + { 405 + PHM_FUNC_CHECK(hwmgr); 406 + 407 + if (hwmgr->hwmgr_func->get_clock_by_type == NULL) 408 + return -EINVAL; 409 + 410 + return hwmgr->hwmgr_func->get_clock_by_type(hwmgr, type, clocks); 411 + 412 + } 413 + 414 + int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) 415 + { 416 + PHM_FUNC_CHECK(hwmgr); 417 + 418 + if (hwmgr->hwmgr_func->get_max_high_clocks == NULL) 419 + return -EINVAL; 420 + 421 + return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks); 333 422 }
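phm_get_clock_info derives the reported envelope from two performance-level queries: level 0 under the Activity designation supplies the minimum engine and memory clocks, the last hardware activity level under the caller's designation supplies the maxima, and bus bandwidth is taken as nonLocalMemoryFreq * nonLocalMemoryWidth in each case. The following is a self-contained sketch of that derivation over an invented level array; field names are simplified.

#include <stdint.h>
#include <stdio.h>

struct level { uint32_t core_clk, mem_clk, nl_mem_freq, nl_mem_width; };

int main(void)
{
	/* Illustrative performance levels, lowest first (values are invented). */
	struct level lv[] = {
		{ 30000, 33300, 1600, 8 },
		{ 60000, 80000, 1600, 8 },
		{ 80000, 80000, 1600, 8 },
	};
	unsigned nlevels = sizeof(lv) / sizeof(lv[0]);
	const struct level *lo = &lv[0];		/* index 0: minimum clocks */
	const struct level *hi = &lv[nlevels - 1];	/* last level: maximum clocks */

	printf("eng %u..%u, mem %u..%u, bus bw %u..%u\n",
	       lo->core_clk, hi->core_clk,
	       lo->mem_clk,  hi->mem_clk,
	       lo->nl_mem_freq * lo->nl_mem_width,
	       hi->nl_mem_freq * hi->nl_mem_width);
	return 0;
}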
+2 -2
drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
··· 293 293 } 294 294 295 295 if (factor == 1) 296 - return (ConvertToFraction(X)); 296 + return ConvertToFraction(X); 297 297 298 298 fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor)); 299 299 ··· 371 371 fZERO = ConvertToFraction(0); 372 372 373 373 if (Equal(Y, fZERO)) 374 - return fZERO; 374 + return fZERO; 375 375 376 376 longlongX = (int64_t)X.full; 377 377 longlongY = (int64_t)Y.full;
+123
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
··· 6018 6018 CG_FDO_CTRL2, FDO_PWM_MODE); 6019 6019 } 6020 6020 6021 + static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table) 6022 + { 6023 + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); 6024 + 6025 + *table = (char *)&data->smc_state_table; 6026 + 6027 + return sizeof(struct SMU72_Discrete_DpmTable); 6028 + } 6029 + 6030 + static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) 6031 + { 6032 + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); 6033 + 6034 + void *table = (void *)&data->smc_state_table; 6035 + 6036 + memcpy(table, buf, size); 6037 + 6038 + return 0; 6039 + } 6040 + 6041 + static int tonga_force_clock_level(struct pp_hwmgr *hwmgr, 6042 + enum pp_clock_type type, int level) 6043 + { 6044 + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); 6045 + 6046 + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 6047 + return -EINVAL; 6048 + 6049 + switch (type) { 6050 + case PP_SCLK: 6051 + if (!data->sclk_dpm_key_disabled) 6052 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 6053 + PPSMC_MSG_SCLKDPM_SetEnabledMask, 6054 + (1 << level)); 6055 + break; 6056 + case PP_MCLK: 6057 + if (!data->mclk_dpm_key_disabled) 6058 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 6059 + PPSMC_MSG_MCLKDPM_SetEnabledMask, 6060 + (1 << level)); 6061 + break; 6062 + case PP_PCIE: 6063 + if (!data->pcie_dpm_key_disabled) 6064 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 6065 + PPSMC_MSG_PCIeDPM_ForceLevel, 6066 + (1 << level)); 6067 + break; 6068 + default: 6069 + break; 6070 + } 6071 + 6072 + return 0; 6073 + } 6074 + 6075 + static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr, 6076 + enum pp_clock_type type, char *buf) 6077 + { 6078 + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); 6079 + struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 6080 + struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 6081 + struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); 6082 + int i, now, size = 0; 6083 + uint32_t clock, pcie_speed; 6084 + 6085 + switch (type) { 6086 + case PP_SCLK: 6087 + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); 6088 + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); 6089 + 6090 + for (i = 0; i < sclk_table->count; i++) { 6091 + if (clock > sclk_table->dpm_levels[i].value) 6092 + continue; 6093 + break; 6094 + } 6095 + now = i; 6096 + 6097 + for (i = 0; i < sclk_table->count; i++) 6098 + size += sprintf(buf + size, "%d: %uMhz %s\n", 6099 + i, sclk_table->dpm_levels[i].value / 100, 6100 + (i == now) ? "*" : ""); 6101 + break; 6102 + case PP_MCLK: 6103 + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); 6104 + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); 6105 + 6106 + for (i = 0; i < mclk_table->count; i++) { 6107 + if (clock > mclk_table->dpm_levels[i].value) 6108 + continue; 6109 + break; 6110 + } 6111 + now = i; 6112 + 6113 + for (i = 0; i < mclk_table->count; i++) 6114 + size += sprintf(buf + size, "%d: %uMhz %s\n", 6115 + i, mclk_table->dpm_levels[i].value / 100, 6116 + (i == now) ? 
"*" : ""); 6117 + break; 6118 + case PP_PCIE: 6119 + pcie_speed = tonga_get_current_pcie_speed(hwmgr); 6120 + for (i = 0; i < pcie_table->count; i++) { 6121 + if (pcie_speed != pcie_table->dpm_levels[i].value) 6122 + continue; 6123 + break; 6124 + } 6125 + now = i; 6126 + 6127 + for (i = 0; i < pcie_table->count; i++) 6128 + size += sprintf(buf + size, "%d: %s %s\n", i, 6129 + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : 6130 + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : 6131 + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", 6132 + (i == now) ? "*" : ""); 6133 + break; 6134 + default: 6135 + break; 6136 + } 6137 + return size; 6138 + } 6139 + 6021 6140 static const struct pp_hwmgr_func tonga_hwmgr_funcs = { 6022 6141 .backend_init = &tonga_hwmgr_backend_init, 6023 6142 .backend_fini = &tonga_hwmgr_backend_fini, ··· 6174 6055 .check_states_equal = tonga_check_states_equal, 6175 6056 .set_fan_control_mode = tonga_set_fan_control_mode, 6176 6057 .get_fan_control_mode = tonga_get_fan_control_mode, 6058 + .get_pp_table = tonga_get_pp_table, 6059 + .set_pp_table = tonga_set_pp_table, 6060 + .force_clock_level = tonga_force_clock_level, 6061 + .print_clock_levels = tonga_print_clock_levels, 6177 6062 }; 6178 6063 6179 6064 int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
+75 -3
drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
··· 29 29 #include "amd_shared.h" 30 30 #include "cgs_common.h" 31 31 32 + 32 33 enum amd_pp_event { 33 34 AMD_PP_EVENT_INITIALIZE = 0, 34 35 AMD_PP_EVENT_UNINITIALIZE, ··· 124 123 AMD_DPM_FORCED_LEVEL_AUTO = 0, 125 124 AMD_DPM_FORCED_LEVEL_LOW = 1, 126 125 AMD_DPM_FORCED_LEVEL_HIGH = 2, 126 + AMD_DPM_FORCED_LEVEL_MANUAL = 3, 127 127 }; 128 128 129 129 struct amd_pp_init { ··· 214 212 uint32_t dce_tolerable_mclk_in_active_latency; 215 213 }; 216 214 217 - struct amd_pp_dal_clock_info { 215 + struct amd_pp_simple_clock_info { 218 216 uint32_t engine_max_clock; 219 217 uint32_t memory_max_clock; 220 218 uint32_t level; 221 219 }; 220 + 221 + enum PP_DAL_POWERLEVEL { 222 + PP_DAL_POWERLEVEL_INVALID = 0, 223 + PP_DAL_POWERLEVEL_ULTRALOW, 224 + PP_DAL_POWERLEVEL_LOW, 225 + PP_DAL_POWERLEVEL_NOMINAL, 226 + PP_DAL_POWERLEVEL_PERFORMANCE, 227 + 228 + PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW, 229 + PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW, 230 + PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL, 231 + PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE, 232 + PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1, 233 + PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1, 234 + PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1, 235 + PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1, 236 + }; 237 + 238 + struct amd_pp_clock_info { 239 + uint32_t min_engine_clock; 240 + uint32_t max_engine_clock; 241 + uint32_t min_memory_clock; 242 + uint32_t max_memory_clock; 243 + uint32_t min_bus_bandwidth; 244 + uint32_t max_bus_bandwidth; 245 + uint32_t max_engine_clock_in_sr; 246 + uint32_t min_engine_clock_in_sr; 247 + enum PP_DAL_POWERLEVEL max_clocks_state; 248 + }; 249 + 250 + enum amd_pp_clock_type { 251 + amd_pp_disp_clock = 1, 252 + amd_pp_sys_clock, 253 + amd_pp_mem_clock 254 + }; 255 + 256 + #define MAX_NUM_CLOCKS 16 257 + 258 + struct amd_pp_clocks { 259 + uint32_t count; 260 + uint32_t clock[MAX_NUM_CLOCKS]; 261 + }; 262 + 222 263 223 264 enum { 224 265 PP_GROUP_UNKNOWN = 0, 225 266 PP_GROUP_GFX = 1, 226 267 PP_GROUP_SYS, 227 268 PP_GROUP_MAX 269 + }; 270 + 271 + enum pp_clock_type { 272 + PP_SCLK, 273 + PP_MCLK, 274 + PP_PCIE, 275 + }; 276 + 277 + struct pp_states_info { 278 + uint32_t nums; 279 + uint32_t states[16]; 228 280 }; 229 281 230 282 #define PP_GROUP_MASK 0xF0000000 ··· 334 278 int (*get_fan_control_mode)(void *handle); 335 279 int (*set_fan_speed_percent)(void *handle, uint32_t percent); 336 280 int (*get_fan_speed_percent)(void *handle, uint32_t *speed); 281 + int (*get_pp_num_states)(void *handle, struct pp_states_info *data); 282 + int (*get_pp_table)(void *handle, char **table); 283 + int (*set_pp_table)(void *handle, const char *buf, size_t size); 284 + int (*force_clock_level)(void *handle, enum pp_clock_type type, int level); 285 + int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf); 337 286 }; 338 287 339 288 struct amd_powerplay { ··· 349 288 350 289 int amd_powerplay_init(struct amd_pp_init *pp_init, 351 290 struct amd_powerplay *amd_pp); 291 + 352 292 int amd_powerplay_fini(void *handle); 353 293 354 - int amd_powerplay_display_configuration_change(void *handle, const void *input); 294 + int amd_powerplay_display_configuration_change(void *handle, 295 + const struct amd_pp_display_configuration *input); 355 296 356 297 int amd_powerplay_get_display_power_level(void *handle, 357 - struct amd_pp_dal_clock_info *output); 298 + struct amd_pp_simple_clock_info *output); 358 299 300 + int amd_powerplay_get_current_clocks(void *handle, 301 + struct amd_pp_clock_info *output); 302 
+ 303 + int amd_powerplay_get_clock_by_type(void *handle, 304 + enum amd_pp_clock_type type, 305 + struct amd_pp_clocks *clocks); 306 + 307 + int amd_powerplay_get_display_mode_validation_clocks(void *handle, 308 + struct amd_pp_simple_clock_info *output); 359 309 360 310 #endif /* _AMD_POWERPLAY_H_ */
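The header now gives callers a small query surface: pp_states_info for enumerating power states, amd_pp_clocks (up to MAX_NUM_CLOCKS entries) for per-type clock lists, and amd_pp_clock_info / amd_pp_simple_clock_info for current and validation clocks. The fragment below models how a consumer might walk a clock list; the structure layouts are copied from this header, but the backend stub and its values are assumptions standing in for the real amd_powerplay_get_clock_by_type() path.

#include <stdint.h>
#include <stdio.h>

#define MAX_NUM_CLOCKS 16

/* Local copies of the types declared in amd_powerplay.h, for illustration. */
enum amd_pp_clock_type { amd_pp_disp_clock = 1, amd_pp_sys_clock, amd_pp_mem_clock };
struct amd_pp_clocks { uint32_t count; uint32_t clock[MAX_NUM_CLOCKS]; };

/* Stand-in for amd_powerplay_get_clock_by_type(); the real call forwards to
 * the hwmgr backend (e.g. cz_get_clock_by_type). */
static int get_clock_by_type(enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
	uint32_t sys[] = { 30000, 40000, 53300, 60000 };	/* invented SCLK levels */
	uint32_t i;

	if (type != amd_pp_sys_clock)
		return -1;
	clocks->count = sizeof(sys) / sizeof(sys[0]);
	for (i = 0; i < clocks->count; i++)
		clocks->clock[i] = sys[i];
	return 0;
}

int main(void)
{
	struct amd_pp_clocks clocks;
	uint32_t i;

	if (get_clock_by_type(amd_pp_sys_clock, &clocks) == 0)
		for (i = 0; i < clocks.count; i++)
			printf("sclk level %u: %u (10 kHz units)\n", i, clocks.clock[i]);
	return 0;
}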
+25 -19
drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
··· 31 31 enum amd_dpm_forced_level; 32 32 struct PP_TemperatureRange; 33 33 34 + 34 35 struct phm_fan_speed_info { 35 36 uint32_t min_percent; 36 37 uint32_t max_percent; ··· 291 290 uint32_t engineClockInSR; 292 291 }; 293 292 293 + struct pp_clock_info { 294 + uint32_t min_mem_clk; 295 + uint32_t max_mem_clk; 296 + uint32_t min_eng_clk; 297 + uint32_t max_eng_clk; 298 + uint32_t min_bus_bandwidth; 299 + uint32_t max_bus_bandwidth; 300 + }; 301 + 294 302 struct phm_platform_descriptor { 295 303 uint32_t platformCaps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES]; 296 304 uint32_t vbiosInterruptId; ··· 332 322 uint32_t num_of_entries; 333 323 uint32_t clock[MAX_NUM_CLOCKS]; 334 324 }; 335 - 336 - enum PP_DAL_POWERLEVEL { 337 - PP_DAL_POWERLEVEL_INVALID = 0, 338 - PP_DAL_POWERLEVEL_ULTRALOW, 339 - PP_DAL_POWERLEVEL_LOW, 340 - PP_DAL_POWERLEVEL_NOMINAL, 341 - PP_DAL_POWERLEVEL_PERFORMANCE, 342 - 343 - PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW, 344 - PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW, 345 - PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL, 346 - PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE, 347 - PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1, 348 - PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1, 349 - PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1, 350 - PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1, 351 - }; 352 - 353 325 354 326 extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr); 355 327 extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate); ··· 367 375 const struct amd_pp_display_configuration *display_config); 368 376 369 377 extern int phm_get_dal_power_level(struct pp_hwmgr *hwmgr, 370 - struct amd_pp_dal_clock_info*info); 378 + struct amd_pp_simple_clock_info *info); 371 379 372 380 extern int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr); 373 381 374 382 extern int phm_power_down_asic(struct pp_hwmgr *hwmgr); 383 + 384 + extern int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 385 + PHM_PerformanceLevelDesignation designation, uint32_t index, 386 + PHM_PerformanceLevel *level); 387 + 388 + extern int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 389 + struct pp_clock_info *pclock_info, 390 + PHM_PerformanceLevelDesignation designation); 391 + 392 + extern int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info); 393 + 394 + extern int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); 395 + 396 + extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); 375 397 376 398 #endif /* _HARDWARE_MANAGER_H_ */ 377 399
+11 -1
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
··· 325 325 bool cc6_disable, bool pstate_disable, 326 326 bool pstate_switch_disable); 327 327 int (*get_dal_power_level)(struct pp_hwmgr *hwmgr, 328 - struct amd_pp_dal_clock_info *info); 328 + struct amd_pp_simple_clock_info *info); 329 + int (*get_performance_level)(struct pp_hwmgr *, const struct pp_hw_power_state *, 330 + PHM_PerformanceLevelDesignation, uint32_t, PHM_PerformanceLevel *); 331 + int (*get_current_shallow_sleep_clocks)(struct pp_hwmgr *hwmgr, 332 + const struct pp_hw_power_state *state, struct pp_clock_info *clock_info); 333 + int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); 334 + int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); 329 335 int (*power_off_asic)(struct pp_hwmgr *hwmgr); 336 + int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table); 337 + int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size); 338 + int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, int level); 339 + int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); 330 340 }; 331 341 332 342 struct pp_table_func {
+9 -1
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
··· 229 229 amd_sched_wakeup(entity->sched); 230 230 } 231 231 232 + static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb) 233 + { 234 + struct amd_sched_entity *entity = 235 + container_of(cb, struct amd_sched_entity, cb); 236 + entity->dependency = NULL; 237 + fence_put(f); 238 + } 239 + 232 240 static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) 233 241 { 234 242 struct amd_gpu_scheduler *sched = entity->sched; ··· 259 251 } 260 252 261 253 /* Wait for fence to be scheduled */ 262 - entity->cb.func = amd_sched_entity_wakeup; 254 + entity->cb.func = amd_sched_entity_clear_dep; 263 255 list_add_tail(&entity->cb.node, &s_fence->scheduled_cb); 264 256 return true; 265 257 }
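The scheduler change swaps the callback installed on a dependency's scheduled_cb list: it is now amd_sched_entity_clear_dep, which recovers the entity from the embedded fence_cb via container_of, clears entity->dependency and drops the fence reference, where the previous callback only woke the scheduler. Reduced to a standalone model with a refcount stand-in, the pattern looks like this:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fence    { int refcount; };
struct fence_cb { void (*func)(struct fence *f, struct fence_cb *cb); };
struct entity   { struct fence *dependency; struct fence_cb cb; };

static void fence_put(struct fence *f) { f->refcount--; }

/* Mirrors amd_sched_entity_clear_dep(): recover the entity, drop the dependency. */
static void entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
	struct entity *entity = container_of(cb, struct entity, cb);

	entity->dependency = NULL;
	fence_put(f);
}

int main(void)
{
	struct fence dep = { .refcount = 1 };
	struct entity e = { .dependency = &dep };

	e.cb.func = entity_clear_dep;	/* installed while the dependency is pending */
	e.cb.func(&dep, &e.cb);		/* fired once the dependency's job is scheduled */

	printf("dependency %p, refcount %d\n", (void *)e.dependency, dep.refcount);
	return 0;
}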
+9 -2
drivers/gpu/drm/radeon/cik.c
··· 4219 4219 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 4220 4220 return r; 4221 4221 } 4222 - r = radeon_fence_wait(ib.fence, false); 4223 - if (r) { 4222 + r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies( 4223 + RADEON_USEC_IB_TEST_TIMEOUT)); 4224 + if (r < 0) { 4224 4225 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 4225 4226 radeon_scratch_free(rdev, scratch); 4226 4227 radeon_ib_free(rdev, &ib); 4227 4228 return r; 4229 + } else if (r == 0) { 4230 + DRM_ERROR("radeon: fence wait timed out.\n"); 4231 + radeon_scratch_free(rdev, scratch); 4232 + radeon_ib_free(rdev, &ib); 4233 + return -ETIMEDOUT; 4228 4234 } 4235 + r = 0; 4229 4236 for (i = 0; i < rdev->usec_timeout; i++) { 4230 4237 tmp = RREG32(scratch); 4231 4238 if (tmp == 0xDEADBEEF)
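All of the radeon IB ring tests touched in this pull move from the unbounded radeon_fence_wait() to radeon_fence_wait_timeout() with a one-second budget (RADEON_USEC_IB_TEST_TIMEOUT), interpreting its return value jiffies-style: negative means the wait itself failed, zero means it timed out (mapped to -ETIMEDOUT), positive means the fence signaled with time to spare. The standalone sketch below models that caller-side convention with the wait stubbed out.

#include <errno.h>
#include <stdio.h>

/* Stand-in for radeon_fence_wait_timeout(): <0 error, 0 timeout, >0 jiffies left. */
static long fence_wait_timeout_stub(long result) { return result; }

static int ib_test_wait(long wait_result)
{
	long r = fence_wait_timeout_stub(wait_result);

	if (r < 0)
		return (int)r;		/* propagate the wait error */
	else if (r == 0)
		return -ETIMEDOUT;	/* budget expired, fence never signaled */
	return 0;			/* signaled; continue with the scratch readback */
}

int main(void)
{
	printf("%d %d %d\n", ib_test_wait(250), ib_test_wait(0), ib_test_wait(-EINTR));
	return 0;
}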
+7 -2
drivers/gpu/drm/radeon/cik_sdma.c
···
737 737 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
738 738 return r;
739 739 }
740 - r = radeon_fence_wait(ib.fence, false);
741 - if (r) {
740 + r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
741 + RADEON_USEC_IB_TEST_TIMEOUT));
742 + if (r < 0) {
742 743 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
743 744 return r;
745 + } else if (r == 0) {
746 + DRM_ERROR("radeon: fence wait timed out.\n");
747 + return -ETIMEDOUT;
744 748 }
749 + r = 0;
745 750 for (i = 0; i < rdev->usec_timeout; i++) {
746 751 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
747 752 if (tmp == 0xDEADBEEF)
+8 -2
drivers/gpu/drm/radeon/r100.c
···
3732 3732 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3733 3733 goto free_ib;
3734 3734 }
3735 - r = radeon_fence_wait(ib.fence, false);
3736 - if (r) {
3735 + r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
3736 + RADEON_USEC_IB_TEST_TIMEOUT));
3737 + if (r < 0) {
3737 3738 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3738 3739 goto free_ib;
3740 + } else if (r == 0) {
3741 + DRM_ERROR("radeon: fence wait timed out.\n");
3742 + r = -ETIMEDOUT;
3743 + goto free_ib;
3739 3744 }
3745 + r = 0;
3740 3746 for (i = 0; i < rdev->usec_timeout; i++) {
3741 3747 tmp = RREG32(scratch);
3742 3748 if (tmp == 0xDEADBEEF) {
+8 -2
drivers/gpu/drm/radeon/r600.c
···
3381 3381 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3382 3382 goto free_ib;
3383 3383 }
3384 - r = radeon_fence_wait(ib.fence, false);
3385 - if (r) {
3384 + r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
3385 + RADEON_USEC_IB_TEST_TIMEOUT));
3386 + if (r < 0) {
3386 3387 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3387 3388 goto free_ib;
3389 + } else if (r == 0) {
3390 + DRM_ERROR("radeon: fence wait timed out.\n");
3391 + r = -ETIMEDOUT;
3392 + goto free_ib;
3388 3393 }
3394 + r = 0;
3389 3395 for (i = 0; i < rdev->usec_timeout; i++) {
3390 3396 tmp = RREG32(scratch);
3391 3397 if (tmp == 0xDEADBEEF)
+7 -2
drivers/gpu/drm/radeon/r600_dma.c
···
368 368 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
369 369 return r;
370 370 }
371 - r = radeon_fence_wait(ib.fence, false);
372 - if (r) {
371 + r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
372 + RADEON_USEC_IB_TEST_TIMEOUT));
373 + if (r < 0) {
373 374 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
374 375 return r;
376 + } else if (r == 0) {
377 + DRM_ERROR("radeon: fence wait timed out.\n");
378 + return -ETIMEDOUT;
375 379 }
380 + r = 0;
376 381 for (i = 0; i < rdev->usec_timeout; i++) {
377 382 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
378 383 if (tmp == 0xDEADBEEF)
+2
drivers/gpu/drm/radeon/radeon.h
···
120 120 */
121 121 #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
122 122 #define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
123 + #define RADEON_USEC_IB_TEST_TIMEOUT 1000000 /* 1s */
123 124 /* RADEON_IB_POOL_SIZE must be a power of 2 */
124 125 #define RADEON_IB_POOL_SIZE 16
125 126 #define RADEON_DEBUGFS_MAX_COMPONENTS 32
···
383 382 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
384 383 void radeon_fence_process(struct radeon_device *rdev, int ring);
385 384 bool radeon_fence_signaled(struct radeon_fence *fence);
385 + long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, long timeout);
386 386 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
387 387 int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
388 388 int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
+3 -2
drivers/gpu/drm/radeon/radeon_display.c
···
1686 1686 radeon_fbdev_fini(rdev);
1687 1687 kfree(rdev->mode_info.bios_hardcoded_edid);
1688 1688
1689 + /* free i2c buses */
1690 + radeon_i2c_fini(rdev);
1691 +
1689 1692 if (rdev->mode_info.mode_config_initialized) {
1690 1693 radeon_afmt_fini(rdev);
1691 1694 drm_kms_helper_poll_fini(rdev->ddev);
···
1696 1693 drm_mode_config_cleanup(rdev->ddev);
1697 1694 rdev->mode_info.mode_config_initialized = false;
1698 1695 }
1699 - /* free i2c buses */
1700 - radeon_i2c_fini(rdev);
1701 1696 }
1702 1697
1703 1698 static bool is_hdtv_mode(const struct drm_display_mode *mode)
+44 -20
drivers/gpu/drm/radeon/radeon_fence.c
···
527 527 }
528 528
529 529 /**
530 + * radeon_fence_wait_timeout - wait for a fence to signal with timeout
531 + *
532 + * @fence: radeon fence object
533 + * @intr: use interruptible sleep
534 + *
535 + * Wait for the requested fence to signal (all asics).
536 + * @intr selects whether to use interruptible (true) or non-interruptible
537 + * (false) sleep when waiting for the fence.
538 + * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
539 + * Returns remaining time if the sequence number has passed, 0 when
540 + * the wait timed out, or an error for all other cases.
541 + */
542 + long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
543 + {
544 + uint64_t seq[RADEON_NUM_RINGS] = {};
545 + long r;
546 + int r_sig;
547 +
548 + /*
549 + * This function should not be called on !radeon fences.
550 + * If this is the case, it would mean this function can
551 + * also be called on radeon fences belonging to another card.
552 + * exclusive_lock is not held in that case.
553 + */
554 + if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
555 + return fence_wait(&fence->base, intr);
556 +
557 + seq[fence->ring] = fence->seq;
558 + r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
559 + if (r <= 0) {
560 + return r;
561 + }
562 +
563 + r_sig = fence_signal(&fence->base);
564 + if (!r_sig)
565 + FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
566 + return r;
567 + }
568 +
569 + /**
530 570 * radeon_fence_wait - wait for a fence to signal
531 571 *
532 572 * @fence: radeon fence object
···
539 579 */
540 580 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
541 581 {
542 - uint64_t seq[RADEON_NUM_RINGS] = {};
543 - long r;
544 -
545 - /*
546 - * This function should not be called on !radeon fences.
547 - * If this is the case, it would mean this function can
548 - * also be called on radeon fences belonging to another card.
549 - * exclusive_lock is not held in that case.
550 - */
551 - if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
552 - return fence_wait(&fence->base, intr);
553 -
554 - seq[fence->ring] = fence->seq;
555 - r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
556 - if (r < 0) {
582 + long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
583 + if (r > 0) {
584 + return 0;
585 + } else {
557 586 return r;
558 587 }
559 -
560 - r = fence_signal(&fence->base);
561 - if (!r)
562 - FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
563 - return 0;
564 588 }
+8 -3
drivers/gpu/drm/radeon/radeon_vce.c
···
810 810 goto error;
811 811 }
812 812
813 - r = radeon_fence_wait(fence, false);
814 - if (r) {
813 + r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies(
814 + RADEON_USEC_IB_TEST_TIMEOUT));
815 + if (r < 0) {
815 816 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
817 + } else if (r == 0) {
818 + DRM_ERROR("radeon: fence wait timed out.\n");
819 + r = -ETIMEDOUT;
816 820 } else {
817 - DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
821 + DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
822 + r = 0;
818 823 }
819 824 error:
820 825 radeon_fence_unref(&fence);
+8 -2
drivers/gpu/drm/radeon/uvd_v1_0.c
···
522 522 goto error;
523 523 }
524 524
525 - r = radeon_fence_wait(fence, false);
526 - if (r) {
525 + r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies(
526 + RADEON_USEC_IB_TEST_TIMEOUT));
527 + if (r < 0) {
527 528 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
528 529 goto error;
530 + } else if (r == 0) {
531 + DRM_ERROR("radeon: fence wait timed out.\n");
532 + r = -ETIMEDOUT;
533 + goto error;
529 534 }
535 + r = 0;
530 536 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
531 537 error:
532 538 radeon_fence_unref(&fence);