Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
amdgpu_amdkfd.h at v5.14-rc7 (396 lines, 13 kB)

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */

#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"

extern uint64_t amdgpu_amdkfd_total_mem_size;

enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT
};

struct amdgpu_device;

enum kfd_mem_attachment_type {
	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
};

struct kfd_mem_attachment {
	struct list_head list;
	enum kfd_mem_attachment_type type;
	bool is_mapped;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_device *adev;
	uint64_t va;
	uint64_t pte_flags;
};

struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	struct list_head attachments;
	/* protected by amdkfd_process_info.lock */
	struct ttm_validate_buffer validate_list;
	struct ttm_validate_buffer resv_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;
	uint64_t va;

	uint32_t alloc_flags;

	atomic_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	bool aql_queue;
	bool is_imported;
};

/* KFD Memory Eviction */
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
	struct svm_range_bo *svm_bo;
};

struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	uint64_t vram_used;
	bool init_complete;
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};


struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* List of userptr BOs that are valid or invalid */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	atomic_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
};

int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
				      enum TLB_FLUSH_TYPE flush_type);

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit);

struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
				struct mm_struct *mm,
				struct svm_range_bo *svm_bo);
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

static inline
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

static inline
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	return 0;
}

static inline
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}
#endif
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, void **mem_obj);
void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dmabuf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);

/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 */
#define read_user_wptr(mmptr, wptr, dst)				\
	({								\
		bool valid = false;					\
		if ((mmptr) && (wptr)) {				\
			pagefault_disable();				\
			if ((mmptr) == current->mm) {			\
				valid = !get_user((dst), (wptr));	\
			} else if (current->flags & PF_KTHREAD) {	\
				kthread_use_mm(mmptr);			\
				valid = !get_user((dst), (wptr));	\
				kthread_unuse_mm(mmptr);		\
			}						\
			pagefault_enable();				\
		}							\
		valid;							\
	})

/* GPUVM API */
#define drm_priv_to_vm(drm_priv)					\
	(&((struct amdgpu_fpriv *)					\
		((struct drm_file *)(drm_priv))->driver_priv)->vm)

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					struct file *filp, u32 pasid,
					void **process_info,
					struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					  struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dmabuf,
				      uint64_t va, void *drm_priv,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset);
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				struct tile_config *config);
#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
static inline
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
}

static inline
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

static inline
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}
#endif
/* KGD2KFD callbacks */
int kgd2kfd_quiesce_mm(struct mm_struct *mm);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
						struct dma_fence *fence);
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      unsigned int asic_type, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
#else
static inline int kgd2kfd_init(void)
{
	return -ENOENT;
}

static inline void kgd2kfd_exit(void)
{
}

static inline
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      unsigned int asic_type, bool vf)
{
	return NULL;
}

static inline
bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

static inline
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
{
}
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
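
A note on read_user_wptr(): the macro is a GCC statement expression that evaluates to a bool, so a caller can attempt the read and test success in one step. Below is a minimal sketch of how an hqd_load-style caller might use it; the function and variable names are assumptions for illustration, not taken from the kernel tree.

/* Hypothetical caller (sketch, not part of the header): reads the user-mode
 * write pointer while loading a hardware queue descriptor. Assumes the user
 * memory is pinned, as the comment above read_user_wptr() requires.
 */
static int example_hqd_load(struct mm_struct *mm, uint32_t __user *wptr,
			    uint32_t *hqd_wptr)
{
	uint32_t wptr_val = 0;

	if (read_user_wptr(mm, wptr, wptr_val))
		*hqd_wptr = wptr_val;	/* read succeeded without faulting */

	return 0;
}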
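
drm_priv_to_vm() packs a two-step pointer chase into one cast expression: the opaque drm_priv cookie passed through the GPUVM API is really a struct drm_file *, its driver_priv field points to amdgpu's per-open-file struct amdgpu_fpriv, and the struct amdgpu_vm is embedded in that. The same chain written out as a helper, for readability (a sketch, not part of the header):

/* Sketch: the drm_priv_to_vm() macro expanded into a function. */
static inline struct amdgpu_vm *example_drm_priv_to_vm(void *drm_priv)
{
	struct drm_file *file = drm_priv;
	struct amdgpu_fpriv *fpriv = file->driver_priv;

	return &fpriv->vm;	/* amdgpu_vm is embedded in amdgpu_fpriv */
}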
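
The #else branches above follow the kernel's usual compile-out pattern: when CONFIG_HSA_AMD is disabled, every kgd2kfd entry point becomes a static inline stub with a safe return value (NULL, false, 0, or -ENOENT for kgd2kfd_init()), so amdgpu call sites build unchanged without #ifdefs of their own. A hedged sketch of such a call site; the function name is an assumption for illustration:

/* Hypothetical amdgpu call site: compiles whether or not KFD is built in.
 * With CONFIG_HSA_AMD=n, kgd2kfd_probe() is the NULL-returning stub and
 * KFD setup is skipped silently.
 */
static void example_amdgpu_probe_kfd(struct kgd_dev *kgd, struct pci_dev *pdev,
				     unsigned int asic_type, bool vf)
{
	struct kfd_dev *kfd = kgd2kfd_probe(kgd, pdev, asic_type, vf);

	if (!kfd)
		return;
	/* ... pass shared resources to kgd2kfd_device_init() ... */
}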