Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: separate VMID and PASID handling

Move both into the new files amdgpu_ids.[ch]. No functional change.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König and committed by Alex Deucher.
620f774f df2869ab

+579 -465
+2 -1
drivers/gpu/drm/amd/amdgpu/Makefile
··· 52 52 amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ 53 53 amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ 54 54 amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ 55 - amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o 55 + amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o \ 56 + amdgpu_ids.o 56 57 57 58 # add asic specific block 58 59 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
··· 169 169 .get_vmem_size = get_vmem_size, 170 170 .get_gpu_clock_counter = get_gpu_clock_counter, 171 171 .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, 172 - .alloc_pasid = amdgpu_vm_alloc_pasid, 173 - .free_pasid = amdgpu_vm_free_pasid, 172 + .alloc_pasid = amdgpu_pasid_alloc, 173 + .free_pasid = amdgpu_pasid_free, 174 174 .program_sh_mem_settings = kgd_program_sh_mem_settings, 175 175 .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, 176 176 .init_pipeline = kgd_init_pipeline,
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
··· 128 128 .get_vmem_size = get_vmem_size, 129 129 .get_gpu_clock_counter = get_gpu_clock_counter, 130 130 .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, 131 - .alloc_pasid = amdgpu_vm_alloc_pasid, 132 - .free_pasid = amdgpu_vm_free_pasid, 131 + .alloc_pasid = amdgpu_pasid_alloc, 132 + .free_pasid = amdgpu_pasid_free, 133 133 .program_sh_mem_settings = kgd_program_sh_mem_settings, 134 134 .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, 135 135 .init_pipeline = kgd_init_pipeline,
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 230 230 if (r) { 231 231 dev_err(adev->dev, "failed to emit fence (%d)\n", r); 232 232 if (job && job->vm_id) 233 - amdgpu_vm_reset_id(adev, ring->funcs->vmhub, 234 - job->vm_id); 233 + amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vm_id); 235 234 amdgpu_ring_undo(ring); 236 235 return r; 237 236 }
+459
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
··· 1 + /* 2 + * Copyright 2017 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + #include "amdgpu_ids.h" 24 + 25 + #include <linux/idr.h> 26 + #include <linux/dma-fence-array.h> 27 + #include <drm/drmP.h> 28 + 29 + #include "amdgpu.h" 30 + #include "amdgpu_trace.h" 31 + 32 + /* 33 + * PASID manager 34 + * 35 + * PASIDs are global address space identifiers that can be shared 36 + * between the GPU, an IOMMU and the driver. VMs on different devices 37 + * may use the same PASID if they share the same address 38 + * space. Therefore PASIDs are allocated using a global IDA. VMs are 39 + * looked up from the PASID per amdgpu_device. 
40 + */ 41 + static DEFINE_IDA(amdgpu_pasid_ida); 42 + 43 + /** 44 + * amdgpu_pasid_alloc - Allocate a PASID 45 + * @bits: Maximum width of the PASID in bits, must be at least 1 46 + * 47 + * Allocates a PASID of the given width while keeping smaller PASIDs 48 + * available if possible. 49 + * 50 + * Returns a positive integer on success. Returns %-EINVAL if bits==0. 51 + * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on 52 + * memory allocation failure. 53 + */ 54 + int amdgpu_pasid_alloc(unsigned int bits) 55 + { 56 + int pasid = -EINVAL; 57 + 58 + for (bits = min(bits, 31U); bits > 0; bits--) { 59 + pasid = ida_simple_get(&amdgpu_pasid_ida, 60 + 1U << (bits - 1), 1U << bits, 61 + GFP_KERNEL); 62 + if (pasid != -ENOSPC) 63 + break; 64 + } 65 + 66 + return pasid; 67 + } 68 + 69 + /** 70 + * amdgpu_pasid_free - Free a PASID 71 + * @pasid: PASID to free 72 + */ 73 + void amdgpu_pasid_free(unsigned int pasid) 74 + { 75 + ida_simple_remove(&amdgpu_pasid_ida, pasid); 76 + } 77 + 78 + /* 79 + * VMID manager 80 + * 81 + * VMIDs are a per VMHUB identifier for page tables handling. 82 + */ 83 + 84 + /** 85 + * amdgpu_vmid_had_gpu_reset - check if reset occured since last use 86 + * 87 + * @adev: amdgpu_device pointer 88 + * @id: VMID structure 89 + * 90 + * Check if GPU reset occured since last use of the VMID. 
91 + */ 92 + bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, 93 + struct amdgpu_vmid *id) 94 + { 95 + return id->current_gpu_reset_count != 96 + atomic_read(&adev->gpu_reset_counter); 97 + } 98 + 99 + /* idr_mgr->lock must be held */ 100 + static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, 101 + struct amdgpu_ring *ring, 102 + struct amdgpu_sync *sync, 103 + struct dma_fence *fence, 104 + struct amdgpu_job *job) 105 + { 106 + struct amdgpu_device *adev = ring->adev; 107 + unsigned vmhub = ring->funcs->vmhub; 108 + uint64_t fence_context = adev->fence_context + ring->idx; 109 + struct amdgpu_vmid *id = vm->reserved_vmid[vmhub]; 110 + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 111 + struct dma_fence *updates = sync->last_vm_update; 112 + int r = 0; 113 + struct dma_fence *flushed, *tmp; 114 + bool needs_flush = vm->use_cpu_for_update; 115 + 116 + flushed = id->flushed_updates; 117 + if ((amdgpu_vmid_had_gpu_reset(adev, id)) || 118 + (atomic64_read(&id->owner) != vm->client_id) || 119 + (job->vm_pd_addr != id->pd_gpu_addr) || 120 + (updates && (!flushed || updates->context != flushed->context || 121 + dma_fence_is_later(updates, flushed))) || 122 + (!id->last_flush || (id->last_flush->context != fence_context && 123 + !dma_fence_is_signaled(id->last_flush)))) { 124 + needs_flush = true; 125 + /* to prevent one context starved by another context */ 126 + id->pd_gpu_addr = 0; 127 + tmp = amdgpu_sync_peek_fence(&id->active, ring); 128 + if (tmp) { 129 + r = amdgpu_sync_fence(adev, sync, tmp, false); 130 + return r; 131 + } 132 + } 133 + 134 + /* Good we can use this VMID. Remember this submission as 135 + * user of the VMID. 
136 + */ 137 + r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); 138 + if (r) 139 + goto out; 140 + 141 + if (updates && (!flushed || updates->context != flushed->context || 142 + dma_fence_is_later(updates, flushed))) { 143 + dma_fence_put(id->flushed_updates); 144 + id->flushed_updates = dma_fence_get(updates); 145 + } 146 + id->pd_gpu_addr = job->vm_pd_addr; 147 + atomic64_set(&id->owner, vm->client_id); 148 + job->vm_needs_flush = needs_flush; 149 + if (needs_flush) { 150 + dma_fence_put(id->last_flush); 151 + id->last_flush = NULL; 152 + } 153 + job->vm_id = id - id_mgr->ids; 154 + trace_amdgpu_vm_grab_id(vm, ring, job); 155 + out: 156 + return r; 157 + } 158 + 159 + /** 160 + * amdgpu_vm_grab_id - allocate the next free VMID 161 + * 162 + * @vm: vm to allocate id for 163 + * @ring: ring we want to submit job to 164 + * @sync: sync object where we add dependencies 165 + * @fence: fence protecting ID from reuse 166 + * 167 + * Allocate an id for the vm, adding fences to the sync obj as necessary. 
168 + */ 169 + int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 170 + struct amdgpu_sync *sync, struct dma_fence *fence, 171 + struct amdgpu_job *job) 172 + { 173 + struct amdgpu_device *adev = ring->adev; 174 + unsigned vmhub = ring->funcs->vmhub; 175 + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 176 + uint64_t fence_context = adev->fence_context + ring->idx; 177 + struct dma_fence *updates = sync->last_vm_update; 178 + struct amdgpu_vmid *id, *idle; 179 + struct dma_fence **fences; 180 + unsigned i; 181 + int r = 0; 182 + 183 + mutex_lock(&id_mgr->lock); 184 + if (vm->reserved_vmid[vmhub]) { 185 + r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job); 186 + mutex_unlock(&id_mgr->lock); 187 + return r; 188 + } 189 + fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); 190 + if (!fences) { 191 + mutex_unlock(&id_mgr->lock); 192 + return -ENOMEM; 193 + } 194 + /* Check if we have an idle VMID */ 195 + i = 0; 196 + list_for_each_entry(idle, &id_mgr->ids_lru, list) { 197 + fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); 198 + if (!fences[i]) 199 + break; 200 + ++i; 201 + } 202 + 203 + /* If we can't find a idle VMID to use, wait till one becomes available */ 204 + if (&idle->list == &id_mgr->ids_lru) { 205 + u64 fence_context = adev->vm_manager.fence_context + ring->idx; 206 + unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; 207 + struct dma_fence_array *array; 208 + unsigned j; 209 + 210 + for (j = 0; j < i; ++j) 211 + dma_fence_get(fences[j]); 212 + 213 + array = dma_fence_array_create(i, fences, fence_context, 214 + seqno, true); 215 + if (!array) { 216 + for (j = 0; j < i; ++j) 217 + dma_fence_put(fences[j]); 218 + kfree(fences); 219 + r = -ENOMEM; 220 + goto error; 221 + } 222 + 223 + 224 + r = amdgpu_sync_fence(ring->adev, sync, &array->base, false); 225 + dma_fence_put(&array->base); 226 + if (r) 227 + goto error; 228 + 229 + mutex_unlock(&id_mgr->lock); 230 + return 0; 231 + 232 
+ } 233 + kfree(fences); 234 + 235 + job->vm_needs_flush = vm->use_cpu_for_update; 236 + /* Check if we can use a VMID already assigned to this VM */ 237 + list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) { 238 + struct dma_fence *flushed; 239 + bool needs_flush = vm->use_cpu_for_update; 240 + 241 + /* Check all the prerequisites to using this VMID */ 242 + if (amdgpu_vmid_had_gpu_reset(adev, id)) 243 + continue; 244 + 245 + if (atomic64_read(&id->owner) != vm->client_id) 246 + continue; 247 + 248 + if (job->vm_pd_addr != id->pd_gpu_addr) 249 + continue; 250 + 251 + if (!id->last_flush || 252 + (id->last_flush->context != fence_context && 253 + !dma_fence_is_signaled(id->last_flush))) 254 + needs_flush = true; 255 + 256 + flushed = id->flushed_updates; 257 + if (updates && (!flushed || dma_fence_is_later(updates, flushed))) 258 + needs_flush = true; 259 + 260 + /* Concurrent flushes are only possible starting with Vega10 */ 261 + if (adev->asic_type < CHIP_VEGA10 && needs_flush) 262 + continue; 263 + 264 + /* Good we can use this VMID. Remember this submission as 265 + * user of the VMID. 266 + */ 267 + r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); 268 + if (r) 269 + goto error; 270 + 271 + if (updates && (!flushed || dma_fence_is_later(updates, flushed))) { 272 + dma_fence_put(id->flushed_updates); 273 + id->flushed_updates = dma_fence_get(updates); 274 + } 275 + 276 + if (needs_flush) 277 + goto needs_flush; 278 + else 279 + goto no_flush_needed; 280 + 281 + }; 282 + 283 + /* Still no ID to use? 
Then use the idle one found earlier */ 284 + id = idle; 285 + 286 + /* Remember this submission as user of the VMID */ 287 + r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); 288 + if (r) 289 + goto error; 290 + 291 + id->pd_gpu_addr = job->vm_pd_addr; 292 + dma_fence_put(id->flushed_updates); 293 + id->flushed_updates = dma_fence_get(updates); 294 + atomic64_set(&id->owner, vm->client_id); 295 + 296 + needs_flush: 297 + job->vm_needs_flush = true; 298 + dma_fence_put(id->last_flush); 299 + id->last_flush = NULL; 300 + 301 + no_flush_needed: 302 + list_move_tail(&id->list, &id_mgr->ids_lru); 303 + 304 + job->vm_id = id - id_mgr->ids; 305 + trace_amdgpu_vm_grab_id(vm, ring, job); 306 + 307 + error: 308 + mutex_unlock(&id_mgr->lock); 309 + return r; 310 + } 311 + 312 + int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, 313 + struct amdgpu_vm *vm, 314 + unsigned vmhub) 315 + { 316 + struct amdgpu_vmid_mgr *id_mgr; 317 + struct amdgpu_vmid *idle; 318 + int r = 0; 319 + 320 + id_mgr = &adev->vm_manager.id_mgr[vmhub]; 321 + mutex_lock(&id_mgr->lock); 322 + if (vm->reserved_vmid[vmhub]) 323 + goto unlock; 324 + if (atomic_inc_return(&id_mgr->reserved_vmid_num) > 325 + AMDGPU_VM_MAX_RESERVED_VMID) { 326 + DRM_ERROR("Over limitation of reserved vmid\n"); 327 + atomic_dec(&id_mgr->reserved_vmid_num); 328 + r = -EINVAL; 329 + goto unlock; 330 + } 331 + /* Select the first entry VMID */ 332 + idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list); 333 + list_del_init(&idle->list); 334 + vm->reserved_vmid[vmhub] = idle; 335 + mutex_unlock(&id_mgr->lock); 336 + 337 + return 0; 338 + unlock: 339 + mutex_unlock(&id_mgr->lock); 340 + return r; 341 + } 342 + 343 + void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, 344 + struct amdgpu_vm *vm, 345 + unsigned vmhub) 346 + { 347 + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 348 + 349 + mutex_lock(&id_mgr->lock); 350 + if (vm->reserved_vmid[vmhub]) { 351 + 
list_add(&vm->reserved_vmid[vmhub]->list, 352 + &id_mgr->ids_lru); 353 + vm->reserved_vmid[vmhub] = NULL; 354 + atomic_dec(&id_mgr->reserved_vmid_num); 355 + } 356 + mutex_unlock(&id_mgr->lock); 357 + } 358 + 359 + /** 360 + * amdgpu_vmid_reset - reset VMID to zero 361 + * 362 + * @adev: amdgpu device structure 363 + * @vm_id: vmid number to use 364 + * 365 + * Reset saved GDW, GWS and OA to force switch on next flush. 366 + */ 367 + void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub, 368 + unsigned vmid) 369 + { 370 + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 371 + struct amdgpu_vmid *id = &id_mgr->ids[vmid]; 372 + 373 + atomic64_set(&id->owner, 0); 374 + id->gds_base = 0; 375 + id->gds_size = 0; 376 + id->gws_base = 0; 377 + id->gws_size = 0; 378 + id->oa_base = 0; 379 + id->oa_size = 0; 380 + } 381 + 382 + /** 383 + * amdgpu_vmid_reset_all - reset VMID to zero 384 + * 385 + * @adev: amdgpu device structure 386 + * 387 + * Reset VMID to force flush on next use 388 + */ 389 + void amdgpu_vmid_reset_all(struct amdgpu_device *adev) 390 + { 391 + unsigned i, j; 392 + 393 + for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 394 + struct amdgpu_vmid_mgr *id_mgr = 395 + &adev->vm_manager.id_mgr[i]; 396 + 397 + for (j = 1; j < id_mgr->num_ids; ++j) 398 + amdgpu_vmid_reset(adev, i, j); 399 + } 400 + } 401 + 402 + /** 403 + * amdgpu_vmid_mgr_init - init the VMID manager 404 + * 405 + * @adev: amdgpu_device pointer 406 + * 407 + * Initialize the VM manager structures 408 + */ 409 + void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) 410 + { 411 + unsigned i, j; 412 + 413 + for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 414 + struct amdgpu_vmid_mgr *id_mgr = 415 + &adev->vm_manager.id_mgr[i]; 416 + 417 + mutex_init(&id_mgr->lock); 418 + INIT_LIST_HEAD(&id_mgr->ids_lru); 419 + atomic_set(&id_mgr->reserved_vmid_num, 0); 420 + 421 + /* skip over VMID 0, since it is the system VM */ 422 + for (j = 1; j < id_mgr->num_ids; ++j) { 423 + 
amdgpu_vmid_reset(adev, i, j); 424 + amdgpu_sync_create(&id_mgr->ids[i].active); 425 + list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru); 426 + } 427 + } 428 + 429 + adev->vm_manager.fence_context = 430 + dma_fence_context_alloc(AMDGPU_MAX_RINGS); 431 + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 432 + adev->vm_manager.seqno[i] = 0; 433 + } 434 + 435 + /** 436 + * amdgpu_vmid_mgr_fini - cleanup VM manager 437 + * 438 + * @adev: amdgpu_device pointer 439 + * 440 + * Cleanup the VM manager and free resources. 441 + */ 442 + void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev) 443 + { 444 + unsigned i, j; 445 + 446 + for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 447 + struct amdgpu_vmid_mgr *id_mgr = 448 + &adev->vm_manager.id_mgr[i]; 449 + 450 + mutex_destroy(&id_mgr->lock); 451 + for (j = 0; j < AMDGPU_NUM_VMID; ++j) { 452 + struct amdgpu_vmid *id = &id_mgr->ids[j]; 453 + 454 + amdgpu_sync_free(&id->active); 455 + dma_fence_put(id->flushed_updates); 456 + dma_fence_put(id->last_flush); 457 + } 458 + } 459 + }
+91
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
··· 1 + /* 2 + * Copyright 2017 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + #ifndef __AMDGPU_IDS_H__ 24 + #define __AMDGPU_IDS_H__ 25 + 26 + #include <linux/types.h> 27 + #include <linux/mutex.h> 28 + #include <linux/list.h> 29 + #include <linux/dma-fence.h> 30 + 31 + #include "amdgpu_sync.h" 32 + 33 + /* maximum number of VMIDs */ 34 + #define AMDGPU_NUM_VMID 16 35 + 36 + struct amdgpu_device; 37 + struct amdgpu_vm; 38 + struct amdgpu_ring; 39 + struct amdgpu_sync; 40 + struct amdgpu_job; 41 + 42 + struct amdgpu_vmid { 43 + struct list_head list; 44 + struct amdgpu_sync active; 45 + struct dma_fence *last_flush; 46 + atomic64_t owner; 47 + 48 + uint64_t pd_gpu_addr; 49 + /* last flushed PD/PT update */ 50 + struct dma_fence *flushed_updates; 51 + 52 + uint32_t current_gpu_reset_count; 53 + 54 + uint32_t gds_base; 55 + uint32_t gds_size; 56 + uint32_t gws_base; 57 + uint32_t gws_size; 58 + uint32_t oa_base; 59 + uint32_t oa_size; 60 + }; 61 + 62 + struct amdgpu_vmid_mgr { 63 + struct mutex lock; 64 + unsigned num_ids; 65 + struct list_head ids_lru; 66 + struct amdgpu_vmid ids[AMDGPU_NUM_VMID]; 67 + atomic_t reserved_vmid_num; 68 + }; 69 + 70 + int amdgpu_pasid_alloc(unsigned int bits); 71 + void amdgpu_pasid_free(unsigned int pasid); 72 + 73 + bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, 74 + struct amdgpu_vmid *id); 75 + int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, 76 + struct amdgpu_vm *vm, 77 + unsigned vmhub); 78 + void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, 79 + struct amdgpu_vm *vm, 80 + unsigned vmhub); 81 + int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 82 + struct amdgpu_sync *sync, struct dma_fence *fence, 83 + struct amdgpu_job *job); 84 + void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub, 85 + unsigned vmid); 86 + void amdgpu_vmid_reset_all(struct amdgpu_device *adev); 87 + 88 + void amdgpu_vmid_mgr_init(struct amdgpu_device *adev); 89 + void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev); 90 + 91 + #endif
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 161 161 while (fence == NULL && vm && !job->vm_id) { 162 162 struct amdgpu_ring *ring = job->ring; 163 163 164 - r = amdgpu_vm_grab_id(vm, ring, &job->sync, 165 - &job->base.s_fence->finished, 166 - job); 164 + r = amdgpu_vmid_grab(vm, ring, &job->sync, 165 + &job->base.s_fence->finished, 166 + job); 167 167 if (r) 168 168 DRM_ERROR("Error getting VM ID (%d)\n", r); 169 169
+12 -410
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 34 34 #include "amdgpu_trace.h" 35 35 36 36 /* 37 - * PASID manager 38 - * 39 - * PASIDs are global address space identifiers that can be shared 40 - * between the GPU, an IOMMU and the driver. VMs on different devices 41 - * may use the same PASID if they share the same address 42 - * space. Therefore PASIDs are allocated using a global IDA. VMs are 43 - * looked up from the PASID per amdgpu_device. 44 - */ 45 - static DEFINE_IDA(amdgpu_vm_pasid_ida); 46 - 47 - /** 48 - * amdgpu_vm_alloc_pasid - Allocate a PASID 49 - * @bits: Maximum width of the PASID in bits, must be at least 1 50 - * 51 - * Allocates a PASID of the given width while keeping smaller PASIDs 52 - * available if possible. 53 - * 54 - * Returns a positive integer on success. Returns %-EINVAL if bits==0. 55 - * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on 56 - * memory allocation failure. 57 - */ 58 - int amdgpu_vm_alloc_pasid(unsigned int bits) 59 - { 60 - int pasid = -EINVAL; 61 - 62 - for (bits = min(bits, 31U); bits > 0; bits--) { 63 - pasid = ida_simple_get(&amdgpu_vm_pasid_ida, 64 - 1U << (bits - 1), 1U << bits, 65 - GFP_KERNEL); 66 - if (pasid != -ENOSPC) 67 - break; 68 - } 69 - 70 - return pasid; 71 - } 72 - 73 - /** 74 - * amdgpu_vm_free_pasid - Free a PASID 75 - * @pasid: PASID to free 76 - */ 77 - void amdgpu_vm_free_pasid(unsigned int pasid) 78 - { 79 - ida_simple_remove(&amdgpu_vm_pasid_ida, pasid); 80 - } 81 - 82 - /* 83 37 * GPUVM 84 38 * GPUVM is similar to the legacy gart on older asics, however 85 39 * rather than there being a single global gart table ··· 402 448 } 403 449 404 450 /** 405 - * amdgpu_vm_had_gpu_reset - check if reset occured since last use 406 - * 407 - * @adev: amdgpu_device pointer 408 - * @id: VMID structure 409 - * 410 - * Check if GPU reset occured since last use of the VMID. 
411 - */ 412 - static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev, 413 - struct amdgpu_vm_id *id) 414 - { 415 - return id->current_gpu_reset_count != 416 - atomic_read(&adev->gpu_reset_counter); 417 - } 418 - 419 - static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub) 420 - { 421 - return !!vm->reserved_vmid[vmhub]; 422 - } 423 - 424 - /* idr_mgr->lock must be held */ 425 - static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm, 426 - struct amdgpu_ring *ring, 427 - struct amdgpu_sync *sync, 428 - struct dma_fence *fence, 429 - struct amdgpu_job *job) 430 - { 431 - struct amdgpu_device *adev = ring->adev; 432 - unsigned vmhub = ring->funcs->vmhub; 433 - uint64_t fence_context = adev->fence_context + ring->idx; 434 - struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub]; 435 - struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 436 - struct dma_fence *updates = sync->last_vm_update; 437 - int r = 0; 438 - struct dma_fence *flushed, *tmp; 439 - bool needs_flush = vm->use_cpu_for_update; 440 - 441 - flushed = id->flushed_updates; 442 - if ((amdgpu_vm_had_gpu_reset(adev, id)) || 443 - (atomic64_read(&id->owner) != vm->client_id) || 444 - (job->vm_pd_addr != id->pd_gpu_addr) || 445 - (updates && (!flushed || updates->context != flushed->context || 446 - dma_fence_is_later(updates, flushed))) || 447 - (!id->last_flush || (id->last_flush->context != fence_context && 448 - !dma_fence_is_signaled(id->last_flush)))) { 449 - needs_flush = true; 450 - /* to prevent one context starved by another context */ 451 - id->pd_gpu_addr = 0; 452 - tmp = amdgpu_sync_peek_fence(&id->active, ring); 453 - if (tmp) { 454 - r = amdgpu_sync_fence(adev, sync, tmp, false); 455 - return r; 456 - } 457 - } 458 - 459 - /* Good we can use this VMID. Remember this submission as 460 - * user of the VMID. 
461 - */ 462 - r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); 463 - if (r) 464 - goto out; 465 - 466 - if (updates && (!flushed || updates->context != flushed->context || 467 - dma_fence_is_later(updates, flushed))) { 468 - dma_fence_put(id->flushed_updates); 469 - id->flushed_updates = dma_fence_get(updates); 470 - } 471 - id->pd_gpu_addr = job->vm_pd_addr; 472 - atomic64_set(&id->owner, vm->client_id); 473 - job->vm_needs_flush = needs_flush; 474 - if (needs_flush) { 475 - dma_fence_put(id->last_flush); 476 - id->last_flush = NULL; 477 - } 478 - job->vm_id = id - id_mgr->ids; 479 - trace_amdgpu_vm_grab_id(vm, ring, job); 480 - out: 481 - return r; 482 - } 483 - 484 - /** 485 - * amdgpu_vm_grab_id - allocate the next free VMID 486 - * 487 - * @vm: vm to allocate id for 488 - * @ring: ring we want to submit job to 489 - * @sync: sync object where we add dependencies 490 - * @fence: fence protecting ID from reuse 491 - * 492 - * Allocate an id for the vm, adding fences to the sync obj as necessary. 
493 - */ 494 - int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 495 - struct amdgpu_sync *sync, struct dma_fence *fence, 496 - struct amdgpu_job *job) 497 - { 498 - struct amdgpu_device *adev = ring->adev; 499 - unsigned vmhub = ring->funcs->vmhub; 500 - struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 501 - uint64_t fence_context = adev->fence_context + ring->idx; 502 - struct dma_fence *updates = sync->last_vm_update; 503 - struct amdgpu_vm_id *id, *idle; 504 - struct dma_fence **fences; 505 - unsigned i; 506 - int r = 0; 507 - 508 - mutex_lock(&id_mgr->lock); 509 - if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) { 510 - r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job); 511 - mutex_unlock(&id_mgr->lock); 512 - return r; 513 - } 514 - fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); 515 - if (!fences) { 516 - mutex_unlock(&id_mgr->lock); 517 - return -ENOMEM; 518 - } 519 - /* Check if we have an idle VMID */ 520 - i = 0; 521 - list_for_each_entry(idle, &id_mgr->ids_lru, list) { 522 - fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); 523 - if (!fences[i]) 524 - break; 525 - ++i; 526 - } 527 - 528 - /* If we can't find a idle VMID to use, wait till one becomes available */ 529 - if (&idle->list == &id_mgr->ids_lru) { 530 - u64 fence_context = adev->vm_manager.fence_context + ring->idx; 531 - unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; 532 - struct dma_fence_array *array; 533 - unsigned j; 534 - 535 - for (j = 0; j < i; ++j) 536 - dma_fence_get(fences[j]); 537 - 538 - array = dma_fence_array_create(i, fences, fence_context, 539 - seqno, true); 540 - if (!array) { 541 - for (j = 0; j < i; ++j) 542 - dma_fence_put(fences[j]); 543 - kfree(fences); 544 - r = -ENOMEM; 545 - goto error; 546 - } 547 - 548 - 549 - r = amdgpu_sync_fence(ring->adev, sync, &array->base, false); 550 - dma_fence_put(&array->base); 551 - if (r) 552 - goto error; 553 - 554 - mutex_unlock(&id_mgr->lock); 
555 - return 0; 556 - 557 - } 558 - kfree(fences); 559 - 560 - job->vm_needs_flush = vm->use_cpu_for_update; 561 - /* Check if we can use a VMID already assigned to this VM */ 562 - list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) { 563 - struct dma_fence *flushed; 564 - bool needs_flush = vm->use_cpu_for_update; 565 - 566 - /* Check all the prerequisites to using this VMID */ 567 - if (amdgpu_vm_had_gpu_reset(adev, id)) 568 - continue; 569 - 570 - if (atomic64_read(&id->owner) != vm->client_id) 571 - continue; 572 - 573 - if (job->vm_pd_addr != id->pd_gpu_addr) 574 - continue; 575 - 576 - if (!id->last_flush || 577 - (id->last_flush->context != fence_context && 578 - !dma_fence_is_signaled(id->last_flush))) 579 - needs_flush = true; 580 - 581 - flushed = id->flushed_updates; 582 - if (updates && (!flushed || dma_fence_is_later(updates, flushed))) 583 - needs_flush = true; 584 - 585 - /* Concurrent flushes are only possible starting with Vega10 */ 586 - if (adev->asic_type < CHIP_VEGA10 && needs_flush) 587 - continue; 588 - 589 - /* Good we can use this VMID. Remember this submission as 590 - * user of the VMID. 591 - */ 592 - r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); 593 - if (r) 594 - goto error; 595 - 596 - if (updates && (!flushed || dma_fence_is_later(updates, flushed))) { 597 - dma_fence_put(id->flushed_updates); 598 - id->flushed_updates = dma_fence_get(updates); 599 - } 600 - 601 - if (needs_flush) 602 - goto needs_flush; 603 - else 604 - goto no_flush_needed; 605 - 606 - }; 607 - 608 - /* Still no ID to use? 
Then use the idle one found earlier */ 609 - id = idle; 610 - 611 - /* Remember this submission as user of the VMID */ 612 - r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); 613 - if (r) 614 - goto error; 615 - 616 - id->pd_gpu_addr = job->vm_pd_addr; 617 - dma_fence_put(id->flushed_updates); 618 - id->flushed_updates = dma_fence_get(updates); 619 - atomic64_set(&id->owner, vm->client_id); 620 - 621 - needs_flush: 622 - job->vm_needs_flush = true; 623 - dma_fence_put(id->last_flush); 624 - id->last_flush = NULL; 625 - 626 - no_flush_needed: 627 - list_move_tail(&id->list, &id_mgr->ids_lru); 628 - 629 - job->vm_id = id - id_mgr->ids; 630 - trace_amdgpu_vm_grab_id(vm, ring, job); 631 - 632 - error: 633 - mutex_unlock(&id_mgr->lock); 634 - return r; 635 - } 636 - 637 - static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev, 638 - struct amdgpu_vm *vm, 639 - unsigned vmhub) 640 - { 641 - struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 642 - 643 - mutex_lock(&id_mgr->lock); 644 - if (vm->reserved_vmid[vmhub]) { 645 - list_add(&vm->reserved_vmid[vmhub]->list, 646 - &id_mgr->ids_lru); 647 - vm->reserved_vmid[vmhub] = NULL; 648 - atomic_dec(&id_mgr->reserved_vmid_num); 649 - } 650 - mutex_unlock(&id_mgr->lock); 651 - } 652 - 653 - static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev, 654 - struct amdgpu_vm *vm, 655 - unsigned vmhub) 656 - { 657 - struct amdgpu_vm_id_manager *id_mgr; 658 - struct amdgpu_vm_id *idle; 659 - int r = 0; 660 - 661 - id_mgr = &adev->vm_manager.id_mgr[vmhub]; 662 - mutex_lock(&id_mgr->lock); 663 - if (vm->reserved_vmid[vmhub]) 664 - goto unlock; 665 - if (atomic_inc_return(&id_mgr->reserved_vmid_num) > 666 - AMDGPU_VM_MAX_RESERVED_VMID) { 667 - DRM_ERROR("Over limitation of reserved vmid\n"); 668 - atomic_dec(&id_mgr->reserved_vmid_num); 669 - r = -EINVAL; 670 - goto unlock; 671 - } 672 - /* Select the first entry VMID */ 673 - idle = list_first_entry(&id_mgr->ids_lru, struct 
amdgpu_vm_id, list); 674 - list_del_init(&idle->list); 675 - vm->reserved_vmid[vmhub] = idle; 676 - mutex_unlock(&id_mgr->lock); 677 - 678 - return 0; 679 - unlock: 680 - mutex_unlock(&id_mgr->lock); 681 - return r; 682 - } 683 - 684 - /** 685 451 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug 686 452 * 687 453 * @adev: amdgpu_device pointer ··· 441 767 { 442 768 struct amdgpu_device *adev = ring->adev; 443 769 unsigned vmhub = ring->funcs->vmhub; 444 - struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 445 - struct amdgpu_vm_id *id; 770 + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 771 + struct amdgpu_vmid *id; 446 772 bool gds_switch_needed; 447 773 bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug; 448 774 ··· 457 783 id->oa_base != job->oa_base || 458 784 id->oa_size != job->oa_size); 459 785 460 - if (amdgpu_vm_had_gpu_reset(adev, id)) 786 + if (amdgpu_vmid_had_gpu_reset(adev, id)) 461 787 return true; 462 788 463 789 return vm_flush_needed || gds_switch_needed; ··· 481 807 { 482 808 struct amdgpu_device *adev = ring->adev; 483 809 unsigned vmhub = ring->funcs->vmhub; 484 - struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 485 - struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id]; 810 + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 811 + struct amdgpu_vmid *id = &id_mgr->ids[job->vm_id]; 486 812 bool gds_switch_needed = ring->funcs->emit_gds_switch && ( 487 813 id->gds_base != job->gds_base || 488 814 id->gds_size != job->gds_size || ··· 494 820 unsigned patch_offset = 0; 495 821 int r; 496 822 497 - if (amdgpu_vm_had_gpu_reset(adev, id)) { 823 + if (amdgpu_vmid_had_gpu_reset(adev, id)) { 498 824 gds_switch_needed = true; 499 825 vm_flush_needed = true; 500 826 } ··· 547 873 amdgpu_ring_emit_switch_buffer(ring); 548 874 } 549 875 return 0; 550 - } 551 - 552 - /** 553 - * amdgpu_vm_reset_id - reset VMID to zero 554 - * 555 - * @adev: 
amdgpu device structure 556 - * @vm_id: vmid number to use 557 - * 558 - * Reset saved GDW, GWS and OA to force switch on next flush. 559 - */ 560 - void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, 561 - unsigned vmid) 562 - { 563 - struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 564 - struct amdgpu_vm_id *id = &id_mgr->ids[vmid]; 565 - 566 - atomic64_set(&id->owner, 0); 567 - id->gds_base = 0; 568 - id->gds_size = 0; 569 - id->gws_base = 0; 570 - id->gws_size = 0; 571 - id->oa_base = 0; 572 - id->oa_size = 0; 573 - } 574 - 575 - /** 576 - * amdgpu_vm_reset_all_id - reset VMID to zero 577 - * 578 - * @adev: amdgpu device structure 579 - * 580 - * Reset VMID to force flush on next use 581 - */ 582 - void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev) 583 - { 584 - unsigned i, j; 585 - 586 - for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 587 - struct amdgpu_vm_id_manager *id_mgr = 588 - &adev->vm_manager.id_mgr[i]; 589 - 590 - for (j = 1; j < id_mgr->num_ids; ++j) 591 - amdgpu_vm_reset_id(adev, i, j); 592 - } 593 876 } 594 877 595 878 /** ··· 2450 2819 amdgpu_bo_unref(&root); 2451 2820 dma_fence_put(vm->last_update); 2452 2821 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) 2453 - amdgpu_vm_free_reserved_vmid(adev, vm, i); 2822 + amdgpu_vmid_free_reserved(adev, vm, i); 2454 2823 } 2455 2824 2456 2825 /** ··· 2492 2861 */ 2493 2862 void amdgpu_vm_manager_init(struct amdgpu_device *adev) 2494 2863 { 2495 - unsigned i, j; 2864 + unsigned i; 2496 2865 2497 - for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 2498 - struct amdgpu_vm_id_manager *id_mgr = 2499 - &adev->vm_manager.id_mgr[i]; 2500 - 2501 - mutex_init(&id_mgr->lock); 2502 - INIT_LIST_HEAD(&id_mgr->ids_lru); 2503 - atomic_set(&id_mgr->reserved_vmid_num, 0); 2504 - 2505 - /* skip over VMID 0, since it is the system VM */ 2506 - for (j = 1; j < id_mgr->num_ids; ++j) { 2507 - amdgpu_vm_reset_id(adev, i, j); 2508 - amdgpu_sync_create(&id_mgr->ids[i].active); 2509 - 
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru); 2510 - } 2511 - } 2866 + amdgpu_vmid_mgr_init(adev); 2512 2867 2513 2868 adev->vm_manager.fence_context = 2514 2869 dma_fence_context_alloc(AMDGPU_MAX_RINGS); ··· 2535 2918 */ 2536 2919 void amdgpu_vm_manager_fini(struct amdgpu_device *adev) 2537 2920 { 2538 - unsigned i, j; 2539 - 2540 2921 WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr)); 2541 2922 idr_destroy(&adev->vm_manager.pasid_idr); 2542 2923 2543 - for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 2544 - struct amdgpu_vm_id_manager *id_mgr = 2545 - &adev->vm_manager.id_mgr[i]; 2546 - 2547 - mutex_destroy(&id_mgr->lock); 2548 - for (j = 0; j < AMDGPU_NUM_VM; ++j) { 2549 - struct amdgpu_vm_id *id = &id_mgr->ids[j]; 2550 - 2551 - amdgpu_sync_free(&id->active); 2552 - dma_fence_put(id->flushed_updates); 2553 - dma_fence_put(id->last_flush); 2554 - } 2555 - } 2924 + amdgpu_vmid_mgr_fini(adev); 2556 2925 } 2557 2926 2558 2927 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ··· 2551 2948 switch (args->in.op) { 2552 2949 case AMDGPU_VM_OP_RESERVE_VMID: 2553 2950 /* current, we only have requirement to reserve vmid from gfxhub */ 2554 - r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm, 2555 - AMDGPU_GFXHUB); 2951 + r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB); 2556 2952 if (r) 2557 2953 return r; 2558 2954 break; 2559 2955 case AMDGPU_VM_OP_UNRESERVE_VMID: 2560 - amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB); 2956 + amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB); 2561 2957 break; 2562 2958 default: 2563 2959 return -EINVAL;
+3 -41
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 31 31 32 32 #include "amdgpu_sync.h" 33 33 #include "amdgpu_ring.h" 34 + #include "amdgpu_ids.h" 34 35 35 36 struct amdgpu_bo_va; 36 37 struct amdgpu_job; ··· 40 39 /* 41 40 * GPUVM handling 42 41 */ 43 - 44 - /* maximum number of VMIDs */ 45 - #define AMDGPU_NUM_VM 16 46 42 47 43 /* Maximum number of PTEs the hardware can write with one command */ 48 44 #define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF ··· 195 197 u64 client_id; 196 198 unsigned int pasid; 197 199 /* dedicated to vm */ 198 - struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS]; 200 + struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS]; 199 201 200 202 /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ 201 203 bool use_cpu_for_update; ··· 210 212 unsigned int fault_credit; 211 213 }; 212 214 213 - struct amdgpu_vm_id { 214 - struct list_head list; 215 - struct amdgpu_sync active; 216 - struct dma_fence *last_flush; 217 - atomic64_t owner; 218 - 219 - uint64_t pd_gpu_addr; 220 - /* last flushed PD/PT update */ 221 - struct dma_fence *flushed_updates; 222 - 223 - uint32_t current_gpu_reset_count; 224 - 225 - uint32_t gds_base; 226 - uint32_t gds_size; 227 - uint32_t gws_base; 228 - uint32_t gws_size; 229 - uint32_t oa_base; 230 - uint32_t oa_size; 231 - }; 232 - 233 - struct amdgpu_vm_id_manager { 234 - struct mutex lock; 235 - unsigned num_ids; 236 - struct list_head ids_lru; 237 - struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; 238 - atomic_t reserved_vmid_num; 239 - }; 240 - 241 215 struct amdgpu_vm_manager { 242 216 /* Handling of VMIDs */ 243 - struct amdgpu_vm_id_manager id_mgr[AMDGPU_MAX_VMHUBS]; 217 + struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS]; 244 218 245 219 /* Handling of VM fences */ 246 220 u64 fence_context; ··· 250 280 spinlock_t pasid_lock; 251 281 }; 252 282 253 - int amdgpu_vm_alloc_pasid(unsigned int bits); 254 - void amdgpu_vm_free_pasid(unsigned int pasid); 255 283 void amdgpu_vm_manager_init(struct amdgpu_device *adev); 256 284 void amdgpu_vm_manager_fini(struct 
amdgpu_device *adev); 257 285 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, ··· 267 299 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, 268 300 struct amdgpu_vm *vm, 269 301 uint64_t saddr, uint64_t size); 270 - int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 271 - struct amdgpu_sync *sync, struct dma_fence *fence, 272 - struct amdgpu_job *job); 273 302 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); 274 - void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, 275 - unsigned vmid); 276 - void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev); 277 303 int amdgpu_vm_update_directories(struct amdgpu_device *adev, 278 304 struct amdgpu_vm *vm); 279 305 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
··· 956 956 if (r) 957 957 return r; 958 958 959 - amdgpu_vm_reset_all_ids(adev); 959 + amdgpu_vmid_reset_all(adev); 960 960 961 961 return 0; 962 962 }
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 1107 1107 if (r) 1108 1108 return r; 1109 1109 1110 - amdgpu_vm_reset_all_ids(adev); 1110 + amdgpu_vmid_reset_all(adev); 1111 1111 1112 1112 return 0; 1113 1113 }
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 1212 1212 if (r) 1213 1213 return r; 1214 1214 1215 - amdgpu_vm_reset_all_ids(adev); 1215 + amdgpu_vmid_reset_all(adev); 1216 1216 1217 1217 return 0; 1218 1218 }
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 1056 1056 if (r) 1057 1057 return r; 1058 1058 1059 - amdgpu_vm_reset_all_ids(adev); 1059 + amdgpu_vmid_reset_all(adev); 1060 1060 1061 1061 return 0; 1062 1062 }