Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.19-rc1, 409 lines, 11 kB
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */

#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>

static const struct dma_buf_ops amdgpu_dmabuf_ops;

/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * A scatter/gather table for the pinned pages of the buffer object's memory.
 */
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        int npages = bo->tbo.num_pages;

        return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM buffer object
 *
 * Sets up an in-kernel virtual mapping of the buffer object's memory.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        int ret;

        ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
                          &bo->dma_buf_vmap);
        if (ret)
                return ERR_PTR(ret);

        return bo->dma_buf_vmap.virtual;
}

/**
 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 * @obj: GEM buffer object
 * @vaddr: virtual address (unused)
 *
 * Tears down the in-kernel virtual mapping of the buffer object's memory.
 */
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

        ttm_bo_kunmap(&bo->dma_buf_vmap);
}

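/*
 * A minimal usage sketch, assuming "dmabuf" is a &dma_buf exported by this
 * driver (error handling abbreviated): importers do not call the two helpers
 * above directly. They go through the dma-buf core, whose &dma_buf_ops.vmap
 * and .vunmap callbacks (drm_gem_dmabuf_vmap/vunmap, wired up further below)
 * forward to amdgpu_gem_prime_vmap()/vunmap():
 *
 *      void *vaddr = dma_buf_vmap(dmabuf);
 *      if (!vaddr)
 *              return -ENOMEM;
 *      ... CPU access through vaddr ...
 *      dma_buf_vunmap(dmabuf, vaddr);
 */
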
/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM buffer object
 * @vma: virtual memory area
 *
 * Sets up a userspace mapping of the buffer object's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or negative error code.
 */
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        unsigned asize = amdgpu_bo_size(bo);
        int ret;

        if (!vma->vm_file)
                return -ENODEV;

        if (adev == NULL)
                return -ENODEV;

        /* Check for valid size. */
        if (asize < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
            (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                return -EPERM;
        }
        vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

        /* prime mmap does not need to check access, so allow here */
        ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
        if (ret)
                return ret;

        ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
        drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

        return ret;
}

/**
 * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
 * implementation
 * @dev: DRM device
 * @attach: DMA-buf attachment
 * @sg: Scatter/gather table
 *
 * Import shared DMA buffer memory exported by another device.
 *
 * Returns:
 * A new GEM buffer object of the given DRM device, representing the memory
 * described by the given DMA-buf attachment and scatter/gather table.
 */
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
                                 struct dma_buf_attachment *attach,
                                 struct sg_table *sg)
{
        struct reservation_object *resv = attach->dmabuf->resv;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
        int ret;

        memset(&bp, 0, sizeof(bp));
        bp.size = attach->dmabuf->size;
        bp.byte_align = PAGE_SIZE;
        bp.domain = AMDGPU_GEM_DOMAIN_CPU;
        bp.flags = 0;
        bp.type = ttm_bo_type_sg;
        bp.resv = resv;
        ww_mutex_lock(&resv->lock, NULL);
        ret = amdgpu_bo_create(adev, &bp, &bo);
        if (ret)
                goto error;

        bo->tbo.sg = sg;
        bo->tbo.ttm->sg = sg;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
        if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
                bo->prime_shared_count = 1;

        ww_mutex_unlock(&resv->lock);
        return &bo->gem_base;

error:
        ww_mutex_unlock(&resv->lock);
        return ERR_PTR(ret);
}

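/*
 * A minimal sketch of the usual route into the function above: a userspace
 * PRIME import, here via the libdrm wrapper, assuming "drm_fd" is an open
 * amdgpu DRM fd and "dmabuf_fd" a dma-buf fd from another device:
 *
 *      uint32_t handle;
 *      int err = drmPrimeFDToHandle(drm_fd, dmabuf_fd, &handle);
 *
 * The DRM core resolves the fd to a &dma_buf, attaches and maps it, and then
 * hands the resulting scatter/gather table to
 * amdgpu_gem_prime_import_sg_table() to wrap it in a new GEM object.
 */
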
/**
 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
 * @dma_buf: shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * 0 on success or negative error code.
 */
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
                                 struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        long r;

        r = drm_gem_map_attach(dma_buf, attach);
        if (r)
                return r;

        r = amdgpu_bo_reserve(bo, false);
        if (unlikely(r != 0))
                goto error_detach;

        if (attach->dev->driver != adev->dev->driver) {
                /*
                 * Wait for all shared fences to complete before we switch to
                 * future use of exclusive fence on this prime shared bo.
                 */
                r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
                                                        true, false,
                                                        MAX_SCHEDULE_TIMEOUT);
                if (unlikely(r < 0)) {
                        DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
                        goto error_unreserve;
                }
        }

        /* pin buffer into GTT */
        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
        if (r)
                goto error_unreserve;

        if (attach->dev->driver != adev->dev->driver)
                bo->prime_shared_count++;

error_unreserve:
        amdgpu_bo_unreserve(bo);

error_detach:
        if (r)
                drm_gem_map_detach(dma_buf, attach);
        return r;
}

/**
 * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
 * @dma_buf: shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * the other device. For now, simply unpins the buffer from GTT.
 */
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
                                  struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int ret = 0;

        ret = amdgpu_bo_reserve(bo, true);
        if (unlikely(ret != 0))
                goto error;

        amdgpu_bo_unpin(bo);
        if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
                bo->prime_shared_count--;
        amdgpu_bo_unreserve(bo);

error:
        drm_gem_map_detach(dma_buf, attach);
}

/**
 * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * The buffer object's reservation object.
 */
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

        return bo->tbo.resv;
}

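/*
 * A minimal in-kernel sketch of how the attach/detach callbacks above are
 * reached, assuming "dmabuf" was exported by amdgpu and "dev" is the
 * importing &struct device (error handling abbreviated):
 *
 *      struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *      struct sg_table *sgt;
 *
 *      if (IS_ERR(attach))
 *              return PTR_ERR(attach);
 *      sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *      ... DMA to/from the buffer ...
 *      dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *      dma_buf_detach(dmabuf, attach);
 *
 * dma_buf_attach() ends up in amdgpu_gem_map_attach(), which pins the buffer
 * into GTT; dma_buf_detach() ends up in amdgpu_gem_map_detach(), which unpins.
 */
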
/**
 * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: shared DMA buffer
 * @direction: direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for
 * optimal CPU read performance.
 *
 * Returns:
 * 0 on success or negative error code.
 */
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
                                       enum dma_data_direction direction)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { true, false };
        u32 domain = amdgpu_display_supported_domains(adev);
        int ret;
        bool reads = (direction == DMA_BIDIRECTIONAL ||
                      direction == DMA_FROM_DEVICE);

        if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
                return 0;

        /* move to gtt */
        ret = amdgpu_bo_reserve(bo, false);
        if (unlikely(ret != 0))
                return ret;

        if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        }

        amdgpu_bo_unreserve(bo);
        return ret;
}

static const struct dma_buf_ops amdgpu_dmabuf_ops = {
        .attach = amdgpu_gem_map_attach,
        .detach = amdgpu_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .begin_cpu_access = amdgpu_gem_begin_cpu_access,
        .map = drm_gem_dmabuf_kmap,
        .unmap = drm_gem_dmabuf_kunmap,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @dev: DRM device
 * @gobj: GEM buffer object
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * The main work is done by the &drm_gem_prime_export helper, which in turn
 * uses &amdgpu_gem_prime_res_obj.
 *
 * Returns:
 * Shared DMA buffer representing the GEM buffer object from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
                                        struct drm_gem_object *gobj,
                                        int flags)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct dma_buf *buf;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
            bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                return ERR_PTR(-EPERM);

        buf = drm_gem_prime_export(dev, gobj, flags);
        if (!IS_ERR(buf)) {
                buf->file->f_mapping = dev->anon_inode->i_mapping;
                buf->ops = &amdgpu_dmabuf_ops;
        }

        return buf;
}

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * The main work is done by the &drm_gem_prime_import helper, which in turn
 * uses &amdgpu_gem_prime_import_sg_table.
 *
 * Returns:
 * GEM buffer object representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
                                               struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj;

        if (dma_buf->ops == &amdgpu_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own GEM
                         * increases the refcount on the GEM object itself
                         * instead of the f_count of the dmabuf.
                         */
                        drm_gem_object_get(obj);
                        return obj;
                }
        }

        return drm_gem_prime_import(dev, dma_buf);
}

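/*
 * A minimal userspace sketch tying the export and import callbacks together,
 * using the libdrm PRIME wrappers; "fd_a" and "fd_b" are assumed to be open
 * DRM fds and "handle" an existing GEM handle on fd_a (error handling
 * elided):
 *
 *      int dmabuf_fd;
 *      uint32_t imported;
 *
 *      drmPrimeHandleToFD(fd_a, handle, DRM_CLOEXEC | DRM_RDWR, &dmabuf_fd);
 *      drmPrimeFDToHandle(fd_b, dmabuf_fd, &imported);
 *
 * The first call ends up in amdgpu_gem_prime_export(); the second ends up in
 * amdgpu_gem_prime_import(), which either takes the short cut above (same
 * device) or rebuilds the buffer via amdgpu_gem_prime_import_sg_table().
 */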