Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/tegra: Implement job submission part of new UAPI

Implement the job submission IOCTL with a minimum feature set.

Signed-off-by: Mikko Perttunen <mperttunen@nvidia.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>

Authored by Mikko Perttunen, committed by Thierry Reding
13abe0bb 44e96138
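
For orientation, here is a minimal userspace invocation of the new IOCTL. This is a hypothetical sketch, not part of the patch: it assumes the UAPI definitions this series adds to include/uapi/drm/tegra_drm.h, a channel context already opened via TEGRA_CHANNEL_OPEN, and a syncpoint allocated via TEGRA_SYNCPOINT_ALLOCATE. It submits a single GATHER_UPTR command with no buffer relocations.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/tegra_drm.h>

static int submit_one_gather(int drm_fd, uint32_t context, uint32_t syncpt_id,
			     const uint32_t *words, uint32_t num_words)
{
	struct drm_tegra_submit_cmd cmd;
	struct drm_tegra_channel_submit args;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR;
	cmd.gather_uptr.words = num_words;	/* the kernel rejects > 16383 */

	memset(&args, 0, sizeof(args));
	args.context = context;			/* from TEGRA_CHANNEL_OPEN */
	args.num_cmds = 1;
	args.cmds_ptr = (uintptr_t)&cmd;
	args.gather_data_ptr = (uintptr_t)words;
	args.gather_data_words = num_words;	/* must be non-zero */
	args.syncpt.id = syncpt_id;		/* from TEGRA_SYNCPOINT_ALLOCATE */
	args.syncpt.increments = 1;		/* the gather must increment it once */

	if (ioctl(drm_fd, DRM_IOCTL_TEGRA_CHANNEL_SUBMIT, &args))
		return -1;

	/* args.syncpt.value now holds the syncpoint threshold that
	 * signals completion of this job. */
	return 0;
}

Because submit_copy_gather_data() copies the gather words into a kernel-owned DMA buffer during the ioctl, the words array may be freed as soon as the call returns.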

4 files changed: +639 -1

drivers/gpu/drm/tegra/Makefile (+1)

···
 tegra-drm-y := \
 	drm.o \
 	uapi.o \
+	submit.o \
 	gem.o \
 	fb.o \
 	dp.o \
drivers/gpu/drm/tegra/drm.c (+3 -1)

···
 			  DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_UNMAP, tegra_drm_ioctl_channel_unmap,
 			  DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_SUBMIT, tegra_drm_ioctl_channel_submit,
+			  DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_ALLOCATE, tegra_drm_ioctl_syncpoint_allocate,
 			  DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_FREE, tegra_drm_ioctl_syncpoint_free,
···
 
 static const struct drm_driver tegra_drm_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM |
-			   DRIVER_ATOMIC | DRIVER_RENDER,
+			   DRIVER_ATOMIC | DRIVER_RENDER | DRIVER_SYNCOBJ,
 	.open = tegra_drm_open,
 	.postclose = tegra_drm_postclose,
 	.lastclose = drm_fb_helper_lastclose,
drivers/gpu/drm/tegra/submit.c (+618, new file)

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 NVIDIA Corporation */

#include <linux/dma-fence-array.h>
#include <linux/dma-mapping.h>
#include <linux/file.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

#include "drm.h"
#include "gem.h"
#include "submit.h"
#include "uapi.h"

#define SUBMIT_ERR(context, fmt, ...) \
	dev_err_ratelimited(context->client->base.dev, \
			    "%s: job submission failed: " fmt "\n", \
			    current->comm, ##__VA_ARGS__)

struct gather_bo {
	struct host1x_bo base;

	struct kref ref;

	struct device *dev;
	u32 *gather_data;
	dma_addr_t gather_data_dma;
	size_t gather_data_words;
};

static struct host1x_bo *gather_bo_get(struct host1x_bo *host_bo)
{
	struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);

	kref_get(&bo->ref);

	return host_bo;
}

static void gather_bo_release(struct kref *ref)
{
	struct gather_bo *bo = container_of(ref, struct gather_bo, ref);

	dma_free_attrs(bo->dev, bo->gather_data_words * 4, bo->gather_data, bo->gather_data_dma,
		       0);
	kfree(bo);
}

static void gather_bo_put(struct host1x_bo *host_bo)
{
	struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);

	kref_put(&bo->ref, gather_bo_release);
}

static struct sg_table *
gather_bo_pin(struct device *dev, struct host1x_bo *host_bo, dma_addr_t *phys)
{
	struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
	struct sg_table *sgt;
	int err;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = dma_get_sgtable(bo->dev, sgt, bo->gather_data, bo->gather_data_dma,
			      bo->gather_data_words * 4);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	return sgt;
}

static void gather_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

static void *gather_bo_mmap(struct host1x_bo *host_bo)
{
	struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);

	return bo->gather_data;
}

static void gather_bo_munmap(struct host1x_bo *host_bo, void *addr)
{
}

const struct host1x_bo_ops gather_bo_ops = {
	.get = gather_bo_get,
	.put = gather_bo_put,
	.pin = gather_bo_pin,
	.unpin = gather_bo_unpin,
	.mmap = gather_bo_mmap,
	.munmap = gather_bo_munmap,
};

static struct tegra_drm_mapping *
tegra_drm_mapping_get(struct tegra_drm_context *context, u32 id)
{
	struct tegra_drm_mapping *mapping;

	xa_lock(&context->mappings);

	mapping = xa_load(&context->mappings, id);
	if (mapping)
		kref_get(&mapping->ref);

	xa_unlock(&context->mappings);

	return mapping;
}

static void *alloc_copy_user_array(void __user *from, size_t count, size_t size)
{
	size_t copy_len;
	void *data;

	if (check_mul_overflow(count, size, &copy_len))
		return ERR_PTR(-EINVAL);

	if (copy_len > 0x4000)
		return ERR_PTR(-E2BIG);

	data = kvmalloc(copy_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data, from, copy_len)) {
		kvfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

static int submit_copy_gather_data(struct gather_bo **pbo, struct device *dev,
				   struct tegra_drm_context *context,
				   struct drm_tegra_channel_submit *args)
{
	struct gather_bo *bo;
	size_t copy_len;

	if (args->gather_data_words == 0) {
		SUBMIT_ERR(context, "gather_data_words cannot be zero");
		return -EINVAL;
	}

	if (check_mul_overflow((size_t)args->gather_data_words, (size_t)4, &copy_len)) {
		SUBMIT_ERR(context, "gather_data_words is too large");
		return -EINVAL;
	}

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo) {
		SUBMIT_ERR(context, "failed to allocate memory for bo info");
		return -ENOMEM;
	}

	host1x_bo_init(&bo->base, &gather_bo_ops);
	kref_init(&bo->ref);
	bo->dev = dev;

	bo->gather_data = dma_alloc_attrs(dev, copy_len, &bo->gather_data_dma,
					  GFP_KERNEL | __GFP_NOWARN, 0);
	if (!bo->gather_data) {
		SUBMIT_ERR(context, "failed to allocate memory for gather data");
		kfree(bo);
		return -ENOMEM;
	}

	if (copy_from_user(bo->gather_data, u64_to_user_ptr(args->gather_data_ptr), copy_len)) {
		SUBMIT_ERR(context, "failed to copy gather data from userspace");
		dma_free_attrs(dev, copy_len, bo->gather_data, bo->gather_data_dma, 0);
		kfree(bo);
		return -EFAULT;
	}

	bo->gather_data_words = args->gather_data_words;

	*pbo = bo;

	return 0;
}

static int submit_write_reloc(struct tegra_drm_context *context, struct gather_bo *bo,
			      struct drm_tegra_submit_buf *buf, struct tegra_drm_mapping *mapping)
{
	/* TODO check that target_offset is within bounds */
	dma_addr_t iova = mapping->iova + buf->reloc.target_offset;
	u32 written_ptr;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (buf->flags & DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT)
		iova |= BIT_ULL(39);
#endif

	written_ptr = iova >> buf->reloc.shift;

	if (buf->reloc.gather_offset_words >= bo->gather_data_words) {
		SUBMIT_ERR(context,
			   "relocation has too large gather offset (%u vs gather length %zu)",
			   buf->reloc.gather_offset_words, bo->gather_data_words);
		return -EINVAL;
	}

	buf->reloc.gather_offset_words = array_index_nospec(buf->reloc.gather_offset_words,
							    bo->gather_data_words);

	bo->gather_data[buf->reloc.gather_offset_words] = written_ptr;

	return 0;
}

static int submit_process_bufs(struct tegra_drm_context *context, struct gather_bo *bo,
			       struct drm_tegra_channel_submit *args,
			       struct tegra_drm_submit_data *job_data)
{
	struct tegra_drm_used_mapping *mappings;
	struct drm_tegra_submit_buf *bufs;
	int err;
	u32 i;

	bufs = alloc_copy_user_array(u64_to_user_ptr(args->bufs_ptr), args->num_bufs,
				     sizeof(*bufs));
	if (IS_ERR(bufs)) {
		SUBMIT_ERR(context, "failed to copy bufs array from userspace");
		return PTR_ERR(bufs);
	}

	mappings = kcalloc(args->num_bufs, sizeof(*mappings), GFP_KERNEL);
	if (!mappings) {
		SUBMIT_ERR(context, "failed to allocate memory for mapping info");
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < args->num_bufs; i++) {
		struct drm_tegra_submit_buf *buf = &bufs[i];
		struct tegra_drm_mapping *mapping;

		if (buf->flags & ~DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT) {
			SUBMIT_ERR(context, "invalid flag specified for buffer");
			err = -EINVAL;
			goto drop_refs;
		}

		mapping = tegra_drm_mapping_get(context, buf->mapping);
		if (!mapping) {
			SUBMIT_ERR(context, "invalid mapping ID '%u' for buffer", buf->mapping);
			err = -EINVAL;
			goto drop_refs;
		}

		err = submit_write_reloc(context, bo, buf, mapping);
		if (err) {
			tegra_drm_mapping_put(mapping);
			goto drop_refs;
		}

		mappings[i].mapping = mapping;
		mappings[i].flags = buf->flags;
	}

	job_data->used_mappings = mappings;
	job_data->num_used_mappings = i;

	err = 0;

	goto done;

drop_refs:
	while (i--)
		tegra_drm_mapping_put(mappings[i].mapping);

	kfree(mappings);
	job_data->used_mappings = NULL;

done:
	kvfree(bufs);

	return err;
}

static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
			     struct xarray *syncpoints, struct drm_tegra_channel_submit *args)
{
	struct host1x_syncpt *sp;

	if (args->syncpt.flags) {
		SUBMIT_ERR(context, "invalid flag specified for syncpt");
		return -EINVAL;
	}

	/* Syncpt ref will be dropped on job release */
	sp = xa_load(syncpoints, args->syncpt.id);
	if (!sp) {
		SUBMIT_ERR(context, "syncpoint specified in syncpt was not allocated");
		return -EINVAL;
	}

	job->syncpt = host1x_syncpt_get(sp);
	job->syncpt_incrs = args->syncpt.increments;

	return 0;
}

static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
				 struct drm_tegra_submit_cmd_gather_uptr *cmd,
				 struct gather_bo *bo, u32 *offset,
				 struct tegra_drm_submit_data *job_data)
{
	u32 next_offset;

	if (cmd->reserved[0] || cmd->reserved[1] || cmd->reserved[2]) {
		SUBMIT_ERR(context, "non-zero reserved field in GATHER_UPTR command");
		return -EINVAL;
	}

	/* Check for maximum gather size */
	if (cmd->words > 16383) {
		SUBMIT_ERR(context, "too many words in GATHER_UPTR command");
		return -EINVAL;
	}

	if (check_add_overflow(*offset, cmd->words, &next_offset)) {
		SUBMIT_ERR(context, "too many total words in job");
		return -EINVAL;
	}

	if (next_offset > bo->gather_data_words) {
		SUBMIT_ERR(context, "GATHER_UPTR command overflows gather data");
		return -EINVAL;
	}

	host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);

	*offset = next_offset;

	return 0;
}

static struct host1x_job *
submit_create_job(struct tegra_drm_context *context, struct gather_bo *bo,
		  struct drm_tegra_channel_submit *args, struct tegra_drm_submit_data *job_data,
		  struct xarray *syncpoints)
{
	struct drm_tegra_submit_cmd *cmds;
	u32 i, gather_offset = 0, class;
	struct host1x_job *job;
	int err;

	/* Set initial class for firewall. */
	class = context->client->base.class;

	cmds = alloc_copy_user_array(u64_to_user_ptr(args->cmds_ptr), args->num_cmds,
				     sizeof(*cmds));
	if (IS_ERR(cmds)) {
		SUBMIT_ERR(context, "failed to copy cmds array from userspace");
		return ERR_CAST(cmds);
	}

	job = host1x_job_alloc(context->channel, args->num_cmds, 0, true);
	if (!job) {
		SUBMIT_ERR(context, "failed to allocate memory for job");
		job = ERR_PTR(-ENOMEM);
		goto done;
	}

	err = submit_get_syncpt(context, job, syncpoints, args);
	if (err < 0)
		goto free_job;

	job->client = &context->client->base;
	job->class = context->client->base.class;
	job->serialize = true;

	for (i = 0; i < args->num_cmds; i++) {
		struct drm_tegra_submit_cmd *cmd = &cmds[i];

		if (cmd->flags) {
			SUBMIT_ERR(context, "unknown flags given for cmd");
			err = -EINVAL;
			goto free_job;
		}

		if (cmd->type == DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR) {
			err = submit_job_add_gather(job, context, &cmd->gather_uptr, bo,
						    &gather_offset, job_data);
			if (err)
				goto free_job;
		} else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT) {
			if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
				SUBMIT_ERR(context, "non-zero reserved value");
				err = -EINVAL;
				goto free_job;
			}

			host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
					    false, class);
		} else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE) {
			if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
				SUBMIT_ERR(context, "non-zero reserved value");
				err = -EINVAL;
				goto free_job;
			}

			if (cmd->wait_syncpt.id != args->syncpt.id) {
				SUBMIT_ERR(context, "syncpoint ID in CMD_WAIT_SYNCPT_RELATIVE is not used by the job");
				err = -EINVAL;
				goto free_job;
			}

			host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
					    true, class);
		} else {
			SUBMIT_ERR(context, "unknown cmd type");
			err = -EINVAL;
			goto free_job;
		}
	}

	if (gather_offset == 0) {
		SUBMIT_ERR(context, "job must have at least one gather");
		err = -EINVAL;
		goto free_job;
	}

	goto done;

free_job:
	host1x_job_put(job);
	job = ERR_PTR(err);

done:
	kvfree(cmds);

	return job;
}

static void release_job(struct host1x_job *job)
{
	struct tegra_drm_client *client = container_of(job->client, struct tegra_drm_client, base);
	struct tegra_drm_submit_data *job_data = job->user_data;
	u32 i;

	for (i = 0; i < job_data->num_used_mappings; i++)
		tegra_drm_mapping_put(job_data->used_mappings[i].mapping);

	kfree(job_data->used_mappings);
	kfree(job_data);

	if (pm_runtime_enabled(client->base.dev))
		pm_runtime_put_autosuspend(client->base.dev);
}

int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
				   struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_submit *args = data;
	struct tegra_drm_submit_data *job_data;
	struct drm_syncobj *syncobj = NULL;
	struct tegra_drm_context *context;
	struct host1x_job *job;
	struct gather_bo *bo;
	u32 i;
	int err;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		pr_err_ratelimited("%s: %s: invalid channel context '%#x'", __func__,
				   current->comm, args->context);
		return -EINVAL;
	}

	if (args->syncobj_in) {
		struct dma_fence *fence;

		err = drm_syncobj_find_fence(file, args->syncobj_in, 0, 0, &fence);
		if (err) {
			SUBMIT_ERR(context, "invalid syncobj_in '%#x'", args->syncobj_in);
			goto unlock;
		}

		err = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(10000));
		dma_fence_put(fence);
		if (err) {
			SUBMIT_ERR(context, "wait for syncobj_in timed out");
			goto unlock;
		}
	}

	if (args->syncobj_out) {
		syncobj = drm_syncobj_find(file, args->syncobj_out);
		if (!syncobj) {
			SUBMIT_ERR(context, "invalid syncobj_out '%#x'", args->syncobj_out);
			err = -ENOENT;
			goto unlock;
		}
	}

	/* Allocate gather BO and copy gather words in. */
	err = submit_copy_gather_data(&bo, drm->dev, context, args);
	if (err)
		goto unlock;

	job_data = kzalloc(sizeof(*job_data), GFP_KERNEL);
	if (!job_data) {
		SUBMIT_ERR(context, "failed to allocate memory for job data");
		err = -ENOMEM;
		goto put_bo;
	}

	/* Get data buffer mappings and do relocation patching. */
	err = submit_process_bufs(context, bo, args, job_data);
	if (err)
		goto free_job_data;

	/* Allocate host1x_job and add gathers and waits to it. */
	job = submit_create_job(context, bo, args, job_data, &fpriv->syncpoints);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);
		goto free_job_data;
	}

	/* Map gather data for Host1x. */
	err = host1x_job_pin(job, context->client->base.dev);
	if (err) {
		SUBMIT_ERR(context, "failed to pin job: %d", err);
		goto put_job;
	}

	/* Boot engine. */
	if (pm_runtime_enabled(context->client->base.dev)) {
		err = pm_runtime_resume_and_get(context->client->base.dev);
		if (err < 0) {
			SUBMIT_ERR(context, "could not power up engine: %d", err);
			goto unpin_job;
		}
	}

	job->user_data = job_data;
	job->release = release_job;
	job->timeout = 10000;

	/*
	 * job_data is now part of job reference counting, so don't release
	 * it from here.
	 */
	job_data = NULL;

	/* Submit job to hardware. */
	err = host1x_job_submit(job);
	if (err) {
		SUBMIT_ERR(context, "host1x job submission failed: %d", err);
		goto unpin_job;
	}

	/* Return postfences to userspace and add fences to DMA reservations. */
	args->syncpt.value = job->syncpt_end;

	if (syncobj) {
		struct dma_fence *fence = host1x_fence_create(job->syncpt, job->syncpt_end);
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			SUBMIT_ERR(context, "failed to create postfence: %d", err);
		}

		drm_syncobj_replace_fence(syncobj, fence);
	}

	goto put_job;

unpin_job:
	host1x_job_unpin(job);
put_job:
	host1x_job_put(job);
free_job_data:
	if (job_data && job_data->used_mappings) {
		for (i = 0; i < job_data->num_used_mappings; i++)
			tegra_drm_mapping_put(job_data->used_mappings[i].mapping);

		kfree(job_data->used_mappings);
	}

	if (job_data)
		kfree(job_data);
put_bo:
	gather_bo_put(&bo->base);
unlock:
	if (syncobj)
		drm_syncobj_put(syncobj);

	mutex_unlock(&fpriv->lock);
	return err;
}
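
Aside (not part of the patch): the relocation patching in submit_write_reloc() reduces to a single shift-and-store. A worked example with made-up values, assuming the SECTOR_LAYOUT flag is not set:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up values standing in for mapping->iova, buf->reloc.target_offset
	 * and buf->reloc.shift. */
	uint64_t iova = 0x10000000ull + 0x1000;		/* mapping->iova + target_offset */
	uint32_t written = (uint32_t)(iova >> 8);	/* buf->reloc.shift == 8 */

	/* 0x00100010 is the word the kernel stores into the gather at
	 * index buf->reloc.gather_offset_words. */
	printf("0x%08x\n", written);
	return 0;
}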
drivers/gpu/drm/tegra/submit.h (+17, new file)

/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2020 NVIDIA Corporation */

#ifndef _TEGRA_DRM_UAPI_SUBMIT_H
#define _TEGRA_DRM_UAPI_SUBMIT_H

struct tegra_drm_used_mapping {
	struct tegra_drm_mapping *mapping;
	u32 flags;
};

struct tegra_drm_submit_data {
	struct tegra_drm_used_mapping *used_mappings;
	u32 num_used_mappings;
};

#endif