// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/debugfs.h>
#include <linux/of_address.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
 * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
 * - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
 * - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA
 * - 1.13.0 - Add VM_BIND
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	13
#define MSM_VERSION_PATCHLEVEL	0
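
/*
 * For reference, userspace reads this version through the core DRM version
 * ioctl. A minimal sketch using libdrm (illustrative only; assumes a render
 * node at /dev/dri/renderD128):
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	drmVersionPtr ver = drmGetVersion(fd);
 *	printf("%s v%d.%d.%d\n", ver->name, ver->version_major,
 *	       ver->version_minor, ver->version_patchlevel);
 *	drmFreeVersion(ver);
 */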

static bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

static bool separate_gpu_kms;
MODULE_PARM_DESC(separate_gpu_kms, "Use separate DRM device for the GPU (0=single DRM device for both GPU and display (default), 1=two DRM devices)");
module_param(separate_gpu_kms, bool, 0400);
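
/*
 * These knobs are standard module parameters: they show up under
 * /sys/module/msm/parameters/ and, with the driver built in, can be set on
 * the kernel command line. An illustrative example (not a recommendation):
 *
 *	msm.dumpstate=1 msm.modeset=1
 *
 * For a loadable module the equivalent is "modprobe msm dumpstate=1".
 */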

#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
#endif

bool msm_gpu_no_components(void)
{
	return separate_gpu_kms;
}

static int msm_drm_uninit(struct device *dev, const struct component_ops *gpu_ops)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		if (priv->kms)
			msm_drm_kms_unregister(dev);
	}

	msm_gem_shrinker_cleanup(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

	if (priv->kms)
		msm_drm_kms_uninit(dev);

	if (gpu_ops)
		gpu_ops->unbind(dev, dev, NULL);
	else
		component_unbind_all(dev, ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	return 0;
}

static int msm_drm_init(struct device *dev, const struct drm_driver *drv,
			const struct component_ops *gpu_ops)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev;
	int ret;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}
	ddev->dev_private = priv;
	priv->dev = ddev;

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	/*
	 * Initialize the LRUs:
	 */
	mutex_init(&priv->lru.lock);
	drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.pinned, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);

	/* Initialize stall-on-fault */
	spin_lock_init(&priv->fault_stall_lock);
	priv->stall_enabled = true;

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->lru.lock);
	fs_reclaim_release(GFP_KERNEL);

	if (priv->kms_init) {
		ret = drmm_mode_config_init(ddev);
		if (ret)
			goto err_put_dev;
	}

	dma_set_max_seg_size(dev, UINT_MAX);

	/* Bind all our sub-components: */
	if (gpu_ops)
		ret = gpu_ops->bind(dev, dev, NULL);
	else
		ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_put_dev;

	ret = msm_gem_shrinker_init(ddev);
	if (ret)
		goto err_msm_uninit;

	if (priv->kms_init) {
		ret = msm_drm_kms_init(dev, drv);
		if (ret)
			goto err_msm_uninit;
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	if (priv->kms_init)
		msm_drm_kms_post_init(dev);

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev, gpu_ops);

	return ret;

err_put_dev:
	drm_dev_put(ddev);

	return ret;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

/**
 * msm_context_vm - lazily create the context's VM
 *
 * @dev: the drm device
 * @ctx: the context
 *
 * The VM is lazily created, so that userspace has a chance to opt-in to having
 * a userspace managed VM before the VM is created.
 *
 * Note that this does not return a reference to the VM. Once the VM is created,
 * it exists for the lifetime of the context.
 */
struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	/* Once ctx->vm is created it is valid for the lifetime of the context: */
	if (ctx->vm)
		return ctx->vm;

	mutex_lock(&init_lock);
	if (!ctx->vm) {
		ctx->vm = msm_gpu_create_private_vm(
			priv->gpu, current, !ctx->userspace_managed_vm);
	}
	mutex_unlock(&init_lock);

	return ctx->vm;
}

static int context_init(struct drm_device *dev, struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->submitqueues);
	rwlock_init(&ctx->queuelock);

	kref_init(&ctx->ref);
	msm_submitqueue_init(dev, ctx);

	file->driver_priv = ctx;

	ctx->seqno = atomic_inc_return(&ident);

	return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_context *ctx)
{
	ctx->closed = true;
	msm_submitqueue_close(ctx);
	msm_context_put(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_context *ctx = file->driver_priv;

	/*
	 * It is not possible to set sysprof param to non-zero if gpu
	 * is not initialized:
	 */
	if (priv->gpu)
		msm_context_set_sysprof(ctx, priv->gpu, 0);

	context_close(ctx);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, file->driver_priv,
			args->param, &args->value, &args->len);
}
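
/*
 * An illustrative userspace call for the ioctl above (field layout per
 * include/uapi/drm/msm_drm.h; error handling omitted):
 *
 *	struct drm_msm_param req = {
 *		.pipe = MSM_PIPE_3D0,
 *		.param = MSM_PARAM_GPU_ID,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MSM_GET_PARAM, &req);
 *	// req.value now holds the queried parameter
 */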

static int msm_ioctl_set_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->set_param(gpu, file->driver_priv,
			args->param, args->value, args->len);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	uint32_t flags = args->flags;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Uncached CPU mappings are deprecated, as of:
	 *
	 * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
	 *
	 * So promote them to WC.
	 */
	if (flags & MSM_BO_UNCACHED) {
		flags &= ~MSM_BO_CACHED;
		flags |= MSM_BO_WC;
	}

	if (should_fail(&fail_gem_alloc, args->size))
		return -ENOMEM;

	return msm_gem_new_handle(dev, file, args->size,
			flags, &args->handle, NULL);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_context *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	if (msm_context_is_vmbind(ctx))
		return UERR(EINVAL, dev, "VM_BIND is enabled");

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, msm_context_vm(dev, ctx), iova);
}

static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_context *ctx = file->driver_priv;
	struct drm_gpuvm *vm = msm_context_vm(dev, ctx);

	if (!priv->gpu)
		return -EINVAL;

	if (msm_context_is_vmbind(ctx))
		return UERR(EINVAL, dev, "VM_BIND is enabled");

	/* Only supported if per-process address space is supported: */
	if (priv->gpu->vm == vm)
		return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	return msm_gem_set_iova(obj, vm, iova);
}

static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
					   __user void *metadata,
					   u32 metadata_size)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	void *new_metadata;
	void *buf;
	int ret;

	/* Impose a moderate upper bound on metadata size: */
	if (metadata_size > 128)
		return -EOVERFLOW;

	/* Use a temporary buf to keep copy_from_user() outside of gem obj lock: */
	buf = memdup_user(metadata, metadata_size);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = msm_gem_lock_interruptible(obj);
	if (ret)
		goto out;

	new_metadata =
		krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);
	if (!new_metadata) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	msm_obj->metadata = new_metadata;
	msm_obj->metadata_size = metadata_size;
	memcpy(msm_obj->metadata, buf, metadata_size);

out_unlock:
	msm_gem_unlock(obj);
out:
	kfree(buf);

	return ret;
}

static int msm_ioctl_gem_info_get_metadata(struct drm_gem_object *obj,
					   __user void *metadata,
					   u32 *metadata_size)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	void *buf;
	int ret, len;

	if (!metadata) {
		/*
		 * Querying the size is inherently racy, but
		 * EXT_external_objects expects the app to confirm
		 * via device and driver UUIDs that the exporter and
		 * importer versions match. All we can do from the
		 * kernel side is check the length under the obj lock
		 * when userspace tries to retrieve the metadata.
		 */
		*metadata_size = msm_obj->metadata_size;
		return 0;
	}

	ret = msm_gem_lock_interruptible(obj);
	if (ret)
		return ret;

	/* Avoid copy_to_user() under gem obj lock: */
	len = msm_obj->metadata_size;
	buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL);
	if (!buf) {
		msm_gem_unlock(obj);
		return -ENOMEM;
	}

	msm_gem_unlock(obj);

	if (*metadata_size < len) {
		ret = -ETOOSMALL;
	} else if (copy_to_user(metadata, buf, len)) {
		ret = -EFAULT;
	} else {
		*metadata_size = len;
	}

	kfree(buf);

	return ret;
}
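
/*
 * The intended usage is a two-step query, as the comment above describes:
 * pass a NULL pointer first to learn the size, then call again with a
 * buffer of at least that size. A sketch (illustrative, error handling
 * omitted):
 *
 *	struct drm_msm_gem_info info = {
 *		.handle = handle,
 *		.info = MSM_INFO_GET_METADATA,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &info);  // info.len = size
 *	info.value = (uintptr_t)malloc(info.len);
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &info);  // fills the buffer
 */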

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
	case MSM_INFO_SET_IOVA:
	case MSM_INFO_GET_FLAGS:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
	case MSM_INFO_SET_METADATA:
	case MSM_INFO_GET_METADATA:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		ret = drm_gem_create_mmap_offset(obj);
		if (ret == 0)
			args->value = drm_vma_node_offset_addr(&obj->vma_node);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
		break;
	case MSM_INFO_SET_IOVA:
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
		break;
	case MSM_INFO_GET_FLAGS:
		if (drm_gem_is_imported(obj)) {
			ret = -EINVAL;
			break;
		}
		/* Hide internal kernel-only flags: */
		args->value = to_msm_bo(obj)->flags & MSM_BO_FLAGS;
		ret = 0;
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -ETOOSMALL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	case MSM_INFO_SET_METADATA:
		ret = msm_ioctl_gem_info_set_metadata(
			obj, u64_to_user_ptr(args->value), args->len);
		break;
	case MSM_INFO_GET_METADATA:
		ret = msm_ioctl_gem_info_get_metadata(
			obj, u64_to_user_ptr(args->value), &args->len);
		break;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
		ktime_t timeout, uint32_t flags)
{
	struct dma_fence *fence;
	int ret;

	if (fence_after(fence_id, queue->last_fence)) {
		DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
				fence_id, queue->last_fence);
		return -EINVAL;
	}

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	spin_lock(&queue->idr_lock);
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	spin_unlock(&queue->idr_lock);

	if (!fence)
		return 0;

	if (flags & MSM_WAIT_FENCE_BOOST)
		dma_fence_set_deadline(fence, ktime_get());

	ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		ret = 0;
	}

	dma_fence_put(fence);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	struct msm_gpu_submitqueue *queue;
	int ret;

	if (args->flags & ~MSM_WAIT_FENCE_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = wait_fence(queue, args->fence, to_ktime(args->timeout), args->flags);

	msm_submitqueue_put(queue);

	return ret;
}
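
/*
 * An illustrative wait from userspace. Note that the timeout is an
 * absolute CLOCK_MONOTONIC time (see timeout_to_jiffies()); a sketch,
 * assuming "ts" was just filled by clock_gettime(CLOCK_MONOTONIC, &ts):
 *
 *	struct drm_msm_wait_fence wait = {
 *		.fence = fence_id,
 *		.queueid = queue_id,
 *		.timeout = { .tv_sec = ts.tv_sec + 1, .tv_nsec = ts.tv_nsec },
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MSM_WAIT_FENCE, &wait);
 */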

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
			args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SET_PARAM, msm_ioctl_set_param, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_VM_BIND, msm_ioctl_vm_bind, DRM_RENDER_ALLOW),
};

static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_device *dev = file->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->gpu)
		return;

	msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, p);

	drm_show_memory_stats(p, file);
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = drm_show_fdinfo,
};

#define DRIVER_FEATURES_GPU ( \
		DRIVER_GEM | \
		DRIVER_GEM_GPUVA | \
		DRIVER_RENDER | \
		DRIVER_SYNCOBJ | \
		DRIVER_SYNCOBJ_TIMELINE | \
		0 )

#define DRIVER_FEATURES_KMS ( \
		DRIVER_GEM | \
		DRIVER_GEM_GPUVA | \
		DRIVER_ATOMIC | \
		DRIVER_MODESET | \
		0 )

static const struct drm_driver msm_driver = {
	.driver_features = DRIVER_FEATURES_GPU | DRIVER_FEATURES_KMS,
	.open = msm_open,
	.postclose = msm_postclose,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = drm_gem_dumb_map_offset,
	.gem_prime_import = msm_gem_prime_import,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
#endif
	MSM_FBDEV_DRIVER_OPS,
	.show_fdinfo = msm_show_fdinfo,
	.ioctls = msm_ioctls,
	.num_ioctls = ARRAY_SIZE(msm_ioctls),
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.major = MSM_VERSION_MAJOR,
	.minor = MSM_VERSION_MINOR,
	.patchlevel = MSM_VERSION_PATCHLEVEL,
};

static const struct drm_driver msm_kms_driver = {
	.driver_features = DRIVER_FEATURES_KMS,
	.open = msm_open,
	.postclose = msm_postclose,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = drm_gem_dumb_map_offset,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
#endif
	MSM_FBDEV_DRIVER_OPS,
	.show_fdinfo = msm_show_fdinfo,
	.fops = &fops,
	.name = "msm-kms",
	.desc = "MSM Snapdragon DRM",
	.major = MSM_VERSION_MAJOR,
	.minor = MSM_VERSION_MINOR,
	.patchlevel = MSM_VERSION_PATCHLEVEL,
};

static const struct drm_driver msm_gpu_driver = {
	.driver_features = DRIVER_FEATURES_GPU,
	.open = msm_open,
	.postclose = msm_postclose,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
#endif
	.show_fdinfo = msm_show_fdinfo,
	.ioctls = msm_ioctls,
	.num_ioctls = ARRAY_SIZE(msm_ioctls),
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.major = MSM_VERSION_MAJOR,
	.minor = MSM_VERSION_MINOR,
	.patchlevel = MSM_VERSION_PATCHLEVEL,
};

/*
 * Componentized driver support:
 */

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_mdp_components(struct device *master_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = master_dev->of_node;
	struct device_node *ep_node;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add.
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   component_compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}
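
/*
 * For example, with a (hypothetical) DT fragment like the following, the
 * node behind &dsi_in would be added as a component:
 *
 *	mdp {
 *		ports {
 *			port@0 {
 *				mdp_out: endpoint {
 *					remote-endpoint = <&dsi_in>;
 *				};
 *			};
 *		};
 *	};
 */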

#if !IS_REACHABLE(CONFIG_DRM_MSM_MDP5) || !IS_REACHABLE(CONFIG_DRM_MSM_DPU)
bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver)
{
	/* If just a single driver is enabled, use it no matter what */
	return true;
}
#else

static bool prefer_mdp5 = true;
MODULE_PARM_DESC(prefer_mdp5, "Select whether MDP5 or DPU driver should be preferred");
module_param(prefer_mdp5, bool, 0444);

/* list all platforms that have been migrated from mdp5 to dpu driver */
static const char *const msm_mdp5_dpu_migrated[] = {
	/* there never was qcom,msm8998-mdp5 */
	"qcom,sdm630-mdp5",
	"qcom,sdm660-mdp5",
	NULL,
};

/* list all platforms supported by both mdp5 and dpu drivers */
static const char *const msm_mdp5_dpu_migration[] = {
	"qcom,msm8917-mdp5",
	"qcom,msm8937-mdp5",
	"qcom,msm8953-mdp5",
	"qcom,msm8996-mdp5",
	NULL,
};

bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver)
{
	/* If it is not an MDP5 device, use DPU */
	if (!of_device_is_compatible(dev->of_node, "qcom,mdp5"))
		return dpu_driver;

	/* If it is no longer supported by MDP5, use DPU */
	if (of_device_compatible_match(dev->of_node, msm_mdp5_dpu_migrated))
		return dpu_driver;

	/* If it is not in the migration list, use MDP5 */
	if (!of_device_compatible_match(dev->of_node, msm_mdp5_dpu_migration))
		return !dpu_driver;

	return prefer_mdp5 ? !dpu_driver : dpu_driver;
}
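
/*
 * For example, booting with "msm.prefer_mdp5=0" on an msm8996-class device
 * (listed in msm_mdp5_dpu_migration above) lets the DPU driver bind instead
 * of MDP5, assuming both drivers are enabled in the kernel config.
 */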
#endif

/*
 * We don't know what the best binding is to link the GPU with the DRM device.
 * For now, we just hunt for all the possible GPUs that we support, and add
 * them as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np) && adreno_has_gpu(np))
		drm_of_component_match_add(dev, matchptr, component_compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev,
			    msm_gpu_no_components() ?
				&msm_kms_driver :
				&msm_driver,
			    NULL);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev, NULL);
}

const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

int msm_drv_probe(struct device *master_dev,
		  int (*kms_init)(struct drm_device *dev),
		  struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct component_match *match = NULL;
	int ret;

	priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->kms = kms;
	priv->kms_init = kms_init;
	dev_set_drvdata(master_dev, priv);

	/* Add mdp components if we have KMS. */
	if (kms_init) {
		ret = add_mdp_components(master_dev, &match);
		if (ret)
			return ret;
	}

	if (!msm_gpu_no_components()) {
		ret = add_gpu_components(master_dev, &match);
		if (ret)
			return ret;
	}

	/* On all devices that I am aware of, IOMMUs which can map
	 * any address the CPU can see are used:
	 */
	ret = dma_set_mask_and_coherent(master_dev, ~0);
	if (ret)
		return ret;

	ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
	if (ret)
		return ret;

	return 0;
}

int msm_gpu_probe(struct platform_device *pdev,
		  const struct component_ops *ops)
{
	struct msm_drm_private *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	/* On all devices that I am aware of, IOMMUs which can map
	 * any address the CPU can see are used:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		return ret;

	return msm_drm_init(&pdev->dev, &msm_gpu_driver, ops);
}

void msm_gpu_remove(struct platform_device *pdev,
		    const struct component_ops *ops)
{
	msm_drm_uninit(&pdev->dev, ops);
}

static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_hdmi_register();
	msm_dp_register();
	adreno_register();
	msm_mdp4_register();
	msm_mdss_register();

	return 0;
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	msm_mdss_unregister();
	msm_mdp4_unregister();
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");