1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
6
7#include <linux/dma-map-ops.h>
8#include <linux/vmalloc.h>
9#include <linux/spinlock.h>
10#include <linux/shmem_fs.h>
11#include <linux/dma-buf.h>
12
13#include <drm/drm_dumb_buffers.h>
14#include <drm/drm_prime.h>
15#include <drm/drm_file.h>
16#include <drm/drm_fourcc.h>
17
18#include <trace/events/gpu_mem.h>
19
20#include "msm_drv.h"
21#include "msm_gem.h"
22#include "msm_gpu.h"
23#include "msm_kms.h"
24
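/*
 * Driver-wide memory accounting: track the total GPU memory backed by this
 * driver and report it through the gpu_mem_total tracepoint.  A pid of 0
 * denotes the device-wide total, as opposed to the per-context totals
 * reported by update_ctx_mem() below.
 */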
25static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
26{
27 uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
28 trace_gpu_mem_total(0, 0, total_mem);
29}
30
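/*
 * Per-context (per drm_file) memory accounting, reported through the same
 * gpu_mem_total tracepoint but keyed by the pid owning the file.
 */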
31static void update_ctx_mem(struct drm_file *file, ssize_t size)
32{
33 struct msm_context *ctx = file->driver_priv;
34 uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
35
36 rcu_read_lock(); /* Locks file->pid! */
37 trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
38 rcu_read_unlock();
39
40}
41
42static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
43{
44 msm_gem_vma_get(obj);
45 update_ctx_mem(file, obj->size);
46 return 0;
47}
48
49static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
50 bool close, const char *reason);
51
52static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
53{
54 struct msm_context *ctx = file->driver_priv;
55 struct drm_exec exec;
56
57 update_ctx_mem(file, -obj->size);
58 msm_gem_vma_put(obj);
59
60 /*
61 * If VM isn't created yet, nothing to cleanup. And in fact calling
62 * put_iova_spaces() with vm=NULL would be bad, in that it will tear-
63 * down the mappings of shared buffers in other contexts.
64 */
65 if (!ctx->vm)
66 return;
67
68 /*
69 * VM_BIND does not depend on implicit teardown of VMAs on handle
70 * close, but instead on implicit teardown of the VM when the device
71 * is closed (see msm_gem_vm_close())
72 */
73 if (msm_context_is_vmbind(ctx))
74 return;
75
76 /*
77 * TODO we might need to kick this to a queue to avoid blocking
78 * in CLOSE ioctl
79 */
80 dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_BOOKKEEP, false,
81 MAX_SCHEDULE_TIMEOUT);
82
83 msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
84 put_iova_spaces(obj, ctx->vm, true, "close");
85 drm_exec_fini(&exec); /* drop locks */
86}
87
88/*
89 * Get/put for kms->vm VMA
90 */
91
92void msm_gem_vma_get(struct drm_gem_object *obj)
93{
94 atomic_inc(&to_msm_bo(obj)->vma_ref);
95}
96
97void msm_gem_vma_put(struct drm_gem_object *obj)
98{
99 struct msm_drm_private *priv = obj->dev->dev_private;
100
101 if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
102 return;
103
104 if (!priv->kms)
105 return;
106
107#ifdef CONFIG_DRM_MSM_KMS
108 struct drm_exec exec;
109
110 msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
111 put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
112 drm_exec_fini(&exec); /* drop locks */
113#endif
114}
115
116/*
117 * Cache sync: this is a bit over-complicated, to fit the dma-mapping
118 * API. Really the GPU cache is out of scope here (handled on the cmdstream)
119 * and all we need to do is invalidate newly allocated pages before
120 * mapping to CPU as uncached/writecombine.
121 *
122 * On top of this, we have the added headache, that depending on
123 * display generation, the display's iommu may be wired up to either
124 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
125 * that here we either have dma-direct or iommu ops.
126 *
127 * Let this be a cautionary tale of abstraction gone wrong.
128 */
129
130static void sync_for_device(struct msm_gem_object *msm_obj)
131{
132 struct device *dev = msm_obj->base.dev->dev;
133
134 dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
135}
136
137static void sync_for_cpu(struct msm_gem_object *msm_obj)
138{
139 struct device *dev = msm_obj->base.dev->dev;
140
141 dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
142}
143
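/*
 * Move the object to the LRU list matching its current state: pinned,
 * willneed, or dontneed.  Caller must hold lru.lock and the object must
 * have backing pages.
 */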
144static void update_lru_active(struct drm_gem_object *obj)
145{
146 struct msm_drm_private *priv = obj->dev->dev_private;
147 struct msm_gem_object *msm_obj = to_msm_bo(obj);
148
149 GEM_WARN_ON(!msm_obj->pages);
150
151 if (msm_obj->pin_count) {
152 drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
153 } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
154 drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
155 } else {
156 GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
157
158 drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
159 }
160}
161
162static void update_lru_locked(struct drm_gem_object *obj)
163{
164 struct msm_drm_private *priv = obj->dev->dev_private;
165 struct msm_gem_object *msm_obj = to_msm_bo(obj);
166
167 msm_gem_assert_locked(&msm_obj->base);
168
169 if (!msm_obj->pages) {
170 GEM_WARN_ON(msm_obj->pin_count);
171
172 drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
173 } else {
174 update_lru_active(obj);
175 }
176}
177
178static void update_lru(struct drm_gem_object *obj)
179{
180 struct msm_drm_private *priv = obj->dev->dev_private;
181
182 mutex_lock(&priv->lru.lock);
183 update_lru_locked(obj);
184 mutex_unlock(&priv->lru.lock);
185}
186
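/*
 * Allocate shmem backing pages (if not already present) and build the
 * sg_table used for device mappings.  Called with the object lock held.
 */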
187static struct page **get_pages(struct drm_gem_object *obj)
188{
189 struct msm_gem_object *msm_obj = to_msm_bo(obj);
190
191 msm_gem_assert_locked(obj);
192
193 if (!msm_obj->pages) {
194 struct drm_device *dev = obj->dev;
195 struct page **p;
196 size_t npages = obj->size >> PAGE_SHIFT;
197
198 p = drm_gem_get_pages(obj);
199
200 if (IS_ERR(p)) {
201 DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
202 PTR_ERR(p));
203 return p;
204 }
205
206 update_device_mem(dev->dev_private, obj->size);
207
208 msm_obj->pages = p;
209
210 msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
211 if (IS_ERR(msm_obj->sgt)) {
212 void *ptr = ERR_CAST(msm_obj->sgt);
213
214 DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
215 msm_obj->sgt = NULL;
216 return ptr;
217 }
218
219 /* For non-cached buffers, ensure the new pages are clean
220 * because display controller, GPU, etc. are not coherent:
221 */
222 if (msm_obj->flags & MSM_BO_WC)
223 sync_for_device(msm_obj);
224
225 update_lru(obj);
226 }
227
228 return msm_obj->pages;
229}
230
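/*
 * Drop the backing pages and sg_table.  WC buffers are synced back for the
 * CPU first, so the dma-mapping bookkeeping done in get_pages() stays
 * balanced.
 */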
231static void put_pages(struct drm_gem_object *obj)
232{
233 struct msm_gem_object *msm_obj = to_msm_bo(obj);
234
235 /*
236 * Skip gpuvm in the object free path to avoid a WARN_ON() splat.
237 * See explanation in msm_gem_assert_locked()
238 */
239 if (kref_read(&obj->refcount))
240 drm_gpuvm_bo_gem_evict(obj, true);
241
242 if (msm_obj->pages) {
243 if (msm_obj->sgt) {
244 /* For non-cached buffers, ensure the new
245 * pages are clean because display controller,
246 * GPU, etc. are not coherent:
247 */
248 if (msm_obj->flags & MSM_BO_WC)
249 sync_for_cpu(msm_obj);
250
251 sg_free_table(msm_obj->sgt);
252 kfree(msm_obj->sgt);
253 msm_obj->sgt = NULL;
254 }
255
256 update_device_mem(obj->dev->dev_private, -obj->size);
257
258 drm_gem_put_pages(obj, msm_obj->pages, true, false);
259
260 msm_obj->pages = NULL;
261 update_lru(obj);
262 }
263}
264
265struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv)
266{
267 struct msm_gem_object *msm_obj = to_msm_bo(obj);
268
269 msm_gem_assert_locked(obj);
270
271 if (msm_obj->madv > madv) {
272 DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
273 msm_obj->madv, madv);
274 return ERR_PTR(-EBUSY);
275 }
276
277 return get_pages(obj);
278}
279
280/*
281 * Update the pin count of the object, call under lru.lock
282 */
283void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
284{
285 struct msm_drm_private *priv = obj->dev->dev_private;
286
287 msm_gem_assert_locked(obj);
288
289 to_msm_bo(obj)->pin_count++;
290 drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
291}
292
293static void pin_obj_locked(struct drm_gem_object *obj)
294{
295 struct msm_drm_private *priv = obj->dev->dev_private;
296
297 mutex_lock(&priv->lru.lock);
298 msm_gem_pin_obj_locked(obj);
299 mutex_unlock(&priv->lru.lock);
300}
301
302struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
303{
304 struct page **p;
305
306 msm_gem_assert_locked(obj);
307
308 p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
309 if (!IS_ERR(p))
310 pin_obj_locked(obj);
311
312 return p;
313}
314
315void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
316{
317 msm_gem_assert_locked(obj);
318
319 msm_gem_unpin_locked(obj);
320}
321
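/*
 * Pick the page protection for CPU mappings of the BO (writecombine for
 * MSM_BO_WC, otherwise the protection passed in).
 */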
322static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
323{
324 if (msm_obj->flags & MSM_BO_WC)
325 return pgprot_writecombine(prot);
326 return prot;
327}
328
329static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
330{
331 struct vm_area_struct *vma = vmf->vma;
332 struct drm_gem_object *obj = vma->vm_private_data;
333 struct msm_gem_object *msm_obj = to_msm_bo(obj);
334 struct page **pages;
335 unsigned long pfn;
336 pgoff_t pgoff;
337 int err;
338 vm_fault_t ret;
339
340 /*
341 * vm_ops.open/drm_gem_mmap_obj and close get and put
342 * a reference on obj. So, we don't need to hold one here.
343 */
344 err = msm_gem_lock_interruptible(obj);
345 if (err) {
346 ret = VM_FAULT_NOPAGE;
347 goto out;
348 }
349
350 if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
351 msm_gem_unlock(obj);
352 return VM_FAULT_SIGBUS;
353 }
354
355 /* make sure we have pages attached now */
356 pages = get_pages(obj);
357 if (IS_ERR(pages)) {
358 ret = vmf_error(PTR_ERR(pages));
359 goto out_unlock;
360 }
361
362 /* We don't use vmf->pgoff since that has the fake offset: */
363 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
364
365 pfn = page_to_pfn(pages[pgoff]);
366
367 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
368 pfn, pfn << PAGE_SHIFT);
369
370 ret = vmf_insert_pfn(vma, vmf->address, pfn);
371
372out_unlock:
373 msm_gem_unlock(obj);
374out:
375 return ret;
376}
377
378
379static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj,
380 struct drm_gpuvm *vm)
381{
382 struct drm_gpuvm_bo *vm_bo;
383
384 msm_gem_assert_locked(obj);
385
386 drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
387 struct drm_gpuva *vma;
388
389 drm_gpuvm_bo_for_each_va (vma, vm_bo) {
390 if (vma->vm == vm) {
391 /* lookup_vma() should only be used in paths
392 * with at most one vma per vm
393 */
394 GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));
395
396 return vma;
397 }
398 }
399 }
400
401 return NULL;
402}
403
404/*
405 * If close is true, this also closes the VMA (releasing the allocated
406 * iova range) in addition to removing the iommu mapping. In the eviction
407 * case (!close), we keep the iova allocated, but only remove the iommu
408 * mapping.
409 */
410static void
411put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
412 bool close, const char *reason)
413{
414 struct drm_gpuvm_bo *vm_bo, *tmp;
415
416 msm_gem_assert_locked(obj);
417
418 drm_gem_for_each_gpuvm_bo_safe (vm_bo, tmp, obj) {
419 struct drm_gpuva *vma, *vmatmp;
420
421 if (vm && vm_bo->vm != vm)
422 continue;
423
424 drm_gpuvm_bo_get(vm_bo);
425
426 drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
427 msm_gem_vma_unmap(vma, reason);
428 if (close)
429 msm_gem_vma_close(vma);
430 }
431
432 drm_gpuvm_bo_put(vm_bo);
433 }
434}
435
436static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
437 struct drm_gpuvm *vm, u64 range_start,
438 u64 range_end)
439{
440 struct drm_gpuva *vma;
441
442 msm_gem_assert_locked(obj);
443
444 vma = lookup_vma(obj, vm);
445
446 if (!vma) {
447 vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
448 } else {
449 GEM_WARN_ON(vma->va.addr < range_start);
450 GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
451 }
452
453 return vma;
454}
455
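/* Translate BO flags into the IOMMU prot bits used when mapping the buffer. */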
456int msm_gem_prot(struct drm_gem_object *obj)
457{
458 struct msm_gem_object *msm_obj = to_msm_bo(obj);
459 int prot = IOMMU_READ;
460
461 if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
462 prot |= IOMMU_WRITE;
463
464 if (msm_obj->flags & MSM_BO_MAP_PRIV)
465 prot |= IOMMU_PRIV;
466
467 if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
468 prot |= IOMMU_CACHE;
469
470 return prot;
471}
472
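/*
 * Pin the backing pages and map them into @vma.  Caller holds the object
 * lock; the object's pin count is updated separately by the callers.
 */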
473int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
474{
475 struct msm_gem_object *msm_obj = to_msm_bo(obj);
476 struct page **pages;
477 int prot = msm_gem_prot(obj);
478
479 msm_gem_assert_locked(obj);
480
481 pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
482 if (IS_ERR(pages))
483 return PTR_ERR(pages);
484
485 return msm_gem_vma_map(vma, prot, msm_obj->sgt);
486}
487
488void msm_gem_unpin_locked(struct drm_gem_object *obj)
489{
490 struct msm_drm_private *priv = obj->dev->dev_private;
491 struct msm_gem_object *msm_obj = to_msm_bo(obj);
492
493 msm_gem_assert_locked(obj);
494
495 mutex_lock(&priv->lru.lock);
496 msm_obj->pin_count--;
497 GEM_WARN_ON(msm_obj->pin_count < 0);
498 update_lru_locked(obj);
499 mutex_unlock(&priv->lru.lock);
500}
501
502/* Special unpin path for use in the fence-signaling path, avoiding the need
503 * to hold the obj lock by only depending on things that are protected by
504 * the LRU lock. In particular we know that we already have backing pages
505 * and that the object's dma_resv has the fence for the current
506 * submit/job, which will prevent us racing against page eviction.
507 */
508void msm_gem_unpin_active(struct drm_gem_object *obj)
509{
510 struct msm_drm_private *priv = obj->dev->dev_private;
511 struct msm_gem_object *msm_obj = to_msm_bo(obj);
512
513 GEM_WARN_ON(!mutex_is_locked(&priv->lru.lock));
514
515 msm_obj->pin_count--;
516 GEM_WARN_ON(msm_obj->pin_count < 0);
517 update_lru_active(obj);
518}
519
520struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
521 struct drm_gpuvm *vm)
522{
523 return get_vma_locked(obj, vm, 0, U64_MAX);
524}
525
526static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
527 struct drm_gpuvm *vm, uint64_t *iova,
528 u64 range_start, u64 range_end)
529{
530 struct drm_gpuva *vma;
531 int ret;
532
533 msm_gem_assert_locked(obj);
534
535 if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
536 return -EINVAL;
537
538 vma = get_vma_locked(obj, vm, range_start, range_end);
539 if (IS_ERR(vma))
540 return PTR_ERR(vma);
541
542 ret = msm_gem_pin_vma_locked(obj, vma);
543 if (!ret) {
544 *iova = vma->va.addr;
545 pin_obj_locked(obj);
546 }
547
548 return ret;
549}
550
551/*
552 * Get the iova and pin it. Should have a matching put.
553 * Limits the iova to the specified address range.
554 */
555int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
556 struct drm_gpuvm *vm, uint64_t *iova,
557 u64 range_start, u64 range_end)
558{
559 struct drm_exec exec;
560 int ret;
561
562 msm_gem_lock_vm_and_obj(&exec, obj, vm);
563 ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);
564 drm_exec_fini(&exec); /* drop locks */
565
566 return ret;
567}
568
569/* get iova and pin it. Should have a matching put */
570int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
571 uint64_t *iova)
572{
573 return msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX);
574}
575
576/*
577 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
578 * valid for the life of the object
579 */
580int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
581 uint64_t *iova)
582{
583 struct drm_gpuva *vma;
584 struct drm_exec exec;
585 int ret = 0;
586
587 msm_gem_lock_vm_and_obj(&exec, obj, vm);
588 vma = get_vma_locked(obj, vm, 0, U64_MAX);
589 if (IS_ERR(vma)) {
590 ret = PTR_ERR(vma);
591 } else {
592 *iova = vma->va.addr;
593 }
594 drm_exec_fini(&exec); /* drop locks */
595
596 return ret;
597}
598
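/* Tear down the mapping and close the VMA for @vm, if one exists. */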
599static int clear_iova(struct drm_gem_object *obj,
600 struct drm_gpuvm *vm)
601{
602 struct drm_gpuva *vma = lookup_vma(obj, vm);
603
604 if (!vma)
605 return 0;
606
607 msm_gem_vma_unmap(vma, NULL);
608 msm_gem_vma_close(vma);
609
610 return 0;
611}
612
613/*
614 * Get the requested iova but don't pin it. Fails if the requested iova is
615 * not available. Doesn't need a put because iovas are currently valid for
616 * the life of the object.
617 *
618 * Setting an iova of zero will clear the vma.
619 */
620int msm_gem_set_iova(struct drm_gem_object *obj,
621 struct drm_gpuvm *vm, uint64_t iova)
622{
623 struct drm_exec exec;
624 int ret = 0;
625
626 msm_gem_lock_vm_and_obj(&exec, obj, vm);
627 if (!iova) {
628 ret = clear_iova(obj, vm);
629 } else {
630 struct drm_gpuva *vma;
631 vma = get_vma_locked(obj, vm, iova, iova + obj->size);
632 if (IS_ERR(vma)) {
633 ret = PTR_ERR(vma);
634 } else if (GEM_WARN_ON(vma->va.addr != iova)) {
635 clear_iova(obj, vm);
636 ret = -EBUSY;
637 }
638 }
639 drm_exec_fini(&exec); /* drop locks */
640
641 return ret;
642}
643
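/* True if @vm is the kernel-managed VM used by the display (KMS) side. */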
644static bool is_kms_vm(struct drm_gpuvm *vm)
645{
646#ifdef CONFIG_DRM_MSM_KMS
647 struct msm_drm_private *priv = vm->drm->dev_private;
648
649 return priv->kms && (priv->kms->vm == vm);
650#else
651 return false;
652#endif
653}
654
655/*
656 * Unpin an iova by updating the reference counts. The memory isn't actually
657 * purged until something else (shrinker, mm_notifier, destroy, etc.) decides
658 * to get rid of it.
659 */
660void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
661{
662 struct drm_gpuva *vma;
663 struct drm_exec exec;
664
665 msm_gem_lock_vm_and_obj(&exec, obj, vm);
666 vma = lookup_vma(obj, vm);
667 if (vma) {
668 msm_gem_unpin_locked(obj);
669 }
670 if (!is_kms_vm(vm))
671 put_iova_spaces(obj, vm, true, "close");
672 drm_exec_fini(&exec); /* drop locks */
673}
674
675int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
676 struct drm_mode_create_dumb *args)
677{
678 u32 fourcc;
679 u64 pitch_align;
680 int ret;
681
682 /*
683 * Adreno needs pitch aligned to 32 pixels. Compute the number
684 * of bytes for a block of 32 pixels at the given color format.
685 * Use the result as pitch alignment.
686 */
687 fourcc = drm_driver_color_mode_format(dev, args->bpp);
688 if (fourcc != DRM_FORMAT_INVALID) {
689 const struct drm_format_info *info;
690
691 info = drm_format_info(fourcc);
692 if (!info)
693 return -EINVAL;
694 pitch_align = drm_format_info_min_pitch(info, 0, 32);
695 } else {
696 pitch_align = round_up(args->width, 32) * DIV_ROUND_UP(args->bpp, SZ_8);
697 }
698 if (!pitch_align || pitch_align > U32_MAX)
699 return -EINVAL;
700 ret = drm_mode_size_dumb(dev, args, pitch_align, 0);
701 if (ret)
702 return ret;
703
704 return msm_gem_new_handle(dev, file, args->size,
705 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
706}
707
708static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
709{
710 struct msm_gem_object *msm_obj = to_msm_bo(obj);
711 struct page **pages;
712 int ret = 0;
713
714 msm_gem_assert_locked(obj);
715
716 if (drm_gem_is_imported(obj))
717 return ERR_PTR(-ENODEV);
718
719 pages = msm_gem_get_pages_locked(obj, madv);
720 if (IS_ERR(pages))
721 return ERR_CAST(pages);
722
723 pin_obj_locked(obj);
724
725 /* increment vmap_count *before* vmap() call, so shrinker can
726 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
727 * This guarantees that we won't try to msm_gem_vunmap() this
728 * same object from within the vmap() call (while we already
729 * hold msm_obj lock)
730 */
731 msm_obj->vmap_count++;
732
733 if (!msm_obj->vaddr) {
734 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
735 VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
736 if (msm_obj->vaddr == NULL) {
737 ret = -ENOMEM;
738 goto fail;
739 }
740 }
741
742 return msm_obj->vaddr;
743
744fail:
745 msm_obj->vmap_count--;
746 msm_gem_unpin_locked(obj);
747 return ERR_PTR(ret);
748}
749
750void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
751{
752 return get_vaddr(obj, MSM_MADV_WILLNEED);
753}
754
755void *msm_gem_get_vaddr(struct drm_gem_object *obj)
756{
757 void *ret;
758
759 msm_gem_lock(obj);
760 ret = msm_gem_get_vaddr_locked(obj);
761 msm_gem_unlock(obj);
762
763 return ret;
764}
765
766/*
767 * Don't use this! It is for the very special case of dumping
768 * submits from GPU hangs or faults, where the bo may already
769 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
770 * active list.
771 */
772void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
773{
774 return get_vaddr(obj, __MSM_MADV_PURGED);
775}
776
777void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
778{
779 struct msm_gem_object *msm_obj = to_msm_bo(obj);
780
781 msm_gem_assert_locked(obj);
782 GEM_WARN_ON(msm_obj->vmap_count < 1);
783
784 msm_obj->vmap_count--;
785 msm_gem_unpin_locked(obj);
786}
787
788void msm_gem_put_vaddr(struct drm_gem_object *obj)
789{
790 msm_gem_lock(obj);
791 msm_gem_put_vaddr_locked(obj);
792 msm_gem_unlock(obj);
793}
794
795/* Update madvise status, returns true if the object has not been purged,
796 * false otherwise.
797 */
798int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
799{
800 struct msm_drm_private *priv = obj->dev->dev_private;
801 struct msm_gem_object *msm_obj = to_msm_bo(obj);
802
803 msm_gem_lock(obj);
804
805 mutex_lock(&priv->lru.lock);
806
807 if (msm_obj->madv != __MSM_MADV_PURGED)
808 msm_obj->madv = madv;
809
810 madv = msm_obj->madv;
811
812 /* If the obj is inactive, we might need to move it
813 * between inactive lists
814 */
815 update_lru_locked(obj);
816
817 mutex_unlock(&priv->lru.lock);
818
819 msm_gem_unlock(obj);
820
821 return (madv != __MSM_MADV_PURGED);
822}
823
824void msm_gem_purge(struct drm_gem_object *obj)
825{
826 struct drm_device *dev = obj->dev;
827 struct msm_drm_private *priv = obj->dev->dev_private;
828 struct msm_gem_object *msm_obj = to_msm_bo(obj);
829
830 msm_gem_assert_locked(obj);
831 GEM_WARN_ON(!is_purgeable(msm_obj));
832
833 /* Get rid of any iommu mapping(s): */
834 put_iova_spaces(obj, NULL, false, "purge");
835
836 msm_gem_vunmap(obj);
837
838 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
839
840 put_pages(obj);
841
842 mutex_lock(&priv->lru.lock);
843 /* A one-way transition: */
844 msm_obj->madv = __MSM_MADV_PURGED;
845 mutex_unlock(&priv->lru.lock);
846
847 drm_gem_free_mmap_offset(obj);
848
849 /* Our goal here is to return as much of the memory as
850 * possible back to the system, as we are called from OOM.
851 * To do this we must instruct the shmfs to drop all of its
852 * backing pages, *now*.
853 */
854 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
855
856 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
857 0, (loff_t)-1);
858}
859
860/*
861 * Unpin the backing pages and make them available to be swapped out.
862 */
863void msm_gem_evict(struct drm_gem_object *obj)
864{
865 struct drm_device *dev = obj->dev;
866 struct msm_gem_object *msm_obj = to_msm_bo(obj);
867
868 msm_gem_assert_locked(obj);
869 GEM_WARN_ON(is_unevictable(msm_obj));
870
871 /* Get rid of any iommu mapping(s): */
872 put_iova_spaces(obj, NULL, false, "evict");
873
874 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
875
876 put_pages(obj);
877}
878
879void msm_gem_vunmap(struct drm_gem_object *obj)
880{
881 struct msm_gem_object *msm_obj = to_msm_bo(obj);
882
883 msm_gem_assert_locked(obj);
884
885 if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
886 return;
887
888 vunmap(msm_obj->vaddr);
889 msm_obj->vaddr = NULL;
890}
891
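/*
 * An object counts as active while it is pinned or while it still has
 * unsignalled fences in its reservation object.
 */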
892bool msm_gem_active(struct drm_gem_object *obj)
893{
894 msm_gem_assert_locked(obj);
895
896 if (to_msm_bo(obj)->pin_count)
897 return true;
898
899 return !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP);
900}
901
902int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
903{
904 bool write = !!(op & MSM_PREP_WRITE);
905 unsigned long remain =
906 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
907 long ret;
908
909 if (op & MSM_PREP_BOOST) {
910 dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
911 ktime_get());
912 }
913
914 ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
915 true, remain);
916 if (ret == 0)
917 return remain == 0 ? -EBUSY : -ETIMEDOUT;
918 else if (ret < 0)
919 return ret;
920
921 /* TODO cache maintenance */
922
923 return 0;
924}
925
926int msm_gem_cpu_fini(struct drm_gem_object *obj)
927{
928 /* TODO cache maintenance */
929 return 0;
930}
931
932#ifdef CONFIG_DEBUG_FS
933void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
934 struct msm_gem_stats *stats)
935{
936 struct msm_gem_object *msm_obj = to_msm_bo(obj);
937 struct dma_resv *robj = obj->resv;
938 uint64_t off = drm_vma_node_start(&obj->vma_node);
939 const char *madv;
940
941 if (!msm_gem_trylock(obj))
942 return;
943
944 stats->all.count++;
945 stats->all.size += obj->size;
946
947 if (msm_gem_active(obj)) {
948 stats->active.count++;
949 stats->active.size += obj->size;
950 }
951
952 if (msm_obj->pages) {
953 stats->resident.count++;
954 stats->resident.size += obj->size;
955 }
956
957 switch (msm_obj->madv) {
958 case __MSM_MADV_PURGED:
959 stats->purged.count++;
960 stats->purged.size += obj->size;
961 madv = " purged";
962 break;
963 case MSM_MADV_DONTNEED:
964 stats->purgeable.count++;
965 stats->purgeable.size += obj->size;
966 madv = " purgeable";
967 break;
968 case MSM_MADV_WILLNEED:
969 default:
970 madv = "";
971 break;
972 }
973
974 seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
975 msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
976 obj->name, kref_read(&obj->refcount),
977 off, msm_obj->vaddr);
978
979 seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
980
981 if (!list_empty(&obj->gpuva.list)) {
982 struct drm_gpuvm_bo *vm_bo;
983
984 seq_puts(m, " vmas:");
985
986 drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
987 struct drm_gpuva *vma;
988
989 drm_gpuvm_bo_for_each_va (vma, vm_bo) {
990 const char *name, *comm;
991 struct msm_gem_vm *vm = to_msm_vm(vma->vm);
992 struct task_struct *task =
993 get_pid_task(vm->pid, PIDTYPE_PID);
994 if (task) {
995 comm = kstrdup(task->comm, GFP_KERNEL);
996 put_task_struct(task);
997 } else {
998 comm = NULL;
999 }
1000 name = vm->base.name;
1001
1002 seq_printf(m, " [%s%s%s: vm=%p, %08llx, %smapped]",
1003 name, comm ? ":" : "", comm ? comm : "",
1004 vma->vm, vma->va.addr,
1005 to_msm_vma(vma)->mapped ? "" : "un");
1006 kfree(comm);
1007 }
1008 }
1009
1010 seq_puts(m, "\n");
1011 }
1012
1013 dma_resv_describe(robj, m);
1014 msm_gem_unlock(obj);
1015}
1016
1017void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1018{
1019 struct msm_gem_stats stats = {};
1020 struct msm_gem_object *msm_obj;
1021
1022 seq_puts(m, " flags id ref offset kaddr size madv name\n");
1023 list_for_each_entry(msm_obj, list, node) {
1024 struct drm_gem_object *obj = &msm_obj->base;
1025 seq_puts(m, " ");
1026 msm_gem_describe(obj, m, &stats);
1027 }
1028
1029 seq_printf(m, "Total: %4d objects, %9zu bytes\n",
1030 stats.all.count, stats.all.size);
1031 seq_printf(m, "Active: %4d objects, %9zu bytes\n",
1032 stats.active.count, stats.active.size);
1033 seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
1034 stats.resident.count, stats.resident.size);
1035 seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1036 stats.purgeable.count, stats.purgeable.size);
1037 seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
1038 stats.purged.count, stats.purged.size);
1039}
1040#endif
1041
1042/* don't call directly! Use drm_gem_object_put() */
1043static void msm_gem_free_object(struct drm_gem_object *obj)
1044{
1045 struct msm_gem_object *msm_obj = to_msm_bo(obj);
1046 struct drm_device *dev = obj->dev;
1047 struct msm_drm_private *priv = dev->dev_private;
1048 struct drm_exec exec;
1049
1050 mutex_lock(&priv->obj_lock);
1051 list_del(&msm_obj->node);
1052 mutex_unlock(&priv->obj_lock);
1053
1054 /*
1055 * We need to lock any VMs the object is still attached to, but not
1056 * the object itself (see explanation in msm_gem_assert_locked()),
1057 * so just open-code this special case.
1058 *
1059 * Note that we skip the dance if we aren't attached to any VM. This
1060 * is load bearing. The driver needs to support two usage models:
1061 *
1062 * 1. Legacy kernel managed VM: Userspace expects the VMAs to be
1063 * implicitly torn down when the object is freed; the VMAs do
1064 * not hold a hard reference to the BO.
1065 *
1066 * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the
1067 * BO. This can be dropped when the VM is closed and its associated
1068 * VMAs are torn down. (See msm_gem_vm_close()).
1069 *
1070 * In the latter case the last reference to a BO can be dropped while
1071 * we already have the VM locked. It would have already been removed
1072 * from the gpuva list, but lockdep doesn't know that, nor does it
1073 * understand the differences between the two usage models.
1074 */
1075 if (!list_empty(&obj->gpuva.list)) {
1076 drm_exec_init(&exec, 0, 0);
1077 drm_exec_until_all_locked (&exec) {
1078 struct drm_gpuvm_bo *vm_bo;
1079 drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
1080 drm_exec_lock_obj(&exec,
1081 drm_gpuvm_resv_obj(vm_bo->vm));
1082 drm_exec_retry_on_contention(&exec);
1083 }
1084 }
1085 put_iova_spaces(obj, NULL, true, "free");
1086 drm_exec_fini(&exec); /* drop locks */
1087 }
1088
1089 if (drm_gem_is_imported(obj)) {
1090 GEM_WARN_ON(msm_obj->vaddr);
1091
1092 /* Don't drop the pages for imported dmabuf, as they are not
1093 * ours, just free the array we allocated:
1094 */
1095 kvfree(msm_obj->pages);
1096
1097 drm_prime_gem_destroy(obj, msm_obj->sgt);
1098 } else {
1099 msm_gem_vunmap(obj);
1100 put_pages(obj);
1101 }
1102
1103 /*
1104 * In error paths, we could end up here before msm_gem_new_handle()
1105 * has changed obj->resv to point to the shared resv. In this case,
1106 * we don't want to drop a ref to the shared r_obj that we haven't
1107 * taken yet.
1108 */
1109 if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
1110 struct drm_gem_object *r_obj =
1111 container_of(obj->resv, struct drm_gem_object, _resv);
1112
1113 /* Drop reference we hold to shared resv obj: */
1114 drm_gem_object_put(r_obj);
1115 }
1116
1117 drm_gem_object_release(obj);
1118
1119 kfree(msm_obj->metadata);
1120 kfree(msm_obj);
1121}
1122
1123static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1124{
1125 struct msm_gem_object *msm_obj = to_msm_bo(obj);
1126
1127 vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1128 vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1129
1130 return 0;
1131}
1132
1133/* convenience method to construct a GEM buffer object, and userspace handle */
1134int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1135 size_t size, uint32_t flags, uint32_t *handle,
1136 char *name)
1137{
1138 struct drm_gem_object *obj;
1139 int ret;
1140
1141 obj = msm_gem_new(dev, size, flags);
1142
1143 if (IS_ERR(obj))
1144 return PTR_ERR(obj);
1145
1146 if (name)
1147 msm_gem_object_set_name(obj, "%s", name);
1148
1149 if (flags & MSM_BO_NO_SHARE) {
1150 struct msm_context *ctx = file->driver_priv;
1151 struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
1152
1153 drm_gem_object_get(r_obj);
1154
1155 obj->resv = r_obj->resv;
1156 }
1157
1158 ret = drm_gem_handle_create(file, obj, handle);
1159
1160 /* drop reference from allocate - handle holds it now */
1161 drm_gem_object_put(obj);
1162
1163 return ret;
1164}
1165
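/* Report residency and purgeability of the object to the DRM core. */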
1166static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
1167{
1168 struct msm_gem_object *msm_obj = to_msm_bo(obj);
1169 enum drm_gem_object_status status = 0;
1170
1171 if (msm_obj->pages)
1172 status |= DRM_GEM_OBJECT_RESIDENT;
1173
1174 if (msm_obj->madv == MSM_MADV_DONTNEED)
1175 status |= DRM_GEM_OBJECT_PURGEABLE;
1176
1177 return status;
1178}
1179
1180static const struct vm_operations_struct vm_ops = {
1181 .fault = msm_gem_fault,
1182 .open = drm_gem_vm_open,
1183 .close = drm_gem_vm_close,
1184};
1185
1186static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1187 .free = msm_gem_free_object,
1188 .open = msm_gem_open,
1189 .close = msm_gem_close,
1190 .export = msm_gem_prime_export,
1191 .pin = msm_gem_prime_pin,
1192 .unpin = msm_gem_prime_unpin,
1193 .get_sg_table = msm_gem_prime_get_sg_table,
1194 .vmap = msm_gem_prime_vmap,
1195 .vunmap = msm_gem_prime_vunmap,
1196 .mmap = msm_gem_object_mmap,
1197 .status = msm_gem_status,
1198 .vm_ops = &vm_ops,
1199};
1200
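/*
 * Common allocation path shared by msm_gem_new() and msm_gem_import():
 * validate the caching flags and set up the msm_gem_object itself.
 */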
1201static int msm_gem_new_impl(struct drm_device *dev, uint32_t flags,
1202 struct drm_gem_object **obj)
1203{
1204 struct msm_drm_private *priv = dev->dev_private;
1205 struct msm_gem_object *msm_obj;
1206
1207 switch (flags & MSM_BO_CACHE_MASK) {
1208 case MSM_BO_CACHED:
1209 case MSM_BO_WC:
1210 break;
1211 case MSM_BO_CACHED_COHERENT:
1212 if (priv->has_cached_coherent)
1213 break;
1214 fallthrough;
1215 default:
1216 DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1217 (flags & MSM_BO_CACHE_MASK));
1218 return -EINVAL;
1219 }
1220
1221 msm_obj = kzalloc_obj(*msm_obj);
1222 if (!msm_obj)
1223 return -ENOMEM;
1224
1225 msm_obj->flags = flags;
1226 msm_obj->madv = MSM_MADV_WILLNEED;
1227
1228 INIT_LIST_HEAD(&msm_obj->node);
1229
1230 *obj = &msm_obj->base;
1231 (*obj)->funcs = &msm_gem_object_funcs;
1232
1233 return 0;
1234}
1235
1236struct drm_gem_object *msm_gem_new(struct drm_device *dev, size_t size, uint32_t flags)
1237{
1238 struct msm_drm_private *priv = dev->dev_private;
1239 struct msm_gem_object *msm_obj;
1240 struct drm_gem_object *obj = NULL;
1241 int ret;
1242
1243 size = PAGE_ALIGN(size);
1244
1245 /* Disallow zero sized objects as they make the underlying
1246 * infrastructure grumpy
1247 */
1248 if (size == 0)
1249 return ERR_PTR(-EINVAL);
1250
1251 ret = msm_gem_new_impl(dev, flags, &obj);
1252 if (ret)
1253 return ERR_PTR(ret);
1254
1255 msm_obj = to_msm_bo(obj);
1256
1257 ret = drm_gem_object_init(dev, obj, size);
1258 if (ret)
1259 goto fail;
1260 /*
1261 * Our buffers are kept pinned, so allocating them from the
1262 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1263 * See comments above new_inode() why this is required _and_
1264 * expected if you're going to pin these pages.
1265 */
1266 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1267
1268 drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1269
1270 mutex_lock(&priv->obj_lock);
1271 list_add_tail(&msm_obj->node, &priv->objects);
1272 mutex_unlock(&priv->obj_lock);
1273
1274 ret = drm_gem_create_mmap_offset(obj);
1275 if (ret)
1276 goto fail;
1277
1278 return obj;
1279
1280fail:
1281 drm_gem_object_put(obj);
1282 return ERR_PTR(ret);
1283}
1284
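/*
 * Wrap an imported dma-buf sg_table in a GEM object.  The pages belong to
 * the exporter, so the free path only releases the page array, never the
 * pages themselves.
 */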
1285struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1286 struct dma_buf *dmabuf, struct sg_table *sgt)
1287{
1288 struct msm_drm_private *priv = dev->dev_private;
1289 struct msm_gem_object *msm_obj;
1290 struct drm_gem_object *obj;
1291 size_t size, npages;
1292 int ret;
1293
1294 size = PAGE_ALIGN(dmabuf->size);
1295
1296 ret = msm_gem_new_impl(dev, MSM_BO_WC, &obj);
1297 if (ret)
1298 return ERR_PTR(ret);
1299
1300 drm_gem_private_object_init(dev, obj, size);
1301
1302 npages = size / PAGE_SIZE;
1303
1304 msm_obj = to_msm_bo(obj);
1305 msm_gem_lock(obj);
1306 msm_obj->sgt = sgt;
1307 msm_obj->pages = kvmalloc_objs(struct page *, npages);
1308 if (!msm_obj->pages) {
1309 msm_gem_unlock(obj);
1310 ret = -ENOMEM;
1311 goto fail;
1312 }
1313
1314 ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1315 if (ret) {
1316 msm_gem_unlock(obj);
1317 goto fail;
1318 }
1319
1320 msm_gem_unlock(obj);
1321
1322 drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1323
1324 mutex_lock(&priv->obj_lock);
1325 list_add_tail(&msm_obj->node, &priv->objects);
1326 mutex_unlock(&priv->obj_lock);
1327
1328 ret = drm_gem_create_mmap_offset(obj);
1329 if (ret)
1330 goto fail;
1331
1332 return obj;
1333
1334fail:
1335 drm_gem_object_put(obj);
1336 return ERR_PTR(ret);
1337}
1338
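/*
 * Allocate a kernel-internal buffer, pin it into @vm, and return the CPU
 * mapping.  A rough usage sketch (illustrative only, not taken from any
 * particular in-tree caller):
 *
 *	uint64_t iova;
 *	struct drm_gem_object *bo;
 *	void *ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, vm, &bo, &iova);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... use ptr / iova ...
 *	msm_gem_kernel_put(bo, vm);
 */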
1339void *msm_gem_kernel_new(struct drm_device *dev, size_t size, uint32_t flags,
1340 struct drm_gpuvm *vm, struct drm_gem_object **bo,
1341 uint64_t *iova)
1342{
1343 void *vaddr;
1344 struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1345 int ret;
1346
1347 if (IS_ERR(obj))
1348 return ERR_CAST(obj);
1349
1350 if (iova) {
1351 ret = msm_gem_get_and_pin_iova(obj, vm, iova);
1352 if (ret)
1353 goto err;
1354 }
1355
1356 vaddr = msm_gem_get_vaddr(obj);
1357 if (IS_ERR(vaddr)) {
1358 msm_gem_unpin_iova(obj, vm);
1359 ret = PTR_ERR(vaddr);
1360 goto err;
1361 }
1362
1363 if (bo)
1364 *bo = obj;
1365
1366 return vaddr;
1367err:
1368 drm_gem_object_put(obj);
1369
1370 return ERR_PTR(ret);
1371
1372}
1373
1374void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm)
1375{
1376 if (IS_ERR_OR_NULL(bo))
1377 return;
1378
1379 msm_gem_put_vaddr(bo);
1380 msm_gem_unpin_iova(bo, vm);
1381 drm_gem_object_put(bo);
1382}
1383
1384void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1385{
1386 struct msm_gem_object *msm_obj = to_msm_bo(bo);
1387 va_list ap;
1388
1389 if (!fmt)
1390 return;
1391
1392 va_start(ap, fmt);
1393 vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1394 va_end(ap);
1395}