// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

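/*
 * LRU bookkeeping: each object lives on exactly one of the four LRUs
 * (unbacked, pinned, willneed, dontneed), depending on whether it has
 * backing pages, whether it is pinned, and its madvise state. The
 * helpers below expect priv->lru.lock to already be held.
 */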
static void update_lru_active(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_obj->pages);

	if (msm_obj->pin_count) {
		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
	} else {
		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);

		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
	}
}

static void update_lru_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(&msm_obj->base);

	if (!msm_obj->pages) {
		GEM_WARN_ON(msm_obj->pin_count);

		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
	} else {
		update_lru_active(obj);
	}
}

static void update_lru(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;

	mutex_lock(&priv->lru.lock);
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

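/*
 * Lazily allocate the backing pages (shmem or VRAM carveout) and the
 * sg_table on first use; later calls return the cached page array.
 * Caller must hold the object lock.
 */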
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & MSM_BO_WC)
			sync_for_device(msm_obj);

		update_lru(obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & MSM_BO_WC)
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
		update_lru(obj);
	}
}

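/*
 * Pin the backing pages so the shrinker will not purge or evict them
 * while they are in use. Caller must hold the object lock; balanced by
 * msm_gem_unpin_locked().
 */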
static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	if (IS_ERR(p))
		return p;

	mutex_lock(&priv->lru.lock);
	msm_obj->pin_count++;
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);

	return p;
}

struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
	struct page **p;

	msm_gem_lock(obj);
	p = msm_gem_pin_pages_locked(obj);
	msm_gem_unlock(obj);

	return p;
}

void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_unpin_locked(obj);
	msm_gem_unlock(obj);
}

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & MSM_BO_WC)
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	msm_gem_assert_locked(obj);

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = msm_gem_vma_new(aspace);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_vma_purge(vma);
			if (close)
				msm_gem_vma_close(vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	msm_gem_assert_locked(obj);

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

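/*
 * Look up the vma for the given address space, creating and initializing
 * it (which allocates an iova within [range_start, range_end)) if it does
 * not exist yet. Caller must hold the object lock.
 */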
static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_vma_init(vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

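/*
 * Pin the backing pages and map them into the vma's address space, with
 * IOMMU protection bits derived from the buffer flags (GPU read-only,
 * privileged, cached-coherent). Caller must hold the object lock.
 */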
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	pages = msm_gem_pin_pages_locked(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
	if (ret)
		msm_gem_unpin_locked(obj);

	return ret;
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	mutex_lock(&priv->lru.lock);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

/* Special unpin path for use in fence-signaling path, avoiding the need
 * to hold the obj lock by only depending on things that are protected by
 * the LRU lock. In particular we know that we already have backing
 * and that the object's dma_resv has the fence for the current
 * submit/job which will prevent us racing against page eviction.
 */
void msm_gem_unpin_active(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&priv->lru.lock);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru_active(obj);
	mutex_unlock(&priv->lru.lock);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	msm_gem_assert_locked(obj);

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret)
		*iova = vma->iova;

	return ret;
}

/*
 * Get the iova and pin it. Should have a matching put.
 * Limits iova to specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_vma_purge(vma);
	msm_gem_vma_close(vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it. Fails if the requested iova is
 * not available. Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;
		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_vma_unpin(vma);
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

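/*
 * Establish (or reuse) a kernel mapping of the backing pages. The pages
 * are pinned and vmap_count is incremented, so every successful call must
 * be balanced by msm_gem_put_vaddr()/_locked().
 */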
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret = 0;

	msm_gem_assert_locked(obj);

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	pages = msm_gem_pin_pages_locked(obj);
	if (IS_ERR(pages))
		return ERR_CAST(pages);

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	mutex_lock(&priv->lru.lock);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	update_lru_locked(obj);

	mutex_unlock(&priv->lru.lock);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	mutex_lock(&priv->lru.lock);
	/* A one-way transition: */
	msm_obj->madv = __MSM_MADV_PURGED;
	mutex_unlock(&priv->lru.lock);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(is_unevictable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

bool msm_gem_active(struct drm_gem_object *obj)
{
	msm_gem_assert_locked(obj);

	if (to_msm_bo(obj)->pin_count)
		return true;

	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}

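/*
 * Wait for pending GPU access, tracked via the object's dma_resv fences,
 * before CPU access. MSM_PREP_NOSYNC turns the wait into a poll, and
 * MSM_PREP_BOOST sets an immediate deadline on the fences to hint that
 * they should be signaled as soon as possible.
 */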
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	if (op & MSM_PREP_BOOST) {
		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
				      ktime_get());
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (msm_gem_active(obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				msm_gem_vma_inuse(vma));
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);
	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, " ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total: %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active: %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly! Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};

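/*
 * Common object setup shared by the allocation and import paths: validate
 * the requested cache mode against what the hardware supports, then
 * allocate and initialize the msm_gem_object.
 */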
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

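/*
 * Allocate a new GEM buffer object. Without an IOMMU (or for
 * stolen/scanout buffers when a VRAM carveout is configured) the backing
 * comes from the VRAM carveout and the iova is simply the physical
 * address; otherwise the object is shmem-backed and mapped on demand.
 */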
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

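/*
 * Wrap an imported dma-buf in a GEM object. The pages belong to the
 * exporter, so only the page array is allocated here and the provided
 * sg_table is reused; the object starts out on the pinned LRU.
 */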
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	drm_gem_lru_move_tail(&priv->lru.pinned, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

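/*
 * Convenience allocator for kernel-internal buffers: allocates the BO,
 * pins an iova in @aspace (when @iova is non-NULL) and returns a kernel
 * mapping. A minimal usage sketch (caller-chosen names):
 *
 *	ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	msm_gem_kernel_put(bo, aspace);
 */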
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}