Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & MSM_BO_WC)
			sync_for_device(msm_obj);

		update_inactive(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & MSM_BO_WC)
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);

	if (!IS_ERR(p)) {
		msm_obj->pin_count++;
		update_inactive(msm_obj);
	}

	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_inactive(msm_obj);
	msm_gem_unlock(obj);
}
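
/*
 * Illustrative usage sketch (hedged; not a caller that exists in this file):
 * msm_gem_get_pages() and msm_gem_put_pages() are expected to be paired so
 * that pin_count stays balanced, e.g.:
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	...access the backing pages...
 *	msm_gem_put_pages(obj);
 */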

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & MSM_BO_WC)
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_init_vma(aspace, vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);

	if (!ret)
		msm_obj->pin_count++;

	return ret;
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);

	update_inactive(msm_obj);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret)
		*iova = vma->iova;

	return ret;
}

/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}
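
/*
 * Illustrative pairing sketch (hedged; caller-side variable names are only
 * for illustration): a GPU mapping is typically held between a get-and-pin
 * and a matching unpin, e.g.:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *
 *	if (ret)
 *		return ret;
 *	...hand iova to the GPU/display...
 *	msm_gem_unpin_iova(obj, aspace);
 */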

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_purge_vma(vma->aspace, vma);
	msm_gem_close_vma(vma->aspace, vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it. Fails if the requested iova is
 * not available. Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;
		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc.) decides
 * to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unpin_vma(vma);
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_inactive(msm_obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}
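
/*
 * Illustrative pairing sketch (hedged; `data` and `len` are placeholder
 * variables): CPU access through the kernel mapping is bracketed by get/put
 * of the vmap'd address, using the unlocked variants when the caller does
 * not already hold the object lock:
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	msm_gem_put_vaddr(obj);
 */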

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_inactive(msm_obj);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(is_unevictable(msm_obj));
	GEM_WARN_ON(!msm_obj->evictable);
	GEM_WARN_ON(msm_obj->active_count);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	update_inactive(msm_obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		if (msm_obj->evictable)
			mark_unevictable(msm_obj);
		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}

static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	if (msm_obj->active_count != 0)
		return;

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
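
/*
 * Illustrative pairing sketch (hedged; the MSM_PREP_* flags come from the
 * msm_drm.h uapi): CPU access is bracketed by a prep that waits for pending
 * GPU work and a fini when the CPU is done:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	...CPU reads/writes the buffer...
 *	msm_gem_cpu_fini(obj);
 */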

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (is_active(msm_obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				msm_gem_vma_inuse(vma));
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);
	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, " ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total: %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active: %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly! Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	/* object should not be on active list: */
	GEM_WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		/* Call chain get_pages() -> update_inactive() tries to
		 * access msm_obj->mm_list, but it is not initialized yet.
		 * To avoid NULL pointer dereference error, initialize
		 * mm_list to be empty.
		 */
		INIT_LIST_HEAD(&msm_obj->mm_list);

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}
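
/*
 * Illustrative usage sketch (hedged; `aspace` is assumed to be a
 * driver-private address space): kernel-internal buffers allocated with
 * msm_gem_kernel_new() are released with msm_gem_kernel_put():
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...CPU writes via ptr, GPU reads via iova...
 *	msm_gem_kernel_put(bo, aspace);
 */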

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}