/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

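/*
 * Physical address of a VRAM-carveout backed object: the object's drm_mm
 * node gives a page offset into the carveout, which is added to the
 * carveout's base physical address.
 */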
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

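/*
 * Lazily allocate the backing pages for an object (shmem pages when an
 * IOMMU is present, otherwise pages from the VRAM carveout) and build
 * the scatter/gather table used for device mappings.
 */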
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);

		if (msm_obj->sgt)
			sg_free_table(msm_obj->sgt);

		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

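/*
 * Set up the page protection for a userspace mapping according to the
 * object's caching flags (write-combined, uncached, or cached).
 */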
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

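/*
 * Page-fault handler for userspace mappings: make sure the backing pages
 * are allocated and insert the faulting page's pfn into the VMA.
 */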
int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

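/*
 * Each object can be mapped into multiple GPU address spaces; a
 * msm_gem_vma records the iova of the mapping in one address space.
 * The helpers below manage the per-object list of such mappings and
 * are called with msm_obj->lock held.
 */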
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock held */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

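/*
 * Map the object's backing pages into the kernel's address space with
 * vmap(), bumping vmap_count so the shrinker knows the mapping is in use.
 */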
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

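/*
 * Free everything backing a purgeable object (iova mappings, kernel vmap,
 * pages, and the shmem backing store) so the memory can be returned to
 * the system; the object itself stays allocated and is marked PURGED.
 */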
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as possible
	 * back to the system, as we are called from OOM.  To do this we
	 * must instruct the shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

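/*
 * Common object setup: validate the caching flags, allocate and initialize
 * the msm_gem_object, and put it on the inactive list (taking struct_mutex
 * unless the caller already holds it).
 */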
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

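/*
 * Allocate a new GEM object.  With no IOMMU (or for MSM_BO_STOLEN buffers)
 * the object is backed by the VRAM carveout and initialized as a private
 * object; otherwise it gets shmem backing via drm_gem_object_init().
 */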
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

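/*
 * Allocate an object for kernel-internal use: create it, optionally map it
 * into the given address space (returning the iova), and return a kernel
 * vaddr for CPU access.
 */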
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_unreference(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		drm_gem_object_unreference(obj);
		return ERR_CAST(vaddr);
	}

	if (bo)
		*bo = obj;

	return vaddr;
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}