/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */


#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/* remove these once drm core helpers are merged */
struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */


struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid.  Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have a omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;

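/*
 * Example (illustrative numbers, see fault_2d() below): with 4kb pages
 * and a slot height of 64 rows, each usergart region is one page wide
 * by 64 rows tall, so a single region covers a 64-page slot-row of a
 * tiled buffer.  A fault pins the touched slot-row into one of the
 * NUM_USERGART_ENTRIES reserved blocks and points the faulting PTEs at
 * it; the entries are recycled round-robin.
 */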
static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int n = usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride is greater than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But
 * non-contiguous buffers can be remapped in TILER/DMM if they need to
 * be contiguous... but to reduce pressure on TILER/DMM space we don't
 * always rely on this: when we know at allocation time that the buffer
 * will need to be scanned out, we may allocate it contiguously up front.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

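/* sync_lock protects each buffer's sync counters and the global list of
 * sync waiters (see "Buffer Synchronization" below):
 */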
static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

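/*
 * Note on addrs[]: for OMAP_BO_WC/OMAP_BO_UNCACHED buffers every entry
 * holds a long-lived DMA mapping created above; for cached buffers the
 * array starts out zeroed and entries are mapped/unmapped lazily by
 * omap_gem_dma_sync()/omap_gem_cpu_sync() below.
 */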
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, drop the DMA mappings that were set up
	 * in omap_gem_attach_pages():
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

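/*
 * Note the contrast with fault_2d() below: fault_1d() inserts a single
 * page per fault, straight from the backing pages (or from the
 * contiguous paddr), while fault_2d() pins a whole slot-row into the
 * usergart and maps it in one go.
 */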
/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the
	 * slot height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than-4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}

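/*
 * Worked example for the math above (illustrative numbers): with n = 64,
 * n_shift = 6 and m = 1 (buffer no wider than a page), a fault at
 * pgoff 100 gives base_pgoff = round_down(100, 64) = 64, i.e. the second
 * slot-row of the buffer gets pinned, and 64 pages are inserted starting
 * at the page that maps pgoff 64.
 */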
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area.  GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM.  In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't race a parallel update on a fault, nor have
	 * something moved or removed from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() of all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER.  But these are
		 * allocated write-combine.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}


/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space.  Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	/* in case someone tries to feed us a completely bogus stride: */
	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

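/*
 * For reference, a sketch of the userspace side of the dumb-buffer API
 * (illustrative only; error handling omitted):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, map.offset);
 */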
/**
 * omap_gem_dumb_map - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.  We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

/* Set scrolling position.  This allows us to implement fast scrolling
 * for the console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

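/*
 * The two helpers above implement the fault-based coherency described
 * at is_cached_coherent(): omap_gem_dma_sync() DMA-maps any page whose
 * addrs[] slot is empty (flushing CPU writes) and then shoots down the
 * userspace mapping, so the next CPU touch faults; the fault path calls
 * omap_gem_cpu_sync(), which unmaps that page again and clears its
 * addrs[] slot, marking it as needing a flush on the next dma_sync.
 */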
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

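/*
 * Typical usage (sketch): pin with omap_gem_get_paddr(obj, &paddr, true)
 * before pointing scanout or other DMA at the buffer, and balance each
 * successful call with omap_gem_put_paddr(obj) once the hardware is done,
 * so the TILER mapping can be torn down again.
 */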
/* Release physical address when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
				goto fail;
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->block = NULL;
		}
	}
fail:
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved, which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

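/*
 * Flow (sketch): omap_gem_op_start() bumps the pending counters,
 * omap_gem_op_finish() bumps the complete counters and calls
 * sync_op_update(), which walks this list and fires the notify
 * callback of every waiter whose target counts have been reached.
 */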
static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

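/*
 * Typical usage (sketch): before the CPU touches a buffer that hw may
 * still be writing, omap_gem_op_sync(obj, OMAP_GEM_READ) blocks until
 * the outstanding writes complete (and OMAP_GEM_WRITE likewise waits
 * for outstanding reads), per the counter checks in is_waiting().
 */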
/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked..  fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	list_del(&omap_obj->mm_list);

	drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj);  /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem page backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
		flags |= OMAP_BO_WC;

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		goto fail;

	list_add(&omap_obj->mm_list, &priv->obj_list);

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (omap_obj->vaddr)
			flags |= OMAP_BO_DMA;

	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);

	return NULL;
}

/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is one 4kb page wide and the
		 * minimum number of rows tall, the height ends up being
		 * the same as the # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}