/*
 * drivers/staging/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"
/* remove these once drm core helpers are merged */
struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */
struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid.  Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA capable
	 * buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have a omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly, but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};
static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;
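
/*
 * Rough picture of the scheme (summarizing the code below): a fault on a
 * 2d tiled buffer picks one of the reserved usergart entries for its
 * format, pins the slot-row of the buffer under the fault into that
 * entry's pre-reserved tiler block, and inserts PTEs pointing at the
 * entry's page-aligned tiler address.  Entries are recycled round-robin,
 * so a mapping can disappear at any time and simply refaults later.
 */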
static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	if (obj->dev->dev_mapping) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		int n = usergart[fmt].height;
		size_t size = PAGE_SIZE * n;
		loff_t off = mmap_offset(obj) +
				(entry->obj_pgoff << PAGE_SHIFT);
		const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
		if (m > 1) {
			int i;
			/* if stride > PAGE_SIZE then sparse mapping: */
			for (i = n; i > 0; i--) {
				unmap_mapping_range(obj->dev->dev_mapping,
						off, PAGE_SIZE, 1);
				off += PAGE_SIZE * m;
			}
		} else {
			unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
		}
	}

	entry->obj = NULL;
}
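
/*
 * For example, a 16-bit buffer 3000 pixels wide spans 6000 bytes per row,
 * so m = 2: userspace sees a virtual stride of two pages per row, of
 * which fault_2d() only inserted one page per row (the slot column that
 * faulted).  Hence the per-row unmap loop above, rather than one big
 * unmap of n * m pages.
 */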
/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}
/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time to reduce pressure
 * on TILER/DMM space when we know at allocation time that the buffer
 * will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}
/**
 * shmem buffers that are mapped cached can simulate coherency via using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}
static DEFINE_SPINLOCK(sync_lock);
/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int i, npages = obj->size >> PAGE_SHIFT;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
	 * we actually want CMA memory for it all anyways..
	 */
	pages = _drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;
}
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	_drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}
/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!obj->map_list.map) {
		/* Make it mmapable */
		size_t size = omap_gem_mmap_size(obj);
		int ret = _drm_gem_create_mmap_offset_size(obj, size);

		if (ret) {
			dev_err(dev->dev, "could not allocate mmap offset\n");
			return 0;
		}
	}

	return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
}
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}
/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}
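
/*
 * E.g. a 16-bit 320x240 tiled buffer occupies only 640 bytes per row of
 * real pixels, but with the per-row stride rounded up to 4KiB the
 * reported mmap size comes out to 240 pages rather than
 * PAGE_ALIGN(640 * 240).
 */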
/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}
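
/*
 * Note fault_1d maps exactly one page per fault: either the shmem page
 * (after syncing it back for CPU access), or, for contiguous buffers,
 * the pfn computed directly from the buffer's base paddr.
 */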
/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this needs to be taken
	 * into account in some of the math, so figure out virtual stride
	 * in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}
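
/*
 * Note the round-robin reuse above means at most NUM_USERGART_ENTRIES
 * distinct faulting regions per format can be resident at once; another
 * concurrent user steals the oldest entry, whose pages simply refault
 * on next access.
 */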
/**
 * omap_gem_fault		-	pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}
int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
/**
 * omap_gem_dumb_create	-	create a dumb buffer
 * @drm_file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	/* in case someone tries to feed us a completely bogus stride: */
	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
/**
 * omap_gem_dumb_destroy	-	destroy a dumb buffer
 * @file: our client file
 * @dev: our DRM device
 * @handle: the object handle
 *
 * Destroy a handle that was created via omap_gem_dumb_create.
 */
int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		uint32_t handle)
{
	/* No special work needed, drop the reference and see what falls out */
	return drm_gem_handle_delete(file, handle);
}
/**
 * omap_gem_dumb_map	-	buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}
/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}
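
/*
 * Together, cpu_sync/dma_sync implement a lazy coherency protocol for
 * cached buffers: a page is dma_map'd (addrs[i] != 0) while the device
 * owns it.  A CPU fault calls omap_gem_cpu_sync() to unmap just the
 * faulted page, and omap_gem_dma_sync() re-maps any page the CPU has
 * touched, then shoots down the userspace mapping so subsequent CPU
 * access faults again.
 */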
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %08x", omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
				goto fail;
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->block = NULL;
		}
	}
fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
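
/*
 * get_paddr/put_paddr form a pin count: the first get with remap=true
 * reserves a tiler block and pins the pages; the last put unpins and
 * releases it.  E.g. a scanout path would bracket each use of the
 * physical address with omap_gem_get_paddr(obj, &paddr, true) ...
 * omap_gem_put_paddr(obj).
 */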
/* acquire pages when needed (for example, for DMA where physically
 * contiguous buffer is not required
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}
/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off = 0;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (obj->map_list.map)
		off = (uint64_t)obj->map_list.hash.key;

	seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}
void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);
static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	return false;
}
/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}
static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}
/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * async GPU access.
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}
/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}
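
/*
 * Typical usage, e.g. for a buffer the hw will write:
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);    <- hw starts writing
 *	...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);   <- hw done, wakes waiters
 *
 * and in a path that needs the result:
 *
 *	omap_gem_op_sync(obj, OMAP_GEM_WRITE);     <- blocks until complete
 */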
static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}
int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);

		if (waiter) {
			kfree(waiter);
		}
	}
	return ret;
}
/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked..  fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}
int omap_gem_init_object(struct drm_gem_object *obj)
{
	return -EINVAL;          /* unused */
}
/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	list_del(&omap_obj->mm_list);

	if (obj->map_list.map) {
		drm_gem_free_mmap_offset(obj);
	}

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages) {
			omap_gem_detach_pages(obj);
		}
		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		kfree(omap_obj->sync);
	}

	drm_gem_object_release(obj);

	kfree(obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
		flags |= OMAP_BO_WC;

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj) {
		dev_err(dev->dev, "could not allocate GEM object\n");
		goto fail;
	}

	list_add(&omap_obj->mm_list, &priv->obj_list);

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (omap_obj->vaddr) {
			flags |= OMAP_BO_DMA;
		}
	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		ret = drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj) {
		omap_gem_free_object(obj);
	}
	return NULL;
}
/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_initialized()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kzalloc(3 * sizeof(*usergart), GFP_KERNEL);
	if (!usergart) {
		dev_warn(dev->dev, "could not allocate usergart\n");
		return;
	}

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i]) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
					entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}
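
/*
 * E.g. with 4KiB pages: i=0 (8bpp) reserves regions 4096 pixels wide,
 * i=1 (16bpp) 2048 pixels, i=2 (32bpp) 1024 pixels -- each exactly one
 * page wide in the tiler address space, with tiler_align() bumping h
 * from 1 up to the slot height for that format.
 */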
void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}