/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/*
	 * paddr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when paddr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
	 * the DMA address must be accessed through omap_gem_get_paddr() to
	 * ensure that the mapping won't disappear unexpectedly. References must
	 * be released with omap_gem_put_paddr().
	 */
	dma_addr_t paddr;

	/** # of users of paddr */
	uint32_t paddr_cnt;

	/*
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/*
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/*
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

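/*
 * Illustrative usage sketch (reader's note, not part of the original driver):
 * a consumer that needs a DMA address is expected to pair the pin/unpin calls
 * declared below, e.g.
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (!ret) {
 *		... program DSS/DMA with paddr ...
 *		omap_gem_put_paddr(obj);
 *	}
 *
 * While the reference is held, paddr_cnt is non-zero and the TILER mapping
 * (if any) is kept alive.
 */
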
#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2

struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};

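/*
 * Reader's note (illustrative, follows from omap_gem_init() and fault_2d()
 * below): each usergart region is a PAGE_SIZE-wide column of a 2d container,
 * 'height' rows tall, so it covers exactly 'height' CPU pages.  A fault on a
 * tiled buffer is rounded down to a slot-row boundary, the corresponding
 * pages are pinned into the reserved tiler block, and successive rows of the
 * mapping advance the pfn by 'stride_pfn' (the container row stride in
 * pages).  'last' implements the simple round-robin reuse of the
 * NUM_USERGART_ENTRIES regions.
 */
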
/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
					&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where physically
 * contiguous buffer is not required
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);

	return offset;
}

size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* -----------------------------------------------------------------------------
 * Page Faulting
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this need to be taken
	 * into account in some of the math, so figure out virtual stride
	 * in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault		-	pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create	-	create a dumb buffer
 * @drm_file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map	-	buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: our device
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

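/*
 * Illustrative userspace flow (a sketch, not code from this file): the
 * standard DRM dumb-buffer ioctls end up in the two helpers above, roughly
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 640, .height = 480, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *
 *	struct drm_mode_map_dumb mreq = { .handle = creq.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *	void *map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mreq.offset);
 *
 * where mreq.offset is the fake mmap offset produced by
 * omap_gem_dumb_map_offset().
 */
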
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached can simulate coherency via using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				dma_addr_t addr;

				addr = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);

				if (dma_mapping_error(dev->dev, addr)) {
					dev_warn(dev->dev,
						"%s: failed to map page\n",
						__func__);
					break;
				}

				dirty = true;
				omap_obj->addrs[i] = addr;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (is_contiguous(omap_obj)) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;

	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %d", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

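/*
 * Illustrative usage of the sync-object API above (a sketch, not taken from
 * a particular caller in this file): a component that kicks a hw write
 * brackets the operation with
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);
 *	... submit DMA/hw write ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);	(from the completion path)
 *
 * and a CPU reader that must observe the result blocks with
 *
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);
 *
 * which waits until write_complete catches up with the write_pending count
 * sampled when the waiter was created (see is_waiting()).
 */
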
/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked..  fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_finish().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->paddr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->paddr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}

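/*
 * Illustrative allocation sketch (assumed caller, not part of this file): an
 * untiled, write-combined buffer of 'len' bytes would be created with
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(len) };
 *	struct drm_gem_object *obj = omap_gem_new(dev, gsize, OMAP_BO_WC);
 *
 * while a tiled buffer passes one of the OMAP_BO_TILED_* flags and fills in
 * gsize.tiled.width / gsize.tiled.height instead (see the OMAP_BO_TILED
 * branch above).
 */
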
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->paddr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}