/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE  ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE  ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
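/*
 * Worked example (illustrative, assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * on 64-bit the fake offsets start at page 0x100000, which userspace sees as
 * byte offset 0x100000 << 12 = 4 GiB, and span roughly 16M pages (~64 GiB) of
 * offset space; on 32-bit they start at page 0x10000 (byte offset 256 MiB)
 * and span roughly 1M pages (~4 GiB).  Either way the range stays well inside
 * what an unsigned long pgoff can address on that architecture.
 */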
/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		return -ENOMEM;
	}

	drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
		    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}
void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}
/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	drm_gem_private_object_init(dev, obj, size);
	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
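/*
 * Typical driver-side usage (a sketch, not part of this file): a driver
 * embeds struct drm_gem_object in its own buffer-object type and lets this
 * helper attach the shmfs backing store.  The "foo_bo" type and size handling
 * below are hypothetical.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		// driver-private state (placement, mappings, ...)
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		size = roundup(size, PAGE_SIZE);
 *		if (drm_gem_object_init(dev, &bo->base, size)) {
 *			kfree(bo);
 *			return ERR_PTR(-ENOMEM);
 *		}
 *		return bo;
 *	}
 */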
/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;
}
EXPORT_SYMBOL(drm_gem_private_object_init);
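/*
 * Illustrative use (not part of this file): drivers typically reach for the
 * private variant when the pages come from somewhere other than shmfs, e.g.
 * when wrapping an imported dma-buf.  The helper name below is hypothetical.
 *
 *	static struct drm_gem_object *
 *	foo_gem_import(struct drm_device *dev, struct dma_buf *dma_buf)
 *	{
 *		struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *		drm_gem_private_object_init(dev, &bo->base,
 *					    PAGE_ALIGN(dma_buf->size));
 *		// attach/map the dma-buf and record it in bo as needed
 *		return &bo->base;
 *	}
 */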
/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;

fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	if (obj->import_attach) {
		drm_prime_remove_buf_handle(&filp->prime,
				obj->import_attach->dmabuf);
	}
	if (obj->export_dma_buf) {
		drm_prime_remove_buf_handle(&filp->prime,
				obj->export_dma_buf);
	}
}
/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;
	*handlep = ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
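/*
 * Sketch of the usual calling pattern (driver names are hypothetical): a
 * driver ioctl creates the object, publishes a handle for userspace and then
 * drops its own reference, exactly as the comment above suggests.
 *
 *	static int foo_bo_create_ioctl(struct drm_device *dev, void *data,
 *				       struct drm_file *file_priv)
 *	{
 *		struct drm_foo_create *args = data;
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = foo_bo_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
 *		// drop the allocation reference; the handle now keeps the
 *		// object alive (or it gets freed if handle creation failed)
 *		drm_gem_object_unreference_unlocked(&bo->base);
 *		return ret;
 *	}
 */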
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
			obj->size / PAGE_SIZE, 0, false);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
			obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
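/*
 * Userspace side of the fake-offset dance (a sketch; the MAP ioctl shown is
 * driver-specific and the struct/ioctl names are made up): the driver returns
 * the offset attached above, and the client passes it straight to mmap(2) on
 * the DRM fd.
 *
 *	struct drm_foo_map args = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_FOO_MAP, &args))
 *		return -errno;
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, args.offset);
 *	if (ptr == MAP_FAILED)
 *		return -errno;
 */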
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
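/*
 * Lookup pattern in a driver ioctl (illustrative; the ioctl and args are
 * hypothetical): the reference taken by drm_gem_object_lookup() must be
 * dropped once the caller is done with the object.
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	// ... operate on obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 *	return 0;
 */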
/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	idr_preload(GFP_KERNEL);
	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;
		obj->name = ret;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	spin_unlock(&dev->object_name_lock);
	idr_preload_end();
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
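/*
 * The flink/open pair implements the "handles mimic fds" idea from the top of
 * this file for cross-process sharing.  A userspace sketch (error handling
 * trimmed, 'fd' is a DRM fd in each process):
 *
 *	// exporting process: publish a global name for its handle
 *	struct drm_gem_flink flink = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	// hand flink.name to the other process somehow (e.g. over a socket)
 *
 *	// importing process: turn the name back into a local handle
 *	struct drm_gem_open open_arg = { .name = flink.name };
 *	drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg);
 *	// open_arg.handle and open_arg.size now describe the shared object
 */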
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}
/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}
/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);

		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
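/*
 * Sketch of the dma-buf use case mentioned above (driver names hypothetical):
 * the exporter's dma_buf_ops.mmap callback can defer to drm_gem_mmap_obj(),
 * taking struct_mutex itself since there is no DRM fake-offset lookup here.
 *
 *	static int foo_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 *				       struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *		struct drm_device *dev = obj->dev;
 *		int ret;
 *
 *		mutex_lock(&dev->struct_mutex);
 *		ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *		mutex_unlock(&dev->struct_mutex);
 *		return ret;
 *	}
 */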
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	ret = drm_gem_mmap_obj(map->handle, map->size, vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
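/*
 * Wiring this up (illustrative): drivers that rely on the GEM fake-offset
 * path simply point their file_operations at drm_gem_mmap; the lookup above
 * then routes each mmap(2) call to the right object.
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *	};
 */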