/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>

#include "drm_legacy.h"
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}
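/*
 * Illustrative note (not from the original source): the truncated
 * comparison above means two register/framebuffer maps match whenever
 * their offsets agree modulo 2^32.  For example, a 64-bit BAR at
 * 0x1e0000000 and a user-supplied offset of 0xe0000000 compare equal,
 * since (0x1e0000000ULL & 0xffffffff) == (0xe0000000UL & 0xffffffff);
 * hence the assumption that a driver's resources of the same type
 * differ in their low 32 bits.
 */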
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;

		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
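/*
 * Worked example of the hashing above (illustrative, assuming a 4 KiB
 * PAGE_SIZE and a 16 KiB SHMLBA as found on some ARM/SPARC parts):
 * bits = ilog2(16384 >> 12) + 1 = 3, so for user_token = 0xc000b000,
 * (user_token >> PAGE_SHIFT) & 0x7 = 0x3 is OR'ed into 'add' and
 * shift = 3.  drm_ht_just_insert_please() then only varies hash bits
 * at or above 'shift', so the handle it picks stays congruent to the
 * kernel virtual address modulo SHMLBA and the later mmap() cannot
 * create a cache-aliasing mapping.
 */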
/*
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}

int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);
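/*
 * Minimal usage sketch for drm_legacy_addmap() (hypothetical driver
 * code, not part of this file; 'foo_dev_priv' and its drm_local_map
 * pointer member 'mmio' are assumptions, as is the BAR layout):
 */
#if 0
static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct foo_dev_priv *dev_priv = dev->dev_private;

	/* Expose BAR 0 as a _DRM_REGISTERS map; drm_addmap_core() will
	 * ioremap() it and publish a 32-bit handle on dev->maplist. */
	return drm_legacy_addmap(dev, pci_resource_start(dev->pdev, 0),
				 pci_resource_len(dev->pdev, 0),
				 _DRM_REGISTERS, _DRM_READ_ONLY,
				 &dev_priv->mmio);
}
#endif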
/*
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this casting isn't very nice, but the
	 * API was fixed long ago, so it is too late to change it now */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 *  it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}

/*
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
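/*
 * Userspace view (sketch; drmIoctl() and DRM_IOCTL_GET_MAP are the
 * standard libdrm/uapi names, the rest is illustrative).  On input
 * drm_map::offset carries the maplist index; the returned handle is
 * the token to pass to mmap(), not a kernel address:
 *
 *	struct drm_map map = { .offset = 0 };	// look up map #0
 *	drmIoctl(fd, DRM_IOCTL_GET_MAP, &map);
 *	void *ptr = mmap(NULL, map.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, (off_t)(unsigned long)map.handle);
 */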
/*
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * sees whether it's being used, and frees any associated resources (such
 * as MTRRs) if it's not.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_legacy_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_rmmap);
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty and
	 * we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}
#if IS_ENABLED(CONFIG_AGP)
/*
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */
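/*
 * Worked example of the sizing math used by the addbufs_*() helpers
 * (illustrative numbers): request->size = 0x10000 (64 KiB) gives
 * order = order_base_2(0x10000) = 16 and size = 1 << 16; with 4 KiB
 * pages, page_order = 16 - PAGE_SHIFT = 4 and total = PAGE_SIZE << 4
 * = 64 KiB, i.e. each allocation step covers sixteen contiguous
 * pages, and dma->bufs[16] is the bucket that owns these buffers.
 */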
int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
				sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);
static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/*
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}
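/*
 * Userspace sketch (hedged: drmAddBufs()/drmMapBufs() are libdrm's
 * historical wrappers around DRM_IOCTL_ADD_BUFS/DRM_IOCTL_MAP_BUFS;
 * the counts and sizes below are illustrative):
 *
 *	int granted = drmAddBufs(fd, 32, 0x10000, DRM_PCI_BUFFER_RO, 0);
 *	drmBufMapPtr bufs = drmMapBufs(fd);	// mmap()s every buffer
 */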
/*
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &from->low_mark,
						 sizeof(from->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &from->high_mark,
						 sizeof(from->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
/*
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}
/*
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}
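/*
 * Userspace sketch (assumes libdrm's drmDMA()/drmFreeBufs() wrappers;
 * field names per historical xf86drm.h, values illustrative): buffers
 * are reserved through DRM_IOCTL_DMA and handed back with the ioctl
 * above:
 *
 *	int idx, sz;
 *	drmDMAReq req = { .context = ctx, .request_count = 1,
 *			  .request_size = 0x10000, .flags = DRM_DMA_WAIT,
 *			  .request_list = &idx, .request_sizes = &sz };
 *	drmDMA(fd, &req);			// reserve one buffer
 *	drmFreeBufs(fd, req.granted_count, req.request_list);
 */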
/*
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	if (request->count >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
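/*
 * Resulting userspace layout (illustrative): after DRM_IOCTL_MAP_BUFS
 * succeeds, request->virtual is the base of one large mapping and
 * each list[i].address equals virtual + buflist[i]->offset, so a
 * client can address any buffer without further mmap() calls:
 *
 *	struct drm_buf_map bm = { .count = n, .list = list };
 *	drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bm);
 *	memcpy(list[0].address, vertices, len);	// fill buffer 0
 */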
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);