/**
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}
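/*
 * Illustration (hypothetical numbers, not from this file): a 64-bit PCI
 * BAR at 0x280000000 can only be carried as 0x80000000 through the
 * 32-bit userspace ABI, which is why the _DRM_REGISTERS and
 * _DRM_FRAME_BUFFER cases above compare (offset & 0xffffffff) only.
 */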
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
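/*
 * Worked example for the SHMLBA handling above (assumed values): with
 * PAGE_SIZE = 4 KiB and SHMLBA = 16 KiB,
 * bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1 = ilog2(4) + 1 = 3, so the low
 * three bits of the hash-based handle are forced to match those of
 * user_token >> PAGE_SHIFT. The resulting vma->vm_pgoff then has the
 * same cache colour as the original kernel virtual address.
 */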
/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr =
					arch_phys_wc_add(map->offset, map->size);
			}
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_GEM:
		DRM_ERROR("tried to addmap GEM object\n");
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}
int drm_addmap(struct drm_device * dev, resource_size_t offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);
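/*
 * Usage sketch (hypothetical driver code; "pdev" and the BAR index are
 * assumptions, not part of this file):
 *
 *	struct drm_local_map *map;
 *	int ret = drm_addmap(dev, pci_resource_start(pdev, 0),
 *			     pci_resource_len(pdev, 0),
 *			     _DRM_REGISTERS, _DRM_WRITE_COMBINING, &map);
 *	if (ret)
 *		return ret;
 */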
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches the map on drm_device::maplist, removes it from the list, sees if
 * it's being used, and frees any associated resource (such as MTRR's) if
 * it's not.
 */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev))
			arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	case _DRM_GEM:
		DRM_ERROR("tried to rmmap GEM object\n");
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
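/*
 * Teardown sketch (hypothetical driver unload path, mirroring the
 * drm_addmap() example above). drm_rmmap() takes struct_mutex itself;
 * callers already holding it should use drm_rmmap_locked():
 *
 *	if (map)
 *		drm_rmmap(dev, map);
 */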
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */
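/*
 * Sizing example for the addbufs helpers (assumed request values): with
 * request->size = 65536 and _DRM_PAGE_ALIGN on a 4 KiB-page system,
 * order = drm_order(65536) = 16, size = 1 << 16, alignment = 65536,
 * page_order = 16 - PAGE_SHIFT = 4, and total = PAGE_SIZE << 4 = 65536
 * bytes per segment.
 */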
int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
			       sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;

}
EXPORT_SYMBOL(drm_addbufs_pci);
static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if __OS_HAS_AGP
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, request);
	else
		ret = drm_addbufs_pci(dev, request);

	return ret;
}
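/*
 * Dispatch example (hypothetical request): a drm_buf_desc with
 * count = 32, size = 65536 and flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN
 * is routed to drm_addbufs_agp(); the same request with none of the
 * AGP/SG/FB flags set falls through to drm_addbufs_pci().
 */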
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request->low_mark;
	entry->freelist.high_mark = request->high_mark;

	return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
	        struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
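/*
 * Flow note (illustrative): a client that wants every buffer passes
 * request->count >= dma->buf_count; each buffer's user-space address is
 * then computed as virtual + dma->buflist[i]->offset inside the single
 * vm_mmap() region established above.
 */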
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
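/*
 * Worked examples: drm_order(1) == 0, drm_order(4096) == 12 and
 * drm_order(4097) == 13; the loop finds the index of the highest set
 * bit, and the final test rounds up when size is not a power of two.
 */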