/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <asm/shmparam.h>
#include "drmP.h"
resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);
resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we ignore the map
		 * offset for maps of type _DRM_FRAME_BUFFER or _DRM_REGISTERS.
		 * It is assumed that each driver will have only one resource of
		 * each type.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
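/*
 * Illustrative numbers (not from the original file): with PAGE_SIZE 4K and
 * SHMLBA 16K, bits = ilog2(16K >> 12) + 1 = 3, so for a SHM map whose
 * kernel address has pgoff 0xc0005 the low three pgoff bits (0x5) are
 * OR'ed into `add` and drm_ht_just_insert_please() only varies the bits
 * above them; the token user space later passes to mmap() therefore lands
 * on the same cache colour as the kernel mapping.
 */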
/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);
	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS) {
			map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -ENOMEM;
			}
		}
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver).  So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_GEM:
		DRM_ERROR("tried to addmap GEM object\n");
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	list->master = dev->primary->master;
	*maplist = list;
	return 0;
}
int drm_addmap(struct drm_device * dev, resource_size_t offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);
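/*
 * Usage sketch (illustrative, not from this file): a driver would
 * typically wrap its register BAR like this; MY_MMIO_RESOURCE is a
 * hypothetical PCI resource number:
 *
 *	struct drm_local_map *mmio;
 *	int err;
 *
 *	err = drm_addmap(dev, drm_get_resource_start(dev, MY_MMIO_RESOURCE),
 *			 drm_get_resource_len(dev, MY_MMIO_RESOURCE),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
 *	if (err)
 *		return err;
 */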
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this cast isn't very nice, but the API
	 * is already set, so it is too late to change */
	map->handle = (void *)(unsigned long)maplist->user_token;
	return 0;
}
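/*
 * Usage sketch (illustrative, not from this file): the handle returned by
 * this ioctl is the 32-bit user_token, not a kernel pointer; user space
 * hands it straight back to mmap() as the offset (len is a placeholder):
 *
 *	struct drm_map m = { .size = len, .type = _DRM_SHM };
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &m);
 *	ptr = mmap(NULL, m.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, (off_t)(unsigned long)m.handle);
 */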
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is in use, and frees any associated resources (such as
 * MTRR's) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;
	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	case _DRM_GEM:
		DRM_ERROR("tried to rmmap GEM object\n");
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
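/*
 * Usage note (illustrative): drm_rmmap() undoes the drm_addmap() sketch
 * above, e.g. drm_rmmap(dev, mmio).  It takes struct_mutex internally, so
 * a caller already holding that mutex must use drm_rmmap_locked() instead.
 */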
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This
 * seems unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);
	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */
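/*
 * Worked example (illustrative) of the sizing arithmetic used above: a
 * request with size 65536 and _DRM_PAGE_ALIGN on a 4K-page system gives
 * order = drm_order(65536) = 16, size = 1 << 16 = 65536, alignment =
 * PAGE_ALIGN(65536) = 65536, page_order = 16 - PAGE_SHIFT = 4 and
 * total = PAGE_SIZE << 4 = 65536, i.e. each buffer occupies exactly one
 * 16-page allocation unit in the aperture.
 */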
int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */
	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);
	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));
	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;
	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}
	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
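/*
 * Usage sketch (illustrative, not from this file): user space grows the
 * PCI DMA pool through the ADD_BUFS ioctl; on return, count and size
 * report what was actually allocated:
 *
 *	struct drm_buf_desc desc = {
 *		.count = 32,
 *		.size = 4096,
 *		.flags = _DRM_PAGE_ALIGN,
 *	};
 *	ret = ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 */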
static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if __OS_HAS_AGP
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, request);
	else
		ret = drm_addbufs_pci(dev, request);

	return ret;
}
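/*
 * Sketch (illustrative): the backend is chosen purely by the request
 * flags; anything without _DRM_AGP_BUFFER, _DRM_SG_BUFFER or
 * _DRM_FB_BUFFER falls through to drm_addbufs_pci():
 *
 *	struct drm_buf_desc d = { .count = 16, .size = 65536,
 *				  .flags = _DRM_AGP_BUFFER, .agp_start = 0 };
 *	ret = drm_addbufs(dev, &d, file_priv);
 */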
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
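/*
 * Usage sketch (illustrative, not from this file): like several DRM query
 * ioctls, this is naturally called twice; a first pass with count 0 learns
 * how many entries exist, then a second pass supplies a large enough list:
 *
 *	struct drm_buf_info info = { .count = 0, .list = NULL };
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *	info.list = calloc(info.count, sizeof(struct drm_buf_desc));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 */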
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request->low_mark;
	entry->freelist.high_mark = request->high_mark;

	return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);
	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;
		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
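/*
 * Usage sketch (illustrative, not from this file): one MAP_BUFS ioctl maps
 * the whole pool and returns, per buffer, an address inside that single
 * mapping; n and list are supplied by the caller:
 *
 *	struct drm_buf_map bm = { .count = n, .list = list };
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm);
 *	first = bm.list[0].address;
 */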
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
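/*
 * Worked examples (illustrative): drm_order(1) == 0, drm_order(4096) == 12
 * and drm_order(4097) == 13; any size that is not an exact power of two is
 * rounded up to the next order.
 */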