drm: lindent the drm directory.
drivers/char/drm/drm_bufs.c
/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}

EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}

EXPORT_SYMBOL(drm_get_resource_len);

static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
					     drm_local_map_t * map)
{
	struct list_head *list;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
		if (entry->map && map->type == entry->map->type &&
		    entry->map->offset == map->offset) {
			return entry;
		}
	}

	return NULL;
}

/*
 * Used to allocate 32-bit handles for mappings.
 */
#define START_RANGE 0x10000000
#define END_RANGE 0x40000000

#ifdef _LP64
static __inline__ unsigned int HandleID(unsigned long lhandle,
					drm_device_t * dev)
{
	static unsigned int map32_handle = START_RANGE;
	unsigned int hash;

	if (lhandle & 0xffffffff00000000) {
		hash = map32_handle;
		map32_handle += PAGE_SIZE;
		if (map32_handle > END_RANGE)
			map32_handle = START_RANGE;
	} else
		hash = lhandle;

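	/* Linear probe: if the chosen hash collides with the user_token of
	 * an existing map, step forward one page at a time until a free
	 * slot in the handle space is found.
	 */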
	while (1) {
		drm_map_list_t *_entry;
		list_for_each_entry(_entry, &dev->maplist->head, head) {
			if (_entry->user_token == hash)
				break;
		}
		if (&_entry->head == &dev->maplist->head)
			return hash;

		hash += PAGE_SIZE;
		map32_handle += PAGE_SIZE;
	}
}
#else
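/* On 32-bit kernels the map offset or kernel address already fits in
 * 32 bits, so it is used as the handle directly.
 */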
# define HandleID(x,dev) (unsigned int)(x)
#endif

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
int drm_addmap_core(drm_device_t * dev, unsigned int offset,
		    unsigned int size, drm_map_type_t type,
		    drm_map_flags_t flags, drm_map_list_t ** maplist)
{
	drm_map_t *map;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep
	 * enough bookkeeping information about shared memory to allow
	 * for removal when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + map->size < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it. Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = drm_ioremap(map->offset, map->size, dev);
		break;

	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -ENOMEM;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);
	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_sem protects the increment */
	list->user_token = HandleID(map->type == _DRM_SHM
				    ? (unsigned long)map->handle
				    : map->offset, dev);
	up(&dev->struct_sem);

	*maplist = list;
	return 0;
}

int drm_addmap(drm_device_t * dev, unsigned int offset,
	       unsigned int size, drm_map_type_t type,
	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
	drm_map_list_t *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}

EXPORT_SYMBOL(drm_addmap);
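
/* Usage sketch (illustrative only, not part of this file): a PCI driver
 * would typically publish its register BAR at load time roughly like this,
 * assuming the registers live in resource 0 of the device:
 *
 *	drm_local_map_t *map;
 *	int ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *			     drm_get_resource_len(dev, 0),
 *			     _DRM_REGISTERS, _DRM_READ_ONLY, &map);
 *	if (ret != 0)
 *		return ret;
 */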

int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t map;
	drm_map_list_t *maplist;
	drm_map_t __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map))) {
		return -EFAULT;
	}

	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
			      &maplist);

	if (err)
		return err;

	if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
		return -EFAULT;
	if (put_user(maplist->user_token, &argp->handle))
		return -EFAULT;
	return 0;
}

/**
 * Remove a map private from the list and deallocate resources if the
 * mapping isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's being used, and frees any associated resources (such
 * as MTRRs) if it's not in use.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
{
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_dma_handle_t dmah;

	/* Find the list entry for the map and remove it */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map == map) {
			list_del(list);
			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		return -EINVAL;
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_ioremapfree(map->handle, map->size, dev);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}

EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
{
	int ret;

	down(&dev->struct_sem);
	ret = drm_rmmap_locked(dev, map);
	up(&dev->struct_sem);

	return ret;
}

EXPORT_SYMBOL(drm_rmmap);

/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly. Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about. This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t request;
	drm_local_map_t *map = NULL;
	struct list_head *list;
	int ret;

	if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
		return -EFAULT;
	}

	down(&dev->struct_sem);
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->user_token == (unsigned long)request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	if (!map) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		up(&dev->struct_sem);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	up(&dev->struct_sem);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_free_pages(entry->seglist[i],
					       entry->page_order, DRM_MEM_DMA);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list to accommodate the new buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
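	/* size is the requested buffer size rounded up to a power of two;
	 * alignment optionally pads each buffer out to a page boundary;
	 * page_order/total express that same allocation in whole pages.
	 */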

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

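	/* Flag an allocation in progress under count_lock: buf_alloc blocks
	 * drm_infobufs()/drm_mapbufs(), while a nonzero buf_use (set by
	 * those calls) blocks any further buffer allocation.
	 */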
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */

int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;
	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		page = drm_alloc_pages(page_order, DRM_MEM_DMA);
		if (!page) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = page;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  page + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = page + PAGE_SIZE * i;
		}
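		/* Carve the freshly allocated segment into as many aligned
		 * buffers as will fit.
		 */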
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				up(&dev->struct_sem);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

EXPORT_SYMBOL(drm_addbufs_pci);

static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, dispatches the call to drm_addbufs_agp(), drm_addbufs_sg(),
 * drm_addbufs_fb() or drm_addbufs_pci() for AGP, scatter-gather, framebuffer
 * or consistent PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
			   sizeof(request)))
		return -EFAULT;

#if __OS_HAS_AGP
	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

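	/* Count how many size orders currently have buffers allocated. */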
	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				drm_buf_desc_t __user *to =
				    &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry's low and high water
 * marks.
 *
 * \note This ioctl is deprecated and rarely, if ever, used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_desc_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_free_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  current->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space. The PCI buffers are already mapped on the
 * addbufs_pci() call.
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

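	/* Mapping is only done when the client asks for at least as many
	 * buffers as exist. AGP, SG and FB buffers are mapped through the
	 * dev->agp_buffer_map token; otherwise the linear PCI DMA area is
	 * mapped at offset 0.
	 */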
	if (request.count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}

			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
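		/* do_mmap() returns either a userspace address or a negative
		 * errno encoded near the top of the address space; the check
		 * below treats anything above -1024UL as an error value.
		 */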
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request.list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return retcode;
}

/**
 * Compute size order. Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}

EXPORT_SYMBOL(drm_order);
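
/* Worked examples (sanity checks against the definition above):
 * drm_order(1) == 0, drm_order(3) == 2 (since 1 << 2 = 4 >= 3), and
 * drm_order(PAGE_SIZE) == PAGE_SHIFT on any configuration.
 */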