/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

35
36 #include <linux/vmalloc.h>
37 #include <linux/log2.h>
38 #include <asm/shmparam.h>
39 #include "drmP.h"
40
41 resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
42 {
43 return pci_resource_start(dev->pdev, resource);
44 }
45 EXPORT_SYMBOL(drm_get_resource_start);
46
47 resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
48 {
49 return pci_resource_len(dev->pdev, resource);
50 }
51
52 EXPORT_SYMBOL(drm_get_resource_len);
53
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we ignore the map
		 * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that each driver will have only one resource of
		 * each type.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			/* fall through: lock-bearing SHM maps match on type alone */
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			return entry;
		default:	/* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

86
87 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
88 unsigned long user_token, int hashed_handle, int shm)
89 {
90 int use_hashed_handle, shift;
91 unsigned long add;
92
93 #if (BITS_PER_LONG == 64)
94 use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
95 #elif (BITS_PER_LONG == 32)
96 use_hashed_handle = hashed_handle;
97 #else
98 #error Unsupported long size. Neither 64 nor 32 bits.
99 #endif
100
101 if (!use_hashed_handle) {
102 int ret;
103 hash->key = user_token >> PAGE_SHIFT;
104 ret = drm_ht_insert_item(&dev->map_hash, hash);
105 if (ret != -EINVAL)
106 return ret;
107 }
108
109 shift = 0;
110 add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
111 if (shm && (SHMLBA > PAGE_SIZE)) {
112 int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
113
114 /* For shared memory, we have to preserve the SHMLBA
115 * bits of the eventual vma->vm_pgoff value during
116 * mmap(). Otherwise we run into cache aliasing problems
117 * on some platforms. On these platforms, the pgoff of
118 * a mmap() request is used to pick a suitable virtual
119 * address for the mmap() region such that it will not
120 * cause cache aliasing problems.
121 *
122 * Therefore, make sure the SHMLBA relevant bits of the
123 * hash value we use are equal to those in the original
124 * kernel virtual address.
125 */
126 shift = bits;
127 add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
128 }
129
130 return drm_ht_just_insert_please(&dev->map_hash, hash,
131 user_token, 32 - PAGE_SHIFT - 3,
132 shift, add);
133 }
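
/*
 * Illustrative sketch (not in the original source) of what the SHMLBA
 * handling above achieves.  Assume a hypothetical platform with
 * PAGE_SIZE = 4 KiB and SHMLBA = 16 KiB, so bits = ilog2(4) + 1 = 3.
 * drm_ht_just_insert_please() generates candidate keys roughly as
 * (candidate << shift) + add; with shift = bits and the low bits of the
 * kernel pgoff folded into 'add', every key it can pick shares its low
 * 3 bits with the original kernel virtual address.  The pgoff that
 * userspace later hands to mmap() therefore selects a virtual address
 * with the same cache colour as the kernel mapping, avoiding aliasing.
 */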

/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);
	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS) {
			map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver).  So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_GEM:
		DRM_ERROR("tried to addmap GEM object\n");
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	list->master = dev->primary->master;
	*maplist = list;
	return 0;
}

int drm_addmap(struct drm_device * dev, resource_size_t offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);

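/*
 * Hedged usage sketch (not part of the original file): a driver of this
 * era would typically map its register BAR at load time roughly like
 * this, pairing drm_addmap() with drm_rmmap() at unload.  The BAR index
 * (0) and the choice of _DRM_READ_ONLY are illustrative assumptions.
 *
 *	struct drm_local_map *regs;
 *	int ret;
 *
 *	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *			 drm_get_resource_len(dev, 0),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	if (ret)
 *		return ret;
 *	...
 *	drm_rmmap(dev, regs);		// on unload
 */
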
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this cast isn't very nice, but the
	 * ioctl API is already fixed, so it's too late to change it */
	map->handle = (void *)(unsigned long)maplist->user_token;
	return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's still in use, and frees any associated resources
 * (such as MTRRs) if it isn't.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	case _DRM_GEM:
		DRM_ERROR("tried to rmmap GEM object\n");
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list is empty, or we walked it to the end without a match:
	 * either way we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */
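
/*
 * Hedged sketch (not in the original source) of how a driver-side caller
 * would request AGP DMA buffers with drm_addbufs_agp() above.  The count,
 * size, and agp_start values are illustrative assumptions.
 *
 *	struct drm_buf_desc req = {
 *		.count     = 32,
 *		.size      = 65536,		// rounded via drm_order()
 *		.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *		.agp_start = 0,			// offset into the aperture
 *	};
 *	int ret = drm_addbufs_agp(dev, &req);
 *	// on success, req.count and req.size report what was allocated
 */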

int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);

static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if __OS_HAS_AGP
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, request);
	else
		ret = drm_addbufs_pci(dev, request);

	return ret;
}

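/*
 * Hedged userspace sketch (not part of the original file): the ioctl
 * handler above is reached through DRM_IOCTL_ADD_BUFS.  A legacy client
 * would issue something like the following; the fd and request values
 * are illustrative assumptions.
 *
 *	struct drm_buf_desc desc = {
 *		.count = 16,
 *		.size  = 4096,
 *		.flags = 0,		// plain PCI buffers
 *	};
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
 *		// desc.count now holds the number actually allocated
 */
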
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}

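/*
 * Hedged usage sketch (not in the original source): because the handler
 * above only copies entries when request->count is large enough, a
 * userspace caller typically probes twice -- once with count = 0 to learn
 * how many size pools exist, then again with a list of that length.
 * The fd is an illustrative assumption.
 *
 *	struct drm_buf_info info = { .count = 0, .list = NULL };
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);		// info.count set
 *	info.list = calloc(info.count, sizeof(struct drm_buf_desc));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);		// descriptors filled
 */
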
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request->low_mark;
	entry->freelist.high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error: do_mmap() returns a negative errno
			 * encoded as an address in the top of the range. */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
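
/*
 * Worked examples (illustrative, not in the original source):
 *
 *	drm_order(1)    == 0	(2^0 = 1)
 *	drm_order(4096) == 12	(4096 = 2^12, already a power of two)
 *	drm_order(5000) == 13	(5000 is not a power of two: the loop
 *				 yields 12, and the size & (size - 1)
 *				 test bumps it to the next order up,
 *				 2^13 = 8192)
 *
 * Note that drm_order(0) also returns 0; callers such as the addbufs_*()
 * paths rely on range checks against DRM_MIN_ORDER and DRM_MAX_ORDER
 * rather than on this function validating its input.
 */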