/**
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <linux/export.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	/* Pick an I/O page protection appropriate for the map type and the
	 * architecture, starting from the default protection for this VMA. */
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (map->type != _DRM_AGP) {
		if (map->type == _DRM_FRAME_BUFFER ||
		    map->flags & _DRM_WRITE_COMBINING)
			tmp = pgprot_writecombine(tmp);
		else
			tmp = pgprot_noncached(tmp);
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map->type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
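/*
 * Illustrative note (not from the original source): on x86, a
 * _DRM_FRAME_BUFFER map, or any map carrying _DRM_WRITE_COMBINING, comes back
 * from drm_io_prot() with a write-combining protection, while plain
 * _DRM_REGISTERS maps come back uncached; drm_mmap_locked() below assigns the
 * result directly to vma->vm_page_prot before io_remap_pfn_range().
 */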
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;
#endif

	return tmp;
}
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
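/*
 * Worked example (illustrative only): with map->offset = 0xd0000000, a fault
 * at vma->vm_start + 0x3000 gives baddr = 0xd0003000; if the matching agpmem
 * has bound = 0xd0000000, the page index is (baddr - bound) >> PAGE_SHIFT = 3,
 * so agpmem->memory->pages[3] is the page handed back to the fault handler.
 */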
/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}
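/*
 * Illustrative note: _DRM_SHM maps are vmalloc()ed, so the handler above
 * translates map->handle + offset back into a struct page with
 * vmalloc_to_page(); e.g. a fault one page into the mapping resolves to the
 * page backing (char *)map->handle + PAGE_SIZE.
 */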
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mappings information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev))
					arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			case _DRM_GEM:
				DRM_ERROR("tried to rmmap GEM object\n");
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
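/*
 * Illustrative note: found_maps counts live mappings of this drm_local_map
 * via dev->vmalist; only when this vma was the last user, the map is flagged
 * _DRM_REMOVABLE, and the map is no longer on dev->maplist does the switch
 * above release the backing storage and free the map itself.
 */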
/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}
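/*
 * Worked example (illustrative only): a fault at vma->vm_start + 0x5010 gives
 * offset = 0x5010 and, with 4 KiB pages, page_nr = 5; the page is looked up
 * as virt_to_page(dma->pagelist[5] + 0x10), and the sub-page remainder does
 * not change which struct page is returned.
 */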
/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}
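/*
 * Worked example (illustrative only): if dev->sg->virtual = 0xffffc90010000000
 * and map->offset = 0xffffc90010002000, then map_offset covers two pages; a
 * fault one page into the vma therefore returns entry->pagelist[1 + 2], i.e.
 * entry->pagelist[3].
 */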
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}
/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct drm_device *dev,
			struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}
EXPORT_SYMBOL_GPL(drm_vm_open_locked);
static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}
void drm_vm_close_locked(struct drm_device *dev,
			 struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}
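/*
 * Illustrative note: a client maps the DMA buffer area by mmap()ing with a
 * zero offset and a length of exactly dma->page_count pages (see the exact
 * page count check above); any other length fails with -EINVAL.
 */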
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}
/**
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
	/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);
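/*
 * Userspace sketch (illustrative, not part of this file): a legacy DRM client
 * typically learns a map's user token and size (for example through the
 * DRM_IOCTL_GET_MAP ioctl, or by letting libdrm's drmMap() do the work) and
 * then calls
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, token);
 *
 * The page-shifted token becomes vma->vm_pgoff and selects the drm_local_map
 * through dev->map_hash in drm_mmap_locked() above.
 */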