/**
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
#include "drm_legacy.h"
struct drm_vma_entry {
        struct list_head head;
        struct vm_area_struct *vma;
        pid_t pid;
};
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static pgprot_t drm_io_prot(struct drm_local_map *map,
                            struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
                tmp = pgprot_noncached(tmp);
        else
                tmp = pgprot_writecombine(tmp);
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map->type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                            vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
        tmp = pgprot_noncached(tmp);
#endif

        return tmp;
}
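/*
 * Illustrative sketch (not part of this file): how the protection chosen by
 * drm_io_prot() is typically applied when handing a register map to
 * userspace.  "my_map" and "my_mmap_registers" are hypothetical names; the
 * calls mirror the _DRM_REGISTERS branch of drm_mmap_locked() below.
 */
#if 0
static int my_mmap_registers(struct drm_local_map *my_map,
                             struct vm_area_struct *vma)
{
        /* Pick an uncached or write-combined protection for MMIO. */
        vma->vm_page_prot = drm_io_prot(my_map, vma);

        /* Map the physical range backing the map into the vma. */
        return io_remap_pfn_range(vma, vma->vm_start,
                                  my_map->offset >> PAGE_SHIFT,
                                  vma->vm_end - vma->vm_start,
                                  vma->vm_page_prot);
}
#endif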
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        tmp |= _PAGE_NO_CACHE;
#endif

        return tmp;
}
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;
        /*
         * Find the right map
         */
        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                resource_size_t offset = (unsigned long)vmf->virtual_address -
                        vma->vm_start;
                resource_size_t baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif
                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = agpmem->memory->pages[offset];
                get_page(page);
                vmf->page = page;
164 ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
165 (unsigned long long)baddr
,
166 agpmem
->memory
->pages
[offset
],
167 (unsigned long long)offset
,
172 return VM_FAULT_SIGBUS
; /* Disallow mremap */
#else                           /* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */
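/*
 * Illustrative sketch (not part of this file): the address arithmetic used by
 * the AGP fault handler above, pulled out as a standalone helper.
 * "agp_baddr_to_page" is a hypothetical name; it assumes the same
 * drm_agp_mem layout (bound, pages, memory->pages[]) used above and that the
 * bus address has already been matched to this binding.
 */
#if 0
static struct page *agp_baddr_to_page(struct drm_agp_mem *agpmem,
                                       resource_size_t baddr)
{
        /* Page index of the faulting bus address within this AGP binding. */
        unsigned long idx = (baddr - agpmem->bound) >> PAGE_SHIFT;

        return agpmem->memory->pages[idx];
}
#endif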
/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}
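/*
 * Illustrative sketch (not part of this file): resolving a fault offset
 * inside a vmalloc()ed buffer the way the shm fault handler above does.
 * "vmalloc_offset_to_page" is a hypothetical helper name.
 */
#if 0
static struct page *vmalloc_offset_to_page(void *vmalloc_buf,
                                           unsigned long offset)
{
        /* vmalloc memory is virtually contiguous but physically scattered,
         * so each page has to be looked up individually. */
        return vmalloc_to_page((char *)vmalloc_buf + offset);
}
#endif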
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_local_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        map = vma->vm_private_data;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                }
        }
        /* We were the only map that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist, if we are not, then
                 * we delete this mappings information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }
                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                arch_phys_wc_del(map->mtrr);
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        }
                        kfree(map);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}
/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS; /* Error */
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;  /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((void *)dma->pagelist[page_nr]);

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}
/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS; /* Error */
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_sg_fault(vma, vmf);
}
/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};
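/*
 * Illustrative sketch (not part of this file): the same open/close/fault
 * pattern for a hypothetical driver-private mapping.  "my_vm_fault",
 * "my_vm_open" and "my_vm_close" are assumed names, not functions defined
 * here.
 */
#if 0
static const struct vm_operations_struct my_vm_ops = {
        .fault = my_vm_fault,   /* resolve pages lazily on first access */
        .open = my_vm_open,     /* track the vma while it exists */
        .close = my_vm_close,   /* drop tracking when the vma goes away */
};
#endif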
/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct drm_device *dev,
                        struct vm_area_struct *vma)
{
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}
EXPORT_SYMBOL_GPL(drm_vm_open_locked);
static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(dev, vma);
        mutex_unlock(&dev->struct_mutex);
}
void drm_vm_close_locked(struct drm_device *dev,
                         struct vm_area_struct *vma)
{
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                        break;
                }
        }
}
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(dev, vma);
        mutex_unlock(&dev->struct_mutex);
}
/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->minor->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        drm_vm_open_locked(dev, vma);
        return 0;
}
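/*
 * Illustrative sketch (not part of this file): the "length must match the
 * exact page count" rule above, written as a standalone check.
 * "dma_mmap_size_ok" is a hypothetical helper name.
 */
#if 0
static bool dma_mmap_size_ok(struct drm_device_dma *dma,
                             struct vm_area_struct *vma)
{
        unsigned long length = vma->vm_end - vma->vm_start;

        /* Userspace must map the whole DMA area in one piece, page for page. */
        return dma && (length >> PAGE_SHIFT) == dma->page_count;
}
#endif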
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base;
#else
        return 0;
#endif
}
/**
 * mmap a DRM map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        resource_size_t offset = 0;
        struct drm_hash_item *hash;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;
        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);
        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }
        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;
        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }
        switch (map->type) {
#if !defined(__arm__)
        case _DRM_AGP:
                if (dev->agp && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to bus dma address from the CPU, so for
                         * memory of type DRM_AGP, we'll deal with sorting out the real physical
                         * pages and mappings in fault()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
#endif
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = drm_core_get_reg_ofs(dev);
                vma->vm_page_prot = drm_io_prot(map, vma);
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%llx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid fault */
                if (remap_pfn_range(vma, vma->vm_start,
                                    page_to_pfn(virt_to_page(map->handle)),
                                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        drm_vm_open_locked(dev, vma);
        return 0;
}
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        int ret;

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_mmap);
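/*
 * Illustrative sketch (not part of this file): a legacy DRM driver normally
 * reaches this code by pointing its file_operations .mmap hook at drm_mmap;
 * the other callbacks shown follow the usual legacy DRM pattern.
 * "foo_driver_fops" is a hypothetical name.
 */
#if 0
static const struct file_operations foo_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_mmap,       /* every map type ends up in drm_mmap_locked() */
        .poll = drm_poll,
};
#endif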
void drm_legacy_vma_flush(struct drm_device *dev)
{
        struct drm_vma_entry *vma, *vma_temp;

        /* Clear vma list (only needed for legacy drivers) */
        list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
                list_del(&vma->head);
                kfree(vma);
        }
}
int drm_vma_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_vma_entry *pt;
        struct vm_area_struct *vma;
        unsigned long vma_count = 0;
#if defined(__i386__)
        unsigned int pgprot;
#endif
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(pt, &dev->vmalist, head)
                vma_count++;

        seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
                   vma_count, high_memory,
                   (void *)(unsigned long)virt_to_phys(high_memory));
        list_for_each_entry(pt, &dev->vmalist, head) {
                vma = pt->vma;
                if (!vma)
                        continue;
                seq_printf(m,
                           "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
                           pt->pid,
                           (void *)vma->vm_start, (void *)vma->vm_end,
                           vma->vm_flags & VM_READ ? 'r' : '-',
                           vma->vm_flags & VM_WRITE ? 'w' : '-',
                           vma->vm_flags & VM_EXEC ? 'x' : '-',
                           vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
                           vma->vm_flags & VM_LOCKED ? 'l' : '-',
                           vma->vm_flags & VM_IO ? 'i' : '-',
                           vma->vm_pgoff);
#if defined(__i386__)
                pgprot = pgprot_val(vma->vm_page_prot);
                seq_printf(m, " %c%c%c%c%c%c%c%c%c",
                           pgprot & _PAGE_PRESENT ? 'p' : '-',
                           pgprot & _PAGE_RW ? 'w' : 'r',
                           pgprot & _PAGE_USER ? 'u' : 's',
                           pgprot & _PAGE_PWT ? 't' : 'b',
                           pgprot & _PAGE_PCD ? 'u' : 'c',
                           pgprot & _PAGE_ACCESSED ? 'a' : '-',
                           pgprot & _PAGE_DIRTY ? 'd' : '-',
                           pgprot & _PAGE_PSE ? 'm' : 'k',
                           pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
                seq_printf(m, "\n");
        }
        mutex_unlock(&dev->struct_mutex);
        return 0;
}