/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	struct list_head *list;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_nopage_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_nopage_error;

	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		map = r_list->map;
		if (!map)
			continue;
		if (r_list->user_token == VM_OFFSET(vma))
			break;
	}

	if (map && map->type == _DRM_AGP) {
		unsigned long offset = address - vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (!agpmem)
			goto vm_nopage_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);

		DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
			  baddr, __va(agpmem->memory->memory[offset]), offset,
			  page_count(page));

		return page;
	}
vm_nopage_error:
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	return NOPAGE_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
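
/*
 * Worked example (editorial, illustrative numbers): with 4 KB pages, a fault
 * at vma->vm_start + 0x3000 in an AGP map whose map->offset is 0xe0000000
 * gives baddr = 0xe0003000.  If the matching agpmem entry was bound at
 * 0xe0000000, drm_do_vm_nopage() returns page
 * (0xe0003000 - 0xe0000000) >> PAGE_SHIFT = 3 of that entry's backing store.
 */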
/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!map)
		return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return NOPAGE_OOM;
	get_page(page);

	DRM_DEBUG("shm_nopage 0x%lx\n", address);
	return page;
}
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev, *next;
	drm_map_t *map;
	drm_map_list_t *r_list;
	struct list_head *list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
		next = pt->next;
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		} else {
			prev = pt;
		}
	}
	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mappings information.
		 */
		found_maps = 0;
		list = &dev->maplist->head;
		list_for_each(list, &dev->maplist->head) {
			r_list = list_entry(list, drm_map_list_t, head);
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				drm_ioremapfree(map->handle, map->size, dev);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	up(&dev->struct_sem);
}
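
/*
 * Note (editorial): the teardown above runs only when this was the last VMA
 * referencing the map (found_maps == 1), the map is flagged _DRM_REMOVABLE,
 * and the map is no longer on dev->maplist; a map still on the maplist is
 * left for the normal map-removal/cleanup paths to free.
 */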
/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!dma->pagelist)
		return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);

	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
	return page;
}
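
/*
 * Worked example (editorial, illustrative numbers): with 4 KB pages, a fault
 * at vma->vm_start + 0x2080 gives offset 0x2080, so page_nr is 2 and the
 * faulting kernel virtual address is dma->pagelist[2] + 0x80, whose struct
 * page is then looked up with virt_to_page().
 */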
/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
						   unsigned long address)
{
	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_sg_mem_t *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!entry->pagelist)
		return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);

	return page;
}
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
				     unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_sg_nopage(vma, address);
}
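
/*
 * Note (editorial): the four wrappers above adapt the two-argument helpers
 * to the three-argument ->nopage signature, reporting VM_FAULT_MINOR since
 * the backing page is always found without disk I/O.
 */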
/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.nopage = drm_vm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.nopage = drm_vm_shm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.nopage = drm_vm_dma_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.nopage = drm_vm_sg_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
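
/*
 * Note (editorial): drm_mmap() below selects among these tables by map type:
 * _DRM_AGP, _DRM_FRAME_BUFFER and _DRM_REGISTERS use drm_vm_ops,
 * _DRM_SHM and _DRM_CONSISTENT use drm_vm_shm_ops, _DRM_SCATTER_GATHER uses
 * drm_vm_sg_ops, and offset-zero DMA mappings use drm_vm_dma_ops.
 */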
/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		down(&dev->struct_sem);
		vma_entry->vma = vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid = current->pid;
		dev->vmalist = vma_entry;
		up(&dev->struct_sem);
	}
}
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	up(&dev->struct_sem);
}
/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_device_dma_t *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	lock_kernel();
	dev = priv->head->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}
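
/*
 * Illustrative sketch (editorial, not built): how a user-space client ends
 * up in drm_mmap_dma().  Mapping with offset 0 selects the DMA path, and
 * the length must equal dma->page_count << PAGE_SHIFT exactly.  The device
 * node path below is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <sys/mman.h>

static void *map_drm_dma(unsigned long length)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return MAP_FAILED;
	/* offset 0 => drm_mmap() dispatches to drm_mmap_dma() */
	return mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}
#endif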
unsigned long drm_core_get_map_ofs(drm_map_t *map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);
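
/*
 * Note (editorial, hedged): on Alpha the extra register offset accounts for
 * the PCI "hose" -- mappable register apertures live in the hose's dense
 * memory region, so the CPU-visible address differs from the bus address by
 * dense_mem_base - mem_space->start.  Other architectures need no
 * adjustment, hence the 0.
 */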
/**
 * mmap a DRM memory map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	unsigned long offset = 0;
	struct list_head *list;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!VM_OFFSET(vma)
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	/* A sequential search of a linked list is
	   fine here because: 1) there will only be
	   about 5-10 entries in the list and, 2) a
	   DRI client only has to do this mapping
	   once, so it doesn't have to be optimized
	   for performance, even if the list was a
	   bit longer. */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		map = r_list->map;
		if (!map)
			continue;
		if (r_list->user_token == VM_OFFSET(vma))
			break;
	}

	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in nopage()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
#if defined(__i386__) || defined(__x86_64__)
		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
		}
#elif defined(__powerpc__)
		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
		if (map->type == _DRM_REGISTERS)
			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
#endif
		vma->vm_flags |= VM_IO;	/* not in core dump */
#if defined(__ia64__)
		if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
			vma->vm_page_prot =
			    pgprot_writecombine(vma->vm_page_prot);
		else
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		offset = dev->driver->get_reg_ofs(dev);
#ifdef __sparc__
		if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#else
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#endif
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_SHM:
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. It's only
		 * allocated in a different way. */
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}

EXPORT_SYMBOL(drm_mmap);
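
/*
 * Illustrative sketch (editorial, not built): mapping a non-DMA map from
 * user space.  The mmap offset must be the map's user_token, which
 * drm_mmap() matches against r_list->user_token, and the length must equal
 * map->size exactly.  The token and size are placeholders a client would
 * normally obtain from the X server or a map-listing interface.
 */
#if 0
#include <sys/mman.h>

static void *map_drm_map(int fd, unsigned long user_token, unsigned long size)
{
	/* non-zero offset => drm_mmap() looks the map up by its token */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, (off_t) user_token);
}
#endif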