/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * If we're in the fixed PCI memory range, mapping through page
	 * tables is not only pointless, but also fundamentally broken.
	 * Just return the physical address instead.
	 *
	 * For boards that map a small PCI memory aperture somewhere in
	 * P1/P2 space, ioremap() will already do the right thing,
	 * and we'll never get this far.
	 */
	if (is_pci_memory_fixed_range(phys_addr, size))
		return (void __iomem *)phys_addr;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_PMB
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
	if (unlikely(phys_addr >= P1SEG)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr += mapped;
			phys_addr += mapped;
			size -= mapped;
		}
	}
#endif

	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
	if (likely(size))
		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap);
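
/*
 * Illustrative usage sketch (not part of the original file): how a
 * driver might consume the mapping __ioremap() hands back, via the
 * generic ioremap()/iounmap() wrappers. The device base address, size
 * and register offset below are hypothetical. Note that the base is
 * deliberately not page-aligned: __ioremap() aligns the mapping
 * internally and re-applies the sub-page offset on return, so the
 * caller never sees that detail.
 */
#if 0
static int demo_probe(void)
{
	void __iomem *regs;
	u32 status;

	/* Hypothetical device registers at an unaligned physical base */
	regs = ioremap(0xfd000010UL, 0x100);
	if (!regs)
		return -ENOMEM;

	status = readl(regs + 0x04);		/* hypothetical status register */
	writel(status | 0x1, regs + 0x04);

	iounmap(regs);
	return 0;
}
#endif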

void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	unsigned long seg = PXSEG(vaddr);
	struct vm_struct *p;

	if (seg < P3SEG || vaddr >= P3_ADDR_MAX)
		return;
	if (is_pci_memory_fixed_range(vaddr, 0))
		return;

#ifdef CONFIG_PMB
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 * -- PFM.
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
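
/*
 * Illustrative sketch (not part of the original file): the guard and
 * alignment arithmetic from the top of __ioremap(), traced with
 * concrete numbers. Assumes PAGE_SIZE == 4096 (PAGE_MASK == ~0xfffUL)
 * on a 32-bit address space; all values are hypothetical.
 */
#if 0
static void demo_alignment(void)
{
	unsigned long phys_addr = 0x1f001234UL;	/* hypothetical, unaligned */
	unsigned long size = 0x100;
	unsigned long last_addr, offset;

	/*
	 * Wraparound guard: e.g. phys_addr = 0xfffff000 with size =
	 * 0x2000 yields last_addr = 0x0fff < phys_addr on 32-bit, so
	 * __ioremap() rejects it just like a zero size.
	 */
	last_addr = phys_addr + size - 1;		/* 0x1f001333 */

	offset = phys_addr & ~PAGE_MASK;		/* 0x234 */
	phys_addr &= PAGE_MASK;				/* 0x1f001000 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;	/* 0x1000 */

	/*
	 * One page covering [0x1f001000, 0x1f002000) gets mapped, and
	 * the caller receives the mapping's address plus 0x234, as on
	 * the return path of __ioremap().
	 */
}
#endif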