/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
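/*
 * For illustration only - a minimal sketch of how a driver consumes
 * such a mapping (MY_DEV_PHYS and MY_DEV_CTRL are made-up names, not
 * part of this file):
 *
 *      void __iomem *base = ioremap(MY_DEV_PHYS, SZ_4K);
 *      if (!base)
 *              return -ENOMEM;
 *      writel(1, base + MY_DEV_CTRL);
 *      iounmap(base);
 */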
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING  0x80000000

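/*
 * Map one page of I/O memory at the kernel virtual address 'virt',
 * using the PTE protection bits of the supplied memory type.
 */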
int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
{
        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

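/*
 * Catch this mm up with init_mm's kernel mappings: copy the pgd
 * entries covering the vmalloc/ioremap region, retrying until the
 * sequence number stops changing under us.
 */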
void __check_kvm_seq(struct mm_struct *mm)
{
        unsigned int seq;

        do {
                seq = init_mm.context.kvm_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
                mm->context.kvm_seq = seq;
        } while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, if an interrupt on one of those other CPUs requires the
 * newly ioremap'd region to be referenced, the CPU will reference the
 * _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
78 | static void unmap_area_sections(unsigned long virt, unsigned long size) | |
79 | { | |
24f11ec0 | 80 | unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1)); |
ff0daca5 RK |
81 | pgd_t *pgd; |
82 | ||
83 | flush_cache_vunmap(addr, end); | |
84 | pgd = pgd_offset_k(addr); | |
85 | do { | |
86 | pmd_t pmd, *pmdp = pmd_offset(pgd, addr); | |
87 | ||
88 | pmd = *pmdp; | |
89 | if (!pmd_none(pmd)) { | |
90 | /* | |
91 | * Clear the PMD from the page table, and | |
92 | * increment the kvm sequence so others | |
93 | * notice this change. | |
94 | * | |
95 | * Note: this is still racy on SMP machines. | |
96 | */ | |
97 | pmd_clear(pmdp); | |
98 | init_mm.context.kvm_seq++; | |
99 | ||
100 | /* | |
101 | * Free the page table, if there was one. | |
102 | */ | |
103 | if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) | |
5e541973 | 104 | pte_free_kernel(&init_mm, pmd_page_vaddr(pmd)); |
ff0daca5 RK |
105 | } |
106 | ||
107 | addr += PGDIR_SIZE; | |
108 | pgd++; | |
109 | } while (addr < end); | |
110 | ||
111 | /* | |
112 | * Ensure that the active_mm is up to date - we want to | |
113 | * catch any use-after-iounmap cases. | |
114 | */ | |
115 | if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq) | |
116 | __check_kvm_seq(current->active_mm); | |
117 | ||
118 | flush_tlb_kernel_range(virt, end); | |
119 | } | |
120 | ||
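/*
 * In the two remap routines below, pmd[0] and pmd[1] are both written
 * because an ARM Linux pgd entry spans 2MB, i.e. a pair of 1MB hardware
 * section entries - which is also why addr steps by PGDIR_SIZE.
 */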
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(addr);
        do {
                pmd_t *pmd = pmd_offset(pgd, addr);

                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PGDIR_SIZE;
                pgd++;
        } while (addr < end);

        return 0;
}

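/*
 * Supersections (ARMv6 and XSC3) map 16MB apiece: the descriptor must
 * be replicated across 16 consecutive 1MB entries (the 8 * 2 writes
 * below), and bits [23:20] carry physical address bits [35:32] so that
 * memory above 4GB can be reached.
 */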
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(virt);
        do {
                unsigned long super_pmd_val, i;

                super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                                PMD_SECT_SUPER;
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

                for (i = 0; i < 8; i++) {
                        pmd_t *pmd = pmd_offset(pgd, addr);

                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);

                        addr += PGDIR_SIZE;
                        pgd++;
                }

                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
        } while (addr < end);

        return 0;
}
#endif

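/*
 * The caller-tracking ioremap core. Mapping strategy, in decreasing
 * order of preference (the first two on non-SMP only): 16MB
 * supersections, 1MB sections, then 4K pages via ioremap_page_range().
 */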
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;

        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                return NULL;

        /*
         * Don't allow RAM to be mapped - this causes problems with ARMv6+
         */
        if (WARN_ON(pfn_valid(pfn)))
                return NULL;

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
             cpu_is_xsc3()) && pfn >= 0x100000 &&
            !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
                                         __pgprot(type->prot_pte));

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
        unsigned int mtype, void *caller)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
        return __arm_ioremap_caller(phys_addr, size, mtype,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
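
/*
 * For illustration only - a sketch of a typical call (the physical
 * address is made up; MT_DEVICE comes from <asm/mach/map.h>):
 *
 *      void __iomem *regs = __arm_ioremap(0x48000000, SZ_4K, MT_DEVICE);
 *      if (!regs)
 *              return -ENOMEM;
 */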

void __iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
        struct vm_struct **p, *tmp;

        /*
         * If this is a section-based mapping we need to handle it
         * specially as the VM subsystem does not know how to handle
         * such a beast. We need the lock here because we need to clear
         * all the mappings before the area can be reclaimed
         * by someone else.
         */
        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p); p = &tmp->next) {
                if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
                        if (tmp->flags & VM_ARM_SECTION_MAPPING) {
                                unmap_area_sections((unsigned long)tmp->addr,
                                                    tmp->size);
                        }
                        break;
                }
        }
        write_unlock(&vmlist_lock);
#endif

        vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);