/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

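/*
 * remap_area_pte/pmd/pages walk the kernel page tables (pgd -> pmd -> pte)
 * for the virtual range being mapped and install one PTE per 4K page,
 * pointing at the caller-supplied physical address.  This is the generic
 * fall-back path, used whenever a request cannot be satisfied with 1MB
 * sections or 16MB supersections.
 */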
static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
			  unsigned long phys_addr, pgprot_t prot)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
}

static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
				 unsigned long end, unsigned long phys_addr,
				 pgprot_t prot)
{
	unsigned long next;
	pmd_t *pmd;
	int ret = 0;

	pmd = pmd_alloc(&init_mm, pgd, addr);
	if (!pmd)
		return -ENOMEM;

	do {
		next = pmd_addr_end(addr, end);
		ret = remap_area_pte(pmd, addr, next, phys_addr, prot);
		if (ret)
			return ret;
		phys_addr += next - addr;
	} while (pmd++, addr = next, addr != end);
	return ret;
}

static int remap_area_pages(unsigned long start, unsigned long pfn,
			    unsigned long size, unsigned long flags)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE | flags);
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, prot);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}

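/*
 * Kernel ioremap/vmalloc mappings live in init_mm, but every task has
 * its own copy of the corresponding first-level (pgd) entries in its
 * page directory.  Whenever ioremap/iounmap changes one of those
 * entries, init_mm.context.kvm_seq is incremented; a task whose page
 * directory is stale catches up here by re-copying the pgd slots for
 * VMALLOC_START..VMALLOC_END from init_mm.  ("kvm" is short for kernel
 * virtual memory and has nothing to do with the KVM hypervisor.)
 */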
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~SZ_1M);
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

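/*
 * Build the mapping out of 1MB section entries in the first-level
 * table.  On ARM a Linux "pgd" entry covers 2MB and is backed by two
 * consecutive 1MB hardware section descriptors, which is why pmd[0]
 * and pmd[1] are both written on every iteration below.
 */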
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

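/*
 * Supersections map 16MB at a time.  The architecture requires the same
 * descriptor to be replicated into all 16 first-level slots covering
 * the region (eight 2MB pgd steps, two hardware entries each).  Bits
 * [23:20] of a supersection descriptor hold physical address bits
 * [35:32], which is what the (pfn >> (32 - PAGE_SHIFT)) & 0xf term
 * below supplies for mappings above 4GB.
 */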
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
	       PMD_DOMAIN(DOMAIN_IO) |
	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | prot;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif


/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
 */
void __iomem *
__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
	      unsigned long flags)
{
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

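	/*
	 * On UP, use the largest mapping that fits: 16MB supersections
	 * when the CPU supports them (ARMv6 with the extended page table
	 * format, or XScale3) and the physical address, size and virtual
	 * address are all supersection aligned; 1MB sections when
	 * everything is section aligned; otherwise fall back to 4K pages.
	 */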
#ifndef CONFIG_SMP
	if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	     cpu_is_xsc3()) &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, flags);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, flags);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, flags);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__ioremap_pfn);

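/*
 * __ioremap() accepts an arbitrary, possibly unaligned physical address:
 * it splits it into a page frame number plus an in-page offset, extends
 * the size up to the end of the page containing the last byte, and adds
 * the offset back into the cookie returned by __ioremap_pfn().  For
 * example (addresses purely illustrative), remapping 0x10001234 gives
 * pfn 0x10001 and offset 0x234, so the mapping starts at physical
 * 0x10001000 and the caller gets back the new virtual base plus 0x234.
 */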
void __iomem *
__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Page align the mapping size
	 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	return __ioremap_pfn(pfn, offset, size, flags);
}
EXPORT_SYMBOL(__ioremap);

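/*
 * Typical driver usage of this pair, through the ioremap()/iounmap()
 * wrappers in <asm/io.h>, looks roughly like the sketch below (the
 * register offsets are made up for illustration):
 *
 *	void __iomem *regs = ioremap(phys_base, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_OFFSET);
 *	status = readl(regs + STATUS_OFFSET);
 *	iounmap(regs);
 */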
void __iounmap(volatile void __iomem *addr)
{
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;
#endif
	unsigned int section_mapping = 0;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);

#ifndef CONFIG_SMP
	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here because we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p); p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				*p = tmp->next;
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
				kfree(tmp);
				section_mapping = 1;
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	if (!section_mapping)
		vunmap((void __force *)addr);
}
EXPORT_SYMBOL(__iounmap);