ARM: provide runtime hook for ioremap/iounmap
arch/arm/mm/ioremap.c
/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
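/*
 * Illustrative usage sketch (not part of this file; the base address,
 * register offsets and size are made-up values):
 *
 *	void __iomem *regs = __arm_ioremap(0x10000000, SZ_4K, MT_DEVICE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x4);
 *	status = readl(regs + 0x8);
 *	__arm_iounmap(regs);
 */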
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

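/*
 * Copy the kernel's vmalloc/ioremap pgd entries from init_mm into the
 * given mm.  The "kvm" in kvm_seq refers to kernel virtual mappings
 * (it predates and is unrelated to the KVM hypervisor): the counter is
 * bumped whenever a kernel section mapping is torn down, and an mm that
 * observes a stale count resynchronizes itself here.
 */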
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

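/*
 * A note on the pairwise PMD writes used throughout this block: on
 * classic (non-LPAE) ARM a Linux PMD covers 2MB and is backed by two
 * consecutive 1MB hardware section entries, so each loop iteration
 * fills pmd[0] and pmd[1], then advances the address by PMD_SIZE and
 * the pointer by two entries.
 */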
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

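/*
 * ARM supersections map 16MB apiece.  For physical addresses above 4GB
 * the extended base address bits [35:32] live in bits [23:20] of the
 * descriptor, and the architecture requires the descriptor to be
 * replicated across all 16 consecutive section entries it covers --
 * hence the 8 iterations of paired PMD writes below.
 */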
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size - 1 > area->phys_addr + area->size - 1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
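
/*
 * Mapping strategy recap: 16MB-aligned requests for physical addresses
 * above 4GB (pfn >= 0x100000) use supersections, PMD-aligned (2MB)
 * requests use pairs of 1MB sections (!SMP, !LPAE kernels only), and
 * everything else falls back to 4K page mappings created with
 * ioremap_page_range().
 */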

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

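/*
 * arch_ioremap_caller and arch_iounmap (further down) are the runtime
 * hooks this change introduces.  They default to the generic
 * implementations above, but a platform with special remapping needs
 * can repoint them during early init; the exported __arm_ioremap() and
 * __arm_iounmap() wrappers dispatch through these pointers.
 */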
void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return arch_ioremap_caller(phys_addr, size, mtype,
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory, for example when reprogramming source
 * clocks that would affect normal memory. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY;
	else
		mtype = MT_MEMORY_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void __arm_iounmap(volatile void __iomem *io_addr)
{
	arch_iounmap(io_addr);
}
EXPORT_SYMBOL(__arm_iounmap);
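
/*
 * Hypothetical override from a machine's early init code (illustrative
 * sketch; my_ioremap_caller and my_iounmap are made-up names, not part
 * of any platform):
 *
 *	static void __init my_machine_init_early(void)
 *	{
 *		arch_ioremap_caller = my_ioremap_caller;
 *		arch_iounmap = my_iounmap;
 *	}
 */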