ARM: move CP15 definitions to separate header file
arch/arm/mm/ioremap.c
/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
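/*
 * Illustrative usage sketch (not part of the original file; the device
 * address and register offsets are hypothetical): a driver maps a bus
 * region, accesses it only through the I/O accessors, then unmaps it.
 *
 *	void __iomem *regs = ioremap(0x40000000, SZ_4K);
 *	if (regs) {
 *		u32 id = readl(regs);		// read a (hypothetical) ID register
 *		writel(1, regs + 0x4);		// hypothetical enable bit
 *		iounmap(regs);
 *	}
 */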
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
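/*
 * Bring this mm's vmalloc-area PGD entries back in sync with init_mm's.
 * The copy is retried until init_mm.context.kvm_seq reads the same
 * before and after it, so an update racing with the copy restarts it.
 */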
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a 4K guard page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

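/*
 * Note on the pmd[0]/pmd[1] pairs below: on classic (non-LPAE) ARM,
 * Linux folds two 1MB hardware section entries into each 2MB PMD, so
 * section mappings are written two entries at a time and the walk
 * advances by PMD_SIZE (2MB) with pmd += 2.
 */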
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
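/*
 * Supersections are 16MB hardware mappings. Each one must be repeated
 * in sixteen consecutive 1MB entries (eight iterations of the loop
 * below, two entries per iteration), and bits [23:20] of the descriptor
 * carry bits [35:32] of the physical address for >4GB targets.
 */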
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

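/*
 * Core remap helper: reuse a matching static mapping if one covers the
 * requested physical range; otherwise allocate a vm area and map it
 * with supersections, sections or individual pages, whichever the
 * alignment and CPU allow.
 */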
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct * area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
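		/*
		 * A zero size, or a physical address at or above 4GB
		 * when phys_addr_t is only 32 bits wide, can never
		 * match a static mapping: stop scanning.
		 */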
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

	/*
	 * Don't allow RAM to be mapped: a second mapping with different
	 * memory type attributes would alias the existing kernel
	 * mapping, which has unpredictable results on ARMv6+.
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	      cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
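/*
 * Illustrative usage sketch (hypothetical device address): the mtype
 * argument selects an entry from the mem_types table, e.g. MT_DEVICE
 * for strongly-ordered device registers:
 *
 *	void __iomem *base = __arm_ioremap(0x10000000, SZ_4K, MT_DEVICE);
 *	if (base)
 *		writel(1, base);	// hypothetical control register
 */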

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory, for example when reprogramming source
 * clocks that would affect normal memory. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY;
	else
		mtype = MT_MEMORY_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}

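/*
 * Tear down a mapping created by one of the ioremap variants. Static
 * mappings are deliberately left in place; section-based mappings need
 * the explicit unmap_area_sections() pass because the generic vmalloc
 * code only knows how to tear down page-based mappings.
 */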
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);