/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
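/*
 * A minimal usage sketch (illustrative only; the physical base address
 * and register offsets below are hypothetical, not from a real device):
 *
 *	u32 status;
 *	void __iomem *regs = __arm_ioremap(0x40000000, SZ_4K, MT_DEVICE);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);
 *	status = readl(regs + 0x08);
 *	iounmap(regs);
 */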
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

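/*
 * remap_area_pte/pmd/pages below walk the kernel page tables
 * (pgd -> pmd -> pte) and install small-page mappings for the requested
 * physical range; this is the fallback path used whenever a
 * (super)section mapping cannot be used.
 */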
static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
			  unsigned long phys_addr, const struct mem_type *type)
{
	pgprot_t prot = __pgprot(type->prot_pte);
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
}

static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
				 unsigned long end, unsigned long phys_addr,
				 const struct mem_type *type)
{
	unsigned long next;
	pmd_t *pmd;
	int ret = 0;

	pmd = pmd_alloc(&init_mm, pgd, addr);
	if (!pmd)
		return -ENOMEM;

	do {
		next = pmd_addr_end(addr, end);
		ret = remap_area_pte(pmd, addr, next, phys_addr, type);
		if (ret)
			return ret;
		phys_addr += next - addr;
	} while (pmd++, addr = next, addr != end);
	return ret;
}

static int remap_area_pages(unsigned long start, unsigned long pfn,
			    size_t size, const struct mem_type *type)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, type);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}

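/*
 * Bring a task's view of the kernel (vmalloc) page tables up to date
 * with init_mm.  unmap_area_sections() below bumps
 * init_mm.context.kvm_seq after tearing down a section mapping; a
 * caller that notices a stale kvm_seq copies the kernel pgd entries
 * covering the vmalloc area into its own mm here.
 */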
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
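/*
 * Worked example (assuming 4K pages): get_vm_area() for a 1MB request
 * returns an area whose ->size is 1MB + 4K because of the guard page.
 * Masking with ~(SZ_1M - 1) brings that back down to 1MB, so the
 * do/while loop below stops exactly at the end of the section-mapped
 * range instead of running one PGDIR past it.
 */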
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

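/*
 * On the classic ARM 2-level page tables a Linux pgd/pmd entry covers
 * PGDIR_SIZE (2MB) and is backed by two 1MB hardware section entries,
 * which is why each iteration below writes both pmd[0] and pmd[1] and
 * advances pfn by SZ_1M twice.
 */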
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

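/*
 * A supersection maps 16MB with a single descriptor that must be
 * replicated across 16 consecutive hardware entries (eight Linux pgd
 * slots of two sections each, hence the inner loop below).  Physical
 * address bits [35:32] are carried in descriptor bits [23:20], which
 * is what the (pfn >> (32 - PAGE_SHIFT)) term encodes.
 */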
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'mtype' selects the memory type (and thus the L_PTE_ flags) used for
 * this mapping; see get_mem_type() and <asm/pgtable.h> for more
 * information.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned; pfn >= 0x100000
	 * corresponds to physical addresses at or above 4GB, which can
	 * only be reached through supersection mappings.
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	     cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, type);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

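/*
 * Worked example of the pfn/offset split done below (hypothetical
 * address): for phys_addr = 0x10003004 and size = 0x100,
 * pfn = 0x10003 and offset = 0x004; __arm_ioremap_pfn() then maps the
 * whole page at physical 0x10003000 and the returned cookie points
 * 0x004 bytes into that mapping.
 */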
void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
EXPORT_SYMBOL(__arm_ioremap);

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here because we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p); p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);