/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

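/*
 * Establish a single page mapping at 'virt' pointing at physical address
 * 'phys', using the page protection bits of the given memory type.
 */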
int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
{
        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

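/*
 * Bring this mm's view of the vmalloc region up to date with init_mm.
 * Kernel mappings above VMALLOC_START live in init_mm's page tables and
 * are copied lazily into each process's pgd; init_mm.context.vmalloc_seq
 * is bumped whenever unmap_area_sections() tears down a section mapping,
 * and the copy loop below retries until it observes a stable sequence.
 */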
void __check_vmalloc_seq(struct mm_struct *mm)
{
        unsigned int seq;

        do {
                seq = init_mm.context.vmalloc_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
                mm->context.vmalloc_seq = seq;
        } while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmdp;

        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmdp = pmd_offset(pud, addr);
        do {
                pmd_t pmd = *pmdp;

                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
                         * increment the vmalloc sequence so others
                         * notice this change.
                         *
                         * Note: this is still racy on SMP machines.
                         */
                        pmd_clear(pmdp);
                        init_mm.context.vmalloc_seq++;

                        /*
                         * Free the page table, if there was one.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                }

                addr += PMD_SIZE;
                pmdp += 2;
        } while (addr < end);

        /*
         * Ensure that the active_mm is up to date - we want to
         * catch any use-after-iounmap cases.
         */
        if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
                __check_vmalloc_seq(current->active_mm);

        flush_tlb_kernel_range(virt, end);
}

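/*
 * Map 'size' bytes at 'virt' using 1MB section entries.  On classic
 * (non-LPAE) ARM, Linux folds two 1MB hardware first-level entries into
 * each 2MB PMD, so sections are written in pairs (pmd[0]/pmd[1]) and the
 * loop advances by PMD_SIZE with pmd += 2.
 */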
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PMD_SIZE;
                pmd += 2;
        } while (addr < end);

        return 0;
}

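/*
 * Map 'size' bytes at 'virt' using 16MB supersection entries.  A
 * supersection must be repeated in 16 consecutive first-level entries
 * (8 trips around the pair-writing loop below), and bits [23:20] of the
 * descriptor carry physical address bits [35:32], which is what allows
 * mapping above 4GB on ARMv6+/XSC3.
 */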
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(virt);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                unsigned long super_pmd_val, i;

                super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                                PMD_SECT_SUPER;
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

                for (i = 0; i < 8; i++) {
                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);

                        addr += PMD_SIZE;
                        pmd += 2;
                }

                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
        } while (addr < end);

        return 0;
}
#endif

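/*
 * Core of the ARM ioremap implementation.  Look up the requested memory
 * type, try to satisfy the request from a pre-existing static mapping,
 * and otherwise allocate a vmalloc-space area and map it with
 * supersections, sections or individual pages as alignment permits.
 */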
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                return NULL;
#endif

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        /*
         * Try to reuse one of the static mappings whenever possible.
         */
        read_lock(&vmlist_lock);
        for (area = vmlist; area; area = area->next) {
                if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
                        break;
                if (!(area->flags & VM_ARM_STATIC_MAPPING))
                        continue;
                if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
                        continue;
                if (__phys_to_pfn(area->phys_addr) > pfn ||
                    __pfn_to_phys(pfn) + size - 1 > area->phys_addr + area->size - 1)
                        continue;
                /* we can drop the lock here as we know *area is static */
                read_unlock(&vmlist_lock);
                addr = (unsigned long)area->addr;
                addr += __pfn_to_phys(pfn) - area->phys_addr;
                return (void __iomem *) (offset + addr);
        }
        read_unlock(&vmlist_lock);

        /*
         * Don't allow RAM to be mapped - this causes problems with ARMv6+
         */
        if (WARN_ON(pfn_valid(pfn)))
                return NULL;

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
        area->phys_addr = __pfn_to_phys(pfn);

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
               cpu_is_xsc3()) && pfn >= 0x100000 &&
            !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
                                         __pgprot(type->prot_pte));

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}

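/*
 * Split a physical address into a page frame number plus the offset
 * within the page, rejecting zero-sized and wrapping requests, then
 * hand off to __arm_ioremap_pfn_caller().
 */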
void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
        unsigned int mtype, void *caller)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
                                      unsigned int, void *) =
        __arm_ioremap_caller;

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
        return arch_ioremap_caller(phys_addr, size, mtype,
                __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

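/*
 * Hypothetical usage sketch (the base address and register offset below
 * are made up for illustration): map 4K of a device, read a register,
 * then tear the mapping down again:
 *
 *      void __iomem *regs = __arm_ioremap(0x10120000, SZ_4K, MT_DEVICE);
 *      if (regs) {
 *              u32 val = readl(regs + 0x10);
 *              __arm_iounmap(regs);
 *      }
 */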
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory, for example when reprogramming source
 * clocks that would affect normal memory. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
        unsigned int mtype;

        if (cached)
                mtype = MT_MEMORY;
        else
                mtype = MT_MEMORY_NONCACHED;

        return __arm_ioremap_caller(phys_addr, size, mtype,
                        __builtin_return_address(0));
}

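/*
 * Undo an ioremap.  Static mappings are never torn down; section-based
 * mappings get their first-level entries cleared by unmap_area_sections()
 * before the vm area itself is released, since the generic vmalloc code
 * only knows how to unmap PTE-based mappings.
 */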
void __iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
        struct vm_struct *vm;

        read_lock(&vmlist_lock);
        for (vm = vmlist; vm; vm = vm->next) {
                if (vm->addr > addr)
                        break;
                if (!(vm->flags & VM_IOREMAP))
                        continue;
                /* If this is a static mapping we must leave it alone */
                if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
                    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
                        read_unlock(&vmlist_lock);
                        return;
                }
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
                /*
                 * If this is a section based mapping we need to handle it
                 * specially as the VM subsystem does not know how to handle
                 * such a beast.
                 */
                if ((vm->addr == addr) &&
                    (vm->flags & VM_ARM_SECTION_MAPPING)) {
                        unmap_area_sections((unsigned long)vm->addr, vm->size);
                        break;
                }
#endif
        }
        read_unlock(&vmlist_lock);

        vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void __arm_iounmap(volatile void __iomem *io_addr)
{
        arch_iounmap(io_addr);
}
EXPORT_SYMBOL(__arm_iounmap);

#ifdef CONFIG_PCI
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
        BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);

        return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
                                  PCI_IO_VIRT_BASE + offset + SZ_64K,
                                  phys_addr,
                                  __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
#endif