/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
                               void *arg)
{
        unsigned long i;

        for (i = 0; i < nr_pages; ++i)
                if (pfn_valid(start_pfn + i) &&
                    !PageReserved(pfn_to_page(start_pfn + i)))
                        return 1;

        WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

        return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long offset, vaddr;
        resource_size_t pfn, last_pfn, last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;
        int ram_region;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        /* First check if the whole region can be identified as RAM or not */
        ram_region = region_is_ram(phys_addr, size);
        if (ram_region > 0) {
                WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
                          (unsigned long int)phys_addr,
                          (unsigned long int)last_addr);
                return NULL;
        }

        /* If the region could not be identified (-1), check page by page */
        if (ram_region < 0) {
                pfn = phys_addr >> PAGE_SHIFT;
                last_pfn = last_addr >> PAGE_SHIFT;
                if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
                                          __ioremap_check_ram) == 1)
                        return NULL;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PHYSICAL_PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                 prot_val, &new_prot_val);
        if (retval) {
                printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                if (!is_new_memtype_allowed(phys_addr, size,
                                pgprot2cachemode(__pgprot(prot_val)),
                                pgprot2cachemode(__pgprot(new_prot_val)))) {
                        printk(KERN_ERR
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        goto err_free_memtype;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL_IO;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                goto err_free_memtype;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;

        if (kernel_map_sync_memtype(phys_addr, size, prot_val))
                goto err_free_area;

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
                goto err_free_area;

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        /*
         * Check if the request spans more than any BAR in the iomem resource
         * tree.
         */
        WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
                  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

        return ret_addr;
err_free_area:
        free_vm_area(area);
err_free_memtype:
        free_memtype(phys_addr, phys_addr + size);
        return NULL;
}
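
/*
 * Worked example (editor's illustration, not part of the original file):
 * assuming PAGE_SIZE == 4096, a request for phys_addr = 0xfed00004 and
 * size = 0x10 gives
 *
 *      last_addr = 0xfed00004 + 0x10 - 1                = 0xfed00013
 *      offset    = 0xfed00004 & ~PAGE_MASK              = 0x4
 *      phys_addr = 0xfed00004 & PHYSICAL_PAGE_MASK      = 0xfed00000
 *      size      = PAGE_ALIGN(0xfed00014) - 0xfed00000  = 0x1000
 *
 * so one full page is mapped and the caller receives vaddr + 0x4 back,
 * hiding the alignment fixup exactly as the comment above the function
 * promises.
 */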

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
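
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the BAR address and register offsets below are made up; a real driver
 * would take them from its bus resources. Guarded out so the file still
 * compiles unchanged.
 */
#if 0
static int example_probe(void)
{
        void __iomem *regs;

        /* hypothetical 4 KiB MMIO register window */
        regs = ioremap_nocache(0xfebf0000, 0x1000);
        if (!regs)
                return -ENOMEM;

        writel(0x1, regs + 0x10);       /* hypothetical enable register */
        (void)readl(regs + 0x14);       /* hypothetical status register */

        iounmap(regs);
        return 0;
}
#endif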

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
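
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * write combining suits prefetchable BARs such as framebuffers, where
 * streaming writes dominate. BAR 0 here is an assumption. Note that the
 * function above silently falls back to ioremap_nocache() without PAT.
 */
#if 0
static void __iomem *example_map_fb(struct pci_dev *pdev)
{
        return ioremap_wc(pci_resource_start(pdev, 0),
                          pci_resource_len(pdev, 0));
}
#endif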

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                           unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        p = find_vm_area((void __force *)addr);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
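
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the xlate/unxlate pair is meant to bracket each /dev/mem-style access,
 * roughly as below. For RAM pages unxlate_dev_mem_ptr() is a no-op, since
 * xlate_dev_mem_ptr() returned a direct-mapped address.
 */
#if 0
static int example_peek(unsigned long phys, u8 *out)
{
        void *ptr = xlate_dev_mem_ptr(phys);

        if (!ptr)
                return -EFAULT;
        *out = *(u8 *)ptr;
        unxlate_dev_mem_ptr(phys, ptr);
        return 0;
}
#endif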

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
        return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

#ifdef CONFIG_X86_64
        BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
        WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

        early_ioremap_setup();

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init __early_set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}
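
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the idiom the early_ioremap machinery builds on — point a boot-time
 * fixmap slot at a physical page, access it through fix_to_virt(), then
 * clear the pte with an empty pgprot. The slot choice and FIXMAP_PAGE_IO
 * here are assumptions for illustration.
 */
#if 0
static void __init example_early_peek(phys_addr_t phys)
{
        /* map: install an uncached pte for the slot */
        __early_set_fixmap(FIX_BTMAP_BEGIN, phys & PAGE_MASK, FIXMAP_PAGE_IO);

        /* ... access via (void *)fix_to_virt(FIX_BTMAP_BEGIN) ... */

        /* unmap: a zero pgprot clears the pte again */
        __early_set_fixmap(FIX_BTMAP_BEGIN, 0, __pgprot(0));
}
#endif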