/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

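/*
 * __phys_addr() handles both kinds of 64-bit kernel virtual address:
 * addresses in the kernel text mapping (at or above __START_KERNEL_map)
 * and addresses in the direct mapping of physical memory (starting at
 * PAGE_OFFSET).
 */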
unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

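/*
 * Reject physical addresses beyond what the CPU can address;
 * x86_phys_bits is the physical address width reported by CPUID.
 */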
static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

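/*
 * Return 1 if the given page frame number lies in a usable (E820_RAM)
 * region of the BIOS-provided e820 map, 0 otherwise.
 */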
int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4Kb of memory; this is a BIOS-owned
         * area, not kernel RAM, but generally not listed as such in the
         * E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: Some BIOSen report the PC BIOS
         * area (640k->1Mb) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped.
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using.
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
             (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
             pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                 prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types for certain
                 * requested types:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
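
/*
 * Illustrative usage sketch (not code from this file): a typical PCI
 * driver maps a BAR uncached, accesses device registers through the
 * mmio helpers, and unmaps the region again when done. "pdev" and
 * "REG_STATUS" below are made-up names used only for illustration:
 *
 *      void __iomem *regs;
 *      u32 status;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      status = readl(regs + REG_STATUS);
 *      ...
 *      iounmap(regs);
 */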

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size:      size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

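/*
 * ioremap_cache - map bus memory into CPU space, write-back cacheable
 *
 * Like ioremap_nocache(), but requests the write-back (WB) memory type,
 * for memory that is safe to cache. Must be freed with iounmap.
 */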
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

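/*
 * Map a range with whatever memory type is compatible with any existing
 * mapping of it (see the policy below); used for /dev/mem access via
 * xlate_dev_mem_ptr().
 */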
static void __iomem *ioremap_default(resource_size_t phys_addr,
                                     unsigned long size)
{
        unsigned long flags;
        void *ret;
        int err;

        /*
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
        if (err < 0)
                return NULL;

        ret = (void *) __ioremap_caller(phys_addr, size, flags,
                                        __builtin_return_address(0));

        free_memtype(phys_addr, phys_addr + size);
        return (void __iomem *)ret;
}

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_default(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}

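/*
 * Early boot-time ioremap support.
 *
 * Before the normal ioremap()/vmalloc machinery is available, boot code
 * can create small temporary mappings through the FIX_BTMAP fixmap slots
 * managed below. Mappings are set up with early_ioremap() and must be
 * torn down again with early_iounmap().
 */
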
int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

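/*
 * Walk whatever page table CR3 currently points at down to the pmd that
 * covers @addr.
 */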
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

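/*
 * Once paging_init() has run, fixmap entries are managed with
 * set_fixmap(); re-install any early mappings that are still present so
 * they remain valid after the switch.
 */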
void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

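/*
 * Install a single early fixmap PTE (or clear it when the pgprot is
 * empty) and flush the corresponding TLB entry.
 */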
static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

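/*
 * Illustrative usage sketch (not code from this file): boot code that
 * needs to look at a firmware table before the normal ioremap() works
 * can do something like the following; "table_phys" and "table_len" are
 * made-up names used only for illustration:
 *
 *      void *p = early_ioremap(table_phys, table_len);
 *      if (p) {
 *              ... parse the table ...
 *              early_iounmap(p, table_len);
 *      }
 *
 * Mappings are limited to NR_FIX_BTMAPS pages per user and
 * FIX_BTMAPS_NESTING concurrent users, and are only expected while
 * system_state is SYSTEM_BOOTING.
 */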
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;

        nesting = --early_ioremap_nested;
        if (WARN_ON(nesting < 0))
                return;

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}