/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

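/*
 * Translate a kernel virtual address to a physical address.  Addresses at
 * or above __START_KERNEL_map belong to the kernel text mapping and are
 * offset by phys_base; everything else is assumed to lie in the direct
 * mapping that starts at PAGE_OFFSET.
 */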
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif

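/*
 * page_is_ram - check, based on the E820 map, whether a page frame is
 * usable kernel RAM.  Returns 1 if the page lies inside an E820_RAM
 * region, 0 otherwise.  The first page and the legacy BIOS area are never
 * treated as RAM, regardless of what the E820 table claims.
 */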
int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4KB of memory:
	 * this is a BIOS-owned area, not kernel RAM, but it is generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSes report the PC BIOS
	 * area (640k-1MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

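/*
 * pagerange_is_ram - check whether a physical address range is RAM.
 *
 * Returns 1 if every page in the range is RAM, 0 if none of it is, and
 * -1 if the range mixes RAM and non-RAM pages.
 */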
int pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		if (page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
	     (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
	     pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested types:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
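
/*
 * Illustrative use only (given a struct pci_dev *pdev; the BAR index and
 * register offset below are hypothetical): a typical driver maps a
 * device's MMIO resource, accesses it through the mmio helpers, and
 * unmaps it again when done:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	iounmap(regs);
 */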

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

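/*
 * ioremap_default maps a range with whatever memory type PAT considers
 * safe for it: write-back for ordinary WB-able memory, UC- otherwise, or
 * the type of an already existing conflicting mapping.  The temporary
 * reserve_memtype()/free_memtype() pair below only queries that type;
 * __ioremap_caller() takes its own reference on the range.
 */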
static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = (void *) __ioremap_caller(phys_addr, size, flags,
					__builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return (void __iomem *)ret;
}

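/*
 * ioremap_prot maps a range with an explicitly requested memory type;
 * only the cache attribute bits of prot_val are honoured.
 */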
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel.  Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it.  cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

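/*
 * Undo xlate_dev_mem_ptr(): RAM pages were handed out via __va() and need
 * no teardown, while non-RAM pages were mapped with ioremap_default() and
 * must be unmapped again.
 */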
void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

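/*
 * Boot-time ("early") ioremap support.  early_ioremap_init() hooks the
 * statically allocated bm_pte page table into the fixmap slots reserved
 * for boot-time mappings (FIX_BTMAP_*).  Until paging is fully set up,
 * __early_set_fixmap() writes those ptes directly; once
 * early_ioremap_reset() has run (after_paging_init != 0), the regular
 * set_fixmap()/clear_fixmap() interfaces are used instead.
 */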
static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

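/*
 * early_ioremap()/early_iounmap() provide temporary, boot-time-only
 * mappings through the FIX_BTMAP fixmap slots, for use before the regular
 * ioremap() machinery is available.  A mapping may span at most
 * NR_FIX_BTMAPS pages, at most FIX_BTMAPS_NESTING mappings may be live at
 * once, and every early_ioremap() must be balanced by a matching
 * early_iounmap() before boot finishes (check_early_ioremap_leak() warns
 * otherwise).
 */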
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	if (WARN_ON(nesting < 0))
		return;

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

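/*
 * fix_to_virt() falls back to this function when it is asked to translate
 * an out-of-range fixmap index; it is never expected to run, so all it
 * does is warn.
 */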
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}