/*
 * arch/x86/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

8
e9332cac 9#include <linux/bootmem.h>
1da177e4 10#include <linux/init.h>
a148ecfd 11#include <linux/io.h>
3cbd09e4
TG
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/vmalloc.h>
d61fc448 15#include <linux/mmiotrace.h>
3cbd09e4 16
1da177e4 17#include <asm/cacheflush.h>
3cbd09e4
TG
18#include <asm/e820.h>
19#include <asm/fixmap.h>
1da177e4 20#include <asm/pgtable.h>
3cbd09e4 21#include <asm/tlbflush.h>
f6df72e7 22#include <asm/pgalloc.h>
d7677d40 23#include <asm/pat.h>
1da177e4 24
#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif

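/*
 * Illustration (added here as a comment, not part of the original file):
 * on 64-bit, both kernel virtual ranges translate back to physical
 * addresses, e.g.:
 *
 *	__phys_addr(PAGE_OFFSET + 0x1000)     == 0x1000
 *	__phys_addr(__START_KERNEL_map + off) == phys_base + off
 */
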
int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4KB of memory:
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640K->1MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

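/*
 * Usage sketch (illustrative comment, not part of the original file):
 * page_is_ram() takes a page frame number, not a physical address, so
 * callers shift first:
 *
 *	if (page_is_ram(phys >> PAGE_SHIFT))
 *		return __va(phys);	// covered by the direct mapping
 */
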
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped.
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
	     (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
	     pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug("ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				 (unsigned long long)phys_addr,
				 (unsigned long long)(phys_addr + size),
				 prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
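
/*
 * Usage sketch (illustrative only, not part of this file): a PCI driver
 * would typically map an MMIO BAR like this; `pdev` is assumed to be a
 * valid struct pci_dev with a memory BAR 0, and CTRL_REG is a
 * hypothetical register offset:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	...
 *	iounmap(regs);
 */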

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
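
/*
 * Usage sketch (illustrative only, not part of this file): write
 * combining suits framebuffer-style memory where streaming writes
 * dominate; `fb_start` and `fb_len` are hypothetical:
 *
 *	void __iomem *fb = ioremap_wc(fb_start, fb_len);
 *
 * When PAT is disabled this transparently falls back to the uncached
 * mapping from ioremap_nocache() above.
 */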

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
				     unsigned long size)
{
	unsigned long flags;
	void *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = (void *) __ioremap_caller(phys_addr, size, flags,
					__builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return (void __iomem *)ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

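/*
 * Usage sketch (illustrative only, not part of this file): /dev/mem
 * style access pairs the two helpers around the copy; `p`, `buf` and
 * `sz` are hypothetical:
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		rc = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */
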
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;

	/*
	 * Mappings have to be page-aligned.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	if (WARN_ON(nesting < 0))
		return;

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
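
/*
 * Usage sketch (illustrative only, not part of this file): boot code
 * that must read firmware tables before the normal ioremap machinery is
 * available pairs the two calls; `phys`, `len` and `hdr` are
 * hypothetical:
 *
 *	void *map = early_ioremap(phys, len);
 *	if (map) {
 *		memcpy(&hdr, map, sizeof(hdr));
 *		early_iounmap(map, len);
 *	}
 */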

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}