/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}
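
/*
 * Illustrative sketch (hypothetical caller): when an ioremap alias uses a
 * new cache attribute, the kernel's direct mapping of the same pages must
 * be switched to match, roughly:
 *
 *	err = ioremap_change_attr((unsigned long)__va(phys_addr), size,
 *				  _PAGE_CACHE_WC);
 *
 * kernel_map_sync_memtype() does exactly this on behalf of
 * __ioremap_caller() below.
 */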
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long offset, vaddr;
        resource_size_t pfn, last_pfn, last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }
        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        last_pfn = last_addr >> PAGE_SHIFT;
        for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PHYSICAL_PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                 prot_val, &new_prot_val);
        if (retval) {
                printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                if (!is_new_memtype_allowed(phys_addr, size,
                                            prot_val, new_prot_val)) {
                        printk(KERN_ERR
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        goto err_free_memtype;
                }
                prot_val = new_prot_val;
        }
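
        /*
         * Translate the (possibly adjusted) cache attribute into the page
         * protection flags used for the actual mapping.
         */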
        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL_IO;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                goto err_free_memtype;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;

        if (kernel_map_sync_memtype(phys_addr, size, prot_val))
                goto err_free_area;

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
                goto err_free_area;

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        /*
         * Check if the request spans more than any BAR in the iomem resource
         * tree.
         */
        WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
                  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

        return ret_addr;
err_free_area:
        free_vm_area(area);
err_free_memtype:
        free_memtype(phys_addr, phys_addr + size);
        return NULL;
}
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
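
/*
 * Typical usage (illustrative sketch, hypothetical device): a driver maps
 * an MMIO BAR, pokes a register, and unmaps when done:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + MY_CTRL_OFFSET);	(MY_CTRL_OFFSET is made up)
 *	iounmap(regs);
 */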
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
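
/*
 * Note (summary): write combining buffers consecutive stores before they
 * reach the bus, which suits streaming writes such as filling a
 * framebuffer. When PAT is disabled the WC request silently degrades to
 * an uncached mapping, so callers must not rely on WC semantics.
 */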
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                           unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
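
/*
 * Note: only the cache attribute bits (_PAGE_CACHE_MASK, i.e. _PAGE_PCD
 * and _PAGE_PWT) of prot_val are honoured by ioremap_prot(); all other
 * protection bits come from the fixed PAGE_KERNEL_IO* values chosen in
 * __ioremap_caller().
 */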
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}
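
/*
 * Illustrative pairing (sketch of a /dev/mem style read; "buf" and
 * "count" are hypothetical):
 *
 *	void *p = xlate_dev_mem_ptr(phys);
 *	if (p) {
 *		memcpy(buf, p, count);
 *		unxlate_dev_mem_ptr(phys, p);
 *	}
 */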
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
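
/*
 * Note: booting with "early_ioremap_debug" on the kernel command line
 * makes the early_ioremap()/early_iounmap() paths below print each
 * mapping and dump a stack trace, which helps track down leaked slots.
 */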
static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
        return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
void __init early_ioremap_init(void)
{
        pmd_t *pmd;
        int i;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}
void __init early_ioremap_reset(void)
{
        after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           phys_addr_t phys, pgprot_t prot)
{
        if (after_paging_init)
                __set_fixmap(idx, phys, prot);
        else
                __early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
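
/*
 * prev_map[]/prev_size[] record, per boot-fixmap slot, the pointer that
 * __early_ioremap() handed out and the size that was requested, so
 * early_iounmap() can locate the slot and reject mismatched sizes.
 */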
void __init fixup_early_ioremap(void)
{
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (prev_map[i]) {
                        WARN_ON(1);
                        break;
                }
        }

        early_ioremap_init();
}
static int __init check_early_ioremap_leak(void)
{
        int count = 0;
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                if (prev_map[i])
                        count++;

        if (!count)
                return 0;
        WARN(1, KERN_WARNING
             "Debug warning: early ioremap leak of %d areas detected.\n",
             count);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");

        return 1;
}
late_initcall(check_early_ioremap_leak);
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
        unsigned long offset;
        resource_size_t last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx0, idx;
        int i, slot;

        WARN_ON(system_state != SYSTEM_BOOTING);

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (!prev_map[i]) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_ioremap(%08llx, %08lx): no free slot\n",
                       (u64)phys_addr, size);
                WARN_ON(1);
                return NULL;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
                       (u64)phys_addr, size, slot);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        prev_size[slot] = size;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr, prot);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

        prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
        return prev_map[slot];
}
/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
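
/*
 * Illustrative boot-time usage (hypothetical caller running before the
 * normal ioremap() is available):
 *
 *	void __iomem *p = early_ioremap(phys, len);
 *	if (p) {
 *		val = readl(p);
 *		early_iounmap(p, len);	(len must match the mapping size)
 *	}
 */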
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int i, slot;

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (prev_map[i] == addr) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_iounmap(%p, %08lx): no slot found\n",
                       addr, size);
                WARN_ON(1);
                return;
        }

        if (prev_size[slot] != size) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: inconsistent size %08lx\n",
                       addr, size, slot, prev_size[slot]);
                WARN_ON(1);
                return;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, slot);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
        prev_map[slot] = NULL;
}