x86: start annotating early ioremap pointers with __iomem
arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
		VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
					!phys_addr_valid(x));
	}
	return x;
}
EXPORT_SYMBOL(__phys_addr);
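
/*
 * Illustrative note, not part of the original file: the two branches
 * above mean a kernel-image address and the direct-map alias of the
 * same page resolve to the same physical address, e.g.:
 *
 *	unsigned long pa = __phys_addr((unsigned long)&init_mm);
 *	BUG_ON(pa != __phys_addr((unsigned long)__va(pa)));
 *
 * (&init_mm stands in for any symbol within the kernel image.)
 */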

bool __virt_addr_valid(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		if (x >= KERNEL_IMAGE_SIZE)
			return false;
		x += phys_base;
	} else {
		if (x < PAGE_OFFSET)
			return false;
		x -= PAGE_OFFSET;
		if (system_state == SYSTEM_BOOTING ?
				x > MAXMEM : !phys_addr_valid(x)) {
			return false;
		}
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants; not available at boot time */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
			is_vmalloc_addr((void *) x));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif

int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

int pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		if (page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}

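/*
 * Illustrative note, not part of the original file: pagerange_is_ram()
 * is tri-state -- 1 if every page in the range is RAM, 0 if none is,
 * -1 for a mix -- so a caller must handle the mixed case explicitly:
 *
 *	int is_ram = pagerange_is_ram(paddr, paddr + size);
 *	if (is_ram < 0)
 *		return -EINVAL;		(error value is a hypothetical choice)
 */
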
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ON(iomem_map_sanity_check(phys_addr, size));

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}
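
/*
 * Illustrative note, not part of the original file: the alignment fixup
 * in __ioremap_caller() is why requests need not be page-aligned. E.g.
 * phys_addr = 0xfebc1004, size = 0x10 yields offset = 0x4, a mapping of
 * the single page at 0xfebc1000, and a returned pointer 4 bytes into it.
 */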

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

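/*
 * Illustrative usage sketch, not part of the original file. A driver
 * would typically map an MMIO BAR like this; the MYDEV_* constants are
 * hypothetical:
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(MYDEV_MMIO_BASE, MYDEV_MMIO_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(MYDEV_CTRL_ENABLE, regs + MYDEV_CTRL_OFF);
 *	status = readl(regs + MYDEV_STATUS_OFF);
 *	...
 *	iounmap(regs);
 */
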
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

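/*
 * Illustrative, not part of the original file: write combining suits
 * large streaming writes such as a framebuffer; fb_base/fb_len are
 * hypothetical:
 *
 *	void __iomem *fb = ioremap_wc(fb_base, fb_len);
 *
 * Note the pat_enabled check above: without PAT this quietly degrades
 * to an uncached mapping via ioremap_nocache().
 */
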
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		count);
	printk(KERN_WARNING
		"please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx): no free slot found\n",
			 phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}

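/*
 * Illustrative, not part of the original file: boot code maps firmware
 * tables through a fixmap slot and must unmap with the same size, e.g.
 * scanning a hypothetical table at "tbl_phys":
 *
 *	void __iomem *p = early_ioremap(tbl_phys, tbl_len);
 *	if (p) {
 *		parse_table(p);			(hypothetical helper)
 *		early_iounmap(p, tbl_len);
 *	}
 *
 * This only works while system_state == SYSTEM_BOOTING, and is limited
 * to FIX_BTMAPS_SLOTS concurrent mappings of NR_FIX_BTMAPS pages each.
 */
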
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): no matching slot\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: inconsistent size %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}