/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
25 #include <linux/config.h>
26 #include <linux/signal.h>
27 #include <linux/sched.h>
28 #include <linux/kernel.h>
29 #include <linux/errno.h>
30 #include <linux/string.h>
31 #include <linux/types.h>
32 #include <linux/mman.h>
34 #include <linux/swap.h>
35 #include <linux/stddef.h>
36 #include <linux/vmalloc.h>
37 #include <linux/init.h>
38 #include <linux/delay.h>
39 #include <linux/bootmem.h>
40 #include <linux/highmem.h>
41 #include <linux/idr.h>
42 #include <linux/nodemask.h>
43 #include <linux/module.h>
45 #include <asm/pgalloc.h>
51 #include <asm/mmu_context.h>
52 #include <asm/pgtable.h>
54 #include <asm/uaccess.h>
56 #include <asm/machdep.h>
59 #include <asm/processor.h>
60 #include <asm/mmzone.h>
61 #include <asm/cputable.h>
62 #include <asm/ppcdebug.h>
63 #include <asm/sections.h>
64 #include <asm/system.h>
65 #include <asm/iommu.h>
66 #include <asm/abs_addr.h>
68 #include <asm/imalloc.h>
70 #if PGTABLE_RANGE > USER_VSID_RANGE
71 #warning Limited user VSID range means pagetable space is wasted
74 #if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
75 #warning TASK_SIZE is smaller than it needs to be.
79 unsigned long ioremap_bot
= IMALLOC_BASE
;
80 static unsigned long phbs_io_bot
= PHBS_IO_BASE
;
82 extern pgd_t swapper_pg_dir
[];
83 extern struct task_struct
*current_set
[NR_CPUS
];
85 unsigned long klimit
= (unsigned long)_end
;
87 /* max amount of RAM to use */
88 unsigned long __max_memory
;
90 /* info on what we think the IO hole is */
91 unsigned long io_hole_start
;
92 unsigned long io_hole_size
;
94 #ifdef CONFIG_PPC_ISERIES
96 void __iomem
*ioremap(unsigned long addr
, unsigned long size
)
98 return (void __iomem
*)addr
;
101 extern void __iomem
*__ioremap(unsigned long addr
, unsigned long size
,
104 return (void __iomem
*)addr
;
107 void iounmap(volatile void __iomem
*addr
)
115 * map_io_page currently only called by __ioremap
116 * map_io_page adds an entry to the ioremap page table
117 * and adds an entry to the HPT, possibly bolting it
119 static int map_io_page(unsigned long ea
, unsigned long pa
, int flags
)
128 spin_lock(&init_mm
.page_table_lock
);
129 pgdp
= pgd_offset_k(ea
);
130 pudp
= pud_alloc(&init_mm
, pgdp
, ea
);
133 pmdp
= pmd_alloc(&init_mm
, pudp
, ea
);
136 ptep
= pte_alloc_kernel(&init_mm
, pmdp
, ea
);
139 set_pte_at(&init_mm
, ea
, ptep
, pfn_pte(pa
>> PAGE_SHIFT
,
141 spin_unlock(&init_mm
.page_table_lock
);
143 unsigned long va
, vpn
, hash
, hpteg
;
146 * If the mm subsystem is not fully up, we cannot create a
147 * linux page table entry for this mapping. Simply bolt an
148 * entry in the hardware page table.
150 vsid
= get_kernel_vsid(ea
);
151 va
= (vsid
<< 28) | (ea
& 0xFFFFFFF);
152 vpn
= va
>> PAGE_SHIFT
;
154 hash
= hpt_hash(vpn
, 0);
156 hpteg
= ((hash
& htab_hash_mask
) * HPTES_PER_GROUP
);
158 /* Panic if a pte grpup is full */
159 if (ppc_md
.hpte_insert(hpteg
, va
, pa
>> PAGE_SHIFT
,
161 _PAGE_NO_CACHE
|_PAGE_GUARDED
|PP_RWXX
)
163 panic("map_io_page: could not insert mapping");
170 static void __iomem
* __ioremap_com(unsigned long addr
, unsigned long pa
,
171 unsigned long ea
, unsigned long size
,
176 if ((flags
& _PAGE_PRESENT
) == 0)
177 flags
|= pgprot_val(PAGE_KERNEL
);
179 for (i
= 0; i
< size
; i
+= PAGE_SIZE
)
180 if (map_io_page(ea
+i
, pa
+i
, flags
))
183 return (void __iomem
*) (ea
+ (addr
& ~PAGE_MASK
));
188 ioremap(unsigned long addr
, unsigned long size
)
190 return __ioremap(addr
, size
, _PAGE_NO_CACHE
| _PAGE_GUARDED
);
193 void __iomem
* __ioremap(unsigned long addr
, unsigned long size
,
196 unsigned long pa
, ea
;
200 * Choose an address to map it to.
201 * Once the imalloc system is running, we use it.
202 * Before that, we map using addresses going
203 * up from ioremap_bot. imalloc will use
204 * the addresses from ioremap_bot through
208 pa
= addr
& PAGE_MASK
;
209 size
= PAGE_ALIGN(addr
+ size
) - pa
;
215 struct vm_struct
*area
;
216 area
= im_get_free_area(size
);
219 ea
= (unsigned long)(area
->addr
);
220 ret
= __ioremap_com(addr
, pa
, ea
, size
, flags
);
225 ret
= __ioremap_com(addr
, pa
, ea
, size
, flags
);
232 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
234 int __ioremap_explicit(unsigned long pa
, unsigned long ea
,
235 unsigned long size
, unsigned long flags
)
237 struct vm_struct
*area
;
240 /* For now, require page-aligned values for pa, ea, and size */
241 if (!IS_PAGE_ALIGNED(pa
) || !IS_PAGE_ALIGNED(ea
) ||
242 !IS_PAGE_ALIGNED(size
)) {
243 printk(KERN_ERR
"unaligned value in %s\n", __FUNCTION__
);
247 if (!mem_init_done
) {
248 /* Two things to consider in this case:
249 * 1) No records will be kept (imalloc, etc) that the region
251 * 2) It won't be easy to iounmap() the region later (because
256 area
= im_get_area(ea
, size
,
257 IM_REGION_UNUSED
|IM_REGION_SUBSET
|IM_REGION_EXISTS
);
259 /* Expected when PHB-dlpar is in play */
262 if (ea
!= (unsigned long) area
->addr
) {
263 printk(KERN_ERR
"unexpected addr return from "
269 ret
= __ioremap_com(pa
, pa
, ea
, size
, flags
);
271 printk(KERN_ERR
"ioremap_explicit() allocation failure !\n");
274 if (ret
!= (void *) ea
) {
275 printk(KERN_ERR
"__ioremap_com() returned unexpected addr\n");
283 * Unmap an IO region and remove it from imalloc'd list.
284 * Access to IO memory should be serialized by driver.
285 * This code is modeled after vmalloc code - unmap_vm_area()
287 * XXX what about calls before mem_init_done (ie python_countermeasures())
289 void iounmap(volatile void __iomem
*token
)
296 addr
= (void *) ((unsigned long __force
) token
& PAGE_MASK
);
301 static int iounmap_subset_regions(unsigned long addr
, unsigned long size
)
303 struct vm_struct
*area
;
305 /* Check whether subsets of this region exist */
306 area
= im_get_area(addr
, size
, IM_REGION_SUPERSET
);
311 iounmap((void __iomem
*) area
->addr
);
312 area
= im_get_area(addr
, size
,
319 int iounmap_explicit(volatile void __iomem
*start
, unsigned long size
)
321 struct vm_struct
*area
;
325 addr
= (unsigned long __force
) start
& PAGE_MASK
;
327 /* Verify that the region either exists or is a subset of an existing
328 * region. In the latter case, split the parent region to create
331 area
= im_get_area(addr
, size
,
332 IM_REGION_EXISTS
| IM_REGION_SUBSET
);
334 /* Determine whether subset regions exist. If so, unmap */
335 rc
= iounmap_subset_regions(addr
, size
);
338 "%s() cannot unmap nonexistent range 0x%lx\n",
343 iounmap((void __iomem
*) area
->addr
);
346 * FIXME! This can't be right:
348 * Maybe it should be "iounmap(area);"
355 EXPORT_SYMBOL(ioremap
);
356 EXPORT_SYMBOL(__ioremap
);
357 EXPORT_SYMBOL(iounmap
);