/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

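/*
 * Fill in the PTEs for one PMD-sized (or smaller) chunk of an I/O mapping,
 * using PAGE_KERNEL_NOCACHE plus the caller-supplied flags. BUG()s if any
 * PTE in the range is already populated.
 */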
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

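/*
 * Walk the PMD entries covering one PGD-sized (or smaller) chunk of the
 * mapping, allocating PTE pages as needed and delegating each piece to
 * remap_area_pte(). Returns 0 on success or -ENOMEM on allocation failure.
 */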
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

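/*
 * Establish kernel page table entries mapping the virtual range
 * [address, address + size) onto phys_addr, allocating intermediate
 * page table levels as needed and flushing caches and TLBs around
 * the update.
 */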
int remap_area_pages(unsigned long address, unsigned long phys_addr,
		     unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;

		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset, last_addr, addr, orig_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return (void __iomem *)phys_to_virt(phys_addr);

	/*
	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
	 * mapped at the end of the address space (typically 0xfd000000)
	 * in a non-translatable area, so mapping through page tables for
	 * this area is not only pointless, but also fundamentally
	 * broken. Just return the physical address instead.
	 *
	 * For boards that map a small PCI memory aperture somewhere in
	 * P1/P2 space, ioremap() will already do the right thing,
	 * and we'll never get this far.
	 */
	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
		return (void __iomem *)phys_addr;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_32BIT
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
	if (unlikely(size >= 0x1000000)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr += mapped;
			phys_addr += mapped;
			size -= mapped;
		}
	}
#endif

	if (likely(size))
		if (remap_area_pages(addr, phys_addr, size, flags)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap);

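/*
 * Unmap a region previously mapped with __ioremap(). Addresses that were
 * handed back as fixed P1/P2 segment or direct PCI window addresses were
 * never page-table mapped, so they are simply ignored here.
 */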
void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
		return;

#ifdef CONFIG_32BIT
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 * -- PFM.
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
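
/*
 * Usage sketch: a minimal illustration of how a driver might consume this
 * interface, normally through the generic ioremap()/iounmap() wrappers
 * rather than by calling __ioremap()/__iounmap() directly. The physical
 * base address, window size and register offset below are hypothetical
 * placeholders.
 *
 *	void __iomem *regs = ioremap(0xfe800000, 0x100);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);
 *	iounmap(regs);
 */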