Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/parisc/mm/ioremap.c | |
3 | * | |
1da177e4 | 4 | * (C) Copyright 1995 1996 Linus Torvalds |
b2d6b9fb | 5 | * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de> |
e0565a1c | 6 | * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org> |
1da177e4 LT |
7 | */ |
8 | ||
9 | #include <linux/vmalloc.h> | |
10 | #include <linux/errno.h> | |
11 | #include <linux/module.h> | |
12 | #include <asm/io.h> | |
13 | #include <asm/pgalloc.h> | |
e0565a1c KM |
14 | #include <asm/tlbflush.h> |
15 | #include <asm/cacheflush.h> | |
1da177e4 | 16 | |
e0565a1c KM |
17 | static inline void |
18 | remap_area_pte(pte_t *pte, unsigned long address, unsigned long size, | |
19 | unsigned long phys_addr, unsigned long flags) | |
1da177e4 | 20 | { |
e0565a1c KM |
21 | unsigned long end, pfn; |
22 | pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | | |
23 | _PAGE_ACCESSED | flags); | |
1da177e4 LT |
24 | |
25 | address &= ~PMD_MASK; | |
e0565a1c | 26 | |
1da177e4 LT |
27 | end = address + size; |
28 | if (end > PMD_SIZE) | |
29 | end = PMD_SIZE; | |
e0565a1c KM |
30 | |
31 | BUG_ON(address >= end); | |
32 | ||
33 | pfn = phys_addr >> PAGE_SHIFT; | |
1da177e4 | 34 | do { |
e0565a1c KM |
35 | BUG_ON(!pte_none(*pte)); |
36 | ||
37 | set_pte(pte, pfn_pte(pfn, pgprot)); | |
38 | ||
1da177e4 | 39 | address += PAGE_SIZE; |
e0565a1c | 40 | pfn++; |
1da177e4 LT |
41 | pte++; |
42 | } while (address && (address < end)); | |
43 | } | |
44 | ||
e0565a1c KM |
45 | static inline int |
46 | remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size, | |
47 | unsigned long phys_addr, unsigned long flags) | |
1da177e4 LT |
48 | { |
49 | unsigned long end; | |
50 | ||
51 | address &= ~PGDIR_MASK; | |
e0565a1c | 52 | |
1da177e4 LT |
53 | end = address + size; |
54 | if (end > PGDIR_SIZE) | |
55 | end = PGDIR_SIZE; | |
e0565a1c KM |
56 | |
57 | BUG_ON(address >= end); | |
58 | ||
1da177e4 | 59 | phys_addr -= address; |
1da177e4 | 60 | do { |
e0565a1c | 61 | pte_t *pte = pte_alloc_kernel(pmd, address); |
1da177e4 LT |
62 | if (!pte) |
63 | return -ENOMEM; | |
e0565a1c KM |
64 | |
65 | remap_area_pte(pte, address, end - address, | |
66 | address + phys_addr, flags); | |
67 | ||
1da177e4 LT |
68 | address = (address + PMD_SIZE) & PMD_MASK; |
69 | pmd++; | |
70 | } while (address && (address < end)); | |
e0565a1c | 71 | |
1da177e4 LT |
72 | return 0; |
73 | } | |
74 | ||
/*
 * Build kernel page-table entries mapping the virtual range
 * [address, address + size) onto the physical range starting at
 * phys_addr, walking pgd -> pud -> pmd and allocating intermediate
 * tables from init_mm as needed.  Returns 0 on success, -ENOMEM if a
 * table allocation fails (already-built entries are left in place).
 */
static int
remap_area_pages(unsigned long address, unsigned long phys_addr,
		 unsigned long size, unsigned long flags)
{
	pgd_t *dir;
	int error = 0;
	unsigned long end = address + size;

	BUG_ON(address >= end);

	/* Bias so that (phys_addr + address) is the physical target below. */
	phys_addr -= address;
	dir = pgd_offset_k(address);

	/* Flush caches before installing the new translations. */
	flush_cache_all();

	do {
		pud_t *pud;
		pmd_t *pmd;

		/* Assume failure; cleared once this iteration completes. */
		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;

		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;

		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;

		error = 0;
		/* Round up to the start of the next PGD entry. */
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	/* Make the new mappings visible even if we bailed out part-way. */
	flush_tlb_all();

	return error;
}
1da177e4 | 116 | |
1da177e4 LT |
117 | /* |
118 | * Generic mapping function (not visible outside): | |
119 | */ | |
120 | ||
121 | /* | |
122 | * Remap an arbitrary physical address space into the kernel virtual | |
e0565a1c | 123 | * address space. |
1da177e4 LT |
124 | * |
125 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | |
126 | * have to convert them into an offset in a page-aligned mapping, but the | |
127 | * caller shouldn't need to know that small detail. | |
128 | */ | |
129 | void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) | |
130 | { | |
cb4ab59c HD |
131 | void *addr; |
132 | struct vm_struct *area; | |
133 | unsigned long offset, last_addr; | |
134 | ||
29ef8295 | 135 | #ifdef CONFIG_EISA |
1da177e4 LT |
136 | unsigned long end = phys_addr + size - 1; |
137 | /* Support EISA addresses */ | |
10267cdd HD |
138 | if ((phys_addr >= 0x00080000 && end < 0x000fffff) || |
139 | (phys_addr >= 0x00500000 && end < 0x03bfffff)) { | |
140 | phys_addr |= F_EXTEND(0xfc000000); | |
b2d6b9fb | 141 | flags |= _PAGE_NO_CACHE; |
1da177e4 | 142 | } |
1da177e4 LT |
143 | #endif |
144 | ||
1da177e4 LT |
145 | /* Don't allow wraparound or zero size */ |
146 | last_addr = phys_addr + size - 1; | |
147 | if (!size || last_addr < phys_addr) | |
148 | return NULL; | |
149 | ||
150 | /* | |
151 | * Don't allow anybody to remap normal RAM that we're using.. | |
152 | */ | |
153 | if (phys_addr < virt_to_phys(high_memory)) { | |
154 | char *t_addr, *t_end; | |
155 | struct page *page; | |
156 | ||
157 | t_addr = __va(phys_addr); | |
158 | t_end = t_addr + (size - 1); | |
159 | ||
e0565a1c KM |
160 | for (page = virt_to_page(t_addr); |
161 | page <= virt_to_page(t_end); page++) { | |
1da177e4 LT |
162 | if(!PageReserved(page)) |
163 | return NULL; | |
e0565a1c | 164 | } |
1da177e4 LT |
165 | } |
166 | ||
167 | /* | |
168 | * Mappings have to be page-aligned | |
169 | */ | |
170 | offset = phys_addr & ~PAGE_MASK; | |
171 | phys_addr &= PAGE_MASK; | |
172 | size = PAGE_ALIGN(last_addr) - phys_addr; | |
173 | ||
174 | /* | |
175 | * Ok, go for it.. | |
176 | */ | |
177 | area = get_vm_area(size, VM_IOREMAP); | |
178 | if (!area) | |
179 | return NULL; | |
e0565a1c | 180 | |
1da177e4 LT |
181 | addr = area->addr; |
182 | if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { | |
183 | vfree(addr); | |
184 | return NULL; | |
185 | } | |
e0565a1c | 186 | |
1da177e4 | 187 | return (void __iomem *) (offset + (char *)addr); |
1da177e4 | 188 | } |
d345fd36 | 189 | EXPORT_SYMBOL(__ioremap); |
1da177e4 | 190 | |
01232e93 | 191 | void iounmap(const volatile void __iomem *addr) |
1da177e4 | 192 | { |
1da177e4 LT |
193 | if (addr > high_memory) |
194 | return vfree((void *) (PAGE_MASK & (unsigned long __force) addr)); | |
1da177e4 | 195 | } |
d345fd36 | 196 | EXPORT_SYMBOL(iounmap); |