/*
 * arch/parisc/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001 Helge Deller <deller@gmx.de>
 * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
 */

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Fill in the PTEs covering (at most) one PMD's worth of the remapped
 * range: each entry maps one page of the physical area, with the given
 * protection flags OR'd into the standard kernel I/O protections.
 */
static inline void
remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end, pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
				   _PAGE_ACCESSED | flags);

	address &= ~PMD_MASK;

	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;

	BUG_ON(address >= end);

	pfn = phys_addr >> PAGE_SHIFT;
	do {
		BUG_ON(!pte_none(*pte));

		set_pte(pte, pfn_pte(pfn, pgprot));

		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

/*
 * Walk the PMD entries under one PGD entry, allocating PTE tables as
 * needed and handing each PMD-sized piece of the range to
 * remap_area_pte().
 */
static inline int
remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;

	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	BUG_ON(address >= end);

	phys_addr -= address;
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;

		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);

		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));

	return 0;
}

/*
 * Top-level walk of the kernel page tables: allocate the PUD and PMD
 * levels as needed and populate the whole [address, address + size)
 * range, flushing caches before the tables change and the TLB after.
 */
static int
remap_area_pages(unsigned long address, unsigned long phys_addr,
		 unsigned long size, unsigned long flags)
{
	pgd_t *dir;
	int error = 0;
	unsigned long end = address + size;

	BUG_ON(address >= end);

	phys_addr -= address;
	dir = pgd_offset_k(address);

	flush_cache_all();

	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;

		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;

		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;

		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	flush_tlb_all();

	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

#ifdef CONFIG_EISA
	unsigned long end = phys_addr + size - 1;
	/* Support EISA addresses */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
	    (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
		phys_addr |= F_EXTEND(0xfc000000);
	}
#endif

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++) {
			if (!PageReserved(page))
				return NULL;
		}
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
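
/*
 * A worked example (editor's sketch, not part of the original file) of
 * the alignment arithmetic in __ioremap() above. All addresses are
 * hypothetical, and the numbers in the comments assume a 4 kB
 * PAGE_SIZE purely to make them concrete.
 */
#if 0	/* illustration only */
static void __ioremap_alignment_sketch(void)
{
	unsigned long phys_addr = 0xf1001234UL;		/* hypothetical device address */
	unsigned long size = 0x100;
	unsigned long last_addr = phys_addr + size - 1;	/* 0xf1001333 */
	unsigned long offset = phys_addr & ~PAGE_MASK;	/* 0x234 */

	phys_addr &= PAGE_MASK;				/* 0xf1001000 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;	/* 0x1000: one page */

	/*
	 * __ioremap() maps the single page at 0xf1001000 and returns
	 * area->addr + 0x234, so the caller gets a pointer to the exact
	 * byte it asked for while the mapping itself stays page-aligned.
	 */
}
#endif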

void iounmap(void __iomem *addr)
{
	if (addr > high_memory)
		return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
}
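
/*
 * A minimal usage sketch (editor's addition, not in the original file)
 * showing how a driver would typically pair __ioremap() with iounmap().
 * The physical base address and register offsets are hypothetical
 * placeholders; passing _PAGE_NO_CACHE for an uncached mapping matches
 * how parisc's ioremap_nocache() historically wrapped __ioremap().
 */
#if 0	/* illustration only */
static int example_device_probe(void)
{
	void __iomem *regs;

	/* Map one page of (hypothetical) device registers, uncached. */
	regs = __ioremap(0xfffa0000UL, PAGE_SIZE, _PAGE_NO_CACHE);
	if (!regs)
		return -ENOMEM;

	writel(1, regs + 0x10);		/* hypothetical control register */
	(void) readl(regs + 0x14);	/* hypothetical status register */

	iounmap(regs);			/* drop the mapping when done */
	return 0;
}
#endif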