/* $Id: io-unit.c,v 1.24 2001/12/17 07:05:09 davem Exp $
 * io-unit.c: IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>

#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>

/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

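/*
 * Each IOPTE maps one page of DVMA space: MKIOPTE() packs the physical
 * address, shifted right by four bits, into the IOUPTE_PAGE field and
 * marks the entry cacheable, writable and valid.
 */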
#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)

void __init
iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
{
	iopte_t *xpt, *xptend;
	struct iounit_struct *iounit;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;

	iounit = kmalloc(sizeof(struct iounit_struct), GFP_ATOMIC);

	memset(iounit, 0, sizeof(*iounit));
	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = NULL;
	if(prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
			    sizeof(iommu_promregs)) != -1) {
		prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
		memset(&r, 0, sizeof(r));
		r.flags = iommu_promregs[2].which_io;
		r.start = iommu_promregs[2].phys_addr;
		xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT");
	}
	if(!xpt) panic("Cannot map External Page Table.");

	sbus->iommu = (struct iommu_struct *)iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);

	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	     xpt < xptend;)
		iopte_val(*xpt++) = 0;
}

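/*
 * Allocate npages consecutive slots in the DVMA bitmap (next-fit from the
 * per-area rotor), point their IOPTEs at the physical pages backing vaddr
 * and return the DVMA address the device should use for the buffer.
 */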
/* One has to hold iounit->lock to call this */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic: each nibble of i selects the next bitmap
	 * area to scan (via iounit->limit[]), least significant nibble
	 * first; the preferred order depends on the request size. */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}

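/* Map a single kernel buffer for SBUS DMA; the sgl variant below does the
 * same for every entry of a scatterlist. */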
static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long ret, flags;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long flags;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	/* FIXME: Cache some resolved pages - often several sg entries refer to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
		sg[sz].dvma_length = sg[sz].length;
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long flags;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long flags;
	unsigned long vaddr, len;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

#ifdef CONFIG_SBUS
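/*
 * Map a kernel virtual range at the fixed DVMA address 'addr': every page
 * is entered into the kernel page tables at that address with the DVMA
 * protection bits, and a matching IOPTE is installed in the external page
 * table of each IO-UNIT.
 */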
static int iounit_map_dma_area(dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;
	struct sbus_bus *sbus;

	*pba = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			for_each_sbus(sbus) {
				struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

				iopte = (iopte_t *)(iounit->page_table + i);
				*iopte = MKIOPTE(__pa(page));
			}
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return 0;
}

static void iounit_unmap_dma_area(unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}

/* XXX We do not pass sbus device here, bad. */
static struct page *iounit_translate_dvma(unsigned long addr)
{
	struct sbus_bus *sbus = sbus_root; /* They are all the same */
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
	int i;
	iopte_t *iopte;

	i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
	iopte = (iopte_t *)(iounit->page_table + i);
	return pfn_to_page(iopte_val(*iopte) >> (PAGE_SHIFT-4)); /* XXX sun4d guru, help */
}
#endif

static char *iounit_lockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
	return vaddr;
}

static void iounit_unlockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
}

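/* Hook the IO-UNIT routines into the sparc32 MMU interface via btfixup. */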
void __init ld_mmu_iounit(void)
{
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iounit_translate_dvma, BTFIXUPCALL_NORM);
#endif
}

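/*
 * Reserve DVMA space for 'size' bytes in the bitmap without filling in any
 * IOPTEs; iounit_map_dma_page() below installs the entry for each page of
 * the reserved range.
 */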
__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	unsigned long flags;
	__u32 ret;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	npages = (size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	i = 0x0213;
	spin_lock_irqsave(&iounit->lock, flags);
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	ret = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT);
	for (k = 0; k < npages; k++, scan++)
		set_bit(scan, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

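/* Install the IOPTE for one page of a range reserved by
 * iounit_map_dma_init() and return the DVMA address of 'addr'. */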
__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
{
	int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	iounit->page_table[scan] = MKIOPTE(__pa(((unsigned long)addr) & PAGE_MASK));
	return vaddr + (((unsigned long)addr) & ~PAGE_MASK);
}