sparc: Use asm-generic/pci-dma-compat
[deliverable/linux.git] arch/sparc/kernel/ioport.c
/*
 * ioport.c: Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *	So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables? It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>

#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */

static struct resource *_sparc_find_resource(struct resource *r,
					     unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);

/* This points to the next to use virtual memory for DVMA mappings */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This points to the start of I/O mappings, cluable from outside. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};

/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void) {
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp) {
	xrp->xflag = 0;
}
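
/*
 * A minimal usage sketch (illustration only, not part of the original
 * file): a caller grabs a slot from the static pool before kmalloc is
 * available and returns it with xres_free().  The function name is
 * made up.
 */
#if 0
static void xres_usage_example(void)
{
	struct xresource *xrp = xres_alloc();

	if (xrp == NULL)
		return;			/* all XNRES static slots in use */
	strlcpy(xrp->xname, "example", XNMLN+1);
	xrp->xres.name = xrp->xname;
	/* ... pass &xrp->xres to allocate_resource() ... */
	xres_free(xrp);
}
#endif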

/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}
EXPORT_SYMBOL(ioremap);

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
EXPORT_SYMBOL(iounmap);
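
/*
 * A minimal usage sketch (illustration only, not part of the original
 * file): a driver maps a register window, pokes it with the usual MMIO
 * accessors and unmaps it.  The physical address, size and register
 * layout are made-up values.
 */
#if 0
static int example_probe(void)
{
	void __iomem *regs = ioremap(0xf0000000UL, 0x1000);
	u32 id;

	if (regs == NULL)
		return -ENOMEM;
	id = readl(regs);		/* hypothetical ID register at offset 0 */
	iounmap(regs);
	return id != 0 ? 0 : -ENODEV;
}
#endif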

void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);

/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL) name = "???";

	if ((xres = xres_alloc()) != 0) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL) return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof (struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}

/*
 */
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL)? res->name: "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

	return (void __iomem *)(unsigned long)(res->start + offset);
}

/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = res->end - res->start + 1;
	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
	sparc_unmapiorange(res->start, plen);
	release_resource(res);
}
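
/*
 * Worked example (not in the original): with 4K pages, pa = 0x12345678
 * and sz = 0x100 give offset = 0x678; the requested size
 * (0x678 + 0x100 + PAGE_SIZE-1) & PAGE_MASK rounds up to one page,
 * 0x1000 bytes.  sparc_mapiorange() maps the page-aligned physical
 * address and the caller gets back res->start + 0x678.
 */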

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);

/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
static void *sbus_alloc_coherent(struct device *dev, size_t len,
				 dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct of_device *op = to_of_device(dev);
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return NULL;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);
	// XXX The mmu_map_dma_area does this for us below, see comments.
	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
	/*
	 * XXX That's where sdev would be used. Currently we load
	 * all iommu tables with the same translations.
	 */
	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->node->name;

	return (void *)(unsigned long)res->start;

err_noiommu:
	release_resource(res);
err_nova:
	free_pages(va, order);
err_nomem:
	kfree(res);
err_nopages:
	return NULL;
}

static void sbus_free_coherent(struct device *dev, size_t n, void *p,
			       dma_addr_t ba)
{
	struct resource *res;
	struct page *pgv;

	if ((res = _sparc_find_resource(&_sparc_dvma,
					(unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
		    (long)((res->end-res->start)+1), n);
		return;
	}

	release_resource(res);
	kfree(res);

	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
	pgv = virt_to_page(p);
	mmu_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}
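
/*
 * A minimal usage sketch (illustration only, not part of the original
 * file): a driver obtains a coherent control block through the generic
 * DMA API, which dispatches into sbus_alloc_coherent() via sbus_dma_ops
 * below.  "struct example_cb" is a made-up type.
 */
#if 0
struct example_cb { u32 cmd; u32 status; };

static int example_setup(struct device *dev)
{
	dma_addr_t ba;
	struct example_cb *cb;

	cb = dma_alloc_coherent(dev, sizeof(*cb), &ba, GFP_KERNEL);
	if (cb == NULL)
		return -ENOMEM;
	cb->cmd = 1;			/* CPU stores need no explicit flushing */
	/* ... hand "ba" to the device ... */
	dma_free_coherent(dev, sizeof(*cb), cb, ba);
	return 0;
}
#endif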

/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t len,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	void *va = page_address(page) + offset;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}
	return mmu_get_scsi_one(dev, va, len);
}

static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_one(dev, ba, n);
}
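
/*
 * A minimal usage sketch (illustration only, not part of the original
 * file): streaming one buffer to a device; dma_map_single() ends up in
 * sbus_map_page()/mmu_get_scsi_one() above.
 */
#if 0
static void example_stream(struct device *dev, void *buf, size_t len)
{
	dma_addr_t ba = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, ba))
		return;
	/* ... run the transfer ... */
	dma_unmap_single(dev, ba, len, DMA_TO_DEVICE);
}
#endif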

static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_get_scsi_sgl(dev, sg, n);

	/*
	 * XXX sparc64 can return a partial length here. sun4c should do this
	 * but it currently panics if it can't fulfill the request - Anton
	 */
	return n;
}

static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				 int n, enum dma_data_direction dir)
{
	BUG();
}

static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				    int n, enum dma_data_direction dir)
{
	BUG();
}

struct dma_map_ops sbus_dma_ops = {
	.alloc_coherent		= sbus_alloc_coherent,
	.free_coherent		= sbus_free_coherent,
	.map_page		= sbus_map_page,
	.unmap_page		= sbus_unmap_page,
	.map_sg			= sbus_map_sg,
	.unmap_sg		= sbus_unmap_sg,
	.sync_sg_for_cpu	= sbus_sync_sg_for_cpu,
	.sync_sg_for_device	= sbus_sync_sg_for_device,
};

struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);
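
/*
 * For orientation (illustration only, not part of the original file):
 * the generic DMA API dispatches through this table, roughly as below;
 * get_dma_ops() is assumed to return the dma_ops pointer exported above.
 */
#if 0
static inline dma_addr_t sketch_dma_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	return ops->map_page(dev, page, offset, size, dir, NULL);
}
#endif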

static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();

	return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 */
static void *pci32_alloc_coherent(struct device *dev, size_t len,
				  dma_addr_t *pba, gfp_t gfp)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;
}

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
static void pci32_free_coherent(struct device *dev, size_t n, void *p,
				dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
					(unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}
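
/*
 * For orientation (illustration only, not part of the original file):
 * with the asm-generic/pci-dma-compat layer named in the commit title,
 * the legacy PCI entry points become thin wrappers over the generic
 * DMA API, roughly:
 */
#if 0
static inline void *sketch_pci_alloc_consistent(struct pci_dev *hwdev,
		size_t size, dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
				  size, dma_handle, GFP_ATOMIC);
}
#endif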

/*
 * Same as pci_map_single, but with pages.
 */
static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		BUG_ON(page_address(sg_page(sg)) == NULL);
		sg->dma_address = virt_to_phys(sg_virt(sg));
		sg->dma_length = sg->length;
	}
	return nents;
}
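
/*
 * A minimal usage sketch (illustration only, not part of the original
 * file): mapping a two-element scatterlist and reading back the bus
 * addresses the device should be programmed with.
 */
#if 0
static void example_sg(struct device *dev, void *a, void *b, size_t len)
{
	struct scatterlist sgl[2], *sg;
	int i, n;

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], a, len);
	sg_set_buf(&sgl[1], b, len);

	n = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
	for_each_sg(sgl, sg, n, i) {
		/* program device with sg_dma_address(sg), sg_dma_len(sg) */
	}
	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
}
#endif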

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_for_device, and then the
 * device again owns the buffer.
 */
static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
				      size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
					 size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
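
/*
 * A minimal usage sketch (illustration only, not part of the original
 * file): the ownership hand-off described in the comment above, letting
 * the CPU peek at a device-written buffer between transfers.
 */
#if 0
static void example_sync(struct device *dev, dma_addr_t ba, size_t len)
{
	dma_sync_single_for_cpu(dev, ba, len, DMA_FROM_DEVICE);
	/* ... CPU may inspect the buffer here ... */
	dma_sync_single_for_device(dev, ba, len, DMA_FROM_DEVICE);
	/* ... device owns the buffer again ... */
}
#endif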

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
				  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
				     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

struct dma_map_ops pci32_dma_ops = {
	.alloc_coherent		= pci32_alloc_coherent,
	.free_coherent		= pci32_free_coherent,
	.map_page		= pci32_map_page,
	.map_sg			= pci32_map_sg,
	.unmap_sg		= pci32_unmap_sg,
	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
	.sync_single_for_device	= pci32_sync_single_for_device,
	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
	.sync_sg_for_device	= pci32_sync_sg_for_device,
};
EXPORT_SYMBOL(pci32_dma_ops);

#endif /* CONFIG_PCI */

#ifdef CONFIG_PROC_FS

static int
_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
		   void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == 0) nm = "???";
		p += sprintf(p, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return p-buf;
}

#endif /* CONFIG_PROC_FS */

/*
 * This is a version of find_resource and it belongs to kernel/resource.c.
 * Until we have agreement with Linus and Martin, it lingers here.
 *
 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
 * This probably warrants some sort of hashing.
 */
static struct resource *_sparc_find_resource(struct resource *root,
					     unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}

static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap);
	create_proc_read_entry("dvma_map",0,NULL,_sparc_io_get_info,&_sparc_dvma);
#endif
}