/*
 * ioport.c:  Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables?  It's only two or three levels after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Sounds reasonable
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/pgalloc.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>
#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
static struct resource *_sparc_find_resource(struct resource *r,
					     unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
				     unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);
/* This points to the next to use virtual memory for DVMA mappings */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This points to the start of I/O mappings, visible from outside. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};
/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and the interrupt controller before kmalloc is available.
 */

#define XNMLN	15	/* length of resource names kept in struct xresource */
#define XNRES	10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];
static struct xresource *xres_alloc(void) {
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}
static void xres_free(struct xresource *xrp) {
	xrp->xflag = 0;
}
/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];	/* "phys_" + 8 hex digits + NUL */

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}
EXPORT_SYMBOL(ioremap);
/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
EXPORT_SYMBOL(iounmap);
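
/*
 * Illustrative sketch only (not part of this file): a driver would typically
 * pair ioremap()/iounmap() like this.  The physical address, size and the
 * MY_REG register offset below are hypothetical.
 *
 *	void __iomem *regs = ioremap(0xf0200000, 0x1000);
 *	if (regs != NULL) {
 *		u32 v = readl(regs + MY_REG);	// MY_REG is a made-up offset
 *		writel(v | 1, regs + MY_REG);
 *		iounmap(regs);
 *	}
 */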
void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);
void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
				     unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL) name = "???";

	if ((xres = xres_alloc()) != 0) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL) return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof (struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL)? res->name: "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

	return (void __iomem *)(unsigned long)(res->start + offset);
}
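
/*
 * Worked example of the arithmetic above (illustrative values, assuming
 * 4 KB pages): for pa = 0x10001234 and sz = 0x100, offset = 0x234 and the
 * rounded allocation size is (0x234 + 0x100 + 0xFFF) & PAGE_MASK = 0x1000,
 * i.e. one page.  sparc_mapiorange() then maps the page at
 * pa & PAGE_MASK = 0x10001000, and the caller gets res->start + 0x234 back.
 */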
/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = res->end - res->start + 1;
	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
	sparc_unmapiorange(res->start, plen);
	release_resource(res);
}
#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);
/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
static void *sbus_alloc_coherent(struct device *dev, size_t len,
				 dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct of_device *op = to_of_device(dev);
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return NULL;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);
	// XXX The mmu_map_dma_area does this for us below, see comments.
	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
	/*
	 * XXX That's where sdev would be used. Currently we load
	 * all iommu tables with the same translations.
	 */
	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->node->name;

	return (void *)(unsigned long)res->start;

	/* Unwind in reverse order of acquisition so that every
	 * failure path frees exactly what it allocated. */
err_noiommu:
	release_resource(res);
err_nova:
	kfree(res);
err_nomem:
	free_pages(va, order);
err_nopages:
	return NULL;
}
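
/*
 * Illustrative sketch only (not from this file): the driver-side view of
 * the coherent path above, reached through the dma_map_ops table exported
 * below.  The descriptor type and ring size are hypothetical.
 *
 *	struct my_desc *ring;			// hypothetical descriptor type
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (ring != NULL) {
 *		ring[0].next = ring_dma;	// plain CPU store, no flushing needed
 *		dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 *	}
 */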
static void sbus_free_coherent(struct device *dev, size_t n, void *p,
			       dma_addr_t ba)
{
	struct resource *res;
	struct page *pgv;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
		    (long)((res->end-res->start)+1), n);
		return;
	}

	release_resource(res);
	kfree(res);

	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
	pgv = virt_to_page(p);
	mmu_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}
/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t len,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	void *va = page_address(page) + offset;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}

	return mmu_get_scsi_one(dev, va, len);
}
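
/*
 * Illustrative sketch only: streaming mappings made through the op above
 * follow the usual map/use/unmap discipline.  The page and length are
 * hypothetical; error checking is elided for brevity.
 *
 *	dma_addr_t ba = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	// ... point the device at ba and start the transfer ...
 *	dma_unmap_page(dev, ba, len, DMA_TO_DEVICE);
 */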
static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_one(dev, ba, n);
}
static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_get_scsi_sgl(dev, sg, n);

	/*
	 * XXX sparc64 can return a partial length here. sun4c should do this
	 * but it currently panics if it can't fulfill the request - Anton
	 */
	return n;
}
static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_sgl(dev, sg, n);
}
static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				 int n, enum dma_data_direction dir)
{
	BUG();
}

static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				    int n, enum dma_data_direction dir)
{
	BUG();
}
struct dma_map_ops sbus_dma_ops = {
	.alloc_coherent		= sbus_alloc_coherent,
	.free_coherent		= sbus_free_coherent,
	.map_page		= sbus_map_page,
	.unmap_page		= sbus_unmap_page,
	.map_sg			= sbus_map_sg,
	.unmap_sg		= sbus_unmap_sg,
	.sync_sg_for_cpu	= sbus_sync_sg_for_cpu,
	.sync_sg_for_device	= sbus_sync_sg_for_device,
};

struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);
static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();

	return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */
#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices.
 */
static void *pci32_alloc_coherent(struct device *dev, size_t len,
				  dma_addr_t *pba, gfp_t gfp)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;
}
/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
static void pci32_free_coherent(struct device *dev, size_t n, void *p,
				dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}
/*
 * Same as pci_map_single, but with pages.
 */
static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		BUG_ON(page_address(sg_page(sg)) == NULL);
		sg->dma_address = virt_to_phys(sg_virt(sg));
		sg->dma_length = sg->length;
	}
	return nents;
}
/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_for_device, and then the
 * device again owns the buffer.
 */
static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
				      size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
					 size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
				  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
				     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
struct dma_map_ops pci32_dma_ops = {
	.alloc_coherent		= pci32_alloc_coherent,
	.free_coherent		= pci32_free_coherent,
	.map_page		= pci32_map_page,
	.map_sg			= pci32_map_sg,
	.unmap_sg		= pci32_unmap_sg,
	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
	.sync_single_for_device	= pci32_sync_single_for_device,
	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
	.sync_sg_for_device	= pci32_sync_sg_for_device,
};
EXPORT_SYMBOL(pci32_dma_ops);

#endif /* CONFIG_PCI */
#ifdef CONFIG_PROC_FS

static int
_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
		   void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == 0) nm = "???";
		p += sprintf(p, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return p-buf;
}

#endif /* CONFIG_PROC_FS */
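
/*
 * Illustrative sample of the output format produced above; the addresses
 * and the mapping name are made up:
 *
 *	$ cat /proc/io_map
 *	00000000fe000000-00000000fe000fff: phys_f0200000
 */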
/*
 * This is a version of find_resource and it belongs to kernel/resource.c.
 * Until we have agreement with Linus and Martin, it lingers here.
 *
 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
 * This probably warrants some sort of hashing.
 */
static struct resource *_sparc_find_resource(struct resource *root,
					     unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}
static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map", 0, NULL, _sparc_io_get_info, &sparc_iomap);
	create_proc_read_entry("dvma_map", 0, NULL, _sparc_io_get_info, &_sparc_dvma);
#endif
}
);