/* Fallback functions when the main IOMMU code is not compiled in. This
   code is roughly equivalent to i386. */
#include <linux/init.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <linux/mm.h>

#include <asm/iommu.h>
#include <asm/processor.h>
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	/* Reject the mapping if the device cannot address this buffer. */
	if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) {
		if (*hwdev->dma_mask >= DMA_32BIT_MASK)
			printk(KERN_ERR
			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}
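/*
 * Illustrative aside, not part of the original file: the capability test
 * used by check_addr() is just a range check against the device's DMA
 * mask.  In kernels of this era is_buffer_dma_capable() is roughly:
 *
 *	static inline int
 *	is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
 *	{
 *		return addr + size <= mask;
 *	}
 *
 * i.e. a 32-bit mask rejects any buffer that extends beyond 4GB.
 */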
static dma_addr_t
nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
		 int direction)
{
	/* With no IOMMU the bus address is simply the physical address. */
	dma_addr_t bus = paddr;

	WARN_ON(size == 0);
	if (!check_addr("map_single", hwdev, bus, size))
		return bad_dma_address;
	flush_write_buffers();
	return bus;
}
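/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * with nommu_dma_ops installed, dma_map_single() resolves to the routine
 * above, so the returned handle is simply the buffer's physical address
 * and failure is signalled via bad_dma_address.
 */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (*handle == bad_dma_address)
		return -EIO;	/* buffer lies above the device's DMA mask */
	return 0;
}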
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
			int nents, int direction)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		/* Identity mapping: the dma address is the physical one. */
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}
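/*
 * Illustrative sketch (hypothetical, not in the original file): a caller
 * honours the contract described in the comment above by iterating over
 * the count dma_map_sg() returns, which may be smaller than nents, and
 * by reading the results back through sg_dma_address()/sg_dma_len().
 */
static void example_program_sglist(struct device *dev,
				   struct scatterlist *sglist, int nents)
{
	struct scatterlist *s;
	int i, mapped;

	mapped = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	for_each_sg(sglist, s, mapped, i) {
		/* hand sg_dma_address(s) and sg_dma_len(s) to the device */
	}
}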
static void *
nommu_alloc_coherent(struct device *hwdev, size_t size,
		     dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long dma_mask;
	int node;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(hwdev, gfp);

	gfp |= __GFP_ZERO;

	node = dev_to_node(hwdev);
again:
	page = alloc_pages_node(node, gfp, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (!is_buffer_dma_capable(dma_mask, addr, size) && !(gfp & GFP_DMA)) {
		/* Pages landed above the device's mask: retry from ZONE_DMA. */
		free_pages((unsigned long)page_address(page), get_order(size));
		gfp |= GFP_DMA;
		goto again;
	}

	if (check_addr("alloc_coherent", hwdev, addr, size)) {
		*dma_addr = addr;
		flush_write_buffers();
		return page_address(page);
	}

	free_pages((unsigned long)page_address(page), get_order(size));

	return NULL;
}
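/*
 * Illustrative sketch (hypothetical, not in the original file): a typical
 * caller.  dma_alloc_coherent() lands in the routine above, so *handle
 * receives the physical address of freshly zeroed pages.
 */
static void *example_alloc_ring(struct device *dev, dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
}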
static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_addr)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
struct dma_mapping_ops nommu_dma_ops = {
	.alloc_coherent = nommu_alloc_coherent,
	.free_coherent = nommu_free_coherent,
	.map_single = nommu_map_single,
	.map_sg = nommu_map_sg,
	.is_phys = 1,
};
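/*
 * Illustrative aside, not part of the original file: the generic x86
 * wrappers dispatch through this table, along the lines of
 *
 *	dma_addr_t dma_map_single(struct device *hwdev, void *ptr,
 *				  size_t size, int direction)
 *	{
 *		return dma_ops->map_single(hwdev, virt_to_phys(ptr),
 *					   size, direction);
 *	}
 *
 * so once no_iommu_init() below installs nommu_dma_ops, every streaming
 * mapping is an identity mapping.
 */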
void __init no_iommu_init(void)
{
	if (dma_ops)
		return;

	force_iommu = 0; /* no HW IOMMU */
	dma_ops = &nommu_dma_ops;
}