dma-mapping: replace all DMA_32BIT_MASK macro with DMA_BIT_MASK(32)
deliverable/linux.git: arch/powerpc/kernel/dma.c
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

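/*
 * Worked example (values made up, not from this file): if platform code
 * stores an offset of 0x80000000 in dev->archdata.dma_data, a buffer at
 * physical address 0x1000 is handed to the device as bus address
 * 0x80000000 + 0x1000 = 0x80001000. If no per-device value is set, the
 * helper below falls back to PCI_DRAM_OFFSET.
 */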
static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (dev)
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET;
}

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_direct_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

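/*
 * Usage sketch (illustrative, not part of this file): a driver reaches the
 * two routines above through the generic dma_alloc_coherent() and
 * dma_free_coherent() wrappers once dma_direct_ops is installed for its
 * device. "dev" and the one-page size are placeholders.
 */
static int example_coherent_alloc(struct device *dev)
{
	dma_addr_t bus_addr;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
	if (cpu_addr == NULL)
		return -ENOMEM;

	/* ... program bus_addr into the device, touch cpu_addr from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
	return 0;
}
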
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

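/*
 * Usage sketch (illustrative, not part of this file): mapping a single-entry
 * scatterlist through the generic dma_map_sg() wrapper, which lands in
 * dma_direct_map_sg() above. The direct implementation cannot fail, so the
 * full entry count comes back. "buf" and "len" are placeholders.
 */
static void example_map_sg(struct device *dev, void *buf, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	if (dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE) == 0)
		return;		/* mapping failed */

	/* ... let the device DMA from sg_dma_address(&sg) ... */

	dma_unmap_sg(dev, &sg, 1, DMA_TO_DEVICE);
}
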
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved to check against actual memory, though that had
	 * better be done via some global so platforms can set the limit in
	 * case they have limited DMA windows
	 */
	return mask >= DMA_BIT_MASK(32);
#else
	return 1;
#endif
}

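/*
 * Usage sketch (illustrative, not part of this file): the check above is
 * what a driver's dma_set_mask() call ends up in. A device advertising a
 * 32-bit mask passes on ppc64, since DMA_BIT_MASK(32) == 0xffffffffULL is
 * exactly the floor enforced above; anything narrower is refused.
 */
static int example_set_mask(struct device *dev)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;	/* rejected by dma_direct_dma_supported() */
	return 0;
}
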
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single_range(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle + offset), size, direction);
}
#endif

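/*
 * Usage sketch (illustrative, not part of this file): on a non-coherent
 * platform, ownership of a streaming buffer is handed back and forth with
 * explicit sync calls, which reach dma_direct_sync_single_range() above.
 */
static void example_sync(struct device *dev, dma_addr_t handle, size_t size)
{
	/* CPU takes ownership to read data the device just wrote. */
	dma_sync_single_range_for_cpu(dev, handle, 0, size, DMA_FROM_DEVICE);

	/* ... CPU inspects the buffer ... */

	/* Hand the buffer back to the device. */
	dma_sync_single_range_for_device(dev, handle, 0, size, DMA_FROM_DEVICE);
}
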
struct dma_mapping_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
	.map_page	= dma_direct_map_page,
	.unmap_page	= dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_range_for_cpu	= dma_direct_sync_single_range,
	.sync_single_range_for_device	= dma_direct_sync_single_range,
	.sync_sg_for_cpu		= dma_direct_sync_sg,
	.sync_sg_for_device		= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);
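
/*
 * Wiring sketch (illustrative, not part of this file): platform setup code
 * would install these ops on a device during bus fixup, alongside any DMA
 * offset. set_dma_ops() is assumed here as the powerpc accessor for
 * archdata.dma_ops; treat the hook name and the offset value as assumptions.
 */
static void example_install_dma_direct(struct device *dev)
{
	set_dma_ops(dev, &dma_direct_ops);
	dev->archdata.dma_data = (void *)PCI_DRAM_OFFSET;	/* explicit default */
}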