PowerPC: adapt for dma_map_ops changes
arch/powerpc/kernel/dma.c
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
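/*
 * Illustrative sketch (not part of this file): a platform whose devices see
 * RAM at a non-zero bus address would typically program the offset per
 * device during bus or device setup. This assumes the set_dma_offset()
 * helper from <asm/dma-mapping.h>; the function name below is hypothetical.
 *
 *      (RAM appears to devices at bus address phys + 0x80000000.)
 *
 *      static void example_plat_dma_dev_setup(struct device *dev)
 *      {
 *              set_dma_offset(dev, 0x80000000ull);
 *      }
 */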
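/*
 * Allocate a "coherent" buffer. On cache-coherent CPUs this is a plain page
 * allocation on the device's NUMA node; with CONFIG_NOT_COHERENT_CACHE the
 * work is delegated to __dma_alloc_coherent(), which hands back memory the
 * CPU accesses through an uncached mapping. Either way the returned DMA
 * address includes the per-device offset.
 */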
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag,
                                struct dma_attrs *attrs)
{
        void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
        ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_offset(dev);
        return ret;
#else
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

        return ret;
#endif
}

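/*
 * Release a buffer obtained from dma_direct_alloc_coherent(), using the
 * free path that matches the allocation path above.
 */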
void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle,
                              struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

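/*
 * Map a scatterlist: with a linear mapping each segment's DMA address is
 * just its physical address plus the per-device offset. __dma_sync_page()
 * compiles away on coherent CPUs and performs the required cache
 * flush/invalidate on non-coherent ones.
 */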
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}

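/*
 * Nothing to tear down for a linear mapping; non-coherent cache maintenance
 * is done at map time and through the sync_* callbacks further down.
 */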
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
}

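/*
 * A device can use the direct ops only if its DMA mask covers the highest
 * RAM address it may be handed, i.e. the end of memory plus the per-device
 * offset. For example (assuming a zero offset), a device limited to
 * DMA_BIT_MASK(32) fails this check on a ppc64 box with more than 4GB of
 * RAM and has to use an IOMMU or bounce buffering instead.
 */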
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        /* Could be improved so platforms can set the limit in case
         * they have limited DMA windows
         */
        return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
        return 1;
#endif
}

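/*
 * Report a mask wide enough for the highest DMA address the device could
 * see: the result is an all-ones value of the same bit width as end,
 * i.e. 2^fls64(end) - 1. Worked example, assuming 4GB of RAM and a zero
 * offset: end = 0x100000000, fls64(end) = 33, so mask starts as 1ULL << 32
 * and mask += mask - 1 yields 0x1ffffffff, a 33-bit mask.
 */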
static u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 end, mask;

        end = memblock_end_of_DRAM() + get_dma_offset(dev);

        mask = 1ULL << (fls64(end) - 1);
        mask += mask - 1;

        return mask;
}

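/*
 * Map a single page: flush/invalidate its cache lines on non-coherent CPUs
 * (a no-op otherwise) and return the physical address plus the per-device
 * offset. The matching unmap is empty for the same reason unmap_sg is.
 */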
static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
                                             struct dma_attrs *attrs)
{
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
        return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
}

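/*
 * The sync_* callbacks only exist when the CPU caches are not DMA-coherent:
 * they write back or invalidate the affected cache lines around device
 * accesses, depending on the transfer direction.
 */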
#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

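/*
 * The default ops table handed to devices on directly mapped busses.
 * A minimal usage sketch (assuming the powerpc set_dma_ops() and
 * set_dma_offset() helpers, and a platform with no bus offset):
 *
 *      set_dma_ops(dev, &dma_direct_ops);
 *      set_dma_offset(dev, 0);
 */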
struct dma_map_ops dma_direct_ops = {
        .alloc                  = dma_direct_alloc_coherent,
        .free                   = dma_direct_free_coherent,
        .map_sg                 = dma_direct_map_sg,
        .unmap_sg               = dma_direct_unmap_sg,
        .dma_supported          = dma_direct_dma_supported,
        .map_page               = dma_direct_map_page,
        .unmap_page             = dma_direct_unmap_page,
        .get_required_mask      = dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu    = dma_direct_sync_single,
        .sync_single_for_device = dma_direct_sync_single,
        .sync_sg_for_cpu        = dma_direct_sync_sg,
        .sync_sg_for_device     = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

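/*
 * Setting a device's DMA mask can be overridden at two levels before the
 * generic check runs: first by the platform via ppc_md.dma_set_mask, then by
 * the device's dma_map_ops if it provides a set_dma_mask hook; otherwise the
 * mask is validated with dma_supported() and stored.
 */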
int dma_set_mask(struct device *dev, u64 dma_mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (ppc_md.dma_set_mask)
                return ppc_md.dma_set_mask(dev, dma_mask);
        if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
                return dma_ops->set_dma_mask(dev, dma_mask);
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

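/*
 * Same precedence as dma_set_mask(): the platform hook wins, then the
 * per-device ops, with a mask as wide as dma_addr_t as the fallback.
 */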
u64 dma_get_required_mask(struct device *dev)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (ppc_md.dma_get_required_mask)
                return ppc_md.dma_get_required_mask(dev);

        if (unlikely(dma_ops == NULL))
                return 0;

        if (dma_ops->get_required_mask)
                return dma_ops->get_required_mask(dev);

        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

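/*
 * Boot-time hook: size the dma-debug tracking pool; this should amount to a
 * no-op unless CONFIG_DMA_API_DEBUG is enabled.
 */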
static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);

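/*
 * Map a coherent buffer into user space. With CONFIG_NOT_COHERENT_CACHE the
 * buffer lives in an uncached kernel mapping, so the user mapping is made
 * non-cached too and the pfn is looked up from that mapping via
 * __dma_get_coherent_pfn(); on coherent CPUs the buffer is ordinary lowmem
 * and virt_to_page() is enough.
 */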
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                      void *cpu_addr, dma_addr_t handle, size_t size)
{
        unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
        pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
        return remap_pfn_range(vma, vma->vm_start,
                               pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_coherent);