/*
 * include/asm-sh64/dma-mapping.h
 */
#ifndef __ASM_SH64_DMA_MAPPING_H
#define __ASM_SH64_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/device.h>
#include <asm/scatterlist.h>
#include <asm/cache.h>
#include <asm/io.h>

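/*
 * Backing allocator for coherent DMA memory, implemented elsewhere in
 * the architecture code.  The pci_dev argument is unused on this
 * platform, which is why the wrappers below simply pass NULL.
 */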
struct pci_dev;
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
			      dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle);

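/* Every device is assumed to be DMA-capable; no mask checking is done. */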
#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

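/*
 * Coherent allocations are handed straight through to consistent_alloc()
 * and consistent_free(); the struct device and gfp flag are ignored.
 * A minimal, hypothetical driver usage sketch:
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu, handle);
 */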
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(NULL, size, vaddr, dma_handle);
}

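/*
 * There is no separate non-coherent allocator here, so the non-coherent
 * variants alias the coherent ones, and all handles are reported as
 * consistent.
 */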
#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)			(1)

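/*
 * Write back and invalidate every cache line overlapping
 * [vaddr, vaddr + size) with the sh64 "ocbp" (operand cache block purge)
 * instruction, making the buffer safe for a non-coherent DMA master.
 */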
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	/* Do the arithmetic and masking on an integer, not a void pointer. */
	unsigned long s = (unsigned long) vaddr & L1_CACHE_ALIGN_MASK;
	unsigned long e = ((unsigned long) vaddr + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp	%0, 0" : : "r" (s));
}

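/*
 * Map a single buffer for streaming DMA.  On a cache-coherent PCI bus
 * the physical address is used directly; otherwise the buffer is purged
 * from the cache first.  A hypothetical caller sketch:
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		goto map_failed;
 */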
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

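/*
 * Map a scatterlist.  On non-coherent buses each entry is purged from
 * the cache individually, and every entry's bus address is recorded in
 * its dma_address field.
 */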
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

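/*
 * Page mappings simply reuse dma_map_single() on the page's kernel
 * virtual address; this assumes page_address() is always valid here
 * (i.e. no highmem pages are handed in).
 */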
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

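/*
 * Re-synchronize a mapped region with memory: a no-op on a coherent PCI
 * bus, a cache purge of the affected lines otherwise.
 */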
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

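/*
 * Re-synchronize a scatterlist, mirroring what dma_map_sg() does.
 */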
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

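/*
 * The *_for_cpu() and *_for_device() variants all funnel into the plain
 * sync routines above; the two directions are not treated differently
 * on this platform.
 */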
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family defines its own L1_CACHE_SHIFT, and
	 * L1_CACHE_BYTES is derived from it, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

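/* Bus address zero is used as the error marker. */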
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#endif /* __ASM_SH64_DMA_MAPPING_H */