#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern struct bus_type pci_bus_type;

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir);

#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)			(1)

static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}
}

static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY

extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

#endif /* __ASM_SH_DMA_MAPPING_H */
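
/*
 * Illustrative sketch, not part of the original header: roughly how a
 * driver could drive the streaming API declared above for a buffer the
 * device will read.  The helper name "example_dma_tx" and the "dev"/"buf"
 * arguments are hypothetical; the DMA calls themselves are the ones
 * defined in this file.  Kept inside #if 0 so it is never compiled.
 */
#if 0
static void example_dma_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/*
	 * On the noncoherent configurations this writes the CPU cache
	 * back via dma_cache_sync() before handing out the bus address.
	 */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	/* ... program the device with "handle" and start the transfer ... */

	/* A no-op on this port, kept so callers remain portable. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}
#endif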