#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

struct pci_dev;
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
			      dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle);

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(NULL, size, vaddr, dma_handle);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	unsigned long start = (unsigned long) vaddr;
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp %0, 0" : : "r" (s));
}

static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}
}

static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family defines its own L1_CACHE_SHIFT, and
	 * L1_CACHE_BYTES is derived from it, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */