/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8

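/*
 * Gate the controller's DMA mode through the platform-provided enable
 * hook. DMA is only usable when both channels were acquired, so a
 * missing channel on either side keeps the controller in PIO mode.
 */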
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (!host->chan_tx || !host->chan_rx)
		return;

	if (host->dma->enable)
		host->dma->enable(host, enable);
}

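/*
 * Abort any in-flight descriptors on both channels. DMA is switched off
 * around the terminate calls and re-enabled afterwards, so the channels
 * come back in a clean state for the next request.
 */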
void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	tmio_mmc_enable_dma(host, false);

	if (host->chan_rx)
		dmaengine_terminate_all(host->chan_rx);
	if (host->chan_tx)
		dmaengine_terminate_all(host->chan_tx);

	tmio_mmc_enable_dma(host, true);
}

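/*
 * Prepare and submit a device-to-memory transfer. The scatterlist is
 * first scanned against the controller's alignment constraints: a
 * single unaligned element can be redirected through the bounce page,
 * but anything more complex, or a segment length that is not a multiple
 * of the alignment, forces a fallback to PIO. Transfers shorter than
 * TMIO_MMC_MIN_DMA_LEN are not worth the DMA setup cost either.
 */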
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

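	/*
	 * Map the (possibly substituted) scatterlist and hand it to the
	 * dmaengine. Any failure below leaves desc == NULL and falls
	 * through to the PIO fallback at the "pio" label.
	 */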
	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}

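/*
 * The memory-to-device path mirrors tmio_mmc_start_dma_rx() above, with
 * one addition: when the single unaligned element is bounced, the data
 * has to be copied into the bounce page *before* the transfer, via a
 * temporary atomic kmap of the original sg entry.
 */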
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);

		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}

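/*
 * Entry point from the core request path: dispatch to the RX or TX
 * helper based on the data direction. With no channel for the needed
 * direction the request simply proceeds in PIO mode.
 */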
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}

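/*
 * Deferred work that actually kicks the dmaengine queue, presumably
 * scheduled once the command phase has completed. The DATAEND interrupt
 * is enabled before the descriptor is issued, because completion is
 * detected via the controller's DATAEND status rather than a dmaengine
 * callback.
 */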
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	/* host is already dereferenced above, only host->data can be unset */
	if (host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}

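/*
 * DMA completion handler, run as a tasklet once DATAEND has fired:
 * unmap the scatterlist for whichever direction was active and let the
 * core finish the data stage.
 */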
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}

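/*
 * Acquire and configure both channels at probe time. Channels come
 * either from the device tree or via a platform filter function;
 * dma_request_slave_channel_compat() tries the DT lookup first. The
 * slave addresses point at the controller's data port register, with an
 * optional platform-specific offset on the RX side, and the bus width
 * defaults to 16 bits unless the platform overrides it. A single
 * DMA-capable page serves as the bounce buffer for unaligned requests.
 */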
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!host->dma || (!host->pdev->dev.of_node &&
			   (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
		return;

	if (!host->chan_tx && !host->chan_rx) {
		struct resource *res = platform_get_resource(host->pdev,
							     IORESOURCE_MEM, 0);
		struct dma_slave_config cfg = {};
		dma_cap_mask_t mask;
		int ret;

		if (!res)
			return;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_slave_channel_compat(mask,
					host->dma->filter, pdata->chan_priv_tx,
					&host->pdev->dev, "tx");
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
		cfg.dst_addr_width = host->dma->dma_buswidth;
		if (!cfg.dst_addr_width)
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_addr = 0;
		ret = dmaengine_slave_config(host->chan_tx, &cfg);
		if (ret < 0)
			goto ecfgtx;

		host->chan_rx = dma_request_slave_channel_compat(mask,
					host->dma->filter, pdata->chan_priv_rx,
					&host->pdev->dev, "rx");
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
		cfg.src_addr_width = host->dma->dma_buswidth;
		if (!cfg.src_addr_width)
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr = 0;
		ret = dmaengine_slave_config(host->chan_rx, &cfg);
		if (ret < 0)
			goto ecfgrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

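/*
 * Undo tmio_mmc_request_dma(): drop both channels and free the bounce
 * page. Each channel pointer is cleared before the release so the DMA
 * paths checking host->chan_* no longer see it.
 */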
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;

		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;

		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}