/*
 * Copyright (C) 2005-2006 by Texas Instruments
 *
 * This file implements a DMA interface using TI's CPPI DMA.
 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/usb.h>

#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"


/* CPPI DMA status 7-mar-2006:
 *
 * - See musb_{host,gadget}.c for more info
 *
 * - Correct RX DMA generally forces the engine into irq-per-packet mode,
 *   which can easily saturate the CPU under non-mass-storage loads.
 *
 * NOTES 24-aug-2006 (2.6.18-rc4):
 *
 * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
 *   Evidently after the 1 byte packet was received and acked, the queue
 *   of BDs got garbaged so it wouldn't empty the fifo.  (rxcsr 0x2003,
 *   and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
 *   004001ff 00000001 .. 8feff860)  Host was just getting NAKed on tx
 *   of its next (512 byte) packet.  IRQ issues?
 *
 * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
 * evidently also directly update the RX and TX CSRs ... so audit all
 * host and peripheral side DMA code to avoid CSR access after DMA has
 * been started.
 */

/* REVISIT now we can avoid preallocating these descriptors; or
 * more simply, switch to a global freelist not per-channel ones.
 * Note: at full speed, 64 descriptors == 4K bulk data
 * (64 BDs x 64-byte maxpacket).
 */
#define NUM_TXCHAN_BD		64
#define NUM_RXCHAN_BD		64

static inline void cpu_drain_writebuffer(void)
{
	wmb();
#ifdef CONFIG_CPU_ARM926T
	/* REVISIT this "should not be needed",
	 * but lack of it sure seemed to hurt ...
	 */
	asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}

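/* Per-channel BD freelists are simple LIFO stacks: cppi_bd_alloc() pops
 * the most recently freed descriptor, cppi_bd_free() pushes one back.
 */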
static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
	struct cppi_descriptor	*bd = c->freelist;

	if (bd)
		c->freelist = bd->next;
	return bd;
}

static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
	if (!bd)
		return;
	bd->next = c->freelist;
	c->freelist = bd;
}

/*
 *  Start DMA controller
 *
 *  Initialize the DMA controller as necessary.
 */

/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
	musb_writel(&rx->rx_skipbytes, 0, 0);
	musb_writel(&rx->rx_head, 0, 0);
	musb_writel(&rx->rx_sop, 0, 0);
	musb_writel(&rx->rx_current, 0, 0);
	musb_writel(&rx->rx_buf_current, 0, 0);
	musb_writel(&rx->rx_len_len, 0, 0);
	musb_writel(&rx->rx_cnt_cnt, 0, 0);
}

/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
	musb_writel(&tx->tx_head, 0, 0);
	musb_writel(&tx->tx_buf, 0, 0);
	musb_writel(&tx->tx_current, 0, 0);
	musb_writel(&tx->tx_buf_current, 0, 0);
	musb_writel(&tx->tx_info, 0, 0);
	musb_writel(&tx->tx_rem_len, 0, 0);
	/* musb_writel(&tx->tx_dummy, 0, 0); */
	musb_writel(&tx->tx_complete, 0, ptr);
}

static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
	int	j;

	/* initialize channel fields */
	c->head = NULL;
	c->tail = NULL;
	c->last_processed = NULL;
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = cppi;
	c->is_rndis = 0;
	c->freelist = NULL;

	/* build the BD Free list for the channel */
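	/* (one BD more than NUM_TXCHAN_BD, evidently so a completed BD
	 * can sit in last_processed while a full-length queue is built)
	 */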
	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
		struct cppi_descriptor	*bd;
		dma_addr_t		dma;

		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
		if (!bd)
			break;
		bd->dma = dma;
		cppi_bd_free(c, bd);
	}
}

static int cppi_channel_abort(struct dma_channel *);

static void cppi_pool_free(struct cppi_channel *c)
{
	struct cppi		*cppi = c->controller;
	struct cppi_descriptor	*bd;

	(void) cppi_channel_abort(&c->channel);
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = NULL;

	/* free all its bds */
	bd = c->last_processed;
	do {
		if (bd)
			dma_pool_free(cppi->pool, bd, bd->dma);
		bd = cppi_bd_alloc(c);
	} while (bd);
	c->last_processed = NULL;
}

static int __init cppi_controller_start(struct dma_controller *c)
{
	struct cppi	*controller;
	void __iomem	*tibase;
	int		i;

	controller = container_of(c, struct cppi, controller);

	/* do whatever is necessary to start controller */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].transmit = true;
		controller->tx[i].index = i;
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		controller->rx[i].transmit = false;
		controller->rx[i].index = i;
	}

	/* setup BD list on a per channel basis */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
		cppi_pool_init(controller, controller->tx + i);
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_init(controller, controller->rx + i);

	tibase = controller->tibase;
	INIT_LIST_HEAD(&controller->tx_complete);

	/* initialise tx/rx channel head pointers to zero */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		struct cppi_channel	*tx_ch = controller->tx + i;
		struct cppi_tx_stateram __iomem *tx;

		INIT_LIST_HEAD(&tx_ch->tx_complete);

		tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
		tx_ch->state_ram = tx;
		cppi_reset_tx(tx, 0);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		struct cppi_channel	*rx_ch = controller->rx + i;
		struct cppi_rx_stateram __iomem *rx;

		INIT_LIST_HEAD(&rx_ch->tx_complete);

		rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
		rx_ch->state_ram = rx;
		cppi_reset_rx(rx);
	}

	/* enable individual cppi channels */
	musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	/* enable tx/rx CPPI control */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);

	/* disable RNDIS mode, also host rx RNDIS autorequest */
	musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
	musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);

	return 0;
}

/*
 *  Stop DMA controller
 *
 *  De-Init the DMA controller as necessary.
 */

static int cppi_controller_stop(struct dma_controller *c)
{
	struct cppi		*controller;
	void __iomem		*tibase;
	int			i;

	controller = container_of(c, struct cppi, controller);

	tibase = controller->tibase;
	/* DISABLE INDIVIDUAL CHANNEL Interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	DBG(1, "Tearing down RX and TX Channels\n");
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		/* FIXME restructure of txdma to use bds like rxdma */
		controller->tx[i].last_processed = NULL;
		cppi_pool_free(controller->tx + i);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_free(controller->rx + i);

	/* In the TX case proper teardown is supported: we disable TX/RX
	 * CPPI only after the TX channels are cleaned up, since TX CPPI
	 * cannot be disabled before TX teardown completes.
	 */
	/* disable tx/rx cppi */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);

	return 0;
}

/* While dma channel is allocated, we only want the core irqs active
 * for fault reports, otherwise we'd get irqs that we don't care about.
 * Except for TX irqs, where dma done != fifo empty and reusable ...
 *
 * NOTE: docs don't say either way, but irq masking **enables** irqs.
 *
 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
 */
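/* rx endpoint irqs sit at bit (epnum + 8) of the mask set/clear registers */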
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}

static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}


/*
 * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
 * each transfer direction of a non-control endpoint, so allocating
 * (and deallocating) is mostly a way to notice bad housekeeping on
 * the software side.  We assume the irqs are always active.
 */
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
		struct musb_hw_ep *ep, u8 transmit)
{
	struct cppi		*controller;
	u8			index;
	struct cppi_channel	*cppi_ch;
	void __iomem		*tibase;

	controller = container_of(c, struct cppi, controller);
	tibase = controller->tibase;

	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
	index = ep->epnum - 1;

	/* return the corresponding CPPI Channel Handle, and
	 * probably disable the non-CPPI irq until we need it.
	 */
	if (transmit) {
		if (index >= ARRAY_SIZE(controller->tx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'T', index);
			return NULL;
		}
		cppi_ch = controller->tx + index;
	} else {
		if (index >= ARRAY_SIZE(controller->rx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'R', index);
			return NULL;
		}
		cppi_ch = controller->rx + index;
		core_rxirq_disable(tibase, ep->epnum);
	}

	/* REVISIT make this an error later once the same driver code works
	 * with the other DMA engine too
	 */
	if (cppi_ch->hw_ep)
		DBG(1, "re-allocating DMA%d %cX channel %p\n",
				index, transmit ? 'T' : 'R', cppi_ch);
	cppi_ch->hw_ep = ep;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;

	DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
	return &cppi_ch->channel;
}

/* Release a CPPI Channel.  */
static void cppi_channel_release(struct dma_channel *channel)
{
	struct cppi_channel	*c;
	void __iomem		*tibase;

	/* REVISIT: for paranoia, check state and abort if needed... */

	c = container_of(channel, struct cppi_channel, channel);
	tibase = c->controller->tibase;
	if (!c->hw_ep)
		DBG(1, "releasing idle DMA channel %p\n", c);
	else if (!c->transmit)
		core_rxirq_enable(tibase, c->index + 1);

	/* for now, leave its cppi IRQ enabled (we won't trigger it) */
	c->hw_ep = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}

/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_rx_stateram __iomem	*rx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "RX DMA%d%s: %d left, csr %04x, "
			"%08x H%08x S%08x C%08x, "
			"B%08x L%08x %08x .. %08x"
			"\n",
		c->index, tag,
		musb_readl(c->controller->tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
		musb_readw(c->hw_ep->regs, MUSB_RXCSR),

		musb_readl(&rx->rx_skipbytes, 0),
		musb_readl(&rx->rx_head, 0),
		musb_readl(&rx->rx_sop, 0),
		musb_readl(&rx->rx_current, 0),

		musb_readl(&rx->rx_buf_current, 0),
		musb_readl(&rx->rx_len_len, 0),
		musb_readl(&rx->rx_cnt_cnt, 0),
		musb_readl(&rx->rx_complete, 0)
		);
}

/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_tx_stateram __iomem	*tx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "TX DMA%d%s: csr %04x, "
			"H%08x S%08x C%08x %08x, "
			"F%08x L%08x .. %08x"
			"\n",
		c->index, tag,
		musb_readw(c->hw_ep->regs, MUSB_TXCSR),

		musb_readl(&tx->tx_head, 0),
		musb_readl(&tx->tx_buf, 0),
		musb_readl(&tx->tx_current, 0),
		musb_readl(&tx->tx_buf_current, 0),

		musb_readl(&tx->tx_info, 0),
		musb_readl(&tx->tx_rem_len, 0),
		/* dummy/unused word 6 */
		musb_readl(&tx->tx_complete, 0)
		);
}

/* Context: controller irqlocked */
static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
		void __iomem *tibase, int is_rndis)
{
	/* we may need to change the rndis flag for this cppi channel */
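	/* (DAVINCI_RNDIS_REG keeps per-channel mode bits: TX channels in
	 * the low halfword, RX channels shifted into the high halfword)
	 */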
	if (c->is_rndis != is_rndis) {
		u32	value = musb_readl(tibase, DAVINCI_RNDIS_REG);
		u32	temp = 1 << (c->index);

		if (is_rx)
			temp <<= 16;
		if (is_rndis)
			value |= temp;
		else
			value &= ~temp;
		musb_writel(tibase, DAVINCI_RNDIS_REG, value);
		c->is_rndis = is_rndis;
	}
}

#ifdef CONFIG_USB_MUSB_DEBUG
static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
	pr_debug("RXBD/%s %08x: "
			"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
			tag, bd->dma,
			bd->hw_next, bd->hw_bufp, bd->hw_off_len,
			bd->hw_options);
}
#endif

static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
#ifdef CONFIG_USB_MUSB_DEBUG
	struct cppi_descriptor	*bd;

	if (!_dbg_level(level))
		return;
	cppi_dump_rx(level, rx, tag);
	if (rx->last_processed)
		cppi_dump_rxbd("last", rx->last_processed);
	for (bd = rx->head; bd; bd = bd->next)
		cppi_dump_rxbd("active", bd);
#endif
}


/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
 * so we won't ever use it (see "CPPI RX Woes" below).
 */
static inline int cppi_autoreq_update(struct cppi_channel *rx,
		void __iomem *tibase, int onepacket, unsigned n_bds)
{
	u32	val;

#ifdef RNDIS_RX_IS_USABLE
	u32	tmp;
	/* assert(is_host_active(musb)) */

	/* start from "AutoReq never" */
	tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
	val = tmp & ~((0x3) << (rx->index * 2));

	/* HCD arranged reqpkt for packet #1.  we arrange int
	 * for all but the last one, maybe in two segments.
	 */
	if (!onepacket) {
#if 0
		/* use two segments, autoreq "all" then the last "never" */
		val |= ((0x3) << (rx->index * 2));
		n_bds--;
#else
		/* one segment, autoreq "all-but-last" */
		val |= ((0x1) << (rx->index * 2));
#endif
	}

	if (val != tmp) {
		int n = 100;

		/* make sure that autoreq is updated before continuing */
		musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
		do {
			tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			if (tmp == val)
				break;
			cpu_relax();
		} while (n-- > 0);
	}
#endif

	/* REQPKT is turned off after each segment */
	if (n_bds && rx->channel.actual_len) {
		void __iomem	*regs = rx->hw_ep->regs;

		val = musb_readw(regs, MUSB_RXCSR);
		if (!(val & MUSB_RXCSR_H_REQPKT)) {
			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
			musb_writew(regs, MUSB_RXCSR, val);
			/* flush writebuffer */
			val = musb_readw(regs, MUSB_RXCSR);
		}
	}
	return n_bds;
}


/* Buffer enqueuing Logic:
 *
 * - RX builds new queues each time, to help handle routine "early
 *   termination" cases (faults, including errors and short reads)
 *   more correctly.
 *
 * - for now, TX reuses the same queue of BDs every time
 *
 * REVISIT long term, we want a normal dynamic model.
 * ... the goal will be to append to the
 * existing queue, processing completed "dma buffers" (segments) on the fly.
 *
 * Otherwise we force an IRQ latency between requests, which slows us a lot
 * (especially in "transparent" dma).  Unfortunately that model seems to be
 * inherent in the DMA model from the Mentor code, except in the rare case
 * of transfers big enough (~128+ KB) that we could append "middle" segments
 * in the TX paths.  (RX can't do this, see below.)
 *
 * That's true even in the CPPI-friendly iso case, where most urbs have
 * several small segments provided in a group and where the "packet at a time"
 * "transparent" DMA model is always correct, even on the RX side.
 */

/*
 * CPPI TX:
 * ========
 * TX is a lot more reasonable than RX; it doesn't need to run in
 * irq-per-packet mode very often.  RNDIS mode seems to behave well too
 * (except for how it handles the exactly-N-packets case).  Building a
 * txdma queue with multiple requests (urb or usb_request) looks like it
 * would work ... but fault handling would need much testing.
 *
 * The main issue with TX mode RNDIS relates to transfer lengths that
 * are an exact multiple of the packet length.  It appears that there's
 * a hiccup in that case (maybe the DMA completes before the ZLP gets
 * written?) boiling down to not being able to rely on CPPI writing any
 * terminating zero length packet before the next transfer is written.
 * So that's punted to PIO; better yet, gadget drivers can avoid it.
 *
 * Plus, there's allegedly an undocumented constraint that rndis transfer
 * length be a multiple of 64 bytes ... but the chip doesn't act that
 * way, and we really don't _want_ that behavior anyway.
 *
 * On TX, "transparent" mode works ... although experiments have shown
 * problems trying to use the SOP/EOP bits in different USB packets.
 *
 * REVISIT try to handle terminating zero length packets using CPPI
 * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
 * links avoid that issue by forcing them to avoid zlps.)
 */
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
	unsigned		maxpacket = tx->maxpacket;
	dma_addr_t		addr = tx->buf_dma + tx->offset;
	size_t			length = tx->buf_len - tx->offset;
	struct cppi_descriptor	*bd;
	unsigned		n_bds;
	unsigned		i;
	struct cppi_tx_stateram	__iomem *tx_ram = tx->state_ram;
	int			rndis;

	/* TX can use the CPPI "rndis" mode, where we can probably fit this
	 * transfer in one BD and one IRQ.  The only time we would NOT want
	 * to use it is when hardware constraints prevent it, or if we'd
	 * trigger the "send a ZLP?" confusion.
	 */
	rndis = (maxpacket & 0x3f) == 0
		&& length > maxpacket
		&& length < 0xffff
		&& (length % maxpacket) != 0;
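	/* (i.e. maxpacket is a 64-byte multiple, the transfer needs more
	 * than one packet, fits a 16-bit BD length, and ends with a short
	 * packet -- so no terminating ZLP is ever needed)
	 */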

	if (rndis) {
		maxpacket = length;
		n_bds = 1;
	} else {
		n_bds = length / maxpacket;
		if (!length || (length % maxpacket))
			n_bds++;
		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
		length = min(n_bds * maxpacket, length);
	}

	DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
			tx->index,
			maxpacket,
			rndis ? "rndis" : "transparent",
			n_bds,
			addr, length);

	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);

	/* assuming here that channel_program is called during
	 * transfer initiation ... current code maintains state
	 * for one outstanding request only (no queues, not even
	 * the implicit ones of an iso urb).
	 */

	bd = tx->freelist;
	tx->head = bd;
	tx->last_processed = NULL;

	/* FIXME use BD pool like RX side does, and just queue
	 * the minimum number for this request.
	 */

	/* Prepare queue of BDs first, then hand it to hardware.
	 * All BDs except maybe the last should be of full packet
	 * size; for RNDIS there _is_ only that last packet.
	 */
	for (i = 0; i < n_bds; ) {
		if (++i < n_bds && bd->next)
			bd->hw_next = bd->next->dma;
		else
			bd->hw_next = 0;

		bd->hw_bufp = tx->buf_dma + tx->offset;

		/* FIXME set EOP only on the last packet,
		 * SOP only on the first ... avoid IRQs
		 */
		if ((tx->offset + maxpacket) <= tx->buf_len) {
			tx->offset += maxpacket;
			bd->hw_off_len = maxpacket;
			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | maxpacket;
		} else {
			/* only this one may be a partial USB Packet */
			u32	partial_len;

			partial_len = tx->buf_len - tx->offset;
			tx->offset = tx->buf_len;
			bd->hw_off_len = partial_len;

			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | partial_len;
			if (partial_len == 0)
				bd->hw_options |= CPPI_ZERO_SET;
		}

		DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
				bd, bd->hw_next, bd->hw_bufp,
				bd->hw_off_len, bd->hw_options);

		/* update the last BD enqueued to the list */
		tx->tail = bd;
		bd = bd->next;
	}

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* Write to the HeadPtr in state RAM to trigger */
	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);

	cppi_dump_tx(5, tx, "/S");
}

/*
 * CPPI RX Woes:
 * =============
 * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
 * (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
 * and the next packet goes into a buffer that's queued later; while (b) fills
 * the buffer with 1024 bytes.  How to do that with CPPI?
 *
 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
 *   (b) loses **BADLY** because nothing (!) happens when that second packet
 *   fills the buffer, much less when a third one arrives.  (Which makes this
 *   not a "true" RNDIS mode.  In the RNDIS protocol short-packet termination
 *   is optional, and it's fine if peripherals -- not hosts! -- pad messages
 *   out to end-of-buffer.  Standard PCI host controller DMA descriptors
 *   implement that mode by default ... which is no accident.)
 *
 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
 *   converse problems: (b) is handled right, but (a) loses badly.  CPPI RX
 *   ignores SOP/EOP markings and processes both of those BDs; so both packets
 *   are loaded into the buffer (with a 212 byte gap between them), and the
 *   next buffer queued will NOT get its 300 bytes of data.  (It seems like
 *   SOP/EOP are intended as outputs for RX queues, not inputs...)
 *
 * - A variant of "transparent" mode -- one BD at a time -- is the only way to
 *   reliably make both cases work, with software handling both cases
 *   correctly and at the significant penalty of needing an IRQ per packet.
 *   (The lack of I/O overlap can be slightly ameliorated by enabling double
 *   buffering.)
 *
 * So how to get rid of IRQ-per-packet?  The transparent multi-BD case could
 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
 * with guaranteed driver level fault recovery and scrubbing out what's left
 * of that garbaged datastream.
 *
 * But there seems to be no way to identify the cases where CPPI RNDIS mode
 * is appropriate -- which do NOT include RNDIS host drivers, but do include
 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
 * that applies best on the peripheral side (and which could fail rudely).
 *
 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
 * cases other than mass storage class.  Otherwise we're correct but slow,
 * since CPPI penalizes our need for a "true RNDIS" default mode.
 */


/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
 *
 * IFF
 *  (a)	peripheral mode ... since rndis peripherals could pad their
 *	writes to hosts, causing i/o failure; or we'd have to cope with
 *	a largely unknowable variety of host side protocol variants
 *  (b)	and short reads are NOT errors ... since full reads would
 *	cause those same i/o failures
 *  (c)	and read length is
 *	- less than 64KB (max per cppi descriptor)
 *	- not a multiple of 4096 (g_zero default, full reads typical)
 *	- N (>1) packets long, ditto (full reads not EXPECTED)
 * THEN
 *   try rx rndis mode
 *
 * Cost of heuristic failing: RXDMA wedges at the end of transfers that
 * fill out the whole buffer.  Buggy host side usb network drivers could
 * trigger that, but "in the field" such bugs seem to be all but unknown.
 *
 * So this module parameter lets the heuristic be disabled.  When using
 * gadgetfs, the heuristic will probably need to be disabled.
 */
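/* Worked example (illustrative): a 1536 byte peripheral-side read with
 * 512 byte maxpacket -- a whole number of packets, more than one, under
 * 64KB, and not a multiple of 4096 -- runs as a single RNDIS-mode BD;
 * a 4096 byte read fails the test and stays in transparent mode.
 */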
static bool cppi_rx_rndis = true;

module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");


/**
 * cppi_next_rx_segment - dma read for the next chunk of a buffer
 * @musb: the controller
 * @rx: dma channel
 * @onepacket: true unless caller treats short reads as errors, and
 *	performs fault recovery above usbcore.
 * Context: controller irqlocked
 *
 * See above notes about why we can't use multi-BD RX queues except in
 * rare cases (mass storage class), and can never use the hardware "rndis"
 * mode (since it's not a "true" RNDIS mode) with complete safety.
 *
 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
 * code to recover from corrupted datastreams after each short transfer.
 */
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
	unsigned		maxpacket = rx->maxpacket;
	dma_addr_t		addr = rx->buf_dma + rx->offset;
	size_t			length = rx->buf_len - rx->offset;
	struct cppi_descriptor	*bd, *tail;
	unsigned		n_bds;
	unsigned		i;
	void __iomem		*tibase = musb->ctrl_base;
	int			is_rndis = 0;
	struct cppi_rx_stateram	__iomem *rx_ram = rx->state_ram;

	if (onepacket) {
		/* almost every USB driver, host or peripheral side */
		n_bds = 1;

		/* maybe apply the heuristic above */
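		/* (bitmask reading, valid for power-of-two maxpacket:
		 * under 64KB total, not a 4096 multiple, and an exact
		 * multiple of maxpacket)
		 */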
		if (cppi_rx_rndis
				&& is_peripheral_active(musb)
				&& length > maxpacket
				&& (length & ~0xffff) == 0
				&& (length & 0x0fff) != 0
				&& (length & (maxpacket - 1)) == 0) {
			maxpacket = length;
			is_rndis = 1;
		}
	} else {
		/* virtually nothing except mass storage class */
		if (length > 0xffff) {
			n_bds = 0xffff / maxpacket;
			length = n_bds * maxpacket;
		} else {
			n_bds = length / maxpacket;
			if (length % maxpacket)
				n_bds++;
		}
		if (n_bds == 1)
			onepacket = 1;
		else
			n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
	}

	/* In host mode, autorequest logic can generate some IN tokens; it's
	 * tricky since we can't leave REQPKT set in RXCSR after the transfer
	 * finishes. So: multipacket transfers involve two or more segments.
	 * And always at least two IRQs ... RNDIS mode is not an option.
	 */
	if (is_host_active(musb))
		n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);

	cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);

	length = min(n_bds * maxpacket, length);

	DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
			"dma 0x%x len %u %u/%u\n",
			rx->index, maxpacket,
			onepacket
				? (is_rndis ? "rndis" : "onepacket")
				: "multipacket",
			n_bds,
			musb_readl(tibase,
				DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
					& 0xffff,
			addr, length, rx->channel.actual_len, rx->buf_len);

	/* only queue one segment at a time, since the hardware prevents
	 * correct queue shutdown after unexpected short packets
	 */
	bd = cppi_bd_alloc(rx);
	rx->head = bd;

	/* Build BDs for all packets in this segment */
	for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
		u32	bd_len;

		if (i) {
			bd = cppi_bd_alloc(rx);
			if (!bd)
				break;
			tail->next = bd;
			tail->hw_next = bd->dma;
		}
		bd->hw_next = 0;

		/* all but the last packet will be maxpacket size */
		if (maxpacket < length)
			bd_len = maxpacket;
		else
			bd_len = length;

		bd->hw_bufp = addr;
		addr += bd_len;
		rx->offset += bd_len;

		bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
		bd->buflen = bd_len;

		bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
		length -= bd_len;
	}

	/* we always expect at least one reusable BD! */
	if (!tail) {
		WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
		return;
	} else if (i < n_bds)
		WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);

	tail->next = NULL;
	tail->hw_next = 0;

	bd = rx->head;
	rx->tail = tail;

	/* short reads and other faults should terminate this entire
	 * dma segment.  we want one "dma packet" per dma segment, not
	 * one per USB packet, terminating the whole queue at once...
	 * NOTE that current hardware seems to ignore SOP and EOP.
	 */
	bd->hw_options |= CPPI_SOP_SET;
	tail->hw_options |= CPPI_EOP_SET;

#ifdef CONFIG_USB_MUSB_DEBUG
	if (_dbg_level(5)) {
		struct cppi_descriptor	*d;

		for (d = rx->head; d; d = d->next)
			cppi_dump_rxbd("S", d);
	}
#endif

	/* in case the preceding transfer left some state... */
	tail = rx->last_processed;
	if (tail) {
		tail->next = bd;
		tail->hw_next = bd->dma;
	}

	core_rxirq_enable(tibase, rx->index + 1);

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* REVISIT specs say to write this AFTER the BUFCNT register
	 * below ... but that loses badly.
	 */
	musb_writel(&rx_ram->rx_head, 0, bd->dma);

	/* bufferCount must be at least 3, and zeroes on completion
	 * unless it underflows below zero, or stops at two, or keeps
	 * growing ... grr.
	 */
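	/* (the writes below appear to *add* to the counter: seed an empty
	 * counter with n_bds + 2 credits, else top it up so at least
	 * n_bds + 3 are available -- a hedged reading of quirky hardware)
	 */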
	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;

	if (!i)
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	else if (n_bds > (i - 3))
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds - (i - 3));

	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;
	if (i < (2 + n_bds)) {
		DBG(2, "bufcnt%d underrun - %d (for %d)\n",
					rx->index, i, n_bds);
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	}

	cppi_dump_rx(4, rx, "/S");
}

/**
 * cppi_channel_program - program channel for data transfer
 * @ch: the channel
 * @maxpacket: max packet size
 * @mode: For RX, 1 unless the usb protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery.
 *	For TX, ignored because of RNDIS mode races/glitches.
 * @dma_addr: dma address of buffer
 * @len: length of buffer
 * Context: controller irqlocked
 */
static int cppi_channel_program(struct dma_channel *ch,
		u16 maxpacket, u8 mode,
		dma_addr_t dma_addr, u32 len)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	struct musb		*musb;

	cppi_ch = container_of(ch, struct cppi_channel, channel);
	controller = cppi_ch->controller;
	musb = controller->musb;

	switch (ch->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* fault irq handler should have handled cleanup */
		WARNING("%cX DMA%d not cleaned up after abort!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("program active channel? %cX DMA%d\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cX DMA%d not allocated!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	ch->status = MUSB_DMA_STATUS_BUSY;

	/* set transfer parameters, then queue up its first segment */
	cppi_ch->buf_dma = dma_addr;
	cppi_ch->offset = 0;
	cppi_ch->maxpacket = maxpacket;
	cppi_ch->buf_len = len;
	cppi_ch->channel.actual_len = 0;

	/* TX channel? or RX? */
	if (cppi_ch->transmit)
		cppi_next_tx_segment(musb, cppi_ch);
	else
		cppi_next_rx_segment(musb, cppi_ch, mode);

	return true;
}
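
/* A typical host-side call sequence, as a sketch (the urb and hw_ep
 * variables are the caller's, shown for illustration only):
 *
 *	channel = c->channel_alloc(c, hw_ep, 1);	(TX direction)
 *	c->channel_program(channel, maxpacket, 0,
 *			urb->transfer_dma, urb->transfer_buffer_length);
 *	... on the completion irq, cppi_completion() reports back ...
 *	c->channel_release(channel);
 */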

static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
	struct cppi_channel		*rx = &cppi->rx[ch];
	struct cppi_rx_stateram __iomem	*state = rx->state_ram;
	struct cppi_descriptor		*bd;
	struct cppi_descriptor		*last = rx->last_processed;
	bool				completed = false;
	bool				acked = false;
	int				i;
	dma_addr_t			safe2ack;
	void __iomem			*regs = rx->hw_ep->regs;

	cppi_dump_rx(6, rx, "/K");

	bd = last ? last->next : rx->head;
	if (!bd)
		return false;

	/* run through all completed BDs */
	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
			i++, bd = bd->next) {
		u16	len;

		/* catch latest BD writes from CPPI */
		rmb();
		if (!completed && (bd->hw_options & CPPI_OWN_SET))
			break;

		DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
			"off.len %08x opt.len %08x (%d)\n",
			bd->dma, bd->hw_next, bd->hw_bufp,
			bd->hw_off_len, bd->hw_options,
			rx->channel.actual_len);

		/* actual packet received length */
		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		else
			len = 0;

		if (bd->hw_options & CPPI_EOQ_MASK)
			completed = true;

		if (!completed && len < bd->buflen) {
			/* NOTE: when we get a short packet, RXCSR_H_REQPKT
			 * must have been cleared, and no more DMA packets
			 * may be active in the queue... TI docs didn't say,
			 * but CPPI ignores those BDs even though OWN is
			 * still set.
			 */
			completed = true;
			DBG(3, "rx short %d/%d (%d)\n",
					len, bd->buflen,
					rx->channel.actual_len);
		}

		/* If we got here, we expect to ack at least one BD; meanwhile
		 * CPPI may be completing other BDs while we scan this list...
		 *
		 * RACE: we can notice OWN cleared before CPPI raises the
		 * matching irq by writing that BD as the completion pointer.
		 * In such cases, stop scanning and wait for the irq, avoiding
		 * lost acks and states where BD ownership is unclear.
		 */
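		/* (the ack handshake below: writing rx_complete acks up to
		 * this BD; re-reading it returns the newest completion, and
		 * getting the same BD back evidently means nothing further
		 * has completed yet, so stop trusting safe2ack)
		 */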
		if (bd->dma == safe2ack) {
			musb_writel(&state->rx_complete, 0, safe2ack);
			safe2ack = musb_readl(&state->rx_complete, 0);
			acked = true;
			if (bd->dma == safe2ack)
				safe2ack = 0;
		}

		rx->channel.actual_len += len;

		cppi_bd_free(rx, last);
		last = bd;

		/* stop scanning on end-of-segment */
		if (bd->hw_next == 0)
			completed = true;
	}
	rx->last_processed = last;

	/* dma abort, lost ack, or ... */
	if (!acked && last) {
		int	csr;

		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
			musb_writel(&state->rx_complete, 0, safe2ack);
		if (safe2ack == 0) {
			cppi_bd_free(rx, last);
			rx->last_processed = NULL;

			/* if we land here on the host side, H_REQPKT will
			 * be clear and we need to restart the queue...
			 */
			WARN_ON(rx->head);
		}
		musb_ep_select(cppi->mregs, rx->index + 1);
		csr = musb_readw(regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_DMAENAB) {
			DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
				rx->index,
				rx->head, rx->tail,
				rx->last_processed
					? rx->last_processed->dma
					: 0,
				completed ? ", completed" : "",
				csr);
			cppi_dump_rxq(4, "/what?", rx);
		}
	}
	if (!completed) {
		int	csr;

		rx->head = bd;

		/* REVISIT seems like "autoreq all but EOP" doesn't...
		 * setting it here "should" be racy, but seems to work
		 */
		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		if (is_host_active(cppi->musb)
				&& bd
				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(regs, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | csr);
			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		}
	} else {
		rx->head = NULL;
		rx->tail = NULL;
	}

	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
	return completed;
}

void cppi_completion(struct musb *musb, u32 rx, u32 tx)
{
	void __iomem		*tibase;
	int			i, index;
	struct cppi		*cppi;
	struct musb_hw_ep	*hw_ep = NULL;

	cppi = container_of(musb->dma_controller, struct cppi, controller);

	tibase = musb->ctrl_base;

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		if (NULL == bd) {
			DBG(1, "null BD\n");
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
			musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel	*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
}

/* Instantiate a software object representing a DMA controller. */
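/* The musb core calls this during controller init; @mregs is the MUSB
 * core register base, and the DaVinci control registers are assumed to
 * sit DAVINCI_BASE_OFFSET below it (see the tibase computation below).
 */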
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi		*controller;

	controller = kzalloc(sizeof *controller, GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->mregs = mregs;
	controller->tibase = mregs - DAVINCI_BASE_OFFSET;

	controller->musb = musb;
	controller->controller.start = cppi_controller_start;
	controller->controller.stop = cppi_controller_stop;
	controller->controller.channel_alloc = cppi_channel_allocate;
	controller->controller.channel_release = cppi_channel_release;
	controller->controller.channel_program = cppi_channel_program;
	controller->controller.channel_abort = cppi_channel_abort;

	/* NOTE: allocating from on-chip SRAM would give the least
	 * contention for memory access, if that ever matters here.
	 */

	/* setup BufferPool */
	controller->pool = dma_pool_create("cppi",
			controller->musb->controller,
			sizeof(struct cppi_descriptor),
			CPPI_DESCRIPTOR_ALIGN, 0);
	if (!controller->pool) {
		kfree(controller);
		return NULL;
	}

	return &controller->controller;
}

/*
 *  Destroy a previously-instantiated DMA controller.
 */
void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi	*cppi;

	cppi = container_of(c, struct cppi, controller);

	/* assert: caller stopped the controller first */
	dma_pool_destroy(cppi->pool);

	kfree(cppi);
}

/*
 * Context: controller irqlocked, endpoint selected
 */
static int cppi_channel_abort(struct dma_channel *channel)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	void __iomem		*mbase;
	void __iomem		*tibase;
	void __iomem		*regs;
	u32			value;
	struct cppi_descriptor	*queue;

	cppi_ch = container_of(channel, struct cppi_channel, channel);

	controller = cppi_ch->controller;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* from RX or TX fault irq handler */
	case MUSB_DMA_STATUS_BUSY:
		/* the hardware needs shutting down */
		regs = cppi_ch->hw_ep->regs;
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
	case MUSB_DMA_STATUS_FREE:
		return 0;
	default:
		return -EINVAL;
	}

	if (!cppi_ch->transmit && cppi_ch->head)
		cppi_dump_rxq(3, "/abort", cppi_ch);

	mbase = controller->mregs;
	tibase = controller->tibase;

	queue = cppi_ch->head;
	cppi_ch->head = NULL;
	cppi_ch->tail = NULL;

	/* REVISIT should rely on caller having done this,
	 * and caller should rely on us not changing it.
	 * peripheral code is safe ... check host too.
	 */
	musb_ep_select(mbase, cppi_ch->index + 1);

	if (cppi_ch->transmit) {
		struct cppi_tx_stateram __iomem *tx_ram;
		int	enabled;

		/* mask interrupts raised to signal teardown complete. */
		enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
				& (1 << cppi_ch->index);
		if (enabled)
			musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
					(1 << cppi_ch->index));

		/* REVISIT put timeouts on these controller handshakes */
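		/* (a bounded-wait variant might look like this sketch;
		 * the retry limit is a guess, not a datasheet figure:
		 *
		 *	int limit = 1000;
		 *	do {
		 *		value = musb_readl(tibase,
		 *				DAVINCI_TXCPPI_TEAR_REG);
		 *	} while (!(value & CPPI_TEAR_READY) && --limit);
		 * )
		 */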

		cppi_dump_tx(6, cppi_ch, " (teardown)");

		/* teardown DMA engine then usb core */
		do {
			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
		} while (!(value & CPPI_TEAR_READY));
		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);

		tx_ram = cppi_ch->state_ram;
		do {
			value = musb_readl(&tx_ram->tx_complete, 0);
		} while (0xFFFFFFFC != value);
		musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);

		/* FIXME clean up the transfer state ... here?
		 * the completion routine should get called with
		 * an appropriate status code.
		 */

		value = musb_readw(regs, MUSB_TXCSR);
		value &= ~MUSB_TXCSR_DMAENAB;
		value |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(regs, MUSB_TXCSR, value);
		musb_writew(regs, MUSB_TXCSR, value);

		/* re-enable interrupt */
		if (enabled)
			musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
					(1 << cppi_ch->index));

		/* While we scrub the TX state RAM, ensure that we clean
		 * up any interrupt that's currently asserted:
		 * 1. Write to completion Ptr value 0x1 (bit 0 set)
		 *    (write back mode)
		 * 2. Write to completion Ptr value 0x0 (bit 0 cleared)
		 *    (compare mode)
		 * Value written is compared (for bits 31:2) and when
		 * equal, interrupt is deasserted.
		 */
		cppi_reset_tx(tx_ram, 1);
		musb_writel(&tx_ram->tx_complete, 0, 0);

		cppi_dump_tx(5, cppi_ch, " (done teardown)");

		/* REVISIT tx side _should_ clean up the same way
		 * as the RX side ... this does no cleanup at all!
		 */

	} else /* RX */ {
		u16	csr;

		/* NOTE: docs don't guarantee any of this works ... we
		 * expect that if the usb core stops telling the cppi core
		 * to pull more data from it, then it'll be safe to flush
		 * current RX DMA state iff any pending fifo transfer is done.
		 */

		core_rxirq_disable(tibase, cppi_ch->index + 1);

		/* for host, ensure ReqPkt is never set again */
		if (is_host_active(cppi_ch->controller->musb)) {
			value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			value &= ~((0x3) << (cppi_ch->index * 2));
			musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
		}

		csr = musb_readw(regs, MUSB_RXCSR);

		/* for host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi_ch->controller->musb)) {
			csr |= MUSB_RXCSR_H_WZC_BITS;
			csr &= ~MUSB_RXCSR_H_REQPKT;
		} else
			csr |= MUSB_RXCSR_P_WZC_BITS;

		/* clear dma enable */
		csr &= ~(MUSB_RXCSR_DMAENAB);
		musb_writew(regs, MUSB_RXCSR, csr);
		csr = musb_readw(regs, MUSB_RXCSR);

		/* Quiesce: wait for current dma to finish (if not cleanup).
		 * We can't use bit zero of stateram->rx_sop, since that
		 * refers to an entire "DMA packet" not just emptying the
		 * current fifo.  Most segments need multiple usb packets.
		 */
		if (channel->status == MUSB_DMA_STATUS_BUSY)
			udelay(50);

		/* scan the current list, reporting any data that was
		 * transferred and acking any IRQ
		 */
		cppi_rx_scan(controller, cppi_ch->index);

		/* clobber the existing state once it's idle
		 *
		 * NOTE: arguably, we should also wait for all the other
		 * RX channels to quiesce (how??) and then temporarily
		 * disable RXCPPI_CTRL_REG ... but it seems that we can
		 * rely on the controller restarting from state ram, with
		 * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
		 * correct itself after the next DMA transfer though.
		 *
		 * REVISIT does using rndis mode change that?
		 */
		cppi_reset_rx(cppi_ch->state_ram);

		/* next DMA request _should_ load cppi head ptr */

		/* ... we don't "free" that list, only mutate it in place. */
		cppi_dump_rx(5, cppi_ch, " (done abort)");

		/* clean up previously pending bds */
		cppi_bd_free(cppi_ch, cppi_ch->last_processed);
		cppi_ch->last_processed = NULL;

		while (queue) {
			struct cppi_descriptor	*tmp = queue->next;

			cppi_bd_free(cppi_ch, queue);
			queue = tmp;
		}
	}

	channel->status = MUSB_DMA_STATUS_FREE;
	cppi_ch->buf_dma = 0;
	cppi_ch->offset = 0;
	cppi_ch->buf_len = 0;
	cppi_ch->maxpacket = 0;
	return 0;
}

/* TBD Queries:
 *
 * Power Management ... probably turn off cppi during suspend, restart;
 * check state ram?  Clocking is presumably shared with usb core.
 */