usb: musb: musb_host: Enable ISOCH IN handling for AM335x host
drivers/usb/musb/musb_cppi41.c

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "musb_core.h"

#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREG_NONE		0
#define EP_MODE_AUTOREG_ALL_NEOP	1
#define EP_MODE_AUTOREG_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

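/*
 * Per-direction channel state. buf_addr/total_len/prog_len/transferred
 * track the current transfer: prog_len is the length of the descriptor
 * currently queued on the dmaengine channel, while transferred accumulates
 * the bytes completed so far across per-packet reloads.
 */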
struct cppi41_dma_channel {
	struct dma_channel channel;
	struct cppi41_dma_controller *controller;
	struct musb_hw_ep *hw_ep;
	struct dma_chan *dc;
	dma_cookie_t cookie;
	u8 port_num;
	u8 is_tx;
	u8 is_allocated;
	u8 usb_toggle;

	dma_addr_t buf_addr;
	u32 total_len;
	u32 prog_len;
	u32 transferred;
	u32 packet_sz;
	struct list_head tx_check;
};

#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};

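/*
 * save_rx_toggle() and update_rx_toggle() form a pair: the RX data toggle
 * is sampled when a transfer is programmed and compared again on
 * completion, so an unexpected DATA1 -> DATA0 reset (AM335x Advisory
 * 1.0.13, see below) can be detected and undone.
 */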
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		dev_dbg(cppi41_channel->controller->musb->controller,
				"Restoring DATA1 toggle.\n");
	}

	cppi41_channel->usb_toggle = toggle;
}

static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8 epnum = hw_ep->epnum;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = musb->endpoints[epnum].regs;
	u16 csr;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static void cppi41_dma_callback(void *private_data);

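/*
 * Called once the dmaengine reports a completed descriptor: either finish
 * the musb request, or (for transfers split into max-packet chunks due to
 * AM335x Advisory 1.0.13) queue the next chunk and re-arm REQPKT on RX.
 */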
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;

	if (!cppi41_channel->prog_len) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u16 csr;
		u32 remain_bytes;
		void __iomem *epio = cppi41_channel->hw_ep->regs;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
						  : DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

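/*
 * hrtimer handler for the early TX completion workaround below: complete
 * every queued TX channel whose FIFO has drained, and keep firing every
 * 150us until the early_tx_list is empty.
 */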
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx,
				ktime_set(0, 150 * NSEC_PER_USEC));
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

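/*
 * Completion callback set on every descriptor: updates the byte counters
 * from the dmaengine residue, applies the RX toggle fixup, and completes
 * the transfer, deferring TX completion until the FIFO has drained.
 */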
static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
		hw_ep->epnum, cppi41_channel->transferred,
		cppi41_channel->total_len);

	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	empty = musb_is_tx_fifo_empty(hw_ep);
	if (empty) {
		cppi41_trans_done(cppi41_channel);
	} else {
		struct cppi41_dma_controller *controller;
		/*
		 * On AM335x it has been observed that the TX interrupt fires
		 * too early, meaning the TX FIFO is not yet empty but the DMA
		 * engine says that it is done with the transfer. We don't
		 * receive a FIFO-empty interrupt, so the only thing we can do
		 * is to poll for the bit. On HS it usually takes 2us, on FS
		 * around 110us - 150us depending on the transfer size.
		 * We spin on HS (no longer than 25us) and set up a timer on
		 * FS to check for the bit and complete the transfer.
		 */
		controller = cppi41_channel->controller;

		if (musb->g.speed == USB_SPEED_HIGH) {
			unsigned wait = 25;

			do {
				empty = musb_is_tx_fifo_empty(hw_ep);
				if (empty)
					break;
				wait--;
				if (!wait)
					break;
				udelay(1);
			} while (1);

			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
		}
		list_add_tail(&cppi41_channel->tx_check,
				&controller->early_tx_list);
		if (!hrtimer_active(&controller->early_tx)) {
			hrtimer_start_range_ns(&controller->early_tx,
				ktime_set(0, 140 * NSEC_PER_USEC),
				40 * NSEC_PER_USEC,
				HRTIMER_MODE_REL);
		}
	}
out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

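/*
 * The TX/RX mode and auto-request registers hold a two-bit field per
 * endpoint: endpoint 1 in bits 1:0, endpoint 2 in bits 3:2, and so on.
 * For example, update_ep_mode(2, EP_MODE_DMA_GEN_RNDIS, 0) returns 0xc:
 * mode 3 shifted into the endpoint 2 field.
 */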
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
				new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
				new_mode);
	}
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}

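/*
 * Program a transfer on the dmaengine channel. TX transfers larger than
 * one packet use generic RNDIS mode with the total length written to the
 * endpoint's RNDIS register; everything else runs in transparent mode,
 * one max-packet chunk at a time, reloaded from cppi41_trans_done().
 */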
static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	dev_dbg(musb->controller,
		"configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
		packet_sz, mode, (unsigned long long) dma_addr,
		len, cppi41_channel->is_tx);

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;

	/*
	 * Due to AM335x Advisory 1.0.13, we are not allowed to transfer more
	 * than the max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}

static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;
	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

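/*
 * Teardown sequence: mask DMA requests at the endpoint, flush a pending
 * packet from the FIFO, then poke the teardown register until the
 * dmaengine driver stops returning -EAGAIN for the terminate request.
 */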
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
			cppi41_channel->port_num, is_tx);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

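/*
 * Request one dmaengine channel per "dma-names" entry in the device tree.
 * Names are expected to be of the form "tx1".."tx15" and "rx1".."rx15";
 * the numeric suffix selects the tx_channel[]/rx_channel[] slot.
 */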
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (!strncmp(str, "tx", 2))
			is_tx = 1;
		else if (!strncmp(str, "rx", 2))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			ret = -EINVAL;
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}

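/*
 * Entry point for the musb core. Returns ERR_PTR(-EPROBE_DEFER) when the
 * dmaengine channels are not available yet so probing can be retried, and
 * NULL on any other failure.
 */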
struct dma_controller *dma_controller_create(struct musb *musb,
					void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}