drivers/usb/dwc3/gadget.c
1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <linux/kernel.h>
40 #include <linux/delay.h>
41 #include <linux/slab.h>
42 #include <linux/spinlock.h>
43 #include <linux/platform_device.h>
44 #include <linux/pm_runtime.h>
45 #include <linux/interrupt.h>
46 #include <linux/io.h>
47 #include <linux/list.h>
48 #include <linux/dma-mapping.h>
49
50 #include <linux/usb/ch9.h>
51 #include <linux/usb/gadget.h>
52
53 #include "core.h"
54 #include "gadget.h"
55 #include "io.h"
56
57 /**
58 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
59 * @dwc: pointer to our context structure
60 * @mode: the test mode to set (J, K, SE0 NAK, Packet, Force Enable)
61 *
62 * Caller should take care of locking. This function will
63 * return 0 on success or -EINVAL if wrong Test Selector
64 * is passed
65 */
66 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
67 {
68 u32 reg;
69
70 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
71 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
72
73 switch (mode) {
74 case TEST_J:
75 case TEST_K:
76 case TEST_SE0_NAK:
77 case TEST_PACKET:
78 case TEST_FORCE_EN:
79 reg |= mode << 1;
80 break;
81 default:
82 return -EINVAL;
83 }
84
85 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
86
87 return 0;
88 }
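/*
 * Illustrative sketch, not part of the original file: how a caller might
 * enter the USB2 "Test_J" mode. dwc3_gadget_set_test_mode() expects its
 * caller to hold dwc->lock; the selector (TEST_J, from ch9.h) is simply
 * shifted into the TSTCTRL field of DCTL by the code above. The function
 * name below is made up for the example.
 */
static int dwc3_example_enter_test_j(struct dwc3 *dwc)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_set_test_mode(dwc, TEST_J);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;	/* 0 on success, -EINVAL for an unknown selector */
}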
89
90 /**
91 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
92 * @dwc: pointer to our context structure
93 * @state: the state to put link into
94 *
95 * Caller should take care of locking. This function will
96 * return 0 on success or -ETIMEDOUT.
97 */
98 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
99 {
100 int retries = 10000;
101 u32 reg;
102
103 /*
104 * Wait until device controller is ready. Only applies to 1.94a and
105 * later RTL.
106 */
107 if (dwc->revision >= DWC3_REVISION_194A) {
108 while (--retries) {
109 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
110 if (reg & DWC3_DSTS_DCNRD)
111 udelay(5);
112 else
113 break;
114 }
115
116 if (retries <= 0)
117 return -ETIMEDOUT;
118 }
119
120 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
121 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
122
123 /* set requested state */
124 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
125 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
126
127 /*
128 * The following code is racy when called from dwc3_gadget_wakeup,
129 * and is not needed, at least on newer versions
130 */
131 if (dwc->revision >= DWC3_REVISION_194A)
132 return 0;
133
134 /* wait for a change in DSTS */
135 retries = 10000;
136 while (--retries) {
137 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
138
139 if (DWC3_DSTS_USBLNKST(reg) == state)
140 return 0;
141
142 udelay(5);
143 }
144
145 dev_vdbg(dwc->dev, "link state change request timed out\n");
146
147 return -ETIMEDOUT;
148 }
149
150 /**
151 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
152 * @dwc: pointer to our context structure
153 *
154 * This function will do a best-effort FIFO allocation in order
155 * to improve FIFO usage and throughput, while still allowing
156 * us to enable as many endpoints as possible.
157 *
158 * Keep in mind that this operation will be highly dependent
159 * on the configured size for RAM1 - which contains TxFifo -,
160 * the number of endpoints enabled in the coreConsultant tool, and
161 * the width of the Master Bus.
162 *
163 * In the ideal world, we would always be able to satisfy the
164 * following equation:
165 *
166 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
167 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
168 *
169 * Unfortunately, due to many variables that's not always the case.
170 */
171 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
172 {
173 int last_fifo_depth = 0;
174 int ram1_depth;
175 int fifo_size;
176 int mdwidth;
177 int num;
178
179 if (!dwc->needs_fifo_resize)
180 return 0;
181
182 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
183 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
184
185 /* MDWIDTH is represented in bits, we need it in bytes */
186 mdwidth >>= 3;
187
188 /*
189 * FIXME For now we will only allocate 1 wMaxPacketSize space
190 * for each enabled endpoint, later patches will come to
191 * improve this algorithm so that we better use the internal
192 * FIFO space
193 */
194 for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
195 struct dwc3_ep *dep = dwc->eps[num];
196 int fifo_number = dep->number >> 1;
197 int mult = 1;
198 int tmp;
199
200 if (!(dep->number & 1))
201 continue;
202
203 if (!(dep->flags & DWC3_EP_ENABLED))
204 continue;
205
206 if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
207 || usb_endpoint_xfer_isoc(dep->endpoint.desc))
208 mult = 3;
209
210 /*
211 * REVISIT: the following assumes we will always have enough
212 * space available on the FIFO RAM for all possible use cases.
213 * Make sure that's true somehow and change FIFO allocation
214 * accordingly.
215 *
216 * If we have Bulk or Isochronous endpoints, we want
217 * them to be able to be very, very fast. So we're giving
218 * those endpoints a fifo_size which is enough for 3 full
219 * packets
220 */
221 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
222 tmp += mdwidth;
223
224 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
225
226 fifo_size |= (last_fifo_depth << 16);
227
228 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
229 dep->name, last_fifo_depth, fifo_size & 0xffff);
230
231 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
232 fifo_size);
233
234 last_fifo_depth += (fifo_size & 0xffff);
235 }
236
237 return 0;
238 }
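/*
 * Worked example of the sizing math above (illustrative only; RAM1 depth and
 * MDWIDTH are whatever coreConsultant generated for a given SoC). Assume
 * MDWIDTH = 64 bits (8 bytes) and a SuperSpeed bulk IN endpoint with
 * wMaxPacketSize = 1024, so mult = 3:
 *
 *	tmp       = 3 * (1024 + 8) + 8    = 3104 bytes
 *	fifo_size = DIV_ROUND_UP(3104, 8) =  388 MDWIDTH-words
 *
 * If the FIFOs allocated so far already occupy 66 words, the value written
 * to GTXFIFOSIZ is (66 << 16) | 388: "this FIFO starts at word 66 and is
 * 388 words deep".
 */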
239
240 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
241 int status)
242 {
243 struct dwc3 *dwc = dep->dwc;
244 int i;
245
246 if (req->queued) {
247 i = 0;
248 do {
249 dep->busy_slot++;
250 /*
251 * Skip the LINK TRB. We can't use req->trb and check for
252 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we
253 * just completed (not the LINK TRB).
254 */
255 if (((dep->busy_slot & DWC3_TRB_MASK) ==
256 DWC3_TRB_NUM - 1) &&
257 usb_endpoint_xfer_isoc(dep->endpoint.desc))
258 dep->busy_slot++;
259 } while (++i < req->request.num_mapped_sgs);
260 req->queued = false;
261 }
262 list_del(&req->list);
263 req->trb = NULL;
264
265 if (req->request.status == -EINPROGRESS)
266 req->request.status = status;
267
268 if (dwc->ep0_bounced && dep->number == 0)
269 dwc->ep0_bounced = false;
270 else
271 usb_gadget_unmap_request(&dwc->gadget, &req->request,
272 req->direction);
273
274 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
275 req, dep->name, req->request.actual,
276 req->request.length, status);
277
278 spin_unlock(&dwc->lock);
279 req->request.complete(&dep->endpoint, &req->request);
280 spin_lock(&dwc->lock);
281 }
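/*
 * Worked example (illustrative): a completed request that was mapped to
 * three scatterlist entries advances busy_slot three times in the loop
 * above. On an isochronous endpoint, whenever an increment lands on slot
 * DWC3_TRB_NUM - 1 -- the link TRB -- busy_slot is bumped one extra time
 * so that it never points at the link TRB itself.
 */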
282
283 static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
284 {
285 switch (cmd) {
286 case DWC3_DEPCMD_DEPSTARTCFG:
287 return "Start New Configuration";
288 case DWC3_DEPCMD_ENDTRANSFER:
289 return "End Transfer";
290 case DWC3_DEPCMD_UPDATETRANSFER:
291 return "Update Transfer";
292 case DWC3_DEPCMD_STARTTRANSFER:
293 return "Start Transfer";
294 case DWC3_DEPCMD_CLEARSTALL:
295 return "Clear Stall";
296 case DWC3_DEPCMD_SETSTALL:
297 return "Set Stall";
298 case DWC3_DEPCMD_GETEPSTATE:
299 return "Get Endpoint State";
300 case DWC3_DEPCMD_SETTRANSFRESOURCE:
301 return "Set Endpoint Transfer Resource";
302 case DWC3_DEPCMD_SETEPCONFIG:
303 return "Set Endpoint Configuration";
304 default:
305 return "UNKNOWN command";
306 }
307 }
308
309 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
310 {
311 u32 timeout = 500;
312 u32 reg;
313
314 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
315 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
316
317 do {
318 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
319 if (!(reg & DWC3_DGCMD_CMDACT)) {
320 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
321 DWC3_DGCMD_STATUS(reg));
322 return 0;
323 }
324
325 /*
326 * We can't sleep here, because it's also called from
327 * interrupt context.
328 */
329 timeout--;
330 if (!timeout)
331 return -ETIMEDOUT;
332 udelay(1);
333 } while (1);
334 }
335
336 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
337 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
338 {
339 struct dwc3_ep *dep = dwc->eps[ep];
340 u32 timeout = 500;
341 u32 reg;
342
343 dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
344 dep->name,
345 dwc3_gadget_ep_cmd_string(cmd), params->param0,
346 params->param1, params->param2);
347
348 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
349 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
350 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
351
352 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
353 do {
354 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
355 if (!(reg & DWC3_DEPCMD_CMDACT)) {
356 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
357 DWC3_DEPCMD_STATUS(reg));
358 return 0;
359 }
360
361 /*
362 * We can't sleep here, because it is also called from
363 * interrupt context.
364 */
365 timeout--;
366 if (!timeout)
367 return -ETIMEDOUT;
368
369 udelay(1);
370 } while (1);
371 }
372
373 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
374 struct dwc3_trb *trb)
375 {
376 u32 offset = (char *) trb - (char *) dep->trb_pool;
377
378 return dep->trb_pool_dma + offset;
379 }
380
381 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
382 {
383 struct dwc3 *dwc = dep->dwc;
384
385 if (dep->trb_pool)
386 return 0;
387
388 if (dep->number == 0 || dep->number == 1)
389 return 0;
390
391 dep->trb_pool = dma_alloc_coherent(dwc->dev,
392 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
393 &dep->trb_pool_dma, GFP_KERNEL);
394 if (!dep->trb_pool) {
395 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
396 dep->name);
397 return -ENOMEM;
398 }
399
400 return 0;
401 }
402
403 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
404 {
405 struct dwc3 *dwc = dep->dwc;
406
407 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
408 dep->trb_pool, dep->trb_pool_dma);
409
410 dep->trb_pool = NULL;
411 dep->trb_pool_dma = 0;
412 }
413
414 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
415 {
416 struct dwc3_gadget_ep_cmd_params params;
417 u32 cmd;
418
419 memset(&params, 0x00, sizeof(params));
420
421 if (dep->number != 1) {
422 cmd = DWC3_DEPCMD_DEPSTARTCFG;
423 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
424 if (dep->number > 1) {
425 if (dwc->start_config_issued)
426 return 0;
427 dwc->start_config_issued = true;
428 cmd |= DWC3_DEPCMD_PARAM(2);
429 }
430
431 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
432 }
433
434 return 0;
435 }
436
437 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
438 const struct usb_endpoint_descriptor *desc,
439 const struct usb_ss_ep_comp_descriptor *comp_desc,
440 bool ignore)
441 {
442 struct dwc3_gadget_ep_cmd_params params;
443
444 memset(&params, 0x00, sizeof(params));
445
446 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
447 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
448
449 /* Burst size is only needed in SuperSpeed mode */
450 if (dwc->gadget.speed == USB_SPEED_SUPER) {
451 u32 burst = dep->endpoint.maxburst - 1;
452
453 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
454 }
455
456 if (ignore)
457 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
458
459 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
460 | DWC3_DEPCFG_XFER_NOT_READY_EN;
461
462 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
463 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
464 | DWC3_DEPCFG_STREAM_EVENT_EN;
465 dep->stream_capable = true;
466 }
467
468 if (usb_endpoint_xfer_isoc(desc))
469 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
470
471 /*
472 * We are doing 1:1 mapping for endpoints, meaning
473 * Physical Endpoint 2 maps to Logical Endpoint 2, and
474 * so on. We consider the direction bit as part of the physical
475 * endpoint number. So USB endpoint 0x81 is 0x03.
476 */
477 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
478
479 /*
480 * We must use the lower 16 TX FIFOs even though
481 * HW might have more
482 */
483 if (dep->direction)
484 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
485
486 if (desc->bInterval) {
487 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
488 dep->interval = 1 << (desc->bInterval - 1);
489 }
490
491 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
492 DWC3_DEPCMD_SETEPCONFIG, &params);
493 }
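/*
 * Illustrative helper, not part of the original file: the physical endpoint
 * number used by DWC3_DEPCFG_EP_NUMBER() above is derived from the USB
 * endpoint address exactly as the comment describes -- the logical number
 * shifted left by one, with the direction bit in bit 0. So 0x81 (EP1 IN)
 * becomes physical endpoint 3 and 0x02 (EP2 OUT) becomes physical endpoint 4.
 */
static u8 dwc3_example_phys_epnum(u8 bEndpointAddress)
{
	u8 num = bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = !!(bEndpointAddress & USB_DIR_IN);

	return (num << 1) | is_in;
}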
494
495 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
496 {
497 struct dwc3_gadget_ep_cmd_params params;
498
499 memset(&params, 0x00, sizeof(params));
500
501 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
502
503 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
504 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
505 }
506
507 /**
508 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
509 * @dep: endpoint to be initialized
510 * @desc: USB Endpoint Descriptor
511 *
512 * Caller should take care of locking
513 */
514 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
515 const struct usb_endpoint_descriptor *desc,
516 const struct usb_ss_ep_comp_descriptor *comp_desc,
517 bool ignore)
518 {
519 struct dwc3 *dwc = dep->dwc;
520 u32 reg;
521 int ret = -ENOMEM;
522
523 if (!(dep->flags & DWC3_EP_ENABLED)) {
524 ret = dwc3_gadget_start_config(dwc, dep);
525 if (ret)
526 return ret;
527 }
528
529 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
530 if (ret)
531 return ret;
532
533 if (!(dep->flags & DWC3_EP_ENABLED)) {
534 struct dwc3_trb *trb_st_hw;
535 struct dwc3_trb *trb_link;
536
537 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
538 if (ret)
539 return ret;
540
541 dep->endpoint.desc = desc;
542 dep->comp_desc = comp_desc;
543 dep->type = usb_endpoint_type(desc);
544 dep->flags |= DWC3_EP_ENABLED;
545
546 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
547 reg |= DWC3_DALEPENA_EP(dep->number);
548 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
549
550 if (!usb_endpoint_xfer_isoc(desc))
551 return 0;
552
553 /* Link TRB for ISOC. The HWO bit is never reset */
554 trb_st_hw = &dep->trb_pool[0];
555
556 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
557 memset(trb_link, 0, sizeof(*trb_link));
558
559
560 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
561 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
562 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
563 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
564 }
565
566 return 0;
567 }
568
569 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
570 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
571 {
572 struct dwc3_request *req;
573
574 if (!list_empty(&dep->req_queued)) {
575 dwc3_stop_active_transfer(dwc, dep->number);
576
577 /* giveback all requests to the gadget driver */
578 while (!list_empty(&dep->req_queued)) {
579 req = next_request(&dep->req_queued);
580
581 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
582 }
583 }
584
585 while (!list_empty(&dep->request_list)) {
586 req = next_request(&dep->request_list);
587
588 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
589 }
590 }
591
592 /**
593 * __dwc3_gadget_ep_disable - Disables a HW endpoint
594 * @dep: the endpoint to disable
595 *
596 * This function also removes requests which are currently processed by the
597 * hardware and those which are not yet scheduled.
598 * Caller should take care of locking.
599 */
600 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
601 {
602 struct dwc3 *dwc = dep->dwc;
603 u32 reg;
604
605 dwc3_remove_requests(dwc, dep);
606
607 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
608 reg &= ~DWC3_DALEPENA_EP(dep->number);
609 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
610
611 dep->stream_capable = false;
612 dep->endpoint.desc = NULL;
613 dep->comp_desc = NULL;
614 dep->type = 0;
615 dep->flags = 0;
616
617 return 0;
618 }
619
620 /* -------------------------------------------------------------------------- */
621
622 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
623 const struct usb_endpoint_descriptor *desc)
624 {
625 return -EINVAL;
626 }
627
628 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
629 {
630 return -EINVAL;
631 }
632
633 /* -------------------------------------------------------------------------- */
634
635 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
636 const struct usb_endpoint_descriptor *desc)
637 {
638 struct dwc3_ep *dep;
639 struct dwc3 *dwc;
640 unsigned long flags;
641 int ret;
642
643 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
644 pr_debug("dwc3: invalid parameters\n");
645 return -EINVAL;
646 }
647
648 if (!desc->wMaxPacketSize) {
649 pr_debug("dwc3: missing wMaxPacketSize\n");
650 return -EINVAL;
651 }
652
653 dep = to_dwc3_ep(ep);
654 dwc = dep->dwc;
655
656 if (dep->flags & DWC3_EP_ENABLED) {
657 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
658 dep->name);
659 return 0;
660 }
661
662 switch (usb_endpoint_type(desc)) {
663 case USB_ENDPOINT_XFER_CONTROL:
664 strlcat(dep->name, "-control", sizeof(dep->name));
665 break;
666 case USB_ENDPOINT_XFER_ISOC:
667 strlcat(dep->name, "-isoc", sizeof(dep->name));
668 break;
669 case USB_ENDPOINT_XFER_BULK:
670 strlcat(dep->name, "-bulk", sizeof(dep->name));
671 break;
672 case USB_ENDPOINT_XFER_INT:
673 strlcat(dep->name, "-int", sizeof(dep->name));
674 break;
675 default:
676 dev_err(dwc->dev, "invalid endpoint transfer type\n");
677 }
678
679 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
680
681 spin_lock_irqsave(&dwc->lock, flags);
682 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
683 spin_unlock_irqrestore(&dwc->lock, flags);
684
685 return ret;
686 }
687
688 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
689 {
690 struct dwc3_ep *dep;
691 struct dwc3 *dwc;
692 unsigned long flags;
693 int ret;
694
695 if (!ep) {
696 pr_debug("dwc3: invalid parameters\n");
697 return -EINVAL;
698 }
699
700 dep = to_dwc3_ep(ep);
701 dwc = dep->dwc;
702
703 if (!(dep->flags & DWC3_EP_ENABLED)) {
704 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
705 dep->name);
706 return 0;
707 }
708
709 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
710 dep->number >> 1,
711 (dep->number & 1) ? "in" : "out");
712
713 spin_lock_irqsave(&dwc->lock, flags);
714 ret = __dwc3_gadget_ep_disable(dep);
715 spin_unlock_irqrestore(&dwc->lock, flags);
716
717 return ret;
718 }
719
720 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
721 gfp_t gfp_flags)
722 {
723 struct dwc3_request *req;
724 struct dwc3_ep *dep = to_dwc3_ep(ep);
725 struct dwc3 *dwc = dep->dwc;
726
727 req = kzalloc(sizeof(*req), gfp_flags);
728 if (!req) {
729 dev_err(dwc->dev, "not enough memory\n");
730 return NULL;
731 }
732
733 req->epnum = dep->number;
734 req->dep = dep;
735
736 return &req->request;
737 }
738
739 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
740 struct usb_request *request)
741 {
742 struct dwc3_request *req = to_dwc3_request(request);
743
744 kfree(req);
745 }
746
747 /**
748 * dwc3_prepare_one_trb - setup one TRB from one request
749 * @dep: endpoint for which this request is prepared
750 * @req: dwc3_request pointer
751 */
752 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
753 struct dwc3_request *req, dma_addr_t dma,
754 unsigned length, unsigned last, unsigned chain, unsigned node)
755 {
756 struct dwc3 *dwc = dep->dwc;
757 struct dwc3_trb *trb;
758
759 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
760 dep->name, req, (unsigned long long) dma,
761 length, last ? " last" : "",
762 chain ? " chain" : "");
763
764 /* Skip the LINK-TRB on ISOC */
765 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
766 usb_endpoint_xfer_isoc(dep->endpoint.desc))
767 dep->free_slot++;
768
769 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
770
771 if (!req->trb) {
772 dwc3_gadget_move_request_queued(req);
773 req->trb = trb;
774 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
775 req->start_slot = dep->free_slot & DWC3_TRB_MASK;
776 }
777
778 dep->free_slot++;
779
780 trb->size = DWC3_TRB_SIZE_LENGTH(length);
781 trb->bpl = lower_32_bits(dma);
782 trb->bph = upper_32_bits(dma);
783
784 switch (usb_endpoint_type(dep->endpoint.desc)) {
785 case USB_ENDPOINT_XFER_CONTROL:
786 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
787 break;
788
789 case USB_ENDPOINT_XFER_ISOC:
790 if (!node)
791 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
792 else
793 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
794
795 if (!req->request.no_interrupt && !chain)
796 trb->ctrl |= DWC3_TRB_CTRL_IOC;
797 break;
798
799 case USB_ENDPOINT_XFER_BULK:
800 case USB_ENDPOINT_XFER_INT:
801 trb->ctrl = DWC3_TRBCTL_NORMAL;
802 break;
803 default:
804 /*
805 * This is only possible with faulty memory because we
806 * checked it already :)
807 */
808 BUG();
809 }
810
811 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
812 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
813 trb->ctrl |= DWC3_TRB_CTRL_CSP;
814 } else if (last) {
815 trb->ctrl |= DWC3_TRB_CTRL_LST;
816 }
817
818 if (chain)
819 trb->ctrl |= DWC3_TRB_CTRL_CHN;
820
821 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
822 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
823
824 trb->ctrl |= DWC3_TRB_CTRL_HWO;
825 }
826
827 /*
828 * dwc3_prepare_trbs - setup TRBs from requests
829 * @dep: endpoint for which requests are being prepared
830 * @starting: true if the endpoint is idle and no requests are queued.
831 *
832 * The function goes through the requests list and sets up TRBs for the
833 * transfers. The function returns once there are no more TRBs available or
834 * it runs out of requests.
835 */
836 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
837 {
838 struct dwc3_request *req, *n;
839 u32 trbs_left;
840 u32 max;
841 unsigned int last_one = 0;
842
843 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
844
845 /* the first request must not be queued */
846 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
847
848 /* Can't wrap around on a non-isoc EP since there's no link TRB */
849 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
850 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
851 if (trbs_left > max)
852 trbs_left = max;
853 }
854
855 /*
856 * If busy & free slots are equal, then it is either full or empty. If we are
857 * starting to process requests then we are empty. Otherwise we are
858 * full and don't do anything.
859 */
860 if (!trbs_left) {
861 if (!starting)
862 return;
863 trbs_left = DWC3_TRB_NUM;
864 /*
865 * In case we start from scratch, we queue the ISOC requests
866 * starting from slot 1. This is done because we use ring
867 * buffer and have no LST bit to stop us. Instead, we place
868 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
869 * after the first request so we start at slot 1 and have
870 * 7 requests proceed before we hit the first IOC.
871 * Other transfer types don't use the ring buffer and are
872 * processed from the first TRB until the last one. Since we
873 * don't wrap around we have to start at the beginning.
874 */
875 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
876 dep->busy_slot = 1;
877 dep->free_slot = 1;
878 } else {
879 dep->busy_slot = 0;
880 dep->free_slot = 0;
881 }
882 }
883
884 /* The last TRB is a link TRB, not used for xfer */
885 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
886 return;
887
888 list_for_each_entry_safe(req, n, &dep->request_list, list) {
889 unsigned length;
890 dma_addr_t dma;
891 last_one = false;
892
893 if (req->request.num_mapped_sgs > 0) {
894 struct usb_request *request = &req->request;
895 struct scatterlist *sg = request->sg;
896 struct scatterlist *s;
897 int i;
898
899 for_each_sg(sg, s, request->num_mapped_sgs, i) {
900 unsigned chain = true;
901
902 length = sg_dma_len(s);
903 dma = sg_dma_address(s);
904
905 if (i == (request->num_mapped_sgs - 1) ||
906 sg_is_last(s)) {
907 if (list_is_last(&req->list,
908 &dep->request_list))
909 last_one = true;
910 chain = false;
911 }
912
913 trbs_left--;
914 if (!trbs_left)
915 last_one = true;
916
917 if (last_one)
918 chain = false;
919
920 dwc3_prepare_one_trb(dep, req, dma, length,
921 last_one, chain, i);
922
923 if (last_one)
924 break;
925 }
926 } else {
927 dma = req->request.dma;
928 length = req->request.length;
929 trbs_left--;
930
931 if (!trbs_left)
932 last_one = 1;
933
934 /* Is this the last request? */
935 if (list_is_last(&req->list, &dep->request_list))
936 last_one = 1;
937
938 dwc3_prepare_one_trb(dep, req, dma, length,
939 last_one, false, 0);
940
941 if (last_one)
942 break;
943 }
944 }
945 }
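/*
 * Worked example of the occupancy math above (DWC3_TRB_NUM is 32 in this
 * driver, so DWC3_TRB_MASK is 0x1f). Suppose software has prepared TRBs up
 * to free_slot = 35 while hardware has consumed up to busy_slot = 30:
 *
 *	trbs_left = (30 - 35) & 0x1f = 27
 *
 * i.e. 27 more TRBs can be prepared before the producer index catches up
 * with the consumer index. When both indices are equal the result is 0,
 * which is why the "starting" flag is needed to tell an empty ring apart
 * from a full one.
 */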
946
947 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
948 int start_new)
949 {
950 struct dwc3_gadget_ep_cmd_params params;
951 struct dwc3_request *req;
952 struct dwc3 *dwc = dep->dwc;
953 int ret;
954 u32 cmd;
955
956 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
957 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
958 return -EBUSY;
959 }
960 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
961
962 /*
963 * If we are getting here after a short-out-packet we don't enqueue any
964 * new requests as we try to set the IOC bit only on the last request.
965 */
966 if (start_new) {
967 if (list_empty(&dep->req_queued))
968 dwc3_prepare_trbs(dep, start_new);
969
970 /* req points to the first request which will be sent */
971 req = next_request(&dep->req_queued);
972 } else {
973 dwc3_prepare_trbs(dep, start_new);
974
975 /*
976 * req points to the first request where HWO changed from 0 to 1
977 */
978 req = next_request(&dep->req_queued);
979 }
980 if (!req) {
981 dep->flags |= DWC3_EP_PENDING_REQUEST;
982 return 0;
983 }
984
985 memset(&params, 0, sizeof(params));
986
987 if (start_new) {
988 params.param0 = upper_32_bits(req->trb_dma);
989 params.param1 = lower_32_bits(req->trb_dma);
990 cmd = DWC3_DEPCMD_STARTTRANSFER;
991 } else {
992 cmd = DWC3_DEPCMD_UPDATETRANSFER;
993 }
994
995 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
996 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
997 if (ret < 0) {
998 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
999
1000 /*
1001 * FIXME we need to iterate over the list of requests
1002 * here and stop, unmap, free and del each of the linked
1003 * requests instead of what we do now.
1004 */
1005 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1006 req->direction);
1007 list_del(&req->list);
1008 return ret;
1009 }
1010
1011 dep->flags |= DWC3_EP_BUSY;
1012
1013 if (start_new) {
1014 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
1015 dep->number);
1016 WARN_ON_ONCE(!dep->resource_index);
1017 }
1018
1019 return 0;
1020 }
1021
1022 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1023 struct dwc3_ep *dep, u32 cur_uf)
1024 {
1025 u32 uf;
1026
1027 if (list_empty(&dep->request_list)) {
1028 dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
1029 dep->name);
1030 dep->flags |= DWC3_EP_PENDING_REQUEST;
1031 return;
1032 }
1033
1034 /* 4 microframes in the future */
1035 uf = cur_uf + dep->interval * 4;
1036
1037 __dwc3_gadget_kick_transfer(dep, uf, 1);
1038 }
1039
1040 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1041 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1042 {
1043 u32 cur_uf, mask;
1044
1045 mask = ~(dep->interval - 1);
1046 cur_uf = event->parameters & mask;
1047
1048 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1049 }
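/*
 * Worked example (illustrative): for an isochronous endpoint with
 * bInterval = 4, __dwc3_gadget_ep_enable() set dep->interval to
 * 1 << (4 - 1) = 8 microframes. If this XferNotReady event reports
 * microframe 0x1235, it is first aligned down to the service interval
 * (0x1235 & ~(8 - 1) = 0x1230) and the first transfer is scheduled four
 * intervals later:
 *
 *	uf = 0x1230 + 8 * 4 = 0x1250
 */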
1050
1051 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1052 {
1053 struct dwc3 *dwc = dep->dwc;
1054 int ret;
1055
1056 req->request.actual = 0;
1057 req->request.status = -EINPROGRESS;
1058 req->direction = dep->direction;
1059 req->epnum = dep->number;
1060
1061 /*
1062 * We only add to our list of requests now and
1063 * start consuming the list once we get XferNotReady
1064 * IRQ.
1065 *
1066 * That way, we avoid doing anything that we don't need
1067 * to do now and defer it until the point we receive a
1068 * particular token from the Host side.
1069 *
1070 * This will also avoid Host cancelling URBs due to too
1071 * many NAKs.
1072 */
1073 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1074 dep->direction);
1075 if (ret)
1076 return ret;
1077
1078 list_add_tail(&req->list, &dep->request_list);
1079
1080 /*
1081 * There are a few special cases:
1082 *
1083 * 1. XferNotReady with empty list of requests. We need to kick the
1084 * transfer here in that situation, otherwise we will be NAKing
1085 * forever. If we get XferNotReady before gadget driver has a
1086 * chance to queue a request, we will ACK the IRQ but won't be
1087 * able to receive the data until the next request is queued.
1088 * The following code is handling exactly that.
1089 *
1090 */
1091 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1092 /*
1093 * If XferNotReady has already elapsed and this is an
1094 * isoc transfer, issue END TRANSFER so that we can
1095 * receive XferNotReady again and regain the notion of
1096 * the current microframe.
1097 */
1098 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1099 if (list_empty(&dep->req_queued)) {
1100 dwc3_stop_active_transfer(dwc, dep->number);
1101 dep->flags = DWC3_EP_ENABLED;
1102 }
1103 return 0;
1104 }
1105
1106 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1107 if (ret && ret != -EBUSY)
1108 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1109 dep->name);
1110 return ret;
1111 }
1112
1113 /*
1114 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1115 * kick the transfer here after queuing a request, otherwise the
1116 * core may not see the modified TRB(s).
1117 */
1118 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1119 (dep->flags & DWC3_EP_BUSY) &&
1120 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1121 WARN_ON_ONCE(!dep->resource_index);
1122 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
1123 false);
1124 if (ret && ret != -EBUSY)
1125 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1126 dep->name);
1127 return ret;
1128 }
1129
1130 return 0;
1131 }
1132
1133 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1134 gfp_t gfp_flags)
1135 {
1136 struct dwc3_request *req = to_dwc3_request(request);
1137 struct dwc3_ep *dep = to_dwc3_ep(ep);
1138 struct dwc3 *dwc = dep->dwc;
1139
1140 unsigned long flags;
1141
1142 int ret;
1143
1144 if (!dep->endpoint.desc) {
1145 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1146 request, ep->name);
1147 return -ESHUTDOWN;
1148 }
1149
1150 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
1151 request, ep->name, request->length);
1152
1153 spin_lock_irqsave(&dwc->lock, flags);
1154 ret = __dwc3_gadget_ep_queue(dep, req);
1155 spin_unlock_irqrestore(&dwc->lock, flags);
1156
1157 return ret;
1158 }
1159
1160 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1161 struct usb_request *request)
1162 {
1163 struct dwc3_request *req = to_dwc3_request(request);
1164 struct dwc3_request *r = NULL;
1165
1166 struct dwc3_ep *dep = to_dwc3_ep(ep);
1167 struct dwc3 *dwc = dep->dwc;
1168
1169 unsigned long flags;
1170 int ret = 0;
1171
1172 spin_lock_irqsave(&dwc->lock, flags);
1173
1174 list_for_each_entry(r, &dep->request_list, list) {
1175 if (r == req)
1176 break;
1177 }
1178
1179 if (r != req) {
1180 list_for_each_entry(r, &dep->req_queued, list) {
1181 if (r == req)
1182 break;
1183 }
1184 if (r == req) {
1185 /* wait until it is processed */
1186 dwc3_stop_active_transfer(dwc, dep->number);
1187 goto out1;
1188 }
1189 dev_err(dwc->dev, "request %p was not queued to %s\n",
1190 request, ep->name);
1191 ret = -EINVAL;
1192 goto out0;
1193 }
1194
1195 out1:
1196 /* giveback the request */
1197 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1198
1199 out0:
1200 spin_unlock_irqrestore(&dwc->lock, flags);
1201
1202 return ret;
1203 }
1204
1205 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1206 {
1207 struct dwc3_gadget_ep_cmd_params params;
1208 struct dwc3 *dwc = dep->dwc;
1209 int ret;
1210
1211 memset(&params, 0x00, sizeof(params));
1212
1213 if (value) {
1214 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1215 DWC3_DEPCMD_SETSTALL, &params);
1216 if (ret)
1217 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1218 value ? "set" : "clear",
1219 dep->name);
1220 else
1221 dep->flags |= DWC3_EP_STALL;
1222 } else {
1223 if (dep->flags & DWC3_EP_WEDGE)
1224 return 0;
1225
1226 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1227 DWC3_DEPCMD_CLEARSTALL, &params);
1228 if (ret)
1229 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1230 value ? "set" : "clear",
1231 dep->name);
1232 else
1233 dep->flags &= ~DWC3_EP_STALL;
1234 }
1235
1236 return ret;
1237 }
1238
1239 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1240 {
1241 struct dwc3_ep *dep = to_dwc3_ep(ep);
1242 struct dwc3 *dwc = dep->dwc;
1243
1244 unsigned long flags;
1245
1246 int ret;
1247
1248 spin_lock_irqsave(&dwc->lock, flags);
1249
1250 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1251 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1252 ret = -EINVAL;
1253 goto out;
1254 }
1255
1256 ret = __dwc3_gadget_ep_set_halt(dep, value);
1257 out:
1258 spin_unlock_irqrestore(&dwc->lock, flags);
1259
1260 return ret;
1261 }
1262
1263 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1264 {
1265 struct dwc3_ep *dep = to_dwc3_ep(ep);
1266 struct dwc3 *dwc = dep->dwc;
1267 unsigned long flags;
1268
1269 spin_lock_irqsave(&dwc->lock, flags);
1270 dep->flags |= DWC3_EP_WEDGE;
1271 spin_unlock_irqrestore(&dwc->lock, flags);
1272
1273 if (dep->number == 0 || dep->number == 1)
1274 return dwc3_gadget_ep0_set_halt(ep, 1);
1275 else
1276 return dwc3_gadget_ep_set_halt(ep, 1);
1277 }
1278
1279 /* -------------------------------------------------------------------------- */
1280
1281 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1282 .bLength = USB_DT_ENDPOINT_SIZE,
1283 .bDescriptorType = USB_DT_ENDPOINT,
1284 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1285 };
1286
1287 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1288 .enable = dwc3_gadget_ep0_enable,
1289 .disable = dwc3_gadget_ep0_disable,
1290 .alloc_request = dwc3_gadget_ep_alloc_request,
1291 .free_request = dwc3_gadget_ep_free_request,
1292 .queue = dwc3_gadget_ep0_queue,
1293 .dequeue = dwc3_gadget_ep_dequeue,
1294 .set_halt = dwc3_gadget_ep0_set_halt,
1295 .set_wedge = dwc3_gadget_ep_set_wedge,
1296 };
1297
1298 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1299 .enable = dwc3_gadget_ep_enable,
1300 .disable = dwc3_gadget_ep_disable,
1301 .alloc_request = dwc3_gadget_ep_alloc_request,
1302 .free_request = dwc3_gadget_ep_free_request,
1303 .queue = dwc3_gadget_ep_queue,
1304 .dequeue = dwc3_gadget_ep_dequeue,
1305 .set_halt = dwc3_gadget_ep_set_halt,
1306 .set_wedge = dwc3_gadget_ep_set_wedge,
1307 };
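/*
 * Illustrative sketch, not part of the original file: gadget function
 * drivers never call the ops above directly; they go through the usb_ep
 * wrappers, which dispatch to dwc3_gadget_ep_enable() and
 * dwc3_gadget_ep_queue(). The function below is a made-up example.
 */
static int dwc3_example_function_queue(struct usb_ep *ep,
		struct usb_request *req)
{
	int ret;

	/* ep->desc must already have been chosen by the function driver */
	ret = usb_ep_enable(ep);		/* -> dwc3_gadget_ep_enable() */
	if (ret)
		return ret;

	return usb_ep_queue(ep, req, GFP_ATOMIC);	/* -> dwc3_gadget_ep_queue() */
}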
1308
1309 /* -------------------------------------------------------------------------- */
1310
1311 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1312 {
1313 struct dwc3 *dwc = gadget_to_dwc(g);
1314 u32 reg;
1315
1316 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1317 return DWC3_DSTS_SOFFN(reg);
1318 }
1319
1320 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1321 {
1322 struct dwc3 *dwc = gadget_to_dwc(g);
1323
1324 unsigned long timeout;
1325 unsigned long flags;
1326
1327 u32 reg;
1328
1329 int ret = 0;
1330
1331 u8 link_state;
1332 u8 speed;
1333
1334 spin_lock_irqsave(&dwc->lock, flags);
1335
1336 /*
1337 * According to the Databook, a Remote Wakeup request should
1338 * be issued only when the device is in Early Suspend state.
1339 *
1340 * We can check that via USB Link State bits in DSTS register.
1341 */
1342 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1343
1344 speed = reg & DWC3_DSTS_CONNECTSPD;
1345 if (speed == DWC3_DSTS_SUPERSPEED) {
1346 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1347 ret = -EINVAL;
1348 goto out;
1349 }
1350
1351 link_state = DWC3_DSTS_USBLNKST(reg);
1352
1353 switch (link_state) {
1354 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1355 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1356 break;
1357 default:
1358 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1359 link_state);
1360 ret = -EINVAL;
1361 goto out;
1362 }
1363
1364 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1365 if (ret < 0) {
1366 dev_err(dwc->dev, "failed to put link in Recovery\n");
1367 goto out;
1368 }
1369
1370 /* Recent versions do this automatically */
1371 if (dwc->revision < DWC3_REVISION_194A) {
1372 /* write zeroes to Link Change Request */
1373 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1374 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1375 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1376 }
1377
1378 /* poll until Link State changes to ON */
1379 timeout = jiffies + msecs_to_jiffies(100);
1380
1381 while (!time_after(jiffies, timeout)) {
1382 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1383
1384 /* in HS, means ON */
1385 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1386 break;
1387 }
1388
1389 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1390 dev_err(dwc->dev, "failed to send remote wakeup\n");
1391 ret = -EINVAL;
1392 }
1393
1394 out:
1395 spin_unlock_irqrestore(&dwc->lock, flags);
1396
1397 return ret;
1398 }
1399
1400 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1401 int is_selfpowered)
1402 {
1403 struct dwc3 *dwc = gadget_to_dwc(g);
1404 unsigned long flags;
1405
1406 spin_lock_irqsave(&dwc->lock, flags);
1407 dwc->is_selfpowered = !!is_selfpowered;
1408 spin_unlock_irqrestore(&dwc->lock, flags);
1409
1410 return 0;
1411 }
1412
1413 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1414 {
1415 u32 reg;
1416 u32 timeout = 500;
1417
1418 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1419 if (is_on) {
1420 if (dwc->revision <= DWC3_REVISION_187A) {
1421 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1422 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1423 }
1424
1425 if (dwc->revision >= DWC3_REVISION_194A)
1426 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1427 reg |= DWC3_DCTL_RUN_STOP;
1428 dwc->pullups_connected = true;
1429 } else {
1430 reg &= ~DWC3_DCTL_RUN_STOP;
1431 dwc->pullups_connected = false;
1432 }
1433
1434 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1435
1436 do {
1437 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1438 if (is_on) {
1439 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1440 break;
1441 } else {
1442 if (reg & DWC3_DSTS_DEVCTRLHLT)
1443 break;
1444 }
1445 timeout--;
1446 if (!timeout)
1447 return -ETIMEDOUT;
1448 udelay(1);
1449 } while (1);
1450
1451 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1452 dwc->gadget_driver
1453 ? dwc->gadget_driver->function : "no-function",
1454 is_on ? "connect" : "disconnect");
1455
1456 return 0;
1457 }
1458
1459 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1460 {
1461 struct dwc3 *dwc = gadget_to_dwc(g);
1462 unsigned long flags;
1463 int ret;
1464
1465 is_on = !!is_on;
1466
1467 spin_lock_irqsave(&dwc->lock, flags);
1468 ret = dwc3_gadget_run_stop(dwc, is_on);
1469 spin_unlock_irqrestore(&dwc->lock, flags);
1470
1471 return ret;
1472 }
1473
1474 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1475 {
1476 u32 reg;
1477
1478 /* Enable all but Start and End of Frame IRQs */
1479 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1480 DWC3_DEVTEN_EVNTOVERFLOWEN |
1481 DWC3_DEVTEN_CMDCMPLTEN |
1482 DWC3_DEVTEN_ERRTICERREN |
1483 DWC3_DEVTEN_WKUPEVTEN |
1484 DWC3_DEVTEN_ULSTCNGEN |
1485 DWC3_DEVTEN_CONNECTDONEEN |
1486 DWC3_DEVTEN_USBRSTEN |
1487 DWC3_DEVTEN_DISCONNEVTEN);
1488
1489 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1490 }
1491
1492 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1493 {
1494 /* mask all interrupts */
1495 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1496 }
1497
1498 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1499 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1500
1501 static int dwc3_gadget_start(struct usb_gadget *g,
1502 struct usb_gadget_driver *driver)
1503 {
1504 struct dwc3 *dwc = gadget_to_dwc(g);
1505 struct dwc3_ep *dep;
1506 unsigned long flags;
1507 int ret = 0;
1508 int irq;
1509 u32 reg;
1510
1511 spin_lock_irqsave(&dwc->lock, flags);
1512
1513 if (dwc->gadget_driver) {
1514 dev_err(dwc->dev, "%s is already bound to %s\n",
1515 dwc->gadget.name,
1516 dwc->gadget_driver->driver.name);
1517 ret = -EBUSY;
1518 goto err0;
1519 }
1520
1521 dwc->gadget_driver = driver;
1522
1523 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1524 reg &= ~(DWC3_DCFG_SPEED_MASK);
1525
1526 /**
1527 * WORKAROUND: DWC3 revisions < 2.20a have an issue
1528 * which would cause a metastability state on the Run/Stop
1529 * bit if we try to force the IP to USB2-only mode.
1530 *
1531 * Because of that, we cannot configure the IP to any
1532 * speed other than SuperSpeed.
1533 *
1534 * Refers to:
1535 *
1536 * STAR#9000525659: Clock Domain Crossing on DCTL in
1537 * USB 2.0 Mode
1538 */
1539 if (dwc->revision < DWC3_REVISION_220A)
1540 reg |= DWC3_DCFG_SUPERSPEED;
1541 else
1542 reg |= dwc->maximum_speed;
1543 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1544
1545 dwc->start_config_issued = false;
1546
1547 /* Start with SuperSpeed Default */
1548 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1549
1550 dep = dwc->eps[0];
1551 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
1552 if (ret) {
1553 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1554 goto err0;
1555 }
1556
1557 dep = dwc->eps[1];
1558 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
1559 if (ret) {
1560 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1561 goto err1;
1562 }
1563
1564 /* begin to receive SETUP packets */
1565 dwc->ep0state = EP0_SETUP_PHASE;
1566 dwc3_ep0_out_start(dwc);
1567
1568 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1569 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1570 IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
1571 if (ret) {
1572 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1573 irq, ret);
1574 goto err1;
1575 }
1576
1577 dwc3_gadget_enable_irq(dwc);
1578
1579 spin_unlock_irqrestore(&dwc->lock, flags);
1580
1581 return 0;
1582
1583 err1:
1584 __dwc3_gadget_ep_disable(dwc->eps[0]);
1585
1586 err0:
1587 dwc->gadget_driver = NULL;
1588 spin_unlock_irqrestore(&dwc->lock, flags);
1589
1590 return ret;
1591 }
1592
1593 static int dwc3_gadget_stop(struct usb_gadget *g,
1594 struct usb_gadget_driver *driver)
1595 {
1596 struct dwc3 *dwc = gadget_to_dwc(g);
1597 unsigned long flags;
1598 int irq;
1599
1600 spin_lock_irqsave(&dwc->lock, flags);
1601
1602 dwc3_gadget_disable_irq(dwc);
1603 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1604 free_irq(irq, dwc);
1605
1606 __dwc3_gadget_ep_disable(dwc->eps[0]);
1607 __dwc3_gadget_ep_disable(dwc->eps[1]);
1608
1609 dwc->gadget_driver = NULL;
1610
1611 spin_unlock_irqrestore(&dwc->lock, flags);
1612
1613 return 0;
1614 }
1615
1616 static const struct usb_gadget_ops dwc3_gadget_ops = {
1617 .get_frame = dwc3_gadget_get_frame,
1618 .wakeup = dwc3_gadget_wakeup,
1619 .set_selfpowered = dwc3_gadget_set_selfpowered,
1620 .pullup = dwc3_gadget_pullup,
1621 .udc_start = dwc3_gadget_start,
1622 .udc_stop = dwc3_gadget_stop,
1623 };
1624
1625 /* -------------------------------------------------------------------------- */
1626
1627 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1628 u8 num, u32 direction)
1629 {
1630 struct dwc3_ep *dep;
1631 u8 i;
1632
1633 for (i = 0; i < num; i++) {
1634 u8 epnum = (i << 1) | (!!direction);
1635
1636 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1637 if (!dep) {
1638 dev_err(dwc->dev, "can't allocate endpoint %d\n",
1639 epnum);
1640 return -ENOMEM;
1641 }
1642
1643 dep->dwc = dwc;
1644 dep->number = epnum;
1645 dwc->eps[epnum] = dep;
1646
1647 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1648 (epnum & 1) ? "in" : "out");
1649
1650 dep->endpoint.name = dep->name;
1651 dep->direction = (epnum & 1);
1652
1653 if (epnum == 0 || epnum == 1) {
1654 dep->endpoint.maxpacket = 512;
1655 dep->endpoint.maxburst = 1;
1656 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1657 if (!epnum)
1658 dwc->gadget.ep0 = &dep->endpoint;
1659 } else {
1660 int ret;
1661
1662 dep->endpoint.maxpacket = 1024;
1663 dep->endpoint.max_streams = 15;
1664 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1665 list_add_tail(&dep->endpoint.ep_list,
1666 &dwc->gadget.ep_list);
1667
1668 ret = dwc3_alloc_trb_pool(dep);
1669 if (ret)
1670 return ret;
1671 }
1672
1673 INIT_LIST_HEAD(&dep->request_list);
1674 INIT_LIST_HEAD(&dep->req_queued);
1675 }
1676
1677 return 0;
1678 }
1679
1680 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1681 {
1682 int ret;
1683
1684 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1685
1686 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1687 if (ret < 0) {
1688 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
1689 return ret;
1690 }
1691
1692 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1693 if (ret < 0) {
1694 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
1695 return ret;
1696 }
1697
1698 return 0;
1699 }
1700
1701 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1702 {
1703 struct dwc3_ep *dep;
1704 u8 epnum;
1705
1706 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1707 dep = dwc->eps[epnum];
1708 if (!dep)
1709 continue;
1710 /*
1711 * Physical endpoints 0 and 1 are special; they form the
1712 * bi-directional USB endpoint 0.
1713 *
1714 * For those two physical endpoints, we don't allocate a TRB
1715 * pool nor do we add them to the endpoints list. Due to that, we
1716 * shouldn't do these two operations otherwise we would end up
1717 * with all sorts of bugs when removing dwc3.ko.
1718 */
1719 if (epnum != 0 && epnum != 1) {
1720 dwc3_free_trb_pool(dep);
1721 list_del(&dep->endpoint.ep_list);
1722 }
1723
1724 kfree(dep);
1725 }
1726 }
1727
1728 /* -------------------------------------------------------------------------- */
1729
1730 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1731 struct dwc3_request *req, struct dwc3_trb *trb,
1732 const struct dwc3_event_depevt *event, int status)
1733 {
1734 unsigned int count;
1735 unsigned int s_pkt = 0;
1736 unsigned int trb_status;
1737
1738 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1739 /*
1740 * We continue despite the error. There is not much we
1741 * can do. If we don't clean it up we loop forever. If
1742 * we skip the TRB then it gets overwritten after a
1743 * while since we use them in a ring buffer. A BUG()
1744 * would help. Let's hope that if this occurs, someone
1745 * fixes the root cause instead of looking away :)
1746 */
1747 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1748 dep->name, trb);
1749 count = trb->size & DWC3_TRB_SIZE_MASK;
1750
1751 if (dep->direction) {
1752 if (count) {
1753 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1754 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1755 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1756 dep->name);
1757 /*
1758 * If missed isoc occurred and there is
1759 * no request queued then issue END
1760 * TRANSFER, so that core generates
1761 * next xfernotready and we will issue
1762 * a fresh START TRANSFER.
1763 * If there are still queued requests
1764 * then wait; do not issue either END
1765 * or UPDATE TRANSFER, just attach the next
1766 * request in request_list during
1767 * giveback. If any future queued request
1768 * is successfully transferred then we
1769 * will issue UPDATE TRANSFER for all
1770 * requests in the request_list.
1771 */
1772 dep->flags |= DWC3_EP_MISSED_ISOC;
1773 } else {
1774 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1775 dep->name);
1776 status = -ECONNRESET;
1777 }
1778 } else {
1779 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1780 }
1781 } else {
1782 if (count && (event->status & DEPEVT_STATUS_SHORT))
1783 s_pkt = 1;
1784 }
1785
1786 /*
1787 * We assume here we will always receive the entire data block
1788 * which we should receive. Meaning, if we program RX to
1789 * receive 4K but we receive only 2K, we assume that's all we
1790 * should receive and we simply bounce the request back to the
1791 * gadget driver for further processing.
1792 */
1793 req->request.actual += req->request.length - count;
1794 if (s_pkt)
1795 return 1;
1796 if ((event->status & DEPEVT_STATUS_LST) &&
1797 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1798 DWC3_TRB_CTRL_HWO)))
1799 return 1;
1800 if ((event->status & DEPEVT_STATUS_IOC) &&
1801 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1802 return 1;
1803 return 0;
1804 }
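/*
 * Worked example (illustrative): TRB.size is written with the requested
 * length and counts down as data is transferred, so "count" above is the
 * residue. If a 1024-byte OUT request was mapped to a single TRB and the
 * host sent a 600-byte short packet, count == 424 and request.actual grows
 * by 1024 - 424 = 600; the SHORT status bit then completes the request
 * early.
 */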
1805
1806 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1807 const struct dwc3_event_depevt *event, int status)
1808 {
1809 struct dwc3_request *req;
1810 struct dwc3_trb *trb;
1811 unsigned int slot;
1812 unsigned int i;
1813 int ret;
1814
1815 do {
1816 req = next_request(&dep->req_queued);
1817 if (!req) {
1818 WARN_ON_ONCE(1);
1819 return 1;
1820 }
1821 i = 0;
1822 do {
1823 slot = req->start_slot + i;
1824 if ((slot == DWC3_TRB_NUM - 1) &&
1825 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1826 slot++;
1827 slot %= DWC3_TRB_NUM;
1828 trb = &dep->trb_pool[slot];
1829
1830 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1831 event, status);
1832 if (ret)
1833 break;
1834 } while (++i < req->request.num_mapped_sgs);
1835
1836 dwc3_gadget_giveback(dep, req, status);
1837
1838 if (ret)
1839 break;
1840 } while (1);
1841
1842 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1843 list_empty(&dep->req_queued)) {
1844 if (list_empty(&dep->request_list)) {
1845 /*
1846 * If there is no entry in request list then do
1847 * not issue END TRANSFER now. Just set PENDING
1848 * flag, so that END TRANSFER is issued when an
1849 * entry is added into request list.
1850 */
1851 dep->flags = DWC3_EP_PENDING_REQUEST;
1852 } else {
1853 dwc3_stop_active_transfer(dwc, dep->number);
1854 dep->flags = DWC3_EP_ENABLED;
1855 }
1856 return 1;
1857 }
1858
1859 if ((event->status & DEPEVT_STATUS_IOC) &&
1860 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1861 return 0;
1862 return 1;
1863 }
1864
1865 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1866 struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1867 int start_new)
1868 {
1869 unsigned status = 0;
1870 int clean_busy;
1871
1872 if (event->status & DEPEVT_STATUS_BUSERR)
1873 status = -ECONNRESET;
1874
1875 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1876 if (clean_busy)
1877 dep->flags &= ~DWC3_EP_BUSY;
1878
1879 /*
1880 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1881 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1882 */
1883 if (dwc->revision < DWC3_REVISION_183A) {
1884 u32 reg;
1885 int i;
1886
1887 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1888 dep = dwc->eps[i];
1889
1890 if (!(dep->flags & DWC3_EP_ENABLED))
1891 continue;
1892
1893 if (!list_empty(&dep->req_queued))
1894 return;
1895 }
1896
1897 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1898 reg |= dwc->u1u2;
1899 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1900
1901 dwc->u1u2 = 0;
1902 }
1903 }
1904
1905 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1906 const struct dwc3_event_depevt *event)
1907 {
1908 struct dwc3_ep *dep;
1909 u8 epnum = event->endpoint_number;
1910
1911 dep = dwc->eps[epnum];
1912
1913 if (!(dep->flags & DWC3_EP_ENABLED))
1914 return;
1915
1916 dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1917 dwc3_ep_event_string(event->endpoint_event));
1918
1919 if (epnum == 0 || epnum == 1) {
1920 dwc3_ep0_interrupt(dwc, event);
1921 return;
1922 }
1923
1924 switch (event->endpoint_event) {
1925 case DWC3_DEPEVT_XFERCOMPLETE:
1926 dep->resource_index = 0;
1927
1928 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1929 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1930 dep->name);
1931 return;
1932 }
1933
1934 dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1935 break;
1936 case DWC3_DEPEVT_XFERINPROGRESS:
1937 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1938 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1939 dep->name);
1940 return;
1941 }
1942
1943 dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1944 break;
1945 case DWC3_DEPEVT_XFERNOTREADY:
1946 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1947 dwc3_gadget_start_isoc(dwc, dep, event);
1948 } else {
1949 int ret;
1950
1951 dev_vdbg(dwc->dev, "%s: reason %s\n",
1952 dep->name, event->status &
1953 DEPEVT_STATUS_TRANSFER_ACTIVE
1954 ? "Transfer Active"
1955 : "Transfer Not Active");
1956
1957 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1958 if (!ret || ret == -EBUSY)
1959 return;
1960
1961 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1962 dep->name);
1963 }
1964
1965 break;
1966 case DWC3_DEPEVT_STREAMEVT:
1967 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
1968 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1969 dep->name);
1970 return;
1971 }
1972
1973 switch (event->status) {
1974 case DEPEVT_STREAMEVT_FOUND:
1975 dev_vdbg(dwc->dev, "Stream %d found and started\n",
1976 event->parameters);
1977
1978 break;
1979 case DEPEVT_STREAMEVT_NOTFOUND:
1980 /* FALLTHROUGH */
1981 default:
1982 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1983 }
1984 break;
1985 case DWC3_DEPEVT_RXTXFIFOEVT:
1986 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1987 break;
1988 case DWC3_DEPEVT_EPCMDCMPLT:
1989 dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
1990 break;
1991 }
1992 }
1993
1994 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1995 {
1996 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1997 spin_unlock(&dwc->lock);
1998 dwc->gadget_driver->disconnect(&dwc->gadget);
1999 spin_lock(&dwc->lock);
2000 }
2001 }
2002
2003 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
2004 {
2005 struct dwc3_ep *dep;
2006 struct dwc3_gadget_ep_cmd_params params;
2007 u32 cmd;
2008 int ret;
2009
2010 dep = dwc->eps[epnum];
2011
2012 if (!dep->resource_index)
2013 return;
2014
2015 /*
2016 * NOTICE: We are violating what the Databook says about the
2017 * EndTransfer command. Ideally we would _always_ wait for the
2018 * EndTransfer Command Completion IRQ, but that's causing too
2019 * much trouble synchronizing between us and gadget driver.
2020 *
2021 * We have discussed this with the IP Provider and it was
2022 * suggested to giveback all requests here, but give HW some
2023 * extra time to synchronize with the interconnect. We're using
2024 * an arbitrary 100us delay for that.
2025 *
2026 * Note also that a similar handling was tested by Synopsys
2027 * (thanks a lot Paul) and nothing bad has come out of it.
2028 * In short, what we're doing is:
2029 *
2030 * - Issue EndTransfer WITH CMDIOC bit set
2031 * - Wait 100us
2032 */
2033
2034 cmd = DWC3_DEPCMD_ENDTRANSFER;
2035 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
2036 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2037 memset(&params, 0, sizeof(params));
2038 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2039 WARN_ON_ONCE(ret);
2040 dep->resource_index = 0;
2041 dep->flags &= ~DWC3_EP_BUSY;
2042 udelay(100);
2043 }
2044
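/*
 * Used from the USB reset handler: stop transfers and give back all
 * queued requests on every enabled non-control endpoint; EP0 is
 * handled separately.
 */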
2045 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2046 {
2047 u32 epnum;
2048
2049 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2050 struct dwc3_ep *dep;
2051
2052 dep = dwc->eps[epnum];
2053 if (!dep)
2054 continue;
2055
2056 if (!(dep->flags & DWC3_EP_ENABLED))
2057 continue;
2058
2059 dwc3_remove_requests(dwc, dep);
2060 }
2061 }
2062
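/*
 * Issue a ClearStall command on every endpoint which is currently
 * flagged as halted and clear its DWC3_EP_STALL flag.
 */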
2063 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2064 {
2065 u32 epnum;
2066
2067 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2068 struct dwc3_ep *dep;
2069 struct dwc3_gadget_ep_cmd_params params;
2070 int ret;
2071
2072 dep = dwc->eps[epnum];
2073 if (!dep)
2074 continue;
2075
2076 if (!(dep->flags & DWC3_EP_STALL))
2077 continue;
2078
2079 dep->flags &= ~DWC3_EP_STALL;
2080
2081 memset(&params, 0, sizeof(params));
2082 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2083 DWC3_DEPCMD_CLEARSTALL, &params);
2084 WARN_ON_ONCE(ret);
2085 }
2086 }
2087
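/*
 * Disconnect Event: stop initiating U1/U2, notify the gadget driver
 * and reset our notion of the connection speed.
 */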
2088 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2089 {
2090 u32 reg;
2091
2092 dev_vdbg(dwc->dev, "%s\n", __func__);
2093
2094 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2095 reg &= ~DWC3_DCTL_INITU1ENA;
2096 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2097
2098 reg &= ~DWC3_DCTL_INITU2ENA;
2099 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2100
2101 dwc3_disconnect_gadget(dwc);
2102 dwc->start_config_issued = false;
2103
2104 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2105 dwc->setup_packet_pending = false;
2106 }
2107
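/* Set or clear GUSB3PIPECTL.SUSPHY to suspend or resume the USB3 PHY. */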
2108 static void dwc3_gadget_usb3_phy_suspend(struct dwc3 *dwc, int suspend)
2109 {
2110 u32 reg;
2111
2112 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
2113
2114 if (suspend)
2115 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
2116 else
2117 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
2118
2119 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
2120 }
2121
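/* Set or clear GUSB2PHYCFG.SUSPHY to suspend or resume the USB2 PHY. */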
2122 static void dwc3_gadget_usb2_phy_suspend(struct dwc3 *dwc, int suspend)
2123 {
2124 u32 reg;
2125
2126 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
2127
2128 if (suspend)
2129 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
2130 else
2131 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
2132
2133 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
2134 }
2135
2136 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2137 {
2138 u32 reg;
2139
2140 dev_vdbg(dwc->dev, "%s\n", __func__);
2141
2142 /*
2143 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2144 * would cause a missing Disconnect Event if there's a
2145 * pending Setup Packet in the FIFO.
2146 *
2147 * There's no suggested workaround on the official Bug
2148 * report, which states that "unless the driver/application
2149 * is doing any special handling of a disconnect event,
2150 * there is no functional issue".
2151 *
2152 * Unfortunately, it turns out that we _do_ some special
2153 * handling of a disconnect event, namely complete all
2154 * pending transfers, notify gadget driver of the
2155 * disconnection, and so on.
2156 *
2157 * Our suggested workaround is to follow the Disconnect
2158 * Event steps here, instead, based on a setup_packet_pending
2159 * flag. This flag gets set whenever we have an XferNotReady
2160 * event on EP0 and gets cleared on XferComplete for the
2161 * same endpoint.
2162 *
2163 * Refers to:
2164 *
2165 * STAR#9000466709: RTL: Device : Disconnect event not
2166 * generated if setup packet pending in FIFO
2167 */
2168 if (dwc->revision < DWC3_REVISION_188A) {
2169 if (dwc->setup_packet_pending)
2170 dwc3_gadget_disconnect_interrupt(dwc);
2171 }
2172
2173 /* after reset -> Default State */
2174 usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
2175
2176 /* Recent versions support automatic phy suspend and don't need this */
2177 if (dwc->revision < DWC3_REVISION_194A) {
2178 /* Resume PHYs */
2179 dwc3_gadget_usb2_phy_suspend(dwc, false);
2180 dwc3_gadget_usb3_phy_suspend(dwc, false);
2181 }
2182
2183 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
2184 dwc3_disconnect_gadget(dwc);
2185
2186 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2187 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2188 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2189 dwc->test_mode = false;
2190
2191 dwc3_stop_active_transfers(dwc);
2192 dwc3_clear_stall_all_ep(dwc);
2193 dwc->start_config_issued = false;
2194
2195 /* Reset device address to zero */
2196 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2197 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2198 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2199 }
2200
2201 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2202 {
2203 u32 reg;
2204 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2205
2206 /*
2207 * We only change the clock at SuperSpeed; it is not yet clear why we
2208 * would want to do this, but it may become part of the power saving plan.
2209 */
2210
2211 if (speed != DWC3_DSTS_SUPERSPEED)
2212 return;
2213
2214 /*
2215 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2216 * each time on Connect Done.
2217 */
2218 if (!usb30_clock)
2219 return;
2220
2221 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2222 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2223 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2224 }
2225
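/*
 * Suspend the PHY which is not needed for the negotiated speed: the
 * USB2 PHY on SuperSpeed connections, the USB3 PHY otherwise.
 */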
2226 static void dwc3_gadget_phy_suspend(struct dwc3 *dwc, u8 speed)
2227 {
2228 switch (speed) {
2229 case USB_SPEED_SUPER:
2230 dwc3_gadget_usb2_phy_suspend(dwc, true);
2231 break;
2232 case USB_SPEED_HIGH:
2233 case USB_SPEED_FULL:
2234 case USB_SPEED_LOW:
2235 dwc3_gadget_usb3_phy_suspend(dwc, true);
2236 break;
2237 }
2238 }
2239
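/*
 * Connect Done Event: read the negotiated speed from DSTS, size ep0
 * accordingly and re-enable both directions of the control endpoint.
 */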
2240 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2241 {
2242 struct dwc3_ep *dep;
2243 int ret;
2244 u32 reg;
2245 u8 speed;
2246
2247 dev_vdbg(dwc->dev, "%s\n", __func__);
2248
2249 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2250 speed = reg & DWC3_DSTS_CONNECTSPD;
2251 dwc->speed = speed;
2252
2253 dwc3_update_ram_clk_sel(dwc, speed);
2254
2255 switch (speed) {
2256 case DWC3_DCFG_SUPERSPEED:
2257 /*
2258 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2259 * would cause a missing USB3 Reset event.
2260 *
2261 * In such situations, we should force a USB3 Reset
2262 * event by calling our dwc3_gadget_reset_interrupt()
2263 * routine.
2264 *
2265 * Refers to:
2266 *
2267 * STAR#9000483510: RTL: SS : USB3 reset event may
2268 * not be generated always when the link enters poll
2269 */
2270 if (dwc->revision < DWC3_REVISION_190A)
2271 dwc3_gadget_reset_interrupt(dwc);
2272
2273 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2274 dwc->gadget.ep0->maxpacket = 512;
2275 dwc->gadget.speed = USB_SPEED_SUPER;
2276 break;
2277 case DWC3_DCFG_HIGHSPEED:
2278 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2279 dwc->gadget.ep0->maxpacket = 64;
2280 dwc->gadget.speed = USB_SPEED_HIGH;
2281 break;
2282 case DWC3_DCFG_FULLSPEED2:
2283 case DWC3_DCFG_FULLSPEED1:
2284 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2285 dwc->gadget.ep0->maxpacket = 64;
2286 dwc->gadget.speed = USB_SPEED_FULL;
2287 break;
2288 case DWC3_DCFG_LOWSPEED:
2289 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2290 dwc->gadget.ep0->maxpacket = 8;
2291 dwc->gadget.speed = USB_SPEED_LOW;
2292 break;
2293 }
2294
2295 /* Enable USB2 LPM Capability */
2296
2297 if ((dwc->revision > DWC3_REVISION_194A)
2298 && (speed != DWC3_DCFG_SUPERSPEED)) {
2299 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2300 reg |= DWC3_DCFG_LPM_CAP;
2301 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2302
2303 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2304 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2305
2306 /*
2307 * TODO: This should be configurable. For now using
2308 * maximum allowed HIRD threshold value of 0b1100
2309 */
2310 reg |= DWC3_DCTL_HIRD_THRES(12);
2311
2312 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2313 }
2314
2315 /* Recent versions support automatic phy suspend and don't need this */
2316 if (dwc->revision < DWC3_REVISION_194A) {
2317 /* Suspend unneeded PHY */
2318 dwc3_gadget_phy_suspend(dwc, dwc->gadget.speed);
2319 }
2320
2321 dep = dwc->eps[0];
2322 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
2323 if (ret) {
2324 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2325 return;
2326 }
2327
2328 dep = dwc->eps[1];
2329 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
2330 if (ret) {
2331 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2332 return;
2333 }
2334
2335 /*
2336 * Configure PHY via GUSB3PIPECTLn if required.
2337 *
2338 * Update GTXFIFOSIZn
2339 *
2340 * In both cases reset values should be sufficient.
2341 */
2342 }
2343
2344 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2345 {
2346 dev_vdbg(dwc->dev, "%s\n", __func__);
2347
2348 /*
2349 * TODO take core out of low power mode when that's
2350 * implemented.
2351 */
2352
2353 /* the resume callback is optional, only call it when implemented */
	if (dwc->gadget_driver && dwc->gadget_driver->resume)
		dwc->gadget_driver->resume(&dwc->gadget);
2354 }
2355
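/*
 * Link Status Change Event: track the new link state and apply the
 * pre-2.50a and pre-1.83a erratum workarounds described below.
 */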
2356 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2357 unsigned int evtinfo)
2358 {
2359 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2360 unsigned int pwropt;
2361
2362 /*
2363 * WORKAROUND: DWC3 revisions < 2.50a have an issue when configured
2364 * without Hibernation mode enabled, which shows up when the device
2365 * detects a host-initiated U3 exit.
2366 *
2367 * In that case, the device generates a Link State Change Interrupt
2368 * from U3 to RESUME, which is only necessary when Hibernation is
2369 * configured in.
2370 *
2371 * There are no functional changes due to such a spurious event and we
2372 * just need to ignore it.
2373 *
2374 * Refers to:
2375 *
2376 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2377 * operational mode
2378 */
2379 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2380 if ((dwc->revision < DWC3_REVISION_250A) &&
2381 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2382 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2383 (next == DWC3_LINK_STATE_RESUME)) {
2384 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2385 return;
2386 }
2387 }
2388
2389 /*
2390 * WORKAROUND: DWC3 revisions <1.83a have an issue where, depending
2391 * on the link partner, the USB session might go through multiple
2392 * entries/exits of low power states before a transfer takes place.
2393 *
2394 * Due to this problem, we might experience lower throughput. The
2395 * suggested workaround is to disable DCTL[12:9] bits if we're
2396 * transitioning from U1/U2 to U0 and enable those bits again
2397 * after a transfer completes and there are no pending transfers
2398 * on any of the enabled endpoints.
2399 *
2400 * This is the first half of that workaround.
2401 *
2402 * Refers to:
2403 *
2404 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2405 * core send LGO_Ux entering U0
2406 */
2407 if (dwc->revision < DWC3_REVISION_183A) {
2408 if (next == DWC3_LINK_STATE_U0) {
2409 u32 u1u2;
2410 u32 reg;
2411
2412 switch (dwc->link_state) {
2413 case DWC3_LINK_STATE_U1:
2414 case DWC3_LINK_STATE_U2:
2415 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2416 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2417 | DWC3_DCTL_ACCEPTU2ENA
2418 | DWC3_DCTL_INITU1ENA
2419 | DWC3_DCTL_ACCEPTU1ENA);
2420
2421 if (!dwc->u1u2)
2422 dwc->u1u2 = reg & u1u2;
2423
2424 reg &= ~u1u2;
2425
2426 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2427 break;
2428 default:
2429 /* do nothing */
2430 break;
2431 }
2432 }
2433 }
2434
2435 dwc->link_state = next;
2436
2437 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
2438 }
2439
2440 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2441 const struct dwc3_event_devt *event)
2442 {
2443 switch (event->type) {
2444 case DWC3_DEVICE_EVENT_DISCONNECT:
2445 dwc3_gadget_disconnect_interrupt(dwc);
2446 break;
2447 case DWC3_DEVICE_EVENT_RESET:
2448 dwc3_gadget_reset_interrupt(dwc);
2449 break;
2450 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2451 dwc3_gadget_conndone_interrupt(dwc);
2452 break;
2453 case DWC3_DEVICE_EVENT_WAKEUP:
2454 dwc3_gadget_wakeup_interrupt(dwc);
2455 break;
2456 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2457 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2458 break;
2459 case DWC3_DEVICE_EVENT_EOPF:
2460 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2461 break;
2462 case DWC3_DEVICE_EVENT_SOF:
2463 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2464 break;
2465 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2466 dev_vdbg(dwc->dev, "Erratic Error\n");
2467 break;
2468 case DWC3_DEVICE_EVENT_CMD_CMPL:
2469 dev_vdbg(dwc->dev, "Command Complete\n");
2470 break;
2471 case DWC3_DEVICE_EVENT_OVERFLOW:
2472 dev_vdbg(dwc->dev, "Overflow\n");
2473 break;
2474 default:
2475 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2476 }
2477 }
2478
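/* Dispatch one event buffer entry to the endpoint or device handler. */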
2479 static void dwc3_process_event_entry(struct dwc3 *dwc,
2480 const union dwc3_event *event)
2481 {
2482 /* Endpoint IRQ, handle it and return early */
2483 if (event->type.is_devspec == 0) {
2484 /* depevt */
2485 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2486 }
2487
2488 switch (event->type.type) {
2489 case DWC3_EVENT_TYPE_DEV:
2490 dwc3_gadget_interrupt(dwc, &event->devt);
2491 break;
2492 /* REVISIT what to do with Carkit and I2C events ? */
2493 default:
2494 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2495 }
2496 }
2497
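/*
 * Threaded (bottom half) handler: drain every event buffer marked
 * pending by the hard IRQ handler, acknowledging consumed bytes in
 * GEVNTCOUNT as we go.
 */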
2498 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2499 {
2500 struct dwc3 *dwc = _dwc;
2501 unsigned long flags;
2502 irqreturn_t ret = IRQ_NONE;
2503 int i;
2504
2505 spin_lock_irqsave(&dwc->lock, flags);
2506
2507 for (i = 0; i < dwc->num_event_buffers; i++) {
2508 struct dwc3_event_buffer *evt;
2509 int left;
2510
2511 evt = dwc->ev_buffs[i];
2512 left = evt->count;
2513
2514 if (!(evt->flags & DWC3_EVENT_PENDING))
2515 continue;
2516
2517 while (left > 0) {
2518 union dwc3_event event;
2519
2520 event.raw = *(u32 *) (evt->buf + evt->lpos);
2521
2522 dwc3_process_event_entry(dwc, &event);
2523
2524 /*
2525 * FIXME: we wrap around to the next entry correctly because
2526 * almost all entries are 4 bytes in size. There is one
2527 * entry which is 12 bytes: a regular entry followed by
2528 * 8 bytes of data. It is not yet clear how such an entry
2529 * is laid out when it lands close to the buffer boundary,
2530 * so we will worry about that once we try to handle
2531 * that case.
2532 */
2533 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2534 left -= 4;
2535
2536 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(i), 4);
2537 }
2538
2539 evt->count = 0;
2540 evt->flags &= ~DWC3_EVENT_PENDING;
2541 ret = IRQ_HANDLED;
2542 }
2543
2544 spin_unlock_irqrestore(&dwc->lock, flags);
2545
2546 return ret;
2547 }
2548
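/*
 * Hard IRQ helper: if GEVNTCOUNT reports pending events for this
 * buffer, mark it so the threaded handler will process it.
 */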
2549 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2550 {
2551 struct dwc3_event_buffer *evt;
2552 u32 count;
2553
2554 evt = dwc->ev_buffs[buf];
2555
2556 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2557 count &= DWC3_GEVNTCOUNT_MASK;
2558 if (!count)
2559 return IRQ_NONE;
2560
2561 evt->count = count;
2562 evt->flags |= DWC3_EVENT_PENDING;
2563
2564 return IRQ_WAKE_THREAD;
2565 }
2566
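/* Top half: scan all event buffers and wake the handler thread if needed. */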
2567 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2568 {
2569 struct dwc3 *dwc = _dwc;
2570 int i;
2571 irqreturn_t ret = IRQ_NONE;
2572
2573 spin_lock(&dwc->lock);
2574
2575 for (i = 0; i < dwc->num_event_buffers; i++) {
2576 irqreturn_t status;
2577
2578 status = dwc3_process_event_buf(dwc, i);
2579 if (status == IRQ_WAKE_THREAD)
2580 ret = status;
2581 }
2582
2583 spin_unlock(&dwc->lock);
2584
2585 return ret;
2586 }
2587
2588 /**
2589 * dwc3_gadget_init - Initializes gadget related registers
2590 * @dwc: pointer to our controller context structure
2591 *
2592 * Returns 0 on success otherwise negative errno.
2593 */
2594 int dwc3_gadget_init(struct dwc3 *dwc)
2595 {
2596 u32 reg;
2597 int ret;
2598
2599 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2600 &dwc->ctrl_req_addr, GFP_KERNEL);
2601 if (!dwc->ctrl_req) {
2602 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2603 ret = -ENOMEM;
2604 goto err0;
2605 }
2606
2607 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2608 &dwc->ep0_trb_addr, GFP_KERNEL);
2609 if (!dwc->ep0_trb) {
2610 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2611 ret = -ENOMEM;
2612 goto err1;
2613 }
2614
2615 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2616 if (!dwc->setup_buf) {
2617 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2618 ret = -ENOMEM;
2619 goto err2;
2620 }
2621
2622 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2623 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2624 GFP_KERNEL);
2625 if (!dwc->ep0_bounce) {
2626 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2627 ret = -ENOMEM;
2628 goto err3;
2629 }
2630
2631 dwc->gadget.ops = &dwc3_gadget_ops;
2632 dwc->gadget.max_speed = USB_SPEED_SUPER;
2633 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2634 dwc->gadget.sg_supported = true;
2635 dwc->gadget.name = "dwc3-gadget";
2636
2637 /*
2638 * REVISIT: Here we should clear all pending IRQs to be
2639 * sure we're starting from a well known location.
2640 */
2641
2642 ret = dwc3_gadget_init_endpoints(dwc);
2643 if (ret)
2644 goto err4;
2645
2646 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2647 reg |= DWC3_DCFG_LPM_CAP;
2648 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2649
2650 /* Enable USB2 LPM and automatic phy suspend only on recent versions */
2651 if (dwc->revision >= DWC3_REVISION_194A) {
2652 dwc3_gadget_usb2_phy_suspend(dwc, false);
2653 dwc3_gadget_usb3_phy_suspend(dwc, false);
2654 }
2655
2656 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2657 if (ret) {
2658 dev_err(dwc->dev, "failed to register udc\n");
2659 goto err5;
2660 }
2661
2662 return 0;
2663
2664 err5:
2665 dwc3_gadget_free_endpoints(dwc);
2666
2667 err4:
2668 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2669 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2670
2671 err3:
2672 kfree(dwc->setup_buf);
2673
2674 err2:
2675 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2676 dwc->ep0_trb, dwc->ep0_trb_addr);
2677
2678 err1:
2679 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2680 dwc->ctrl_req, dwc->ctrl_req_addr);
2681
2682 err0:
2683 return ret;
2684 }
2685
2686 /* -------------------------------------------------------------------------- */
2687
2688 void dwc3_gadget_exit(struct dwc3 *dwc)
2689 {
2690 usb_del_gadget_udc(&dwc->gadget);
2691
2692 dwc3_gadget_free_endpoints(dwc);
2693
2694 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2695 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2696
2697 kfree(dwc->setup_buf);
2698
2699 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2700 dwc->ep0_trb, dwc->ep0_trb_addr);
2701
2702 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2703 dwc->ctrl_req, dwc->ctrl_req_addr);
2704 }
2705
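/* PM prepare: mask controller interrupts while the pullup is connected. */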
2706 int dwc3_gadget_prepare(struct dwc3 *dwc)
2707 {
2708 if (dwc->pullups_connected)
2709 dwc3_gadget_disable_irq(dwc);
2710
2711 return 0;
2712 }
2713
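/* PM complete: re-enable interrupts and restart the controller if needed. */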
2714 void dwc3_gadget_complete(struct dwc3 *dwc)
2715 {
2716 if (dwc->pullups_connected) {
2717 dwc3_gadget_enable_irq(dwc);
2718 dwc3_gadget_run_stop(dwc, true);
2719 }
2720 }
2721
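/* Suspend: disable both directions of ep0 and save DCFG for restore. */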
2722 int dwc3_gadget_suspend(struct dwc3 *dwc)
2723 {
2724 __dwc3_gadget_ep_disable(dwc->eps[0]);
2725 __dwc3_gadget_ep_disable(dwc->eps[1]);
2726
2727 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2728
2729 return 0;
2730 }
2731
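/* Resume: re-enable ep0, restart SETUP reception and restore DCFG. */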
2732 int dwc3_gadget_resume(struct dwc3 *dwc)
2733 {
2734 struct dwc3_ep *dep;
2735 int ret;
2736
2737 /* Start with SuperSpeed Default */
2738 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2739
2740 dep = dwc->eps[0];
2741 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2742 if (ret)
2743 goto err0;
2744
2745 dep = dwc->eps[1];
2746 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2747 if (ret)
2748 goto err1;
2749
2750 /* begin to receive SETUP packets */
2751 dwc->ep0state = EP0_SETUP_PHASE;
2752 dwc3_ep0_out_start(dwc);
2753
2754 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2755
2756 return 0;
2757
2758 err1:
2759 __dwc3_gadget_ep_disable(dwc->eps[0]);
2760
2761 err0:
2762 return ret;
2763 }