usb: dwc3: gadget: avoid memcpy()ing event buffer
[deliverable/linux.git] / drivers / usb / dwc3 / gadget.c
1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <linux/kernel.h>
40 #include <linux/delay.h>
41 #include <linux/slab.h>
42 #include <linux/spinlock.h>
43 #include <linux/platform_device.h>
44 #include <linux/pm_runtime.h>
45 #include <linux/interrupt.h>
46 #include <linux/io.h>
47 #include <linux/list.h>
48 #include <linux/dma-mapping.h>
49
50 #include <linux/usb/ch9.h>
51 #include <linux/usb/gadget.h>
52
53 #include "core.h"
54 #include "gadget.h"
55 #include "io.h"
56
57 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
58
59 /**
60 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
61 * @dwc: pointer to our context structure
62 * @mode: the mode to set (J, K SE0 NAK, Force Enable)
63 *
64 * Caller should take care of locking. This function will
65 * return 0 on success or -EINVAL if wrong Test Selector
66 * is passed
67 */
68 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
69 {
70 u32 reg;
71
72 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
73 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
74
75 switch (mode) {
76 case TEST_J:
77 case TEST_K:
78 case TEST_SE0_NAK:
79 case TEST_PACKET:
80 case TEST_FORCE_EN:
81 reg |= mode << 1;
82 break;
83 default:
84 return -EINVAL;
85 }
86
87 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
88
89 return 0;
90 }
91
92 /**
93 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
94 * @dwc: pointer to our context structure
95 * @state: the state to put link into
96 *
97 * Caller should take care of locking. This function will
98 * return 0 on success or -EINVAL.
99 */
100 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
101 {
102 int retries = 100;
103 u32 reg;
104
105 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
106 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
107
108 /* set requested state */
109 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
110 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
111
112 /* wait for a change in DSTS */
113 while (--retries) {
114 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
115
116 /* in HS, means ON */
117 if (DWC3_DSTS_USBLNKST(reg) == state)
118 return 0;
119
120 udelay(500);
121 }
122
123 dev_vdbg(dwc->dev, "link state change request timed out\n");
124
125 return -ETIMEDOUT;
126 }
127
128 /**
129 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
130 * @dwc: pointer to our context structure
131 *
 * This function will make a best-effort FIFO allocation in order
133 * to improve FIFO usage and throughput, while still allowing
134 * us to enable as many endpoints as possible.
135 *
136 * Keep in mind that this operation will be highly dependent
137 * on the configured size for RAM1 - which contains TxFifo -,
138 * the amount of endpoints enabled on coreConsultant tool, and
139 * the width of the Master Bus.
140 *
141 * In the ideal world, we would always be able to satisfy the
142 * following equation:
143 *
144 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
145 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
146 *
147 * Unfortunately, due to many variables that's not always the case.
148 */
149 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
150 {
151 int last_fifo_depth = 0;
152 int ram1_depth;
153 int fifo_size;
154 int mdwidth;
155 int num;
156
157 if (!dwc->needs_fifo_resize)
158 return 0;
159
160 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
161 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
162
163 /* MDWIDTH is represented in bits, we need it in bytes */
164 mdwidth >>= 3;
165
166 /*
167 * FIXME For now we will only allocate 1 wMaxPacketSize space
168 * for each enabled endpoint, later patches will come to
169 * improve this algorithm so that we better use the internal
170 * FIFO space
171 */
172 for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
173 struct dwc3_ep *dep = dwc->eps[num];
174 int fifo_number = dep->number >> 1;
175 int tmp;
176
177 if (!(dep->number & 1))
178 continue;
179
180 if (!(dep->flags & DWC3_EP_ENABLED))
181 continue;
182
183 tmp = dep->endpoint.maxpacket;
184 tmp += mdwidth;
185 tmp += mdwidth;
186
187 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
188 fifo_size |= (last_fifo_depth << 16);
189
190 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
191 dep->name, last_fifo_depth, fifo_size & 0xffff);
192
193 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
194 fifo_size);
195
196 last_fifo_depth += (fifo_size & 0xffff);
197 }
198
199 return 0;
200 }
201
202 void dwc3_map_buffer_to_dma(struct dwc3_request *req)
203 {
204 struct dwc3 *dwc = req->dep->dwc;
205
206 if (req->request.length == 0) {
207 /* req->request.dma = dwc->setup_buf_addr; */
208 return;
209 }
210
211 if (req->request.num_sgs) {
212 int mapped;
213
214 mapped = dma_map_sg(dwc->dev, req->request.sg,
215 req->request.num_sgs,
216 req->direction ? DMA_TO_DEVICE
217 : DMA_FROM_DEVICE);
218 if (mapped < 0) {
219 dev_err(dwc->dev, "failed to map SGs\n");
220 return;
221 }
222
223 req->request.num_mapped_sgs = mapped;
224 return;
225 }
226
227 if (req->request.dma == DMA_ADDR_INVALID) {
228 req->request.dma = dma_map_single(dwc->dev, req->request.buf,
229 req->request.length, req->direction
230 ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
231 req->mapped = true;
232 }
233 }
234
235 void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
236 {
237 struct dwc3 *dwc = req->dep->dwc;
238
239 if (req->request.length == 0) {
240 req->request.dma = DMA_ADDR_INVALID;
241 return;
242 }
243
244 if (req->request.num_mapped_sgs) {
245 req->request.dma = DMA_ADDR_INVALID;
246 dma_unmap_sg(dwc->dev, req->request.sg,
247 req->request.num_mapped_sgs,
248 req->direction ? DMA_TO_DEVICE
249 : DMA_FROM_DEVICE);
250
251 req->request.num_mapped_sgs = 0;
252 return;
253 }
254
255 if (req->mapped) {
256 dma_unmap_single(dwc->dev, req->request.dma,
257 req->request.length, req->direction
258 ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
259 req->mapped = 0;
260 req->request.dma = DMA_ADDR_INVALID;
261 }
262 }
263
/*
 * dwc3_gadget_giveback - complete a request back to the gadget driver
 * @dep: endpoint the request belongs to
 * @req: the request being completed
 * @status: completion status to report (unless one was already set)
 *
 * Caller must hold dwc->lock; the lock is dropped around the gadget's
 * completion callback, which may queue new requests.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	if (req->queued) {
		/* retire as many ring slots as the request consumed */
		if (req->request.num_mapped_sgs)
			dep->busy_slot += req->request.num_mapped_sgs;
		else
			dep->busy_slot++;

		/*
		 * Skip LINK TRB. We can't use req->trb and check for
		 * DWC3_TRBCTL_LINK_TRB because it points the TRB we just
		 * completed (not the LINK TRB).
		 */
		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->desc))
			dep->busy_slot++;
	}
	list_del(&req->list);
	req->trb = NULL;

	/* don't overwrite a status already set (e.g. by a dequeue) */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	dwc3_unmap_buffer_from_dma(req);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	/* drop the lock across the callback to allow re-queueing */
	spin_unlock(&dwc->lock);
	req->request.complete(&req->dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}
300
301 static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
302 {
303 switch (cmd) {
304 case DWC3_DEPCMD_DEPSTARTCFG:
305 return "Start New Configuration";
306 case DWC3_DEPCMD_ENDTRANSFER:
307 return "End Transfer";
308 case DWC3_DEPCMD_UPDATETRANSFER:
309 return "Update Transfer";
310 case DWC3_DEPCMD_STARTTRANSFER:
311 return "Start Transfer";
312 case DWC3_DEPCMD_CLEARSTALL:
313 return "Clear Stall";
314 case DWC3_DEPCMD_SETSTALL:
315 return "Set Stall";
316 case DWC3_DEPCMD_GETSEQNUMBER:
317 return "Get Data Sequence Number";
318 case DWC3_DEPCMD_SETTRANSFRESOURCE:
319 return "Set Endpoint Transfer Resource";
320 case DWC3_DEPCMD_SETEPCONFIG:
321 return "Set Endpoint Configuration";
322 default:
323 return "UNKNOWN command";
324 }
325 }
326
327 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
328 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
329 {
330 struct dwc3_ep *dep = dwc->eps[ep];
331 u32 timeout = 500;
332 u32 reg;
333
334 dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
335 dep->name,
336 dwc3_gadget_ep_cmd_string(cmd), params->param0,
337 params->param1, params->param2);
338
339 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
340 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
341 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
342
343 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
344 do {
345 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
346 if (!(reg & DWC3_DEPCMD_CMDACT)) {
347 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
348 DWC3_DEPCMD_STATUS(reg));
349 return 0;
350 }
351
352 /*
353 * We can't sleep here, because it is also called from
354 * interrupt context.
355 */
356 timeout--;
357 if (!timeout)
358 return -ETIMEDOUT;
359
360 udelay(1);
361 } while (1);
362 }
363
364 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
365 struct dwc3_trb_hw *trb)
366 {
367 u32 offset = (char *) trb - (char *) dep->trb_pool;
368
369 return dep->trb_pool_dma + offset;
370 }
371
372 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
373 {
374 struct dwc3 *dwc = dep->dwc;
375
376 if (dep->trb_pool)
377 return 0;
378
379 if (dep->number == 0 || dep->number == 1)
380 return 0;
381
382 dep->trb_pool = dma_alloc_coherent(dwc->dev,
383 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
384 &dep->trb_pool_dma, GFP_KERNEL);
385 if (!dep->trb_pool) {
386 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
387 dep->name);
388 return -ENOMEM;
389 }
390
391 return 0;
392 }
393
/* release an endpoint's TRB ring and reset the pool pointers */
static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	/* mark the pool as gone so a later alloc starts fresh */
	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}
404
405 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
406 {
407 struct dwc3_gadget_ep_cmd_params params;
408 u32 cmd;
409
410 memset(&params, 0x00, sizeof(params));
411
412 if (dep->number != 1) {
413 cmd = DWC3_DEPCMD_DEPSTARTCFG;
414 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
415 if (dep->number > 1) {
416 if (dwc->start_config_issued)
417 return 0;
418 dwc->start_config_issued = true;
419 cmd |= DWC3_DEPCMD_PARAM(2);
420 }
421
422 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
423 }
424
425 return 0;
426 }
427
428 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
429 const struct usb_endpoint_descriptor *desc,
430 const struct usb_ss_ep_comp_descriptor *comp_desc)
431 {
432 struct dwc3_gadget_ep_cmd_params params;
433
434 memset(&params, 0x00, sizeof(params));
435
436 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
437 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
438 | DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);
439
440 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
441 | DWC3_DEPCFG_XFER_NOT_READY_EN;
442
443 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
444 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
445 | DWC3_DEPCFG_STREAM_EVENT_EN;
446 dep->stream_capable = true;
447 }
448
449 if (usb_endpoint_xfer_isoc(desc))
450 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
451
452 /*
453 * We are doing 1:1 mapping for endpoints, meaning
454 * Physical Endpoints 2 maps to Logical Endpoint 2 and
455 * so on. We consider the direction bit as part of the physical
456 * endpoint number. So USB endpoint 0x81 is 0x03.
457 */
458 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
459
460 /*
461 * We must use the lower 16 TX FIFOs even though
462 * HW might have more
463 */
464 if (dep->direction)
465 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
466
467 if (desc->bInterval) {
468 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
469 dep->interval = 1 << (desc->bInterval - 1);
470 }
471
472 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
473 DWC3_DEPCMD_SETEPCONFIG, &params);
474 }
475
476 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
477 {
478 struct dwc3_gadget_ep_cmd_params params;
479
480 memset(&params, 0x00, sizeof(params));
481
482 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
483
484 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
485 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
486 }
487
/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 * @comp_desc: USB SuperSpeed Endpoint Companion Descriptor (may be NULL)
 *
 * Issues Start Config (first enable only), Set Endpoint Config and Set
 * Transfer Resource, then enables the endpoint in DALEPENA. For ISOC
 * endpoints, the last TRB of the pool is set up as a LINK TRB pointing
 * back to the first so the ring wraps around.
 *
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;
	int ret = -ENOMEM;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb_hw *trb_st_hw;
		struct dwc3_trb_hw *trb_link_hw;
		struct dwc3_trb trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		/* enable the endpoint in the active-endpoints bitmap */
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		memset(&trb_link, 0, sizeof(trb_link));

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw);
		trb_link.trbctl = DWC3_TRBCTL_LINK_TRB;
		trb_link.hwo = true;

		trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1];
		dwc3_trb_to_hw(&trb_link, trb_link_hw);
	}

	return 0;
}
549
550 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
551 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
552 {
553 struct dwc3_request *req;
554
555 if (!list_empty(&dep->req_queued))
556 dwc3_stop_active_transfer(dwc, dep->number);
557
558 while (!list_empty(&dep->request_list)) {
559 req = next_request(&dep->request_list);
560
561 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
562 }
563 }
564
565 /**
566 * __dwc3_gadget_ep_disable - Disables a HW endpoint
567 * @dep: the endpoint to disable
568 *
 * This function also removes requests which are currently processed by the
570 * hardware and those which are not yet scheduled.
571 * Caller should take care of locking.
572 */
573 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
574 {
575 struct dwc3 *dwc = dep->dwc;
576 u32 reg;
577
578 dwc3_remove_requests(dwc, dep);
579
580 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
581 reg &= ~DWC3_DALEPENA_EP(dep->number);
582 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
583
584 dep->stream_capable = false;
585 dep->desc = NULL;
586 dep->comp_desc = NULL;
587 dep->type = 0;
588 dep->flags = 0;
589
590 return 0;
591 }
592
593 /* -------------------------------------------------------------------------- */
594
/* ep0 is enabled by the driver itself, never by gadget drivers */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}
600
/* ep0 is disabled by the driver itself, never by gadget drivers */
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}
605
606 /* -------------------------------------------------------------------------- */
607
608 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
609 const struct usb_endpoint_descriptor *desc)
610 {
611 struct dwc3_ep *dep;
612 struct dwc3 *dwc;
613 unsigned long flags;
614 int ret;
615
616 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
617 pr_debug("dwc3: invalid parameters\n");
618 return -EINVAL;
619 }
620
621 if (!desc->wMaxPacketSize) {
622 pr_debug("dwc3: missing wMaxPacketSize\n");
623 return -EINVAL;
624 }
625
626 dep = to_dwc3_ep(ep);
627 dwc = dep->dwc;
628
629 switch (usb_endpoint_type(desc)) {
630 case USB_ENDPOINT_XFER_CONTROL:
631 strncat(dep->name, "-control", sizeof(dep->name));
632 break;
633 case USB_ENDPOINT_XFER_ISOC:
634 strncat(dep->name, "-isoc", sizeof(dep->name));
635 break;
636 case USB_ENDPOINT_XFER_BULK:
637 strncat(dep->name, "-bulk", sizeof(dep->name));
638 break;
639 case USB_ENDPOINT_XFER_INT:
640 strncat(dep->name, "-int", sizeof(dep->name));
641 break;
642 default:
643 dev_err(dwc->dev, "invalid endpoint transfer type\n");
644 }
645
646 if (dep->flags & DWC3_EP_ENABLED) {
647 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
648 dep->name);
649 return 0;
650 }
651
652 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
653
654 spin_lock_irqsave(&dwc->lock, flags);
655 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
656 spin_unlock_irqrestore(&dwc->lock, flags);
657
658 return ret;
659 }
660
/*
 * dwc3_gadget_ep_disable - usb_ep_ops.disable for non-control endpoints
 * @ep: the endpoint being disabled
 *
 * Restores the bare "epNin"/"epNout" name (undoing the type suffix added
 * at enable time) and hands off to __dwc3_gadget_ep_disable() under lock.
 */
static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	/* rebuild the default name; drops the "-bulk"/"-isoc"/... suffix */
	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
692
693 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
694 gfp_t gfp_flags)
695 {
696 struct dwc3_request *req;
697 struct dwc3_ep *dep = to_dwc3_ep(ep);
698 struct dwc3 *dwc = dep->dwc;
699
700 req = kzalloc(sizeof(*req), gfp_flags);
701 if (!req) {
702 dev_err(dwc->dev, "not enough memory\n");
703 return NULL;
704 }
705
706 req->epnum = dep->number;
707 req->dep = dep;
708 req->request.dma = DMA_ADDR_INVALID;
709
710 return &req->request;
711 }
712
/* usb_ep_ops.free_request: requests are plain kzalloc()ed objects */
static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	kfree(to_dwc3_request(request));
}
720
/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @dma: DMA address of the buffer (or SG segment) to transfer
 * @length: transfer length in bytes
 * @last: true if this TRB completes the transfer
 * @chain: true if more TRBs of the same request follow
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_trb_hw *trb_hw;
	struct dwc3_trb trb;

	unsigned int cur_slot;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	/* claim the next free slot in the ring */
	trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
	cur_slot = dep->free_slot;
	dep->free_slot++;

	/* Skip the LINK-TRB on ISOC (the slot is consumed but not filled) */
	if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->desc))
		return;

	memset(&trb, 0, sizeof(trb));
	/* first TRB of the request: move it onto the queued list */
	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb_hw;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
	}

	if (usb_endpoint_xfer_isoc(dep->desc)) {
		trb.isp_imi = true;
		trb.csp = true;
	} else {
		trb.chn = chain;
		trb.lst = last;
	}

	if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
		trb.sid_sofn = req->request.stream_id;

	switch (usb_endpoint_type(dep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

		/* IOC every DWC3_TRB_NUM / 4 so we can refill */
		if (!(cur_slot % (DWC3_TRB_NUM / 4)))
			trb.ioc = last;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb.trbctl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	trb.length = length;
	trb.bplh = dma;
	/* handing ownership to the controller; must be the last field set */
	trb.hwo = true;

	dwc3_trb_to_hw(&trb, trb_hw);
}
799
/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * This function goes through the request list and sets up TRBs for the
 * transfers. It returns once there are no more TRBs available or it runs
 * out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request *req, *n;
	u32 trbs_left;
	unsigned int last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/*
	 * If busy and free slot are equal the ring is either full or empty.
	 * If we are starting to process requests then we are empty;
	 * otherwise we are full and must not touch anything.
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an
		 * interrupt after the first request so we start at slot 1
		 * and have 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned length;
		dma_addr_t dma;

		if (req->request.num_mapped_sgs > 0) {
			/* scatter-gather: one TRB per mapped segment */
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				/* last segment of the request ends the chain */
				if (i == (request->num_mapped_sgs - 1)
						|| sg_is_last(s)) {
					last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain);

				if (last_one)
					break;
			}
		} else {
			/* linear buffer: a single TRB covers the request */
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false);

			if (last_one)
				break;
		}
	}
}
908
/*
 * __dwc3_gadget_kick_transfer - start or update a transfer on @dep
 * @dep: endpoint to kick
 * @cmd_param: DEPCMD parameter (transfer resource index for updates)
 * @start_new: nonzero to issue Start Transfer, zero for Update Transfer
 *
 * Caller holds dwc->lock. Returns 0 on success, -EBUSY when a new
 * transfer is requested while the endpoint is already busy, or the
 * error from the endpoint command.
 */
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;
	int ret;
	u32 cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		/* only prepare TRBs when nothing is queued on hardware yet */
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed
		 * from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		/* nothing was prepared: remember to kick on the next queue */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));
	/* the command takes the TRB's DMA address split across two params */
	params.param0 = upper_32_bits(req->trb_dma);
	params.param1 = lower_32_bits(req->trb_dma);

	if (start_new)
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	else
		cmd = DWC3_DEPCMD_UPDATETRANSFER;

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		dwc3_unmap_buffer_from_dma(req);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	/* remember the resource index for later Update/End Transfer */
	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);

	WARN_ON_ONCE(!dep->res_trans_idx);

	return 0;
}
980
981 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
982 {
983 req->request.actual = 0;
984 req->request.status = -EINPROGRESS;
985 req->direction = dep->direction;
986 req->epnum = dep->number;
987
988 /*
989 * We only add to our list of requests now and
990 * start consuming the list once we get XferNotReady
991 * IRQ.
992 *
993 * That way, we avoid doing anything that we don't need
994 * to do now and defer it until the point we receive a
995 * particular token from the Host side.
996 *
997 * This will also avoid Host cancelling URBs due to too
998 * many NACKs.
999 */
1000 dwc3_map_buffer_to_dma(req);
1001 list_add_tail(&req->list, &dep->request_list);
1002
1003 /*
1004 * There is one special case: XferNotReady with
1005 * empty list of requests. We need to kick the
1006 * transfer here in that situation, otherwise
1007 * we will be NAKing forever.
1008 *
1009 * If we get XferNotReady before gadget driver
1010 * has a chance to queue a request, we will ACK
1011 * the IRQ but won't be able to receive the data
1012 * until the next request is queued. The following
1013 * code is handling exactly that.
1014 */
1015 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1016 int ret;
1017 int start_trans;
1018
1019 start_trans = 1;
1020 if (usb_endpoint_xfer_isoc(dep->desc) &&
1021 dep->flags & DWC3_EP_BUSY)
1022 start_trans = 0;
1023
1024 ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
1025 if (ret && ret != -EBUSY) {
1026 struct dwc3 *dwc = dep->dwc;
1027
1028 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1029 dep->name);
1030 }
1031 };
1032
1033 return 0;
1034 }
1035
1036 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1037 gfp_t gfp_flags)
1038 {
1039 struct dwc3_request *req = to_dwc3_request(request);
1040 struct dwc3_ep *dep = to_dwc3_ep(ep);
1041 struct dwc3 *dwc = dep->dwc;
1042
1043 unsigned long flags;
1044
1045 int ret;
1046
1047 if (!dep->desc) {
1048 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1049 request, ep->name);
1050 return -ESHUTDOWN;
1051 }
1052
1053 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
1054 request, ep->name, request->length);
1055
1056 spin_lock_irqsave(&dwc->lock, flags);
1057 ret = __dwc3_gadget_ep_queue(dep, req);
1058 spin_unlock_irqrestore(&dwc->lock, flags);
1059
1060 return ret;
1061 }
1062
1063 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1064 struct usb_request *request)
1065 {
1066 struct dwc3_request *req = to_dwc3_request(request);
1067 struct dwc3_request *r = NULL;
1068
1069 struct dwc3_ep *dep = to_dwc3_ep(ep);
1070 struct dwc3 *dwc = dep->dwc;
1071
1072 unsigned long flags;
1073 int ret = 0;
1074
1075 spin_lock_irqsave(&dwc->lock, flags);
1076
1077 list_for_each_entry(r, &dep->request_list, list) {
1078 if (r == req)
1079 break;
1080 }
1081
1082 if (r != req) {
1083 list_for_each_entry(r, &dep->req_queued, list) {
1084 if (r == req)
1085 break;
1086 }
1087 if (r == req) {
1088 /* wait until it is processed */
1089 dwc3_stop_active_transfer(dwc, dep->number);
1090 goto out0;
1091 }
1092 dev_err(dwc->dev, "request %p was not queued to %s\n",
1093 request, ep->name);
1094 ret = -EINVAL;
1095 goto out0;
1096 }
1097
1098 /* giveback the request */
1099 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1100
1101 out0:
1102 spin_unlock_irqrestore(&dwc->lock, flags);
1103
1104 return ret;
1105 }
1106
1107 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1108 {
1109 struct dwc3_gadget_ep_cmd_params params;
1110 struct dwc3 *dwc = dep->dwc;
1111 int ret;
1112
1113 memset(&params, 0x00, sizeof(params));
1114
1115 if (value) {
1116 if (dep->number == 0 || dep->number == 1) {
1117 /*
1118 * Whenever EP0 is stalled, we will restart
1119 * the state machine, thus moving back to
1120 * Setup Phase
1121 */
1122 dwc->ep0state = EP0_SETUP_PHASE;
1123 }
1124
1125 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1126 DWC3_DEPCMD_SETSTALL, &params);
1127 if (ret)
1128 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1129 value ? "set" : "clear",
1130 dep->name);
1131 else
1132 dep->flags |= DWC3_EP_STALL;
1133 } else {
1134 if (dep->flags & DWC3_EP_WEDGE)
1135 return 0;
1136
1137 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1138 DWC3_DEPCMD_CLEARSTALL, &params);
1139 if (ret)
1140 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1141 value ? "set" : "clear",
1142 dep->name);
1143 else
1144 dep->flags &= ~DWC3_EP_STALL;
1145 }
1146
1147 return ret;
1148 }
1149
1150 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1151 {
1152 struct dwc3_ep *dep = to_dwc3_ep(ep);
1153 struct dwc3 *dwc = dep->dwc;
1154
1155 unsigned long flags;
1156
1157 int ret;
1158
1159 spin_lock_irqsave(&dwc->lock, flags);
1160
1161 if (usb_endpoint_xfer_isoc(dep->desc)) {
1162 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1163 ret = -EINVAL;
1164 goto out;
1165 }
1166
1167 ret = __dwc3_gadget_ep_set_halt(dep, value);
1168 out:
1169 spin_unlock_irqrestore(&dwc->lock, flags);
1170
1171 return ret;
1172 }
1173
/*
 * dwc3_gadget_ep_set_wedge - halt the endpoint and latch the wedge flag
 * @ep: the endpoint being wedged
 *
 * Sets DWC3_EP_WEDGE before halting so that a later attempt to clear
 * the halt via __dwc3_gadget_ep_set_halt() returns early without
 * clearing the stall.
 */
static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep		*dep = to_dwc3_ep(ep);

	dep->flags |= DWC3_EP_WEDGE;

	return dwc3_gadget_ep_set_halt(ep, 1);
}
1182
1183 /* -------------------------------------------------------------------------- */
1184
/*
 * Template descriptor shared by both physical ep0 endpoints.
 * wMaxPacketSize is filled in later, based on the negotiated speed
 * (SuperSpeed default on udc_start, updated again on Connect Done).
 */
static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};
1190
/* endpoint ops used for the two physical ep0 endpoints (control traffic) */
static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
1201
/* endpoint ops used for all non-control endpoints */
static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
1212
1213 /* -------------------------------------------------------------------------- */
1214
1215 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1216 {
1217 struct dwc3 *dwc = gadget_to_dwc(g);
1218 u32 reg;
1219
1220 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1221 return DWC3_DSTS_SOFFN(reg);
1222 }
1223
1224 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1225 {
1226 struct dwc3 *dwc = gadget_to_dwc(g);
1227
1228 unsigned long timeout;
1229 unsigned long flags;
1230
1231 u32 reg;
1232
1233 int ret = 0;
1234
1235 u8 link_state;
1236 u8 speed;
1237
1238 spin_lock_irqsave(&dwc->lock, flags);
1239
1240 /*
1241 * According to the Databook Remote wakeup request should
1242 * be issued only when the device is in early suspend state.
1243 *
1244 * We can check that via USB Link State bits in DSTS register.
1245 */
1246 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1247
1248 speed = reg & DWC3_DSTS_CONNECTSPD;
1249 if (speed == DWC3_DSTS_SUPERSPEED) {
1250 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1251 ret = -EINVAL;
1252 goto out;
1253 }
1254
1255 link_state = DWC3_DSTS_USBLNKST(reg);
1256
1257 switch (link_state) {
1258 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1259 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1260 break;
1261 default:
1262 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1263 link_state);
1264 ret = -EINVAL;
1265 goto out;
1266 }
1267
1268 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1269 if (ret < 0) {
1270 dev_err(dwc->dev, "failed to put link in Recovery\n");
1271 goto out;
1272 }
1273
1274 /* write zeroes to Link Change Request */
1275 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1276 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1277
1278 /* pool until Link State change to ON */
1279 timeout = jiffies + msecs_to_jiffies(100);
1280
1281 while (!(time_after(jiffies, timeout))) {
1282 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1283
1284 /* in HS, means ON */
1285 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1286 break;
1287 }
1288
1289 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1290 dev_err(dwc->dev, "failed to send remote wakeup\n");
1291 ret = -EINVAL;
1292 }
1293
1294 out:
1295 spin_unlock_irqrestore(&dwc->lock, flags);
1296
1297 return ret;
1298 }
1299
1300 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1301 int is_selfpowered)
1302 {
1303 struct dwc3 *dwc = gadget_to_dwc(g);
1304
1305 dwc->is_selfpowered = !!is_selfpowered;
1306
1307 return 0;
1308 }
1309
/*
 * dwc3_gadget_run_stop - set or clear the controller's Run/Stop bit
 * @dwc: pointer to our controller context
 * @is_on: non-zero to start (soft-connect), zero to stop (soft-disconnect)
 *
 * After flipping DCTL.RUN_STOP we busy-wait (up to ~500us) for
 * DSTS.DEVCTRLHLT to reflect the new state.  A timeout is not reported
 * to the caller; we simply stop waiting.
 */
static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
	u32			reg;
	u32			timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		/* also request Rx.Detect as the initial target link state */
		reg &= ~DWC3_DCTL_TRGTULST_MASK;
		reg |= (DWC3_DCTL_RUN_STOP
				| DWC3_DCTL_TRGTULST_RX_DET);
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			/* DEVCTRLHLT clears once the controller is running */
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			break;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ? "connect" : "disconnect");
}
1346
1347 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1348 {
1349 struct dwc3 *dwc = gadget_to_dwc(g);
1350 unsigned long flags;
1351
1352 is_on = !!is_on;
1353
1354 spin_lock_irqsave(&dwc->lock, flags);
1355 dwc3_gadget_run_stop(dwc, is_on);
1356 spin_unlock_irqrestore(&dwc->lock, flags);
1357
1358 return 0;
1359 }
1360
/*
 * dwc3_gadget_start - bind a gadget driver to this UDC
 * @g: our gadget instance
 * @driver: the gadget driver being bound
 *
 * Programs the maximum speed into DCFG, enables both physical ep0
 * endpoints with a SuperSpeed-default descriptor and starts listening
 * for SETUP packets.  Returns 0 on success, -EBUSY if another driver is
 * already bound, or the error from enabling ep0.
 */
static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	struct dwc3_ep		*dep;
	unsigned long		flags;
	int			ret = 0;
	u32			reg;

	spin_lock_irqsave(&dwc->lock, flags);

	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err0;
	}

	dwc->gadget_driver	= driver;
	dwc->gadget.dev.driver	= &driver->driver;

	/* limit the controller to the configured maximum speed */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);
	reg |= dwc->maximum_speed;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	dwc->start_config_issued = false;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err1:
	/* ep0in failed: undo the ep0out enable */
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
1423
1424 static int dwc3_gadget_stop(struct usb_gadget *g,
1425 struct usb_gadget_driver *driver)
1426 {
1427 struct dwc3 *dwc = gadget_to_dwc(g);
1428 unsigned long flags;
1429
1430 spin_lock_irqsave(&dwc->lock, flags);
1431
1432 __dwc3_gadget_ep_disable(dwc->eps[0]);
1433 __dwc3_gadget_ep_disable(dwc->eps[1]);
1434
1435 dwc->gadget_driver = NULL;
1436 dwc->gadget.dev.driver = NULL;
1437
1438 spin_unlock_irqrestore(&dwc->lock, flags);
1439
1440 return 0;
1441 }
/* usb_gadget_ops handed to the UDC core */
static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame	= dwc3_gadget_get_frame,
	.wakeup		= dwc3_gadget_wakeup,
	.set_selfpowered = dwc3_gadget_set_selfpowered,
	.pullup		= dwc3_gadget_pullup,
	.udc_start	= dwc3_gadget_start,
	.udc_stop	= dwc3_gadget_stop,
};
1450
1451 /* -------------------------------------------------------------------------- */
1452
/*
 * dwc3_gadget_init_endpoints - allocate and populate all endpoint structs
 * @dwc: pointer to our controller context
 *
 * Even endpoint numbers are OUT, odd numbers are IN.  Endpoints 0 and 1
 * form the physical ep0 pair and get the ep0 ops; all others get the
 * regular ops, a TRB pool and a slot on the gadget's ep_list.
 *
 * NOTE(review): on a mid-loop allocation failure the endpoints (and TRB
 * pools) allocated so far are not freed here — verify the caller's error
 * path releases them.
 */
static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep			*dep;
	u8				epnum;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
		if (!dep) {
			dev_err(dwc->dev, "can't allocate endpoint %d\n",
					epnum);
			return -ENOMEM;
		}

		dep->dwc = dwc;
		dep->number = epnum;
		dwc->eps[epnum] = dep;

		/* names look like ep0out, ep0in, ep1out, ep1in, ... */
		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
				(epnum & 1) ? "in" : "out");
		dep->endpoint.name = dep->name;
		dep->direction = (epnum & 1);

		if (epnum == 0 || epnum == 1) {
			dep->endpoint.maxpacket = 512;
			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
			if (!epnum)
				dwc->gadget.ep0 = &dep->endpoint;
		} else {
			int		ret;

			dep->endpoint.maxpacket = 1024;
			dep->endpoint.max_streams = 15;
			dep->endpoint.ops = &dwc3_gadget_ep_ops;
			list_add_tail(&dep->endpoint.ep_list,
					&dwc->gadget.ep_list);

			ret = dwc3_alloc_trb_pool(dep);
			if (ret)
				return ret;
		}

		INIT_LIST_HEAD(&dep->request_list);
		INIT_LIST_HEAD(&dep->req_queued);
	}

	return 0;
}
1502
/*
 * dwc3_gadget_free_endpoints - release everything init_endpoints allocated
 * @dwc: pointer to our controller context
 *
 * NOTE(review): dwc3_free_trb_pool() is also called for endpoints 0 and 1,
 * which never had a pool allocated — presumably it tolerates that; confirm.
 */
static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep			*dep;
	u8				epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		dwc3_free_trb_pool(dep);

		/* only the non-ep0 endpoints were added to ep_list */
		if (epnum != 0 && epnum != 1)
			list_del(&dep->endpoint.ep_list);

		kfree(dep);
	}
}
1518
/* release callback for the gadget device; nothing to free here */
static void dwc3_gadget_release(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
}
1523
1524 /* -------------------------------------------------------------------------- */
/*
 * dwc3_cleanup_done_reqs - give back completed requests on this endpoint
 * @dwc: pointer to our controller context
 * @dep: the endpoint whose queued requests are processed
 * @event: the transfer event that triggered the cleanup
 * @status: completion status handed to dwc3_gadget_giveback()
 *
 * Walks req_queued, translating each TRB and giving the request back,
 * until a short packet or a TRB carrying the event's LST/IOC flag is
 * reached.  Returns 1 when the endpoint should be marked not-busy,
 * 0 when an IOC event means more transfers are still in flight.
 */
static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request	*req;
	struct dwc3_trb		trb;
	unsigned int		count;
	unsigned int		s_pkt = 0;

	do {
		req = next_request(&dep->req_queued);
		if (!req) {
			/* event for an endpoint with nothing queued */
			WARN_ON_ONCE(1);
			return 1;
		}

		/* convert the hardware TRB into native (CPU) layout */
		dwc3_trb_to_nat(req->trb, &trb);

		if (trb.hwo && status != -ESHUTDOWN)
			/*
			 * We continue despite the error. There is not much
			 * we can do. If we don't clean it up, we loop for
			 * ever. If we skip the TRB, it gets overwritten and
			 * reused after a while since we use them in a ring
			 * buffer. A BUG() would help. Let's hope that if
			 * this occurs, someone fixes the root cause instead
			 * of looking away :)
			 */
			dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
					dep->name, req->trb);
		count = trb.length;

		if (dep->direction) {
			/* IN: a non-zero residue means the transfer was cut short */
			if (count) {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			if (count && (event->status & DEPEVT_STATUS_SHORT))
				s_pkt = 1;
		}

		/*
		 * We assume here we will always receive the entire data block
		 * which we should receive. Meaning, if we program RX to
		 * receive 4K but we receive only 2K, we assume that's all we
		 * should receive and we simply bounce the request back to the
		 * gadget driver for further processing.
		 */
		req->request.actual += req->request.length - count;
		dwc3_gadget_giveback(dep, req, status);
		if (s_pkt)
			break;
		if ((event->status & DEPEVT_STATUS_LST) && trb.lst)
			break;
		if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
			break;
	} while (1);

	/* IOC without LST: the transfer is still active on the endpoint */
	if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
		return 0;
	return 1;
}
1587
/*
 * dwc3_endpoint_transfer_complete - handle XferComplete/XferInProgress
 * @dwc: pointer to our controller context
 * @dep: the endpoint the event belongs to
 * @event: the transfer event
 * @start_new: unused here; distinguishes XferComplete from XferInProgress
 *             at the call sites
 */
static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
		int start_new)
{
	unsigned		status = 0;
	int			clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy) {
		dep->flags &= ~DWC3_EP_BUSY;
		dep->res_trans_idx = 0;
	}

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32		reg;
		int		i;

		/* only re-enable U1/U2 once no endpoint has pending transfers */
		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			struct dwc3_ep	*dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->req_queued))
				return;
		}

		/* restore the U1/U2 bits saved by the 1st half */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}
1629
/*
 * dwc3_gadget_start_isoc - kick a transfer on an Isochronous endpoint
 * @dwc: pointer to our controller context
 * @dep: the isochronous endpoint
 * @event: the XferNotReady event carrying the current (micro-)frame
 *
 * Aligns the frame number from the event down to the endpoint's interval
 * and schedules the transfer four intervals into the future.
 */
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 uf;

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
			dep->name);
		return;
	}

	if (event->parameters) {
		u32 mask;

		/* round down to a multiple of the endpoint interval;
		 * interval is assumed to be a power of two */
		mask = ~(dep->interval - 1);
		uf = event->parameters & mask;
		/* 4 micro frames in the future */
		uf += dep->interval * 4;
	} else {
		uf = 0;
	}

	__dwc3_gadget_kick_transfer(dep, uf, 1);
}
1654
/*
 * dwc3_process_ep_cmd_complete - finish an End Transfer command
 * @dep: the endpoint the command completed on
 * @event: the command-complete event
 *
 * Purges every request still sitting on req_queued with -ESHUTDOWN.
 */
static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_event_depevt mod_ev = *event;

	/*
	 * We were asked to remove one request. It is possible that this
	 * request and a few others were started together and have the same
	 * transfer index. Since we stopped the complete endpoint we don't
	 * know how many requests were already completed (and not yet
	 * reported) and how many could be completed later. We purge them
	 * all until the end of the list.
	 *
	 * Forcing LST into the event status makes dwc3_cleanup_done_reqs()
	 * drain the whole queue.
	 */
	mod_ev.status = DEPEVT_STATUS_LST;
	dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
	dep->flags &= ~DWC3_EP_BUSY;
	/* pending requets are ignored and are queued on XferNotReady */
}
1674
1675 static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
1676 const struct dwc3_event_depevt *event)
1677 {
1678 u32 param = event->parameters;
1679 u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
1680
1681 switch (cmd_type) {
1682 case DWC3_DEPCMD_ENDTRANSFER:
1683 dwc3_process_ep_cmd_complete(dep, event);
1684 break;
1685 case DWC3_DEPCMD_STARTTRANSFER:
1686 dep->res_trans_idx = param & 0x7f;
1687 break;
1688 default:
1689 printk(KERN_ERR "%s() unknown /unexpected type: %d\n",
1690 __func__, cmd_type);
1691 break;
1692 };
1693 }
1694
/*
 * dwc3_endpoint_interrupt - top-level dispatcher for endpoint events
 * @dwc: pointer to our controller context
 * @event: the endpoint event to handle
 *
 * Events for the two physical ep0 endpoints are forwarded to the ep0
 * handler; everything else is dispatched by event type here.
 */
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep;
	u8			epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
			dwc3_ep_event_string(event->endpoint_event));

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		/* XferComplete is only expected for non-isochronous endpoints */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		/* XferInProgress is only expected for isochronous endpoints */
		if (!usb_endpoint_xfer_isoc(dep->desc)) {
			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status &
					DEPEVT_STATUS_TRANSFER_ACTIVE
					? "Transfer Active"
					: "Transfer Not Active");

			/* -EBUSY just means a transfer is already running */
			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		/* streams only exist on (SuperSpeed) bulk endpoints */
		if (!usb_endpoint_xfer_bulk(dep->desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dev_vdbg(dwc->dev, "Stream %d found and started\n",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_ep_cmd_compl(dep, event);
		break;
	}
}
1778
/*
 * dwc3_disconnect_gadget - notify the gadget driver of a disconnect
 * @dwc: pointer to our controller context
 *
 * Called with dwc->lock held; the lock is dropped around the callback
 * so the gadget driver may take it (or sleep) itself.
 */
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}
1787
/*
 * dwc3_stop_active_transfer - issue End Transfer for an endpoint
 * @dwc: pointer to our controller context
 * @epnum: physical endpoint number to stop
 *
 * Uses the transfer resource index saved from the Start Transfer
 * command completion; does nothing (beyond a WARN) when no transfer
 * is active.
 */
static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
{
	struct dwc3_ep *dep;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	dep = dwc->eps[epnum];

	WARN_ON(!dep->res_trans_idx);
	if (dep->res_trans_idx) {
		cmd = DWC3_DEPCMD_ENDTRANSFER;
		/* ForceRM + IOC so we get a command-complete event */
		cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
		cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
		memset(&params, 0, sizeof(params));
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
		WARN_ON_ONCE(ret);
		dep->res_trans_idx = 0;
	}
}
1808
1809 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1810 {
1811 u32 epnum;
1812
1813 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1814 struct dwc3_ep *dep;
1815
1816 dep = dwc->eps[epnum];
1817 if (!(dep->flags & DWC3_EP_ENABLED))
1818 continue;
1819
1820 dwc3_remove_requests(dwc, dep);
1821 }
1822 }
1823
1824 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1825 {
1826 u32 epnum;
1827
1828 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1829 struct dwc3_ep *dep;
1830 struct dwc3_gadget_ep_cmd_params params;
1831 int ret;
1832
1833 dep = dwc->eps[epnum];
1834
1835 if (!(dep->flags & DWC3_EP_STALL))
1836 continue;
1837
1838 dep->flags &= ~DWC3_EP_STALL;
1839
1840 memset(&params, 0, sizeof(params));
1841 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1842 DWC3_DEPCMD_CLEARSTALL, &params);
1843 WARN_ON_ONCE(ret);
1844 }
1845 }
1846
/*
 * dwc3_gadget_disconnect_interrupt - handle a Disconnect device event
 * @dwc: pointer to our controller context
 *
 * Tears down all active transfers, notifies the gadget driver and
 * resets our speed/setup-pending bookkeeping.
 */
static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	dev_vdbg(dwc->dev, "%s\n", __func__);
#if 0
	XXX
	U1/U2 is powersave optimization. Skip it for now. Anyway we need to
	enable it before we can disable it.

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
#endif

	dwc3_stop_active_transfers(dwc);
	dwc3_disconnect_gadget(dwc);
	dwc->start_config_issued = false;

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
}
1870
1871 static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
1872 {
1873 u32 reg;
1874
1875 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1876
1877 if (on)
1878 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
1879 else
1880 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1881
1882 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1883 }
1884
1885 static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
1886 {
1887 u32 reg;
1888
1889 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1890
1891 if (on)
1892 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
1893 else
1894 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1895
1896 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1897 }
1898
/*
 * dwc3_gadget_reset_interrupt - handle a USB Reset device event
 * @dwc: pointer to our controller context
 *
 * Moves the device back to Default state: cleans up transfers and
 * stalls, clears test mode and the device address, and re-enables
 * both PHYs.
 */
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32			reg;

	dev_vdbg(dwc->dev, "%s\n", __func__);

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a XferNotReady
	 * event on EP0 and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	/* after reset -> Default State */
	dwc->dev_state = DWC3_DEFAULT_STATE;

	/* Enable PHYs */
	dwc3_gadget_usb2_phy_power(dwc, true);
	dwc3_gadget_usb3_phy_power(dwc, true);

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
		dwc3_disconnect_gadget(dwc);

	/* leave any test mode that may have been set */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	dwc3_stop_active_transfers(dwc);
	dwc3_clear_stall_all_ep(dwc);
	dwc->start_config_issued = false;

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
1959
/*
 * dwc3_update_ram_clk_sel - reprogram GCTL.RAMClkSel after connect
 * @dwc: pointer to our controller context
 * @speed: connection speed from DSTS
 *
 * Only applies at SuperSpeed; at other speeds the reset default is kept.
 */
static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
{
	u32 reg;
	u32 usb30_clock = DWC3_GCTL_CLK_BUS;

	/*
	 * We change the clock only at SS but I dunno why I would want to do
	 * this. Maybe it becomes part of the power saving plan.
	 */

	if (speed != DWC3_DSTS_SUPERSPEED)
		return;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 *
	 * NOTE(review): usb30_clock is a constant here, so this check is
	 * currently dead — it only matters if the clock selection above
	 * ever becomes configurable.
	 */
	if (!usb30_clock)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}
1984
1985 static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
1986 {
1987 switch (speed) {
1988 case USB_SPEED_SUPER:
1989 dwc3_gadget_usb2_phy_power(dwc, false);
1990 break;
1991 case USB_SPEED_HIGH:
1992 case USB_SPEED_FULL:
1993 case USB_SPEED_LOW:
1994 dwc3_gadget_usb3_phy_power(dwc, false);
1995 break;
1996 }
1997 }
1998
1999 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2000 {
2001 struct dwc3_gadget_ep_cmd_params params;
2002 struct dwc3_ep *dep;
2003 int ret;
2004 u32 reg;
2005 u8 speed;
2006
2007 dev_vdbg(dwc->dev, "%s\n", __func__);
2008
2009 memset(&params, 0x00, sizeof(params));
2010
2011 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2012 speed = reg & DWC3_DSTS_CONNECTSPD;
2013 dwc->speed = speed;
2014
2015 dwc3_update_ram_clk_sel(dwc, speed);
2016
2017 switch (speed) {
2018 case DWC3_DCFG_SUPERSPEED:
2019 /*
2020 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2021 * would cause a missing USB3 Reset event.
2022 *
2023 * In such situations, we should force a USB3 Reset
2024 * event by calling our dwc3_gadget_reset_interrupt()
2025 * routine.
2026 *
2027 * Refers to:
2028 *
2029 * STAR#9000483510: RTL: SS : USB3 reset event may
2030 * not be generated always when the link enters poll
2031 */
2032 if (dwc->revision < DWC3_REVISION_190A)
2033 dwc3_gadget_reset_interrupt(dwc);
2034
2035 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2036 dwc->gadget.ep0->maxpacket = 512;
2037 dwc->gadget.speed = USB_SPEED_SUPER;
2038 break;
2039 case DWC3_DCFG_HIGHSPEED:
2040 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2041 dwc->gadget.ep0->maxpacket = 64;
2042 dwc->gadget.speed = USB_SPEED_HIGH;
2043 break;
2044 case DWC3_DCFG_FULLSPEED2:
2045 case DWC3_DCFG_FULLSPEED1:
2046 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2047 dwc->gadget.ep0->maxpacket = 64;
2048 dwc->gadget.speed = USB_SPEED_FULL;
2049 break;
2050 case DWC3_DCFG_LOWSPEED:
2051 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2052 dwc->gadget.ep0->maxpacket = 8;
2053 dwc->gadget.speed = USB_SPEED_LOW;
2054 break;
2055 }
2056
2057 /* Disable unneded PHY */
2058 dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
2059
2060 dep = dwc->eps[0];
2061 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
2062 if (ret) {
2063 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2064 return;
2065 }
2066
2067 dep = dwc->eps[1];
2068 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
2069 if (ret) {
2070 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2071 return;
2072 }
2073
2074 /*
2075 * Configure PHY via GUSB3PIPECTLn if required.
2076 *
2077 * Update GTXFIFOSIZn
2078 *
2079 * In both cases reset values should be sufficient.
2080 */
2081 }
2082
2083 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2084 {
2085 dev_vdbg(dwc->dev, "%s\n", __func__);
2086
2087 /*
2088 * TODO take core out of low power mode when that's
2089 * implemented.
2090 */
2091
2092 dwc->gadget_driver->resume(&dwc->gadget);
2093 }
2094
/*
 * dwc3_gadget_linksts_change_interrupt - track USB link state changes
 * @dwc: pointer to our controller context
 * @evtinfo: event info field carrying the new link state
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32	u1u2;
			u32	reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				/* save the bits so the 2nd half (in
				 * dwc3_endpoint_transfer_complete) can
				 * restore them */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	dwc->link_state = next;

	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
2150
/*
 * dwc3_gadget_interrupt - dispatch a device-specific event
 * @dwc: pointer to our controller context
 * @event: the device event to handle
 *
 * Events without a dedicated handler are only logged.
 */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_SOF:
		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
		break;
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
		dev_vdbg(dwc->dev, "Erratic Error\n");
		break;
	case DWC3_DEVICE_EVENT_CMD_CMPL:
		dev_vdbg(dwc->dev, "Command Complete\n");
		break;
	case DWC3_DEVICE_EVENT_OVERFLOW:
		dev_vdbg(dwc->dev, "Overflow\n");
		break;
	default:
		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}
2189
2190 static void dwc3_process_event_entry(struct dwc3 *dwc,
2191 const union dwc3_event *event)
2192 {
2193 /* Endpoint IRQ, handle it and return early */
2194 if (event->type.is_devspec == 0) {
2195 /* depevt */
2196 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2197 }
2198
2199 switch (event->type.type) {
2200 case DWC3_EVENT_TYPE_DEV:
2201 dwc3_gadget_interrupt(dwc, &event->devt);
2202 break;
2203 /* REVISIT what to do with Carkit and I2C events ? */
2204 default:
2205 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2206 }
2207 }
2208
/*
 * dwc3_process_event_buf - drain one hardware event buffer
 * @dwc: pointer to our controller context
 * @buf: index of the event buffer to process
 *
 * Reads GEVNTCOUNT, processes that many bytes of events from the ring
 * and acknowledges each 4-byte entry back to GEVNTCOUNT as it goes.
 * Returns IRQ_HANDLED when any events were processed, IRQ_NONE otherwise.
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	int left;
	u32 count;

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt = dwc->ev_buffs[buf];
	left = count;

	while (left > 0) {
		union dwc3_event event;

		/* read the event in place; no copy of the whole buffer */
		event.raw = *(u32 *) (evt->buf + evt->lpos);

		dwc3_process_event_entry(dwc, &event);
		/*
		 * XXX we wrap around correctly to the next entry as almost all
		 * entries are 4 bytes in size. There is one entry which has 12
		 * bytes which is a regular entry followed by 8 bytes data. ATM
		 * I don't know how things are organized if were get next to the
		 * a boundary so I worry about that once we try to handle that.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		/* ack this entry so the hardware can reuse its slot */
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	return IRQ_HANDLED;
}
2244
2245 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2246 {
2247 struct dwc3 *dwc = _dwc;
2248 int i;
2249 irqreturn_t ret = IRQ_NONE;
2250
2251 spin_lock(&dwc->lock);
2252
2253 for (i = 0; i < dwc->num_event_buffers; i++) {
2254 irqreturn_t status;
2255
2256 status = dwc3_process_event_buf(dwc, i);
2257 if (status == IRQ_HANDLED)
2258 ret = status;
2259 }
2260
2261 spin_unlock(&dwc->lock);
2262
2263 return ret;
2264 }
2265
2266 /**
2267 * dwc3_gadget_init - Initializes gadget related registers
2268 * @dwc: Pointer to out controller context structure
2269 *
2270 * Returns 0 on success otherwise negative errno.
2271 */
2272 int __devinit dwc3_gadget_init(struct dwc3 *dwc)
2273 {
2274 u32 reg;
2275 int ret;
2276 int irq;
2277
2278 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2279 &dwc->ctrl_req_addr, GFP_KERNEL);
2280 if (!dwc->ctrl_req) {
2281 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2282 ret = -ENOMEM;
2283 goto err0;
2284 }
2285
2286 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2287 &dwc->ep0_trb_addr, GFP_KERNEL);
2288 if (!dwc->ep0_trb) {
2289 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2290 ret = -ENOMEM;
2291 goto err1;
2292 }
2293
2294 dwc->setup_buf = dma_alloc_coherent(dwc->dev,
2295 sizeof(*dwc->setup_buf) * 2,
2296 &dwc->setup_buf_addr, GFP_KERNEL);
2297 if (!dwc->setup_buf) {
2298 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2299 ret = -ENOMEM;
2300 goto err2;
2301 }
2302
2303 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2304 512, &dwc->ep0_bounce_addr, GFP_KERNEL);
2305 if (!dwc->ep0_bounce) {
2306 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2307 ret = -ENOMEM;
2308 goto err3;
2309 }
2310
2311 dev_set_name(&dwc->gadget.dev, "gadget");
2312
2313 dwc->gadget.ops = &dwc3_gadget_ops;
2314 dwc->gadget.max_speed = USB_SPEED_SUPER;
2315 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2316 dwc->gadget.dev.parent = dwc->dev;
2317 dwc->gadget.sg_supported = true;
2318
2319 dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
2320
2321 dwc->gadget.dev.dma_parms = dwc->dev->dma_parms;
2322 dwc->gadget.dev.dma_mask = dwc->dev->dma_mask;
2323 dwc->gadget.dev.release = dwc3_gadget_release;
2324 dwc->gadget.name = "dwc3-gadget";
2325
2326 /*
2327 * REVISIT: Here we should clear all pending IRQs to be
2328 * sure we're starting from a well known location.
2329 */
2330
2331 ret = dwc3_gadget_init_endpoints(dwc);
2332 if (ret)
2333 goto err4;
2334
2335 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2336
2337 ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
2338 "dwc3", dwc);
2339 if (ret) {
2340 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2341 irq, ret);
2342 goto err5;
2343 }
2344
2345 /* Enable all but Start and End of Frame IRQs */
2346 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
2347 DWC3_DEVTEN_EVNTOVERFLOWEN |
2348 DWC3_DEVTEN_CMDCMPLTEN |
2349 DWC3_DEVTEN_ERRTICERREN |
2350 DWC3_DEVTEN_WKUPEVTEN |
2351 DWC3_DEVTEN_ULSTCNGEN |
2352 DWC3_DEVTEN_CONNECTDONEEN |
2353 DWC3_DEVTEN_USBRSTEN |
2354 DWC3_DEVTEN_DISCONNEVTEN);
2355 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2356
2357 ret = device_register(&dwc->gadget.dev);
2358 if (ret) {
2359 dev_err(dwc->dev, "failed to register gadget device\n");
2360 put_device(&dwc->gadget.dev);
2361 goto err6;
2362 }
2363
2364 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2365 if (ret) {
2366 dev_err(dwc->dev, "failed to register udc\n");
2367 goto err7;
2368 }
2369
2370 return 0;
2371
2372 err7:
2373 device_unregister(&dwc->gadget.dev);
2374
2375 err6:
2376 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2377 free_irq(irq, dwc);
2378
2379 err5:
2380 dwc3_gadget_free_endpoints(dwc);
2381
2382 err4:
2383 dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2384 dwc->ep0_bounce_addr);
2385
2386 err3:
2387 dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2388 dwc->setup_buf, dwc->setup_buf_addr);
2389
2390 err2:
2391 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2392 dwc->ep0_trb, dwc->ep0_trb_addr);
2393
2394 err1:
2395 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2396 dwc->ctrl_req, dwc->ctrl_req_addr);
2397
2398 err0:
2399 return ret;
2400 }
2401
/**
 * dwc3_gadget_exit - tears down gadget side of the controller
 * @dwc: pointer to our controller context structure
 *
 * Undoes dwc3_gadget_init() in reverse order of initialization:
 * deregister from the UDC core first (so no new requests arrive),
 * mask device events and release the IRQ, free the endpoints, then
 * free the ep0 coherent DMA buffers and finally unregister the
 * gadget device so its release callback runs last.
 */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	int irq;

	usb_del_gadget_udc(&dwc->gadget);
	irq = platform_get_irq(to_platform_device(dwc->dev), 0);

	/* mask all device event interrupts before releasing the line */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
	free_irq(irq, dwc);

	dwc3_gadget_free_endpoints(dwc);

	/* 512 matches the ep0 bounce buffer size allocated in init */
	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
			dwc->ep0_bounce_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
			dwc->setup_buf, dwc->setup_buf_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

	device_unregister(&dwc->gadget.dev);
}
This page took 0.082255 seconds and 5 git commands to generate.