usb: dwc3: shorten long delay in dwc3_gadget_set_link_state()
[deliverable/linux.git] / drivers / usb / dwc3 / gadget.c
1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39#include <linux/kernel.h>
40#include <linux/delay.h>
41#include <linux/slab.h>
42#include <linux/spinlock.h>
43#include <linux/platform_device.h>
44#include <linux/pm_runtime.h>
45#include <linux/interrupt.h>
46#include <linux/io.h>
47#include <linux/list.h>
48#include <linux/dma-mapping.h>
49
50#include <linux/usb/ch9.h>
51#include <linux/usb/gadget.h>
52
53#include "core.h"
54#include "gadget.h"
55#include "io.h"
56
57#define DMA_ADDR_INVALID (~(dma_addr_t)0)
58
59/**
60 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
61 * @dwc: pointer to our context structure
62 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
63 *
64 * Caller should take care of locking. This function will
65 * return 0 on success or -EINVAL if wrong Test Selector
66 * is passed
67 */
68int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
69{
70 u32 reg;
71
72 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
73 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
74
75 switch (mode) {
76 case TEST_J:
77 case TEST_K:
78 case TEST_SE0_NAK:
79 case TEST_PACKET:
80 case TEST_FORCE_EN:
81 reg |= mode << 1;
82 break;
83 default:
84 return -EINVAL;
85 }
86
87 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
88
89 return 0;
90}
91
92/**
93 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
94 * @dwc: pointer to our context structure
95 * @state: the state to put link into
96 *
97 * Caller should take care of locking. This function will
98 * return 0 on success or -ETIMEDOUT.
99 */
100int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
101{
102 int retries = 10000;
103 u32 reg;
104
105 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
106 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
107
108 /* set requested state */
109 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
110 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
111
112 /* wait for a change in DSTS */
113 while (--retries) {
114 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
115
116 if (DWC3_DSTS_USBLNKST(reg) == state)
117 return 0;
118
119 udelay(5);
120 }
121
122 dev_vdbg(dwc->dev, "link state change request timed out\n");
123
124 return -ETIMEDOUT;
125}
126
127/**
128 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
129 * @dwc: pointer to our context structure
130 *
131 * This function will do a best-effort FIFO allocation in order
132 * to improve FIFO usage and throughput, while still allowing
133 * us to enable as many endpoints as possible.
134 *
135 * Keep in mind that this operation will be highly dependent
136 * on the configured size for RAM1 - which contains TxFifo -,
137 * the number of endpoints enabled in the coreConsultant tool, and
138 * the width of the Master Bus.
139 *
140 * In the ideal world, we would always be able to satisfy the
141 * following equation:
142 *
143 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
144 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
145 *
146 * Unfortunately, due to many variables that's not always the case.
147 */
148int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
149{
150 int last_fifo_depth = 0;
151 int ram1_depth;
152 int fifo_size;
153 int mdwidth;
154 int num;
155
156 if (!dwc->needs_fifo_resize)
157 return 0;
158
159 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
160 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
161
162 /* MDWIDTH is represented in bits, we need it in bytes */
163 mdwidth >>= 3;
164
165 /*
166 * FIXME For now we will only allocate 1 wMaxPacketSize space
167 * for each enabled endpoint, later patches will come to
168 * improve this algorithm so that we better use the internal
169 * FIFO space
170 */
171 for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
172 struct dwc3_ep *dep = dwc->eps[num];
173 int fifo_number = dep->number >> 1;
174 int mult = 1;
175 int tmp;
176
177 if (!(dep->number & 1))
178 continue;
179
180 if (!(dep->flags & DWC3_EP_ENABLED))
181 continue;
182
183 if (usb_endpoint_xfer_bulk(dep->desc)
184 || usb_endpoint_xfer_isoc(dep->desc))
185 mult = 3;
186
187 /*
188 * REVISIT: the following assumes we will always have enough
189 * space available on the FIFO RAM for all possible use cases.
190 * Make sure that's true somehow and change FIFO allocation
191 * accordingly.
192 *
193 * If we have Bulk or Isochronous endpoints, we want
194 * them to be able to be very, very fast. So we're giving
195 * those endpoints a fifo_size which is enough for 3 full
196 * packets
197 */
198 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
199 tmp += mdwidth;
200
201 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
202
203 fifo_size |= (last_fifo_depth << 16);
204
205 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
206 dep->name, last_fifo_depth, fifo_size & 0xffff);
207
208 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
209 fifo_size);
210
211 last_fifo_depth += (fifo_size & 0xffff);
212 }
213
214 return 0;
215}
216
217void dwc3_map_buffer_to_dma(struct dwc3_request *req)
218{
219 struct dwc3 *dwc = req->dep->dwc;
220
221 if (req->request.length == 0) {
222 /* req->request.dma = dwc->setup_buf_addr; */
223 return;
224 }
225
226 if (req->request.num_sgs) {
227 int mapped;
228
229 mapped = dma_map_sg(dwc->dev, req->request.sg,
230 req->request.num_sgs,
231 req->direction ? DMA_TO_DEVICE
232 : DMA_FROM_DEVICE);
233 if (mapped < 0) {
234 dev_err(dwc->dev, "failed to map SGs\n");
235 return;
236 }
237
238 req->request.num_mapped_sgs = mapped;
239 return;
240 }
241
242 if (req->request.dma == DMA_ADDR_INVALID) {
243 req->request.dma = dma_map_single(dwc->dev, req->request.buf,
244 req->request.length, req->direction
245 ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
246 req->mapped = true;
247 }
248}
249
250void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
251{
252 struct dwc3 *dwc = req->dep->dwc;
253
254 if (req->request.length == 0) {
255 req->request.dma = DMA_ADDR_INVALID;
256 return;
257 }
258
259 if (req->request.num_mapped_sgs) {
260 req->request.dma = DMA_ADDR_INVALID;
261 dma_unmap_sg(dwc->dev, req->request.sg,
262 req->request.num_mapped_sgs,
263 req->direction ? DMA_TO_DEVICE
264 : DMA_FROM_DEVICE);
265
266 req->request.num_mapped_sgs = 0;
267 return;
268 }
269
270 if (req->mapped) {
271 dma_unmap_single(dwc->dev, req->request.dma,
272 req->request.length, req->direction
273 ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
274 req->mapped = 0;
275 req->request.dma = DMA_ADDR_INVALID;
276 }
277}
278
279void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
280 int status)
281{
282 struct dwc3 *dwc = dep->dwc;
283
284 if (req->queued) {
285 if (req->request.num_mapped_sgs)
286 dep->busy_slot += req->request.num_mapped_sgs;
287 else
288 dep->busy_slot++;
289
290 /*
291 * Skip LINK TRB. We can't use req->trb and check for
292 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we just
293 * completed (not the LINK TRB).
294 */
295 if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
296 usb_endpoint_xfer_isoc(dep->desc))
297 dep->busy_slot++;
298 }
299 list_del(&req->list);
300 req->trb = NULL;
301
302 if (req->request.status == -EINPROGRESS)
303 req->request.status = status;
304
305 dwc3_unmap_buffer_from_dma(req);
306
307 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
308 req, dep->name, req->request.actual,
309 req->request.length, status);
310
311 spin_unlock(&dwc->lock);
312 req->request.complete(&req->dep->endpoint, &req->request);
313 spin_lock(&dwc->lock);
314}
315
316static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
317{
318 switch (cmd) {
319 case DWC3_DEPCMD_DEPSTARTCFG:
320 return "Start New Configuration";
321 case DWC3_DEPCMD_ENDTRANSFER:
322 return "End Transfer";
323 case DWC3_DEPCMD_UPDATETRANSFER:
324 return "Update Transfer";
325 case DWC3_DEPCMD_STARTTRANSFER:
326 return "Start Transfer";
327 case DWC3_DEPCMD_CLEARSTALL:
328 return "Clear Stall";
329 case DWC3_DEPCMD_SETSTALL:
330 return "Set Stall";
331 case DWC3_DEPCMD_GETSEQNUMBER:
332 return "Get Data Sequence Number";
333 case DWC3_DEPCMD_SETTRANSFRESOURCE:
334 return "Set Endpoint Transfer Resource";
335 case DWC3_DEPCMD_SETEPCONFIG:
336 return "Set Endpoint Configuration";
337 default:
338 return "UNKNOWN command";
339 }
340}
341
342int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
343 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
344{
345 struct dwc3_ep *dep = dwc->eps[ep];
346 u32 timeout = 500;
347 u32 reg;
348
349 dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
350 dep->name,
351 dwc3_gadget_ep_cmd_string(cmd), params->param0,
352 params->param1, params->param2);
353
354 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
355 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
356 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
357
358 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
359 do {
360 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
361 if (!(reg & DWC3_DEPCMD_CMDACT)) {
362 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
363 DWC3_DEPCMD_STATUS(reg));
364 return 0;
365 }
366
367 /*
368 * We can't sleep here, because it is also called from
369 * interrupt context.
370 */
371 timeout--;
372 if (!timeout)
373 return -ETIMEDOUT;
374
375 udelay(1);
376 } while (1);
377}
378
379static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
380 struct dwc3_trb *trb)
381{
382 u32 offset = (char *) trb - (char *) dep->trb_pool;
383
384 return dep->trb_pool_dma + offset;
385}
386
387static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
388{
389 struct dwc3 *dwc = dep->dwc;
390
391 if (dep->trb_pool)
392 return 0;
393
394 if (dep->number == 0 || dep->number == 1)
395 return 0;
396
397 dep->trb_pool = dma_alloc_coherent(dwc->dev,
398 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
399 &dep->trb_pool_dma, GFP_KERNEL);
400 if (!dep->trb_pool) {
401 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
402 dep->name);
403 return -ENOMEM;
404 }
405
406 return 0;
407}
408
409static void dwc3_free_trb_pool(struct dwc3_ep *dep)
410{
411 struct dwc3 *dwc = dep->dwc;
412
413 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
414 dep->trb_pool, dep->trb_pool_dma);
415
416 dep->trb_pool = NULL;
417 dep->trb_pool_dma = 0;
418}
419
420static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
421{
422 struct dwc3_gadget_ep_cmd_params params;
423 u32 cmd;
424
425 memset(&params, 0x00, sizeof(params));
426
427 if (dep->number != 1) {
428 cmd = DWC3_DEPCMD_DEPSTARTCFG;
429 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
430 if (dep->number > 1) {
431 if (dwc->start_config_issued)
432 return 0;
433 dwc->start_config_issued = true;
434 cmd |= DWC3_DEPCMD_PARAM(2);
435 }
436
437 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
438 }
439
440 return 0;
441}
442
443static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
444 const struct usb_endpoint_descriptor *desc,
445 const struct usb_ss_ep_comp_descriptor *comp_desc)
446{
447 struct dwc3_gadget_ep_cmd_params params;
448
449 memset(&params, 0x00, sizeof(params));
450
451 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
452 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
453 | DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);
454
455 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
456 | DWC3_DEPCFG_XFER_NOT_READY_EN;
457
458 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
459 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
460 | DWC3_DEPCFG_STREAM_EVENT_EN;
461 dep->stream_capable = true;
462 }
463
464 if (usb_endpoint_xfer_isoc(desc))
465 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
466
467 /*
468 * We are doing 1:1 mapping for endpoints, meaning
469 * Physical Endpoints 2 maps to Logical Endpoint 2 and
470 * so on. We consider the direction bit as part of the physical
471 * endpoint number. So USB endpoint 0x81 is 0x03.
472 */
473 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
474
475 /*
476 * We must use the lower 16 TX FIFOs even though
477 * HW might have more
478 */
479 if (dep->direction)
480 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
481
482 if (desc->bInterval) {
483 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
484 dep->interval = 1 << (desc->bInterval - 1);
485 }
486
487 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
488 DWC3_DEPCMD_SETEPCONFIG, &params);
489}
490
491static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
492{
493 struct dwc3_gadget_ep_cmd_params params;
494
495 memset(&params, 0x00, sizeof(params));
496
497 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
498
499 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
500 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
501}
502
503/**
504 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
505 * @dep: endpoint to be initialized
506 * @desc: USB Endpoint Descriptor
507 *
508 * Caller should take care of locking
509 */
510static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
511 const struct usb_endpoint_descriptor *desc,
512 const struct usb_ss_ep_comp_descriptor *comp_desc)
513{
514 struct dwc3 *dwc = dep->dwc;
515 u32 reg;
516 int ret = -ENOMEM;
517
518 if (!(dep->flags & DWC3_EP_ENABLED)) {
519 ret = dwc3_gadget_start_config(dwc, dep);
520 if (ret)
521 return ret;
522 }
523
524 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc);
525 if (ret)
526 return ret;
527
528 if (!(dep->flags & DWC3_EP_ENABLED)) {
529 struct dwc3_trb *trb_st_hw;
530 struct dwc3_trb *trb_link;
531
532 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
533 if (ret)
534 return ret;
535
536 dep->desc = desc;
537 dep->comp_desc = comp_desc;
538 dep->type = usb_endpoint_type(desc);
539 dep->flags |= DWC3_EP_ENABLED;
540
541 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
542 reg |= DWC3_DALEPENA_EP(dep->number);
543 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
544
545 if (!usb_endpoint_xfer_isoc(desc))
546 return 0;
547
548 memset(&trb_link, 0, sizeof(trb_link));
549
550 /* Link TRB for ISOC. The HWO bit is never reset */
551 trb_st_hw = &dep->trb_pool[0];
552
553 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
554
555 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
556 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
557 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
558 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
559 }
560
561 return 0;
562}
563
564static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
565static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
566{
567 struct dwc3_request *req;
568
569 if (!list_empty(&dep->req_queued))
570 dwc3_stop_active_transfer(dwc, dep->number);
571
572 while (!list_empty(&dep->request_list)) {
573 req = next_request(&dep->request_list);
574
575 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
576 }
577}
578
579/**
580 * __dwc3_gadget_ep_disable - Disables a HW endpoint
581 * @dep: the endpoint to disable
582 *
583 * This function also removes requests which are currently processed by the
584 * hardware and those which are not yet scheduled.
585 * Caller should take care of locking.
586 */
587static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
588{
589 struct dwc3 *dwc = dep->dwc;
590 u32 reg;
591
592 dwc3_remove_requests(dwc, dep);
593
594 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
595 reg &= ~DWC3_DALEPENA_EP(dep->number);
596 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
597
598 dep->stream_capable = false;
599 dep->desc = NULL;
600 dep->comp_desc = NULL;
601 dep->type = 0;
602 dep->flags = 0;
603
604 return 0;
605}
606
607/* -------------------------------------------------------------------------- */
608
609static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
610 const struct usb_endpoint_descriptor *desc)
611{
612 return -EINVAL;
613}
614
615static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
616{
617 return -EINVAL;
618}
619
620/* -------------------------------------------------------------------------- */
621
622static int dwc3_gadget_ep_enable(struct usb_ep *ep,
623 const struct usb_endpoint_descriptor *desc)
624{
625 struct dwc3_ep *dep;
626 struct dwc3 *dwc;
627 unsigned long flags;
628 int ret;
629
630 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
631 pr_debug("dwc3: invalid parameters\n");
632 return -EINVAL;
633 }
634
635 if (!desc->wMaxPacketSize) {
636 pr_debug("dwc3: missing wMaxPacketSize\n");
637 return -EINVAL;
638 }
639
640 dep = to_dwc3_ep(ep);
641 dwc = dep->dwc;
642
643 switch (usb_endpoint_type(desc)) {
644 case USB_ENDPOINT_XFER_CONTROL:
645 strlcat(dep->name, "-control", sizeof(dep->name));
646 break;
647 case USB_ENDPOINT_XFER_ISOC:
648 strlcat(dep->name, "-isoc", sizeof(dep->name));
649 break;
650 case USB_ENDPOINT_XFER_BULK:
651 strlcat(dep->name, "-bulk", sizeof(dep->name));
652 break;
653 case USB_ENDPOINT_XFER_INT:
654 strlcat(dep->name, "-int", sizeof(dep->name));
655 break;
656 default:
657 dev_err(dwc->dev, "invalid endpoint transfer type\n");
658 }
659
660 if (dep->flags & DWC3_EP_ENABLED) {
661 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
662 dep->name);
663 return 0;
664 }
665
666 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
667
668 spin_lock_irqsave(&dwc->lock, flags);
669 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
670 spin_unlock_irqrestore(&dwc->lock, flags);
671
672 return ret;
673}
674
675static int dwc3_gadget_ep_disable(struct usb_ep *ep)
676{
677 struct dwc3_ep *dep;
678 struct dwc3 *dwc;
679 unsigned long flags;
680 int ret;
681
682 if (!ep) {
683 pr_debug("dwc3: invalid parameters\n");
684 return -EINVAL;
685 }
686
687 dep = to_dwc3_ep(ep);
688 dwc = dep->dwc;
689
690 if (!(dep->flags & DWC3_EP_ENABLED)) {
691 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
692 dep->name);
693 return 0;
694 }
695
696 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
697 dep->number >> 1,
698 (dep->number & 1) ? "in" : "out");
699
700 spin_lock_irqsave(&dwc->lock, flags);
701 ret = __dwc3_gadget_ep_disable(dep);
702 spin_unlock_irqrestore(&dwc->lock, flags);
703
704 return ret;
705}
706
707static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
708 gfp_t gfp_flags)
709{
710 struct dwc3_request *req;
711 struct dwc3_ep *dep = to_dwc3_ep(ep);
712 struct dwc3 *dwc = dep->dwc;
713
714 req = kzalloc(sizeof(*req), gfp_flags);
715 if (!req) {
716 dev_err(dwc->dev, "not enough memory\n");
717 return NULL;
718 }
719
720 req->epnum = dep->number;
721 req->dep = dep;
722 req->request.dma = DMA_ADDR_INVALID;
723
724 return &req->request;
725}
726
727static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
728 struct usb_request *request)
729{
730 struct dwc3_request *req = to_dwc3_request(request);
731
732 kfree(req);
733}
734
735/**
736 * dwc3_prepare_one_trb - setup one TRB from one request
737 * @dep: endpoint for which this request is prepared
738 * @req: dwc3_request pointer
739 */
740static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
741 struct dwc3_request *req, dma_addr_t dma,
742 unsigned length, unsigned last, unsigned chain)
743{
744 struct dwc3 *dwc = dep->dwc;
745 struct dwc3_trb *trb;
746
747 unsigned int cur_slot;
748
749 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
750 dep->name, req, (unsigned long long) dma,
751 length, last ? " last" : "",
752 chain ? " chain" : "");
753
754 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
755 cur_slot = dep->free_slot;
756 dep->free_slot++;
757
758 /* Skip the LINK-TRB on ISOC */
759 if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
760 usb_endpoint_xfer_isoc(dep->desc))
761 return;
762
763 if (!req->trb) {
764 dwc3_gadget_move_request_queued(req);
765 req->trb = trb;
766 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
767 }
768
769 trb->size = DWC3_TRB_SIZE_LENGTH(length);
770 trb->bpl = lower_32_bits(dma);
771 trb->bph = upper_32_bits(dma);
772
773 switch (usb_endpoint_type(dep->desc)) {
774 case USB_ENDPOINT_XFER_CONTROL:
775 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
776 break;
777
778 case USB_ENDPOINT_XFER_ISOC:
779 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
780
781 /* IOC every DWC3_TRB_NUM / 4 so we can refill */
782 if (!(cur_slot % (DWC3_TRB_NUM / 4)))
783 trb->ctrl |= DWC3_TRB_CTRL_IOC;
784 break;
785
786 case USB_ENDPOINT_XFER_BULK:
787 case USB_ENDPOINT_XFER_INT:
788 trb->ctrl = DWC3_TRBCTL_NORMAL;
789 break;
790 default:
791 /*
792 * This is only possible with faulty memory because we
793 * checked it already :)
794 */
795 BUG();
796 }
797
798 if (usb_endpoint_xfer_isoc(dep->desc)) {
799 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
800 trb->ctrl |= DWC3_TRB_CTRL_CSP;
801 } else {
802 if (chain)
803 trb->ctrl |= DWC3_TRB_CTRL_CHN;
804
805 if (last)
806 trb->ctrl |= DWC3_TRB_CTRL_LST;
807 }
808
809 if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
810 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
811
812 trb->ctrl |= DWC3_TRB_CTRL_HWO;
813}
814
815/*
816 * dwc3_prepare_trbs - setup TRBs from requests
817 * @dep: endpoint for which requests are being prepared
818 * @starting: true if the endpoint is idle and no requests are queued.
819 *
820 * The function goes through the requests list and sets up TRBs for the
821 * transfers. The function returns once there are no more TRBs available or
822 * it runs out of requests.
823 */
824static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
825{
826 struct dwc3_request *req, *n;
827 u32 trbs_left;
828 u32 max;
829 unsigned int last_one = 0;
830
831 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
832
833 /* the first request must not be queued */
834 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
835
836 /* Can't wrap around on a non-isoc EP since there's no link TRB */
837 if (!usb_endpoint_xfer_isoc(dep->desc)) {
838 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
839 if (trbs_left > max)
840 trbs_left = max;
841 }
842
843 /*
844 * If busy & slot are equal then it is either full or empty. If we are
845 * starting to process requests then we are empty. Otherwise we are
846 * full and don't do anything
847 */
848 if (!trbs_left) {
849 if (!starting)
850 return;
851 trbs_left = DWC3_TRB_NUM;
852 /*
853 * In case we start from scratch, we queue the ISOC requests
854 * starting from slot 1. This is done because we use ring
855 * buffer and have no LST bit to stop us. Instead, we place
856 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
857 * after the first request so we start at slot 1 and have
858 * 7 requests proceed before we hit the first IOC.
859 * Other transfer types don't use the ring buffer and are
860 * processed from the first TRB until the last one. Since we
861 * don't wrap around we have to start at the beginning.
862 */
863 if (usb_endpoint_xfer_isoc(dep->desc)) {
864 dep->busy_slot = 1;
865 dep->free_slot = 1;
866 } else {
867 dep->busy_slot = 0;
868 dep->free_slot = 0;
869 }
870 }
871
872 /* The last TRB is a link TRB, not used for xfer */
873 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
874 return;
875
876 list_for_each_entry_safe(req, n, &dep->request_list, list) {
877 unsigned length;
878 dma_addr_t dma;
879
880 if (req->request.num_mapped_sgs > 0) {
881 struct usb_request *request = &req->request;
882 struct scatterlist *sg = request->sg;
883 struct scatterlist *s;
884 int i;
885
886 for_each_sg(sg, s, request->num_mapped_sgs, i) {
887 unsigned chain = true;
888
889 length = sg_dma_len(s);
890 dma = sg_dma_address(s);
891
892 if (i == (request->num_mapped_sgs - 1) ||
893 sg_is_last(s)) {
894 last_one = true;
895 chain = false;
896 }
897
898 trbs_left--;
899 if (!trbs_left)
900 last_one = true;
901
902 if (last_one)
903 chain = false;
904
905 dwc3_prepare_one_trb(dep, req, dma, length,
906 last_one, chain);
907
908 if (last_one)
909 break;
910 }
911 } else {
912 dma = req->request.dma;
913 length = req->request.length;
914 trbs_left--;
915
916 if (!trbs_left)
917 last_one = 1;
918
919 /* Is this the last request? */
920 if (list_is_last(&req->list, &dep->request_list))
921 last_one = 1;
922
923 dwc3_prepare_one_trb(dep, req, dma, length,
924 last_one, false);
925
926 if (last_one)
927 break;
928 }
929 }
930}
931
932static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
933 int start_new)
934{
935 struct dwc3_gadget_ep_cmd_params params;
936 struct dwc3_request *req;
937 struct dwc3 *dwc = dep->dwc;
938 int ret;
939 u32 cmd;
940
941 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
942 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
943 return -EBUSY;
944 }
945 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
946
947 /*
948 * If we are getting here after a short-out-packet we don't enqueue any
949 * new requests as we try to set the IOC bit only on the last request.
950 */
951 if (start_new) {
952 if (list_empty(&dep->req_queued))
953 dwc3_prepare_trbs(dep, start_new);
954
955 /* req points to the first request which will be sent */
956 req = next_request(&dep->req_queued);
957 } else {
958 dwc3_prepare_trbs(dep, start_new);
959
960 /*
961 * req points to the first request where HWO changed from 0 to 1
962 */
963 req = next_request(&dep->req_queued);
964 }
965 if (!req) {
966 dep->flags |= DWC3_EP_PENDING_REQUEST;
967 return 0;
968 }
969
970 memset(&params, 0, sizeof(params));
971 params.param0 = upper_32_bits(req->trb_dma);
972 params.param1 = lower_32_bits(req->trb_dma);
973
974 if (start_new)
975 cmd = DWC3_DEPCMD_STARTTRANSFER;
976 else
977 cmd = DWC3_DEPCMD_UPDATETRANSFER;
978
979 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
980 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
981 if (ret < 0) {
982 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
983
984 /*
985 * FIXME we need to iterate over the list of requests
986 * here and stop, unmap, free and del each of the linked
987 * requests instead of what we do now.
988 */
989 dwc3_unmap_buffer_from_dma(req);
990 list_del(&req->list);
991 return ret;
992 }
993
994 dep->flags |= DWC3_EP_BUSY;
995 dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
996 dep->number);
997
998 WARN_ON_ONCE(!dep->res_trans_idx);
999
1000 return 0;
1001}
1002
1003static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1004{
1005 req->request.actual = 0;
1006 req->request.status = -EINPROGRESS;
1007 req->direction = dep->direction;
1008 req->epnum = dep->number;
1009
1010 /*
1011 * We only add to our list of requests now and
1012 * start consuming the list once we get XferNotReady
1013 * IRQ.
1014 *
1015 * That way, we avoid doing anything that we don't need
1016 * to do now and defer it until the point we receive a
1017 * particular token from the Host side.
1018 *
1019 * This will also avoid Host cancelling URBs due to too
1020 * many NAKs.
1021 */
1022 dwc3_map_buffer_to_dma(req);
1023 list_add_tail(&req->list, &dep->request_list);
1024
1025 /*
1026 * There is one special case: XferNotReady with
1027 * empty list of requests. We need to kick the
1028 * transfer here in that situation, otherwise
1029 * we will be NAKing forever.
1030 *
1031 * If we get XferNotReady before gadget driver
1032 * has a chance to queue a request, we will ACK
1033 * the IRQ but won't be able to receive the data
1034 * until the next request is queued. The following
1035 * code is handling exactly that.
1036 */
1037 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1038 int ret;
1039 int start_trans;
1040
1041 start_trans = 1;
1042 if (usb_endpoint_xfer_isoc(dep->desc) &&
1043 (dep->flags & DWC3_EP_BUSY))
1044 start_trans = 0;
1045
1046 ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
1047 if (ret && ret != -EBUSY) {
1048 struct dwc3 *dwc = dep->dwc;
1049
1050 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1051 dep->name);
1052 }
1053 };
1054
1055 return 0;
1056}
1057
1058static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1059 gfp_t gfp_flags)
1060{
1061 struct dwc3_request *req = to_dwc3_request(request);
1062 struct dwc3_ep *dep = to_dwc3_ep(ep);
1063 struct dwc3 *dwc = dep->dwc;
1064
1065 unsigned long flags;
1066
1067 int ret;
1068
1069 if (!dep->desc) {
1070 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1071 request, ep->name);
1072 return -ESHUTDOWN;
1073 }
1074
1075 dev_vdbg(dwc->dev, "queuing request %p to %s length %d\n",
1076 request, ep->name, request->length);
1077
1078 spin_lock_irqsave(&dwc->lock, flags);
1079 ret = __dwc3_gadget_ep_queue(dep, req);
1080 spin_unlock_irqrestore(&dwc->lock, flags);
1081
1082 return ret;
1083}
1084
1085static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1086 struct usb_request *request)
1087{
1088 struct dwc3_request *req = to_dwc3_request(request);
1089 struct dwc3_request *r = NULL;
1090
1091 struct dwc3_ep *dep = to_dwc3_ep(ep);
1092 struct dwc3 *dwc = dep->dwc;
1093
1094 unsigned long flags;
1095 int ret = 0;
1096
1097 spin_lock_irqsave(&dwc->lock, flags);
1098
1099 list_for_each_entry(r, &dep->request_list, list) {
1100 if (r == req)
1101 break;
1102 }
1103
1104 if (r != req) {
1105 list_for_each_entry(r, &dep->req_queued, list) {
1106 if (r == req)
1107 break;
1108 }
1109 if (r == req) {
1110 /* wait until it is processed */
1111 dwc3_stop_active_transfer(dwc, dep->number);
1112 goto out0;
1113 }
1114 dev_err(dwc->dev, "request %p was not queued to %s\n",
1115 request, ep->name);
1116 ret = -EINVAL;
1117 goto out0;
1118 }
1119
1120 /* giveback the request */
1121 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1122
1123out0:
1124 spin_unlock_irqrestore(&dwc->lock, flags);
1125
1126 return ret;
1127}
1128
1129int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1130{
1131 struct dwc3_gadget_ep_cmd_params params;
1132 struct dwc3 *dwc = dep->dwc;
1133 int ret;
1134
1135 memset(&params, 0x00, sizeof(params));
1136
1137 if (value) {
1138 if (dep->number == 0 || dep->number == 1) {
1139 /*
1140 * Whenever EP0 is stalled, we will restart
1141 * the state machine, thus moving back to
1142 * Setup Phase
1143 */
1144 dwc->ep0state = EP0_SETUP_PHASE;
1145 }
1146
1147 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1148 DWC3_DEPCMD_SETSTALL, &params);
1149 if (ret)
1150 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1151 value ? "set" : "clear",
1152 dep->name);
1153 else
1154 dep->flags |= DWC3_EP_STALL;
1155 } else {
1156 if (dep->flags & DWC3_EP_WEDGE)
1157 return 0;
1158
1159 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1160 DWC3_DEPCMD_CLEARSTALL, &params);
1161 if (ret)
1162 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1163 value ? "set" : "clear",
1164 dep->name);
1165 else
1166 dep->flags &= ~DWC3_EP_STALL;
1167 }
1168
1169 return ret;
1170}
1171
1172static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1173{
1174 struct dwc3_ep *dep = to_dwc3_ep(ep);
1175 struct dwc3 *dwc = dep->dwc;
1176
1177 unsigned long flags;
1178
1179 int ret;
1180
1181 spin_lock_irqsave(&dwc->lock, flags);
1182
1183 if (usb_endpoint_xfer_isoc(dep->desc)) {
1184 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1185 ret = -EINVAL;
1186 goto out;
1187 }
1188
1189 ret = __dwc3_gadget_ep_set_halt(dep, value);
1190out:
1191 spin_unlock_irqrestore(&dwc->lock, flags);
1192
1193 return ret;
1194}
1195
1196static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1197{
1198 struct dwc3_ep *dep = to_dwc3_ep(ep);
1199
1200 dep->flags |= DWC3_EP_WEDGE;
1201
1202 return dwc3_gadget_ep_set_halt(ep, 1);
1203}
1204
1205/* -------------------------------------------------------------------------- */
1206
1207static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1208 .bLength = USB_DT_ENDPOINT_SIZE,
1209 .bDescriptorType = USB_DT_ENDPOINT,
1210 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1211};
1212
1213static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1214 .enable = dwc3_gadget_ep0_enable,
1215 .disable = dwc3_gadget_ep0_disable,
1216 .alloc_request = dwc3_gadget_ep_alloc_request,
1217 .free_request = dwc3_gadget_ep_free_request,
1218 .queue = dwc3_gadget_ep0_queue,
1219 .dequeue = dwc3_gadget_ep_dequeue,
1220 .set_halt = dwc3_gadget_ep_set_halt,
1221 .set_wedge = dwc3_gadget_ep_set_wedge,
1222};
1223
1224static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1225 .enable = dwc3_gadget_ep_enable,
1226 .disable = dwc3_gadget_ep_disable,
1227 .alloc_request = dwc3_gadget_ep_alloc_request,
1228 .free_request = dwc3_gadget_ep_free_request,
1229 .queue = dwc3_gadget_ep_queue,
1230 .dequeue = dwc3_gadget_ep_dequeue,
1231 .set_halt = dwc3_gadget_ep_set_halt,
1232 .set_wedge = dwc3_gadget_ep_set_wedge,
1233};
1234
1235/* -------------------------------------------------------------------------- */
1236
1237static int dwc3_gadget_get_frame(struct usb_gadget *g)
1238{
1239 struct dwc3 *dwc = gadget_to_dwc(g);
1240 u32 reg;
1241
1242 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1243 return DWC3_DSTS_SOFFN(reg);
1244}
1245
1246static int dwc3_gadget_wakeup(struct usb_gadget *g)
1247{
1248 struct dwc3 *dwc = gadget_to_dwc(g);
1249
1250 unsigned long timeout;
1251 unsigned long flags;
1252
1253 u32 reg;
1254
1255 int ret = 0;
1256
1257 u8 link_state;
1258 u8 speed;
1259
1260 spin_lock_irqsave(&dwc->lock, flags);
1261
1262 /*
1263 * According to the Databook Remote wakeup request should
1264 * be issued only when the device is in early suspend state.
1265 *
1266 * We can check that via USB Link State bits in DSTS register.
1267 */
1268 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1269
1270 speed = reg & DWC3_DSTS_CONNECTSPD;
1271 if (speed == DWC3_DSTS_SUPERSPEED) {
1272 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1273 ret = -EINVAL;
1274 goto out;
1275 }
1276
1277 link_state = DWC3_DSTS_USBLNKST(reg);
1278
1279 switch (link_state) {
1280 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1281 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1282 break;
1283 default:
1284 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1285 link_state);
1286 ret = -EINVAL;
1287 goto out;
1288 }
1289
1290 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1291 if (ret < 0) {
1292 dev_err(dwc->dev, "failed to put link in Recovery\n");
1293 goto out;
1294 }
1295
1296 /* write zeroes to Link Change Request */
1297 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1298 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1299
1300 /* poll until Link State changes to ON */
1301 timeout = jiffies + msecs_to_jiffies(100);
1302
1303 while (!time_after(jiffies, timeout)) {
1304 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1305
1306 /* in HS, means ON */
1307 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1308 break;
1309 }
1310
1311 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1312 dev_err(dwc->dev, "failed to send remote wakeup\n");
1313 ret = -EINVAL;
1314 }
1315
1316out:
1317 spin_unlock_irqrestore(&dwc->lock, flags);
1318
1319 return ret;
1320}
1321
1322static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1323 int is_selfpowered)
1324{
1325 struct dwc3 *dwc = gadget_to_dwc(g);
1326
1327 dwc->is_selfpowered = !!is_selfpowered;
1328
1329 return 0;
1330}
1331
1332static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1333{
1334 u32 reg;
1335 u32 timeout = 500;
1336
1337 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1338 if (is_on) {
1339 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1340 reg |= (DWC3_DCTL_RUN_STOP
1341 | DWC3_DCTL_TRGTULST_RX_DET);
1342 } else {
1343 reg &= ~DWC3_DCTL_RUN_STOP;
1344 }
1345
1346 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1347
1348 do {
1349 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1350 if (is_on) {
1351 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1352 break;
1353 } else {
1354 if (reg & DWC3_DSTS_DEVCTRLHLT)
1355 break;
1356 }
1357 timeout--;
1358 if (!timeout)
1359 break;
1360 udelay(1);
1361 } while (1);
1362
1363 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1364 dwc->gadget_driver
1365 ? dwc->gadget_driver->function : "no-function",
1366 is_on ? "connect" : "disconnect");
1367}
1368
1369static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1370{
1371 struct dwc3 *dwc = gadget_to_dwc(g);
1372 unsigned long flags;
1373
1374 is_on = !!is_on;
1375
1376 spin_lock_irqsave(&dwc->lock, flags);
1377 dwc3_gadget_run_stop(dwc, is_on);
1378 spin_unlock_irqrestore(&dwc->lock, flags);
1379
1380 return 0;
1381}
1382
1383static int dwc3_gadget_start(struct usb_gadget *g,
1384 struct usb_gadget_driver *driver)
1385{
1386 struct dwc3 *dwc = gadget_to_dwc(g);
1387 struct dwc3_ep *dep;
1388 unsigned long flags;
1389 int ret = 0;
1390 u32 reg;
1391
1392 spin_lock_irqsave(&dwc->lock, flags);
1393
1394 if (dwc->gadget_driver) {
1395 dev_err(dwc->dev, "%s is already bound to %s\n",
1396 dwc->gadget.name,
1397 dwc->gadget_driver->driver.name);
1398 ret = -EBUSY;
1399 goto err0;
1400 }
1401
1402 dwc->gadget_driver = driver;
1403 dwc->gadget.dev.driver = &driver->driver;
1404
1405 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1406 reg &= ~(DWC3_DCFG_SPEED_MASK);
1407 reg |= dwc->maximum_speed;
1408 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1409
1410 dwc->start_config_issued = false;
1411
1412 /* Start with SuperSpeed Default */
1413 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1414
1415 dep = dwc->eps[0];
1416 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
1417 if (ret) {
1418 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1419 goto err0;
1420 }
1421
1422 dep = dwc->eps[1];
1423 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
1424 if (ret) {
1425 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1426 goto err1;
1427 }
1428
1429 /* begin to receive SETUP packets */
1430 dwc->ep0state = EP0_SETUP_PHASE;
1431 dwc3_ep0_out_start(dwc);
1432
1433 spin_unlock_irqrestore(&dwc->lock, flags);
1434
1435 return 0;
1436
1437err1:
1438 __dwc3_gadget_ep_disable(dwc->eps[0]);
1439
1440err0:
1441 spin_unlock_irqrestore(&dwc->lock, flags);
1442
1443 return ret;
1444}
1445
1446static int dwc3_gadget_stop(struct usb_gadget *g,
1447 struct usb_gadget_driver *driver)
1448{
1449 struct dwc3 *dwc = gadget_to_dwc(g);
1450 unsigned long flags;
1451
1452 spin_lock_irqsave(&dwc->lock, flags);
1453
1454 __dwc3_gadget_ep_disable(dwc->eps[0]);
1455 __dwc3_gadget_ep_disable(dwc->eps[1]);
1456
1457 dwc->gadget_driver = NULL;
1458 dwc->gadget.dev.driver = NULL;
1459
1460 spin_unlock_irqrestore(&dwc->lock, flags);
1461
1462 return 0;
1463}
1464static const struct usb_gadget_ops dwc3_gadget_ops = {
1465 .get_frame = dwc3_gadget_get_frame,
1466 .wakeup = dwc3_gadget_wakeup,
1467 .set_selfpowered = dwc3_gadget_set_selfpowered,
1468 .pullup = dwc3_gadget_pullup,
1469 .udc_start = dwc3_gadget_start,
1470 .udc_stop = dwc3_gadget_stop,
1471};
1472
1473/* -------------------------------------------------------------------------- */
1474
1475static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1476{
1477 struct dwc3_ep *dep;
1478 u8 epnum;
1479
1480 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1481
1482 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1483 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1484 if (!dep) {
1485 dev_err(dwc->dev, "can't allocate endpoint %d\n",
1486 epnum);
1487 return -ENOMEM;
1488 }
1489
1490 dep->dwc = dwc;
1491 dep->number = epnum;
1492 dwc->eps[epnum] = dep;
1493
1494 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1495 (epnum & 1) ? "in" : "out");
1496 dep->endpoint.name = dep->name;
1497 dep->direction = (epnum & 1);
1498
1499 if (epnum == 0 || epnum == 1) {
1500 dep->endpoint.maxpacket = 512;
1501 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1502 if (!epnum)
1503 dwc->gadget.ep0 = &dep->endpoint;
1504 } else {
1505 int ret;
1506
1507 dep->endpoint.maxpacket = 1024;
1508 dep->endpoint.max_streams = 15;
1509 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1510 list_add_tail(&dep->endpoint.ep_list,
1511 &dwc->gadget.ep_list);
1512
1513 ret = dwc3_alloc_trb_pool(dep);
1514 if (ret)
1515 return ret;
1516 }
1517
1518 INIT_LIST_HEAD(&dep->request_list);
1519 INIT_LIST_HEAD(&dep->req_queued);
1520 }
1521
1522 return 0;
1523}
1524
1525static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1526{
1527 struct dwc3_ep *dep;
1528 u8 epnum;
1529
1530 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1531 dep = dwc->eps[epnum];
1532 dwc3_free_trb_pool(dep);
1533
1534 if (epnum != 0 && epnum != 1)
1535 list_del(&dep->endpoint.ep_list);
1536
1537 kfree(dep);
1538 }
1539}
1540
1541static void dwc3_gadget_release(struct device *dev)
1542{
1543 dev_dbg(dev, "%s\n", __func__);
1544}
1545
1546/* -------------------------------------------------------------------------- */
1547static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1548 const struct dwc3_event_depevt *event, int status)
1549{
1550 struct dwc3_request *req;
1551 struct dwc3_trb *trb;
1552 unsigned int count;
1553 unsigned int s_pkt = 0;
1554
1555 do {
1556 req = next_request(&dep->req_queued);
1557 if (!req) {
1558 WARN_ON_ONCE(1);
1559 return 1;
1560 }
1561
1562 trb = req->trb;
1563
1564 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1565 /*
1566 * We continue despite the error. There is not much we
1567 * can do. If we don't clean it up we loop forever. If
1568 * we skip the TRB then it gets overwritten after a
1569 * while since we use them in a ring buffer. A BUG()
1570 * would help. Let's hope that if this occurs, someone
1571 * fixes the root cause instead of looking away :)
1572 */
1573 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1574 dep->name, req->trb);
1575 count = trb->size & DWC3_TRB_SIZE_MASK;
1576
1577 if (dep->direction) {
1578 if (count) {
1579 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1580 dep->name);
1581 status = -ECONNRESET;
1582 }
1583 } else {
1584 if (count && (event->status & DEPEVT_STATUS_SHORT))
1585 s_pkt = 1;
1586 }
1587
1588 /*
1589 * We assume here we will always receive the entire data block
1590 * which we should receive. Meaning, if we program RX to
1591 * receive 4K but we receive only 2K, we assume that's all we
1592 * should receive and we simply bounce the request back to the
1593 * gadget driver for further processing.
1594 */
1595 req->request.actual += req->request.length - count;
1596 dwc3_gadget_giveback(dep, req, status);
1597 if (s_pkt)
1598 break;
1599 if ((event->status & DEPEVT_STATUS_LST) &&
1600 (trb->ctrl & DWC3_TRB_CTRL_LST))
1601 break;
1602 if ((event->status & DEPEVT_STATUS_IOC) &&
1603 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1604 break;
1605 } while (1);
1606
1607 if ((event->status & DEPEVT_STATUS_IOC) &&
1608 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1609 return 0;
1610 return 1;
1611}
1612
1613static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1614 struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1615 int start_new)
1616{
1617 unsigned status = 0;
1618 int clean_busy;
1619
1620 if (event->status & DEPEVT_STATUS_BUSERR)
1621 status = -ECONNRESET;
1622
1623 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1624 if (clean_busy) {
1625 dep->flags &= ~DWC3_EP_BUSY;
1626 dep->res_trans_idx = 0;
1627 }
1628
1629 /*
1630 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1631 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1632 */
1633 if (dwc->revision < DWC3_REVISION_183A) {
1634 u32 reg;
1635 int i;
1636
1637 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1638 struct dwc3_ep *dep = dwc->eps[i];
1639
1640 if (!(dep->flags & DWC3_EP_ENABLED))
1641 continue;
1642
1643 if (!list_empty(&dep->req_queued))
1644 return;
1645 }
1646
1647 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1648 reg |= dwc->u1u2;
1649 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1650
1651 dwc->u1u2 = 0;
1652 }
1653}
1654
1655static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1656 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1657{
1658 u32 uf, mask;
1659
1660 if (list_empty(&dep->request_list)) {
1661 dev_vdbg(dwc->dev, "ISOC ep %s run out of requests.\n",
1662 dep->name);
1663 return;
1664 }
1665
1666 mask = ~(dep->interval - 1);
1667 uf = event->parameters & mask;
1668 /* 4 micro frames in the future */
1669 uf += dep->interval * 4;
1670
1671 __dwc3_gadget_kick_transfer(dep, uf, 1);
1672}
1673
1674static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
1675 const struct dwc3_event_depevt *event)
1676{
1677 struct dwc3 *dwc = dep->dwc;
1678 struct dwc3_event_depevt mod_ev = *event;
1679
1680 /*
1681 * We were asked to remove one request. It is possible that this
1682 * request and a few others were started together and have the same
1683 * transfer index. Since we stopped the complete endpoint we don't
1684 * know how many requests were already completed (and not yet
1685 * reported) and how many could be completed later. We purge them all until
1686 * the end of the list.
1687 */
1688 mod_ev.status = DEPEVT_STATUS_LST;
1689 dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
1690 dep->flags &= ~DWC3_EP_BUSY;
1691 /* pending requests are ignored and are queued on XferNotReady */
1692}
1693
1694static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
1695 const struct dwc3_event_depevt *event)
1696{
1697 u32 param = event->parameters;
1698 u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
1699
1700 switch (cmd_type) {
1701 case DWC3_DEPCMD_ENDTRANSFER:
1702 dwc3_process_ep_cmd_complete(dep, event);
1703 break;
1704 case DWC3_DEPCMD_STARTTRANSFER:
1705 dep->res_trans_idx = param & 0x7f;
1706 break;
1707 default:
1708 printk(KERN_ERR "%s() unknown /unexpected type: %d\n",
1709 __func__, cmd_type);
1710 break;
1711 };
1712}
1713
1714static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1715 const struct dwc3_event_depevt *event)
1716{
1717 struct dwc3_ep *dep;
1718 u8 epnum = event->endpoint_number;
1719
1720 dep = dwc->eps[epnum];
1721
1722 dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1723 dwc3_ep_event_string(event->endpoint_event));
1724
1725 if (epnum == 0 || epnum == 1) {
1726 dwc3_ep0_interrupt(dwc, event);
1727 return;
1728 }
1729
1730 switch (event->endpoint_event) {
1731 case DWC3_DEPEVT_XFERCOMPLETE:
1732 if (usb_endpoint_xfer_isoc(dep->desc)) {
1733 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1734 dep->name);
1735 return;
1736 }
1737
1738 dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1739 break;
1740 case DWC3_DEPEVT_XFERINPROGRESS:
1741 if (!usb_endpoint_xfer_isoc(dep->desc)) {
1742 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1743 dep->name);
1744 return;
1745 }
1746
1747 dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1748 break;
1749 case DWC3_DEPEVT_XFERNOTREADY:
1750 if (usb_endpoint_xfer_isoc(dep->desc)) {
1751 dwc3_gadget_start_isoc(dwc, dep, event);
1752 } else {
1753 int ret;
1754
1755 dev_vdbg(dwc->dev, "%s: reason %s\n",
1756 dep->name, event->status &
1757 DEPEVT_STATUS_TRANSFER_ACTIVE
1758 ? "Transfer Active"
1759 : "Transfer Not Active");
1760
1761 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1762 if (!ret || ret == -EBUSY)
1763 return;
1764
1765 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1766 dep->name);
1767 }
1768
1769 break;
1770 case DWC3_DEPEVT_STREAMEVT:
1771 if (!usb_endpoint_xfer_bulk(dep->desc)) {
1772 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1773 dep->name);
1774 return;
1775 }
1776
1777 switch (event->status) {
1778 case DEPEVT_STREAMEVT_FOUND:
1779 dev_vdbg(dwc->dev, "Stream %d found and started\n",
1780 event->parameters);
1781
1782 break;
1783 case DEPEVT_STREAMEVT_NOTFOUND:
1784 /* FALLTHROUGH */
1785 default:
1786 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1787 }
1788 break;
1789 case DWC3_DEPEVT_RXTXFIFOEVT:
1790 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1791 break;
1792 case DWC3_DEPEVT_EPCMDCMPLT:
1793 dwc3_ep_cmd_compl(dep, event);
1794 break;
1795 }
1796}
1797
1798static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1799{
1800 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1801 spin_unlock(&dwc->lock);
1802 dwc->gadget_driver->disconnect(&dwc->gadget);
1803 spin_lock(&dwc->lock);
1804 }
1805}
1806
1807static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1808{
1809 struct dwc3_ep *dep;
1810 struct dwc3_gadget_ep_cmd_params params;
1811 u32 cmd;
1812 int ret;
1813
1814 dep = dwc->eps[epnum];
1815
1816 WARN_ON(!dep->res_trans_idx);
1817 if (dep->res_trans_idx) {
1818 cmd = DWC3_DEPCMD_ENDTRANSFER;
1819 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
1820 cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
1821 memset(&params, 0, sizeof(params));
1822 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1823 WARN_ON_ONCE(ret);
1824 dep->res_trans_idx = 0;
1825 }
1826}
1827
1828static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1829{
1830 u32 epnum;
1831
1832 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1833 struct dwc3_ep *dep;
1834
1835 dep = dwc->eps[epnum];
1836 if (!(dep->flags & DWC3_EP_ENABLED))
1837 continue;
1838
1839 dwc3_remove_requests(dwc, dep);
1840 }
1841}
1842
1843static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1844{
1845 u32 epnum;
1846
1847 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1848 struct dwc3_ep *dep;
1849 struct dwc3_gadget_ep_cmd_params params;
1850 int ret;
1851
1852 dep = dwc->eps[epnum];
1853
1854 if (!(dep->flags & DWC3_EP_STALL))
1855 continue;
1856
1857 dep->flags &= ~DWC3_EP_STALL;
1858
1859 memset(&params, 0, sizeof(params));
1860 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1861 DWC3_DEPCMD_CLEARSTALL, &params);
1862 WARN_ON_ONCE(ret);
1863 }
1864}
1865
1866static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
1867{
1868 dev_vdbg(dwc->dev, "%s\n", __func__);
1869#if 0
1870 XXX
1871 U1/U2 is powersave optimization. Skip it for now. Anyway we need to
1872 enable it before we can disable it.
1873
1874 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1875 reg &= ~DWC3_DCTL_INITU1ENA;
1876 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1877
1878 reg &= ~DWC3_DCTL_INITU2ENA;
1879 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1880#endif
1881
1882 dwc3_stop_active_transfers(dwc);
1883 dwc3_disconnect_gadget(dwc);
1884 dwc->start_config_issued = false;
1885
1886 dwc->gadget.speed = USB_SPEED_UNKNOWN;
1887 dwc->setup_packet_pending = false;
1888}
1889
1890static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
1891{
1892 u32 reg;
1893
1894 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1895
1896 if (on)
1897 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
1898 else
1899 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1900
1901 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1902}
1903
1904static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
1905{
1906 u32 reg;
1907
1908 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1909
1910 if (on)
1911 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
1912 else
1913 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1914
1915 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1916}
1917
1918static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
1919{
1920 u32 reg;
1921
1922 dev_vdbg(dwc->dev, "%s\n", __func__);
1923
1924 /*
1925 * WORKAROUND: DWC3 revisions <1.88a have an issue which
1926 * would cause a missing Disconnect Event if there's a
1927 * pending Setup Packet in the FIFO.
1928 *
1929 * There's no suggested workaround on the official Bug
1930 * report, which states that "unless the driver/application
1931 * is doing any special handling of a disconnect event,
1932 * there is no functional issue".
1933 *
1934 * Unfortunately, it turns out that we _do_ some special
1935 * handling of a disconnect event, namely complete all
1936 * pending transfers, notify gadget driver of the
1937 * disconnection, and so on.
1938 *
1939 * Our suggested workaround is to follow the Disconnect
1940 * Event steps here, instead, based on a setup_packet_pending
1941 * flag. Such flag gets set whenever we have a XferNotReady
1942 * event on EP0 and gets cleared on XferComplete for the
1943 * same endpoint.
1944 *
1945 * Refers to:
1946 *
1947 * STAR#9000466709: RTL: Device : Disconnect event not
1948 * generated if setup packet pending in FIFO
1949 */
1950 if (dwc->revision < DWC3_REVISION_188A) {
1951 if (dwc->setup_packet_pending)
1952 dwc3_gadget_disconnect_interrupt(dwc);
1953 }
1954
1955 /* after reset -> Default State */
1956 dwc->dev_state = DWC3_DEFAULT_STATE;
1957
1958 /* Enable PHYs */
1959 dwc3_gadget_usb2_phy_power(dwc, true);
1960 dwc3_gadget_usb3_phy_power(dwc, true);
1961
1962 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
1963 dwc3_disconnect_gadget(dwc);
1964
1965 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1966 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
1967 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1968 	dwc->test_mode = false;
1969
1970 dwc3_stop_active_transfers(dwc);
1971 dwc3_clear_stall_all_ep(dwc);
1972 	dwc->start_config_issued = false;
1973
1974 /* Reset device address to zero */
1975 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1976 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
1977 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1978}
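
/*
 * Illustrative sketch, not part of the driver: the reset workaround above
 * consumes a setup_packet_pending flag which is expected to be maintained
 * from the EP0 event path (set on XferNotReady for EP0, cleared on
 * XferComplete). The helper below only restates that bookkeeping for
 * clarity; its name is hypothetical and the real tracking lives in the
 * EP0 handling code.
 */
#if 0
static void dwc3_sketch_track_setup_packet(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	if (event->endpoint_number > 1)
		return;		/* only physical EP0 OUT/IN matter here */

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERNOTREADY:
		dwc->setup_packet_pending = true;
		break;
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc->setup_packet_pending = false;
		break;
	}
}
#endif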
1979
1980static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
1981{
1982 u32 reg;
1983 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
1984
1985 /*
1986 	 * We change the clock only at SuperSpeed, though it is not clear why
1987 	 * that is needed. It may become part of the power saving plan.
1988 */
1989
1990 if (speed != DWC3_DSTS_SUPERSPEED)
1991 return;
1992
1993 /*
1994 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
1995 * each time on Connect Done.
1996 */
1997 if (!usb30_clock)
1998 return;
1999
2000 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2001 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2002 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2003}
2004
2005static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
2006{
2007 switch (speed) {
2008 case USB_SPEED_SUPER:
2009 dwc3_gadget_usb2_phy_power(dwc, false);
2010 break;
2011 case USB_SPEED_HIGH:
2012 case USB_SPEED_FULL:
2013 case USB_SPEED_LOW:
2014 dwc3_gadget_usb3_phy_power(dwc, false);
2015 break;
2016 }
2017}
2018
2019static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2020{
2021 struct dwc3_gadget_ep_cmd_params params;
2022 struct dwc3_ep *dep;
2023 int ret;
2024 u32 reg;
2025 u8 speed;
2026
2027 dev_vdbg(dwc->dev, "%s\n", __func__);
2028
2029 memset(&params, 0x00, sizeof(params));
2030
2031 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2032 speed = reg & DWC3_DSTS_CONNECTSPD;
2033 dwc->speed = speed;
2034
2035 dwc3_update_ram_clk_sel(dwc, speed);
2036
2037 switch (speed) {
2038 case DWC3_DCFG_SUPERSPEED:
2039 /*
2040 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2041 * would cause a missing USB3 Reset event.
2042 *
2043 * In such situations, we should force a USB3 Reset
2044 * event by calling our dwc3_gadget_reset_interrupt()
2045 * routine.
2046 *
2047 * Refers to:
2048 *
2049 * STAR#9000483510: RTL: SS : USB3 reset event may
2050 * not be generated always when the link enters poll
2051 */
2052 if (dwc->revision < DWC3_REVISION_190A)
2053 dwc3_gadget_reset_interrupt(dwc);
2054
2055 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2056 dwc->gadget.ep0->maxpacket = 512;
2057 dwc->gadget.speed = USB_SPEED_SUPER;
2058 break;
2059 case DWC3_DCFG_HIGHSPEED:
2060 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2061 dwc->gadget.ep0->maxpacket = 64;
2062 dwc->gadget.speed = USB_SPEED_HIGH;
2063 break;
2064 case DWC3_DCFG_FULLSPEED2:
2065 case DWC3_DCFG_FULLSPEED1:
2066 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2067 dwc->gadget.ep0->maxpacket = 64;
2068 dwc->gadget.speed = USB_SPEED_FULL;
2069 break;
2070 case DWC3_DCFG_LOWSPEED:
2071 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2072 dwc->gadget.ep0->maxpacket = 8;
2073 dwc->gadget.speed = USB_SPEED_LOW;
2074 break;
2075 }
2076
2077 	/* Disable the unneeded PHY */
2078 dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
2079
2080 dep = dwc->eps[0];
2081 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
2082 if (ret) {
2083 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2084 return;
2085 }
2086
2087 dep = dwc->eps[1];
2088 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
2089 if (ret) {
2090 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2091 return;
2092 }
2093
2094 /*
2095 * Configure PHY via GUSB3PIPECTLn if required.
2096 *
2097 * Update GTXFIFOSIZn
2098 *
2099 * In both cases reset values should be sufficient.
2100 */
2101}
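
/*
 * Illustrative sketch, not part of the driver: the speed decode above
 * pairs each DSTS connection speed with the ep0 wMaxPacketSize mandated
 * for it (512 for SuperSpeed, 64 for high/full speed, 8 for low speed).
 * The helper below is hypothetical and only condenses that mapping.
 */
#if 0
static unsigned int dwc3_sketch_ep0_maxpacket(u8 speed)
{
	switch (speed) {
	case DWC3_DCFG_SUPERSPEED:
		return 512;
	case DWC3_DCFG_HIGHSPEED:
	case DWC3_DCFG_FULLSPEED1:
	case DWC3_DCFG_FULLSPEED2:
		return 64;
	case DWC3_DCFG_LOWSPEED:
		return 8;
	default:
		return 64;	/* conservative fallback */
	}
}
#endif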
2102
2103static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2104{
2105 dev_vdbg(dwc->dev, "%s\n", __func__);
2106
2107 /*
2108 * TODO take core out of low power mode when that's
2109 * implemented.
2110 */
2111
2112 dwc->gadget_driver->resume(&dwc->gadget);
2113}
2114
2115static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2116 unsigned int evtinfo)
2117{
2118 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2119
2120 /*
2121 	 * WORKAROUND: DWC3 revisions <1.83a have an issue where, depending
2122 	 * on the link partner, the USB session might do multiple entries/exits
2123 	 * of low power states before a transfer takes place.
2124 *
2125 * Due to this problem, we might experience lower throughput. The
2126 * suggested workaround is to disable DCTL[12:9] bits if we're
2127 * transitioning from U1/U2 to U0 and enable those bits again
2128 * after a transfer completes and there are no pending transfers
2129 * on any of the enabled endpoints.
2130 *
2131 * This is the first half of that workaround.
2132 *
2133 * Refers to:
2134 *
2135 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2136 * core send LGO_Ux entering U0
2137 */
2138 if (dwc->revision < DWC3_REVISION_183A) {
2139 if (next == DWC3_LINK_STATE_U0) {
2140 u32 u1u2;
2141 u32 reg;
2142
2143 switch (dwc->link_state) {
2144 case DWC3_LINK_STATE_U1:
2145 case DWC3_LINK_STATE_U2:
2146 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2147 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2148 | DWC3_DCTL_ACCEPTU2ENA
2149 | DWC3_DCTL_INITU1ENA
2150 | DWC3_DCTL_ACCEPTU1ENA);
2151
2152 if (!dwc->u1u2)
2153 dwc->u1u2 = reg & u1u2;
2154
2155 reg &= ~u1u2;
2156
2157 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2158 break;
2159 default:
2160 /* do nothing */
2161 break;
2162 }
2163 }
2164 }
2165
2166 dwc->link_state = next;
2167
2168 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
2169}
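
/*
 * Illustrative sketch, not part of the driver: the function above is only
 * the first half of the STAR#9000446952 workaround; it saves the U1/U2
 * enable bits in dwc->u1u2 and clears them on a U1/U2 -> U0 transition.
 * The second half, reinstating the saved bits once a transfer completes
 * and nothing else is pending, would look roughly like the helper below.
 * The helper name is hypothetical; the real restore is driven from the
 * transfer-completion path.
 */
#if 0
static void dwc3_sketch_restore_u1u2(struct dwc3 *dwc)
{
	if (dwc->revision < DWC3_REVISION_183A && dwc->u1u2) {
		u32 reg;

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}
#endif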
2170
2171static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2172 const struct dwc3_event_devt *event)
2173{
2174 switch (event->type) {
2175 case DWC3_DEVICE_EVENT_DISCONNECT:
2176 dwc3_gadget_disconnect_interrupt(dwc);
2177 break;
2178 case DWC3_DEVICE_EVENT_RESET:
2179 dwc3_gadget_reset_interrupt(dwc);
2180 break;
2181 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2182 dwc3_gadget_conndone_interrupt(dwc);
2183 break;
2184 case DWC3_DEVICE_EVENT_WAKEUP:
2185 dwc3_gadget_wakeup_interrupt(dwc);
2186 break;
2187 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2188 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2189 break;
2190 case DWC3_DEVICE_EVENT_EOPF:
2191 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2192 break;
2193 case DWC3_DEVICE_EVENT_SOF:
2194 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2195 break;
2196 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2197 dev_vdbg(dwc->dev, "Erratic Error\n");
2198 break;
2199 case DWC3_DEVICE_EVENT_CMD_CMPL:
2200 dev_vdbg(dwc->dev, "Command Complete\n");
2201 break;
2202 case DWC3_DEVICE_EVENT_OVERFLOW:
2203 dev_vdbg(dwc->dev, "Overflow\n");
2204 break;
2205 default:
2206 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2207 }
2208}
2209
2210static void dwc3_process_event_entry(struct dwc3 *dwc,
2211 const union dwc3_event *event)
2212{
2213 /* Endpoint IRQ, handle it and return early */
2214 if (event->type.is_devspec == 0) {
2215 /* depevt */
2216 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2217 }
2218
2219 switch (event->type.type) {
2220 case DWC3_EVENT_TYPE_DEV:
2221 dwc3_gadget_interrupt(dwc, &event->devt);
2222 break;
2223 /* REVISIT what to do with Carkit and I2C events ? */
2224 default:
2225 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2226 }
2227}
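
/*
 * Illustrative sketch, not part of the driver: the dispatch above relies
 * on the dwc3_event union, whose first bit tells endpoint events (bit 0
 * clear) apart from device and other non-endpoint events (bit 0 set).
 * Assuming that layout, the same test expressed on the raw event word:
 */
#if 0
static bool dwc3_sketch_is_endpoint_event(u32 raw)
{
	return (raw & 1) == 0;
}
#endif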
2228
2229static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2230{
2231 struct dwc3_event_buffer *evt;
2232 int left;
2233 u32 count;
2234
2235 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2236 count &= DWC3_GEVNTCOUNT_MASK;
2237 if (!count)
2238 return IRQ_NONE;
2239
2240 evt = dwc->ev_buffs[buf];
2241 left = count;
2242
2243 while (left > 0) {
2244 union dwc3_event event;
2245
2246 event.raw = *(u32 *) (evt->buf + evt->lpos);
2247
2248 dwc3_process_event_entry(dwc, &event);
2249 /*
2250 		 * XXX we wrap around correctly to the next entry as almost all
2251 		 * entries are 4 bytes in size. There is one entry type which is
2252 		 * 12 bytes: a regular entry followed by 8 bytes of data. It is
2253 		 * not yet clear how such an entry is laid out if it lands next
2254 		 * to the buffer boundary, so that case is left until we handle it.
2255 */
2256 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2257 left -= 4;
2258
2259 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2260 }
2261
2262 return IRQ_HANDLED;
2263}
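
/*
 * Illustrative sketch, not part of the driver: the bookkeeping done per
 * entry in the loop above, in isolation. Each consumed event advances the
 * software read position by 4 bytes, wrapping at the end of the event
 * buffer, and is acknowledged to the controller by writing the consumed
 * byte count back to GEVNTCOUNTn. The helper name is hypothetical.
 */
#if 0
static void dwc3_sketch_consume_one_event(struct dwc3 *dwc,
		struct dwc3_event_buffer *evt, u32 buf)
{
	evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
}
#endif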
2264
2265static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2266{
2267 struct dwc3 *dwc = _dwc;
2268 int i;
2269 irqreturn_t ret = IRQ_NONE;
2270
2271 spin_lock(&dwc->lock);
2272
2273 	for (i = 0; i < dwc->num_event_buffers; i++) {
2274 irqreturn_t status;
2275
2276 status = dwc3_process_event_buf(dwc, i);
2277 if (status == IRQ_HANDLED)
2278 ret = status;
2279 }
2280
2281 spin_unlock(&dwc->lock);
2282
2283 return ret;
2284}
2285
2286/**
2287 * dwc3_gadget_init - Initializes gadget related registers
2288  * @dwc: pointer to our controller context structure
2289 *
2290 * Returns 0 on success otherwise negative errno.
2291 */
2292int __devinit dwc3_gadget_init(struct dwc3 *dwc)
2293{
2294 u32 reg;
2295 int ret;
2296 int irq;
2297
2298 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2299 &dwc->ctrl_req_addr, GFP_KERNEL);
2300 if (!dwc->ctrl_req) {
2301 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2302 ret = -ENOMEM;
2303 goto err0;
2304 }
2305
2306 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2307 &dwc->ep0_trb_addr, GFP_KERNEL);
2308 if (!dwc->ep0_trb) {
2309 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2310 ret = -ENOMEM;
2311 goto err1;
2312 }
2313
2314 dwc->setup_buf = dma_alloc_coherent(dwc->dev,
2315 sizeof(*dwc->setup_buf) * 2,
2316 &dwc->setup_buf_addr, GFP_KERNEL);
2317 if (!dwc->setup_buf) {
2318 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2319 ret = -ENOMEM;
2320 goto err2;
2321 }
2322
2323 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2324 512, &dwc->ep0_bounce_addr, GFP_KERNEL);
2325 if (!dwc->ep0_bounce) {
2326 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2327 ret = -ENOMEM;
2328 goto err3;
2329 }
2330
2331 dev_set_name(&dwc->gadget.dev, "gadget");
2332
2333 dwc->gadget.ops = &dwc3_gadget_ops;
2334 	dwc->gadget.max_speed = USB_SPEED_SUPER;
2335 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2336 dwc->gadget.dev.parent = dwc->dev;
2337 	dwc->gadget.sg_supported = true;
2338
2339 dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
2340
2341 dwc->gadget.dev.dma_parms = dwc->dev->dma_parms;
2342 dwc->gadget.dev.dma_mask = dwc->dev->dma_mask;
2343 dwc->gadget.dev.release = dwc3_gadget_release;
2344 dwc->gadget.name = "dwc3-gadget";
2345
2346 /*
2347 * REVISIT: Here we should clear all pending IRQs to be
2348 * sure we're starting from a well known location.
2349 */
2350
2351 ret = dwc3_gadget_init_endpoints(dwc);
2352 if (ret)
2353 		goto err4;
2354
2355 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2356
2357 ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
2358 "dwc3", dwc);
2359 if (ret) {
2360 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2361 irq, ret);
2362 		goto err5;
2363 }
2364
2365 /* Enable all but Start and End of Frame IRQs */
2366 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
2367 DWC3_DEVTEN_EVNTOVERFLOWEN |
2368 DWC3_DEVTEN_CMDCMPLTEN |
2369 DWC3_DEVTEN_ERRTICERREN |
2370 DWC3_DEVTEN_WKUPEVTEN |
2371 DWC3_DEVTEN_ULSTCNGEN |
2372 DWC3_DEVTEN_CONNECTDONEEN |
2373 DWC3_DEVTEN_USBRSTEN |
2374 DWC3_DEVTEN_DISCONNEVTEN);
2375 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2376
2377 ret = device_register(&dwc->gadget.dev);
2378 if (ret) {
2379 dev_err(dwc->dev, "failed to register gadget device\n");
2380 put_device(&dwc->gadget.dev);
2381 		goto err6;
2382 }
2383
2384 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2385 if (ret) {
2386 dev_err(dwc->dev, "failed to register udc\n");
2387 		goto err7;
2388 }
2389
2390 return 0;
2391
2392 err7:
2393 device_unregister(&dwc->gadget.dev);
2394
2395 err6:
2396 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2397 free_irq(irq, dwc);
2398
2399 err5:
2400 dwc3_gadget_free_endpoints(dwc);
2401
2402err4:
2403 dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2404 dwc->ep0_bounce_addr);
2405
2406err3:
2407 dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2408 dwc->setup_buf, dwc->setup_buf_addr);
2409
2410err2:
2411 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2412 dwc->ep0_trb, dwc->ep0_trb_addr);
2413
2414err1:
2415 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2416 dwc->ctrl_req, dwc->ctrl_req_addr);
2417
2418err0:
2419 return ret;
2420}
2421
2422void dwc3_gadget_exit(struct dwc3 *dwc)
2423{
2424 int irq;
2425
2426 usb_del_gadget_udc(&dwc->gadget);
2427 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2428
2429 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2430 free_irq(irq, dwc);
2431
2432 dwc3_gadget_free_endpoints(dwc);
2433
2434 dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2435 dwc->ep0_bounce_addr);
2436
2437 dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2438 dwc->setup_buf, dwc->setup_buf_addr);
2439
2440 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2441 dwc->ep0_trb, dwc->ep0_trb_addr);
2442
2443 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2444 dwc->ctrl_req, dwc->ctrl_req_addr);
2445
2446 device_unregister(&dwc->gadget.dev);
2447}