drivers/usb/gadget/mv_udc_core.c
1 /*
2 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
3 * Author: Chao Xie <chao.xie@marvell.com>
4 * Neil Zhang <zhangwm@marvell.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/dmapool.h>
16 #include <linux/kernel.h>
17 #include <linux/delay.h>
18 #include <linux/ioport.h>
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/errno.h>
22 #include <linux/init.h>
23 #include <linux/timer.h>
24 #include <linux/list.h>
25 #include <linux/interrupt.h>
26 #include <linux/moduleparam.h>
27 #include <linux/device.h>
28 #include <linux/usb/ch9.h>
29 #include <linux/usb/gadget.h>
30 #include <linux/usb/otg.h>
31 #include <linux/pm.h>
32 #include <linux/io.h>
33 #include <linux/irq.h>
34 #include <linux/platform_device.h>
35 #include <linux/clk.h>
36 #include <linux/platform_data/mv_usb.h>
37 #include <asm/unaligned.h>
38
39 #include "mv_udc.h"
40
41 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
42 #define DRIVER_VERSION "8 Nov 2010"
43
44 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
45 ((ep)->udc->ep0_dir) : ((ep)->direction))
46
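/*
 * Note: ep0 is bidirectional, so its current direction lives in
 * udc->ep0_dir rather than in the endpoint itself; the ep_dir() macro
 * above hides that difference from the rest of the driver.
 */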
47 /* timeout value -- usec */
48 #define RESET_TIMEOUT 10000
49 #define FLUSH_TIMEOUT 10000
50 #define EPSTATUS_TIMEOUT 10000
51 #define PRIME_TIMEOUT 10000
52 #define READSAFE_TIMEOUT 1000
53 #define DTD_TIMEOUT 1000
54
55 #define LOOPS_USEC_SHIFT 4
56 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
57 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
58
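/*
 * Note: the polling loops below spin LOOPS(timeout) times and delay
 * LOOPS_USEC microseconds per iteration, so each wait is roughly
 * "timeout" microseconds in total.
 */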
59 static DECLARE_COMPLETION(release_done);
60
61 static const char driver_name[] = "mv_udc";
62 static const char driver_desc[] = DRIVER_DESC;
63
64 /* controller device global variable */
65 static struct mv_udc *the_controller;
66 int mv_usb_otgsc;
67
68 static void nuke(struct mv_ep *ep, int status);
69 static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
70
71 /* for endpoint 0 operations */
72 static const struct usb_endpoint_descriptor mv_ep0_desc = {
73 .bLength = USB_DT_ENDPOINT_SIZE,
74 .bDescriptorType = USB_DT_ENDPOINT,
75 .bEndpointAddress = 0,
76 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
77 .wMaxPacketSize = EP0_MAX_PKT_SIZE,
78 };
79
80 static void ep0_reset(struct mv_udc *udc)
81 {
82 struct mv_ep *ep;
83 u32 epctrlx;
84 int i = 0;
85
86 /* ep0 in and out */
87 for (i = 0; i < 2; i++) {
88 ep = &udc->eps[i];
89 ep->udc = udc;
90
91 /* ep0 dQH */
92 ep->dqh = &udc->ep_dqh[i];
93
94 /* configure ep0 endpoint capabilities in dQH */
95 ep->dqh->max_packet_length =
96 (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
97 | EP_QUEUE_HEAD_IOS;
98
99 ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
100
101 epctrlx = readl(&udc->op_regs->epctrlx[0]);
102 if (i) { /* TX */
103 epctrlx |= EPCTRL_TX_ENABLE
104 | (USB_ENDPOINT_XFER_CONTROL
105 << EPCTRL_TX_EP_TYPE_SHIFT);
106
107 } else { /* RX */
108 epctrlx |= EPCTRL_RX_ENABLE
109 | (USB_ENDPOINT_XFER_CONTROL
110 << EPCTRL_RX_EP_TYPE_SHIFT);
111 }
112
113 writel(epctrlx, &udc->op_regs->epctrlx[0]);
114 }
115 }
116
117 /* protocol ep0 stall, will automatically be cleared on new transaction */
118 static void ep0_stall(struct mv_udc *udc)
119 {
120 u32 epctrlx;
121
122 /* set TX and RX to stall */
123 epctrlx = readl(&udc->op_regs->epctrlx[0]);
124 epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
125 writel(epctrlx, &udc->op_regs->epctrlx[0]);
126
127 /* update ep0 state */
128 udc->ep0_state = WAIT_FOR_SETUP;
129 udc->ep0_dir = EP_DIR_OUT;
130 }
131
132 static int process_ep_req(struct mv_udc *udc, int index,
133 struct mv_req *curr_req)
134 {
135 struct mv_dtd *curr_dtd;
136 struct mv_dqh *curr_dqh;
137 int td_complete, actual, remaining_length;
138 int i, direction;
139 int retval = 0;
140 u32 errors;
141 u32 bit_pos;
142
143 curr_dqh = &udc->ep_dqh[index];
144 direction = index % 2;
145
146 curr_dtd = curr_req->head;
147 td_complete = 0;
148 actual = curr_req->req.length;
149
150 for (i = 0; i < curr_req->dtd_count; i++) {
151 if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
152 dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
153 udc->eps[index].name);
154 return 1;
155 }
156
157 errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
158 if (!errors) {
159 remaining_length =
160 (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
161 >> DTD_LENGTH_BIT_POS;
162 actual -= remaining_length;
163
164 if (remaining_length) {
165 if (direction) {
166 dev_dbg(&udc->dev->dev,
167 "TX dTD remains data\n");
168 retval = -EPROTO;
169 break;
170 } else
171 break;
172 }
173 } else {
174 dev_info(&udc->dev->dev,
175 "complete_tr error: ep=%d %s: error = 0x%x\n",
176 index >> 1, direction ? "SEND" : "RECV",
177 errors);
178 if (errors & DTD_STATUS_HALTED) {
179 /* Clear the errors and Halt condition */
180 curr_dqh->size_ioc_int_sts &= ~errors;
181 retval = -EPIPE;
182 } else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
183 retval = -EPROTO;
184 } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
185 retval = -EILSEQ;
186 }
187 }
188 if (i != curr_req->dtd_count - 1)
189 curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
190 }
191 if (retval)
192 return retval;
193
194 if (direction == EP_DIR_OUT)
195 bit_pos = 1 << curr_req->ep->ep_num;
196 else
197 bit_pos = 1 << (16 + curr_req->ep->ep_num);
198
199 while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
200 if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
201 while (readl(&udc->op_regs->epstatus) & bit_pos)
202 udelay(1);
203 break;
204 }
205 udelay(1);
206 }
207
208 curr_req->req.actual = actual;
209
210 return 0;
211 }
212
213 /*
214 * done() - retire a request; caller blocked irqs
215 * @status : request status to be set, only works when
216 * request is still in progress.
217 */
218 static void done(struct mv_ep *ep, struct mv_req *req, int status)
219 {
220 struct mv_udc *udc = NULL;
221 unsigned char stopped = ep->stopped;
222 struct mv_dtd *curr_td, *next_td;
223 int j;
224
225 udc = (struct mv_udc *)ep->udc;
226 /* Remove the req from the endpoint queue */
227 list_del_init(&req->queue);
228
229 /* req.status should be set as -EINPROGRESS in ep_queue() */
230 if (req->req.status == -EINPROGRESS)
231 req->req.status = status;
232 else
233 status = req->req.status;
234
235 /* Free dtd for the request */
236 next_td = req->head;
237 for (j = 0; j < req->dtd_count; j++) {
238 curr_td = next_td;
239 if (j != req->dtd_count - 1)
240 next_td = curr_td->next_dtd_virt;
241 dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
242 }
243
244 if (req->mapped) {
245 dma_unmap_single(ep->udc->gadget.dev.parent,
246 req->req.dma, req->req.length,
247 ((ep_dir(ep) == EP_DIR_IN) ?
248 DMA_TO_DEVICE : DMA_FROM_DEVICE));
249 req->req.dma = DMA_ADDR_INVALID;
250 req->mapped = 0;
251 } else
252 dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
253 req->req.dma, req->req.length,
254 ((ep_dir(ep) == EP_DIR_IN) ?
255 DMA_TO_DEVICE : DMA_FROM_DEVICE));
256
257 if (status && (status != -ESHUTDOWN))
258 dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
259 ep->ep.name, &req->req, status,
260 req->req.actual, req->req.length);
261
262 ep->stopped = 1;
263
264 spin_unlock(&ep->udc->lock);
265 /*
266 * complete() is from gadget layer,
267 * eg fsg->bulk_in_complete()
268 */
269 if (req->req.complete)
270 req->req.complete(&ep->ep, &req->req);
271
272 spin_lock(&ep->udc->lock);
273 ep->stopped = stopped;
274 }
275
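/*
 * Append a request's dTD chain to the endpoint queue head and prime the
 * endpoint.  The queue heads are laid out as ep_dqh[ep_num * 2 + direction],
 * so the OUT and IN halves of each endpoint sit next to each other.
 * Called with udc->lock held.
 */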
276 static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
277 {
278 struct mv_udc *udc;
279 struct mv_dqh *dqh;
280 u32 bit_pos, direction;
281 u32 usbcmd, epstatus;
282 unsigned int loops;
283 int retval = 0;
284
285 udc = ep->udc;
286 direction = ep_dir(ep);
287 dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
288 bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
289
290 /* check if the pipe is empty */
291 if (!(list_empty(&ep->queue))) {
292 struct mv_req *lastreq;
293 lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
294 lastreq->tail->dtd_next =
295 req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
296
297 wmb();
298
299 if (readl(&udc->op_regs->epprime) & bit_pos)
300 goto done;
301
302 loops = LOOPS(READSAFE_TIMEOUT);
303 while (1) {
304 /* start with setting the semaphores */
305 usbcmd = readl(&udc->op_regs->usbcmd);
306 usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
307 writel(usbcmd, &udc->op_regs->usbcmd);
308
309 /* read the endpoint status */
310 epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
311
312 /*
313 * Reread the ATDTW semaphore bit to check if it is
314 * cleared. When the hardware sees a hazard, it clears
315 * the bit; otherwise it stays set to 1 and we can
316 * proceed with priming the endpoint if it is not already
317 * primed.
318 */
319 if (readl(&udc->op_regs->usbcmd)
320 & USBCMD_ATDTW_TRIPWIRE_SET)
321 break;
322
323 loops--;
324 if (loops == 0) {
325 dev_err(&udc->dev->dev,
326 "Timeout for ATDTW_TRIPWIRE...\n");
327 retval = -ETIME;
328 goto done;
329 }
330 udelay(LOOPS_USEC);
331 }
332
333 /* Clear the semaphore */
334 usbcmd = readl(&udc->op_regs->usbcmd);
335 usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
336 writel(usbcmd, &udc->op_regs->usbcmd);
337
338 if (epstatus)
339 goto done;
340 }
341
342 /* Write dQH next pointer and terminate bit to 0 */
343 dqh->next_dtd_ptr = req->head->td_dma
344 & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
345
346 /* clear active and halt bit, in case set from a previous error */
347 dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
348
349 /* Ensure that updates to the QH will occur before priming. */
350 wmb();
351
352 /* Prime the Endpoint */
353 writel(bit_pos, &udc->op_regs->epprime);
354
355 done:
356 return retval;
357 }
358
359
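/*
 * Build one dTD covering at most EP_MAX_LENGTH_TRANSFER bytes of the
 * request.  The controller walks up to five 4 KB page pointers per dTD,
 * which is why buff_ptr1..buff_ptr4 below are filled with the page-aligned
 * addresses that follow buff_ptr0.
 */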
360 static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
361 dma_addr_t *dma, int *is_last)
362 {
363 u32 temp;
364 struct mv_dtd *dtd;
365 struct mv_udc *udc;
366
367 /* how big will this transfer be? */
368 *length = min(req->req.length - req->req.actual,
369 (unsigned)EP_MAX_LENGTH_TRANSFER);
370
371 udc = req->ep->udc;
372
373 /*
374 * Be careful that no _GFP_HIGHMEM is set,
375 * or we can not use dma_to_virt
376 */
377 dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
378 if (dtd == NULL)
379 return dtd;
380
381 dtd->td_dma = *dma;
382 /* initialize buffer page pointers */
383 temp = (u32)(req->req.dma + req->req.actual);
384 dtd->buff_ptr0 = cpu_to_le32(temp);
385 temp &= ~0xFFF;
386 dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
387 dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
388 dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
389 dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
390
391 req->req.actual += *length;
392
393 /* zlp is needed if req->req.zero is set */
394 if (req->req.zero) {
395 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
396 *is_last = 1;
397 else
398 *is_last = 0;
399 } else if (req->req.length == req->req.actual)
400 *is_last = 1;
401 else
402 *is_last = 0;
403
404 /* Fill in the transfer size; set active bit */
405 temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
406
407 /* Enable interrupt for the last dtd of a request */
408 if (*is_last && !req->req.no_interrupt)
409 temp |= DTD_IOC;
410
411 dtd->size_ioc_sts = temp;
412
413 mb();
414
415 return dtd;
416 }
417
418 /* generate dTD linked list for a request */
419 static int req_to_dtd(struct mv_req *req)
420 {
421 unsigned count;
422 int is_last, is_first = 1;
423 struct mv_dtd *dtd, *last_dtd = NULL;
424 struct mv_udc *udc;
425 dma_addr_t dma;
426
427 udc = req->ep->udc;
428
429 do {
430 dtd = build_dtd(req, &count, &dma, &is_last);
431 if (dtd == NULL)
432 return -ENOMEM;
433
434 if (is_first) {
435 is_first = 0;
436 req->head = dtd;
437 } else {
438 last_dtd->dtd_next = dma;
439 last_dtd->next_dtd_virt = dtd;
440 }
441 last_dtd = dtd;
442 req->dtd_count++;
443 } while (!is_last);
444
445 /* set terminate bit to 1 for the last dTD */
446 dtd->dtd_next = DTD_NEXT_TERMINATE;
447
448 req->tail = dtd;
449
450 return 0;
451 }
452
453 static int mv_ep_enable(struct usb_ep *_ep,
454 const struct usb_endpoint_descriptor *desc)
455 {
456 struct mv_udc *udc;
457 struct mv_ep *ep;
458 struct mv_dqh *dqh;
459 u16 max = 0;
460 u32 bit_pos, epctrlx, direction;
461 unsigned char zlt = 0, ios = 0, mult = 0;
462 unsigned long flags;
463
464 ep = container_of(_ep, struct mv_ep, ep);
465 udc = ep->udc;
466
467 if (!_ep || !desc
468 || desc->bDescriptorType != USB_DT_ENDPOINT)
469 return -EINVAL;
470
471 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
472 return -ESHUTDOWN;
473
474 direction = ep_dir(ep);
475 max = usb_endpoint_maxp(desc);
476
477 /*
478 * disable HW zero-length termination select;
479 * the driver handles zero-length packets through req->req.zero
480 */
481 zlt = 1;
482
483 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
484
485 /* Check if the Endpoint is Primed */
486 if ((readl(&udc->op_regs->epprime) & bit_pos)
487 || (readl(&udc->op_regs->epstatus) & bit_pos)) {
488 dev_info(&udc->dev->dev,
489 "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
490 " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
491 (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
492 (unsigned)readl(&udc->op_regs->epprime),
493 (unsigned)readl(&udc->op_regs->epstatus),
494 (unsigned)bit_pos);
495 goto en_done;
496 }
497 /* Set the max packet length, interrupt on Setup and Mult fields */
498 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
499 case USB_ENDPOINT_XFER_BULK:
500 zlt = 1;
501 mult = 0;
502 break;
503 case USB_ENDPOINT_XFER_CONTROL:
504 ios = 1; /* fall through */
505 case USB_ENDPOINT_XFER_INT:
506 mult = 0;
507 break;
508 case USB_ENDPOINT_XFER_ISOC:
509 /* Calculate transactions needed for high bandwidth iso */
510 mult = (unsigned char)(1 + ((max >> 11) & 0x03));
511 max = max & 0x7ff; /* bit 0~10 */
512 /* 3 transactions at most */
513 if (mult > 3)
514 goto en_done;
515 break;
516 default:
517 goto en_done;
518 }
519
520 spin_lock_irqsave(&udc->lock, flags);
521 /* Get the endpoint queue head address */
522 dqh = ep->dqh;
523 dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
524 | (mult << EP_QUEUE_HEAD_MULT_POS)
525 | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
526 | (ios ? EP_QUEUE_HEAD_IOS : 0);
527 dqh->next_dtd_ptr = 1;
528 dqh->size_ioc_int_sts = 0;
529
530 ep->ep.maxpacket = max;
531 ep->ep.desc = desc;
532 ep->stopped = 0;
533
534 /* Enable the endpoint for Rx or Tx and set the endpoint type */
535 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
536 if (direction == EP_DIR_IN) {
537 epctrlx &= ~EPCTRL_TX_ALL_MASK;
538 epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
539 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
540 << EPCTRL_TX_EP_TYPE_SHIFT);
541 } else {
542 epctrlx &= ~EPCTRL_RX_ALL_MASK;
543 epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
544 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
545 << EPCTRL_RX_EP_TYPE_SHIFT);
546 }
547 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
548
549 /*
550 * Implement Guideline (GL# USB-7) The unused endpoint type must
551 * be programmed to bulk.
552 */
553 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
554 if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
555 epctrlx |= (USB_ENDPOINT_XFER_BULK
556 << EPCTRL_RX_EP_TYPE_SHIFT);
557 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
558 }
559
560 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
561 if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
562 epctrlx |= (USB_ENDPOINT_XFER_BULK
563 << EPCTRL_TX_EP_TYPE_SHIFT);
564 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
565 }
566
567 spin_unlock_irqrestore(&udc->lock, flags);
568
569 return 0;
570 en_done:
571 return -EINVAL;
572 }
573
574 static int mv_ep_disable(struct usb_ep *_ep)
575 {
576 struct mv_udc *udc;
577 struct mv_ep *ep;
578 struct mv_dqh *dqh;
579 u32 bit_pos, epctrlx, direction;
580 unsigned long flags;
581
582 ep = container_of(_ep, struct mv_ep, ep);
583 if ((_ep == NULL) || !ep->ep.desc)
584 return -EINVAL;
585
586 udc = ep->udc;
587
588 /* Get the endpoint queue head address */
589 dqh = ep->dqh;
590
591 spin_lock_irqsave(&udc->lock, flags);
592
593 direction = ep_dir(ep);
594 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
595
596 /* Reset the max packet length and the interrupt on Setup */
597 dqh->max_packet_length = 0;
598
599 /* Disable the endpoint for Rx or Tx and reset the endpoint type */
600 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
601 epctrlx &= ~((direction == EP_DIR_IN)
602 ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
603 : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
604 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
605
606 /* nuke all pending requests (does flush) */
607 nuke(ep, -ESHUTDOWN);
608
609 ep->ep.desc = NULL;
610 ep->stopped = 1;
611
612 spin_unlock_irqrestore(&udc->lock, flags);
613
614 return 0;
615 }
616
617 static struct usb_request *
618 mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
619 {
620 struct mv_req *req = NULL;
621
622 req = kzalloc(sizeof *req, gfp_flags);
623 if (!req)
624 return NULL;
625
626 req->req.dma = DMA_ADDR_INVALID;
627 INIT_LIST_HEAD(&req->queue);
628
629 return &req->req;
630 }
631
632 static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
633 {
634 struct mv_req *req = NULL;
635
636 req = container_of(_req, struct mv_req, req);
637
638 if (_req)
639 kfree(req);
640 }
641
642 static void mv_ep_fifo_flush(struct usb_ep *_ep)
643 {
644 struct mv_udc *udc;
645 u32 bit_pos, direction;
646 struct mv_ep *ep;
647 unsigned int loops;
648
649 if (!_ep)
650 return;
651
652 ep = container_of(_ep, struct mv_ep, ep);
653 if (!ep->ep.desc)
654 return;
655
656 udc = ep->udc;
657 direction = ep_dir(ep);
658
659 if (ep->ep_num == 0)
660 bit_pos = (1 << 16) | 1;
661 else if (direction == EP_DIR_OUT)
662 bit_pos = 1 << ep->ep_num;
663 else
664 bit_pos = 1 << (16 + ep->ep_num);
665
666 loops = LOOPS(EPSTATUS_TIMEOUT);
667 do {
668 unsigned int inter_loops;
669
670 if (loops == 0) {
671 dev_err(&udc->dev->dev,
672 "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
673 (unsigned)readl(&udc->op_regs->epstatus),
674 (unsigned)bit_pos);
675 return;
676 }
677 /* Write 1 to the Flush register */
678 writel(bit_pos, &udc->op_regs->epflush);
679
680 /* Wait until flushing completed */
681 inter_loops = LOOPS(FLUSH_TIMEOUT);
682 while (readl(&udc->op_regs->epflush)) {
683 /*
684 * ENDPTFLUSH bit should be cleared to indicate this
685 * operation is complete
686 */
687 if (inter_loops == 0) {
688 dev_err(&udc->dev->dev,
689 "TIMEOUT for ENDPTFLUSH=0x%x,"
690 "bit_pos=0x%x\n",
691 (unsigned)readl(&udc->op_regs->epflush),
692 (unsigned)bit_pos);
693 return;
694 }
695 inter_loops--;
696 udelay(LOOPS_USEC);
697 }
698 loops--;
699 } while (readl(&udc->op_regs->epstatus) & bit_pos);
700 }
701
702 /* queues (submits) an I/O request to an endpoint */
703 static int
704 mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
705 {
706 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
707 struct mv_req *req = container_of(_req, struct mv_req, req);
708 struct mv_udc *udc = ep->udc;
709 unsigned long flags;
710
711 /* catch various bogus parameters */
712 if (!_req || !req->req.complete || !req->req.buf
713 || !list_empty(&req->queue)) {
714 dev_err(&udc->dev->dev, "%s, bad params", __func__);
715 return -EINVAL;
716 }
717 if (unlikely(!_ep || !ep->ep.desc)) {
718 dev_err(&udc->dev->dev, "%s, bad ep", __func__);
719 return -EINVAL;
720 }
721 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
722 if (req->req.length > ep->ep.maxpacket)
723 return -EMSGSIZE;
724 }
725
726 udc = ep->udc;
727 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
728 return -ESHUTDOWN;
729
730 req->ep = ep;
731
732 /* map virtual address to hardware */
733 if (req->req.dma == DMA_ADDR_INVALID) {
734 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
735 req->req.buf,
736 req->req.length, ep_dir(ep)
737 ? DMA_TO_DEVICE
738 : DMA_FROM_DEVICE);
739 req->mapped = 1;
740 } else {
741 dma_sync_single_for_device(ep->udc->gadget.dev.parent,
742 req->req.dma, req->req.length,
743 ep_dir(ep)
744 ? DMA_TO_DEVICE
745 : DMA_FROM_DEVICE);
746 req->mapped = 0;
747 }
748
749 req->req.status = -EINPROGRESS;
750 req->req.actual = 0;
751 req->dtd_count = 0;
752
753 spin_lock_irqsave(&udc->lock, flags);
754
755 /* build dtds and push them to device queue */
756 if (!req_to_dtd(req)) {
757 int retval;
758 retval = queue_dtd(ep, req);
759 if (retval) {
760 spin_unlock_irqrestore(&udc->lock, flags);
761 return retval;
762 }
763 } else {
764 spin_unlock_irqrestore(&udc->lock, flags);
765 return -ENOMEM;
766 }
767
768 /* Update ep0 state */
769 if (ep->ep_num == 0)
770 udc->ep0_state = DATA_STATE_XMIT;
771
772 /* irq handler advances the queue */
773 list_add_tail(&req->queue, &ep->queue);
774 spin_unlock_irqrestore(&udc->lock, flags);
775
776 return 0;
777 }
778
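/*
 * Point the endpoint queue head at the first dTD of @req and prime the
 * endpoint directly, without the ATDTW tripwire dance in queue_dtd();
 * this is only used from mv_ep_dequeue() after the endpoint has been
 * flushed, so it is known to be idle.
 */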
779 static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
780 {
781 struct mv_dqh *dqh = ep->dqh;
782 u32 bit_pos;
783
784 /* Write dQH next pointer and terminate bit to 0 */
785 dqh->next_dtd_ptr = req->head->td_dma
786 & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
787
788 /* clear active and halt bit, in case set from a previous error */
789 dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
790
791 /* Ensure that updates to the QH will occur before priming. */
792 wmb();
793
794 bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
795
796 /* Prime the Endpoint */
797 writel(bit_pos, &ep->udc->op_regs->epprime);
798 }
799
800 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
801 static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
802 {
803 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
804 struct mv_req *req;
805 struct mv_udc *udc = ep->udc;
806 unsigned long flags;
807 int stopped, ret = 0;
808 u32 epctrlx;
809
810 if (!_ep || !_req)
811 return -EINVAL;
812
813 spin_lock_irqsave(&ep->udc->lock, flags);
814 stopped = ep->stopped;
815
816 /* Stop the ep before we deal with the queue */
817 ep->stopped = 1;
818 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
819 if (ep_dir(ep) == EP_DIR_IN)
820 epctrlx &= ~EPCTRL_TX_ENABLE;
821 else
822 epctrlx &= ~EPCTRL_RX_ENABLE;
823 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
824
825 /* make sure it's actually queued on this endpoint */
826 list_for_each_entry(req, &ep->queue, queue) {
827 if (&req->req == _req)
828 break;
829 }
830 if (&req->req != _req) {
831 ret = -EINVAL;
832 goto out;
833 }
834
835 /* The request is in progress, or completed but not dequeued */
836 if (ep->queue.next == &req->queue) {
837 _req->status = -ECONNRESET;
838 mv_ep_fifo_flush(_ep); /* flush current transfer */
839
840 /* The request isn't the last request in this ep queue */
841 if (req->queue.next != &ep->queue) {
842 struct mv_req *next_req;
843
844 next_req = list_entry(req->queue.next,
845 struct mv_req, queue);
846
847 /* Point the QH to the first TD of next request */
848 mv_prime_ep(ep, next_req);
849 } else {
850 struct mv_dqh *qh;
851
852 qh = ep->dqh;
853 qh->next_dtd_ptr = 1;
854 qh->size_ioc_int_sts = 0;
855 }
856
857 /* The request hasn't been processed, patch up the TD chain */
858 } else {
859 struct mv_req *prev_req;
860
861 prev_req = list_entry(req->queue.prev, struct mv_req, queue);
862 writel(readl(&req->tail->dtd_next),
863 &prev_req->tail->dtd_next);
864
865 }
866
867 done(ep, req, -ECONNRESET);
868
869 /* Enable EP */
870 out:
871 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
872 if (ep_dir(ep) == EP_DIR_IN)
873 epctrlx |= EPCTRL_TX_ENABLE;
874 else
875 epctrlx |= EPCTRL_RX_ENABLE;
876 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
877 ep->stopped = stopped;
878
879 spin_unlock_irqrestore(&ep->udc->lock, flags);
880 return ret;
881 }
882
883 static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
884 {
885 u32 epctrlx;
886
887 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
888
889 if (stall) {
890 if (direction == EP_DIR_IN)
891 epctrlx |= EPCTRL_TX_EP_STALL;
892 else
893 epctrlx |= EPCTRL_RX_EP_STALL;
894 } else {
895 if (direction == EP_DIR_IN) {
896 epctrlx &= ~EPCTRL_TX_EP_STALL;
897 epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
898 } else {
899 epctrlx &= ~EPCTRL_RX_EP_STALL;
900 epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
901 }
902 }
903 writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
904 }
905
906 static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
907 {
908 u32 epctrlx;
909
910 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
911
912 if (direction == EP_DIR_OUT)
913 return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
914 else
915 return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
916 }
917
918 static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
919 {
920 struct mv_ep *ep;
921 unsigned long flags = 0;
922 int status = 0;
923 struct mv_udc *udc;
924
925 ep = container_of(_ep, struct mv_ep, ep);
926 udc = ep->udc;
927 if (!_ep || !ep->ep.desc) {
928 status = -EINVAL;
929 goto out;
930 }
931
932 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
933 status = -EOPNOTSUPP;
934 goto out;
935 }
936
937 /*
938 * An attempt to halt an IN ep will fail if any transfer
939 * requests are still queued
940 */
941 if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
942 status = -EAGAIN;
943 goto out;
944 }
945
946 spin_lock_irqsave(&ep->udc->lock, flags);
947 ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
948 if (halt && wedge)
949 ep->wedge = 1;
950 else if (!halt)
951 ep->wedge = 0;
952 spin_unlock_irqrestore(&ep->udc->lock, flags);
953
954 if (ep->ep_num == 0) {
955 udc->ep0_state = WAIT_FOR_SETUP;
956 udc->ep0_dir = EP_DIR_OUT;
957 }
958 out:
959 return status;
960 }
961
962 static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
963 {
964 return mv_ep_set_halt_wedge(_ep, halt, 0);
965 }
966
967 static int mv_ep_set_wedge(struct usb_ep *_ep)
968 {
969 return mv_ep_set_halt_wedge(_ep, 1, 1);
970 }
971
972 static struct usb_ep_ops mv_ep_ops = {
973 .enable = mv_ep_enable,
974 .disable = mv_ep_disable,
975
976 .alloc_request = mv_alloc_request,
977 .free_request = mv_free_request,
978
979 .queue = mv_ep_queue,
980 .dequeue = mv_ep_dequeue,
981
982 .set_wedge = mv_ep_set_wedge,
983 .set_halt = mv_ep_set_halt,
984 .fifo_flush = mv_ep_fifo_flush, /* flush fifo */
985 };
986
987 static void udc_clock_enable(struct mv_udc *udc)
988 {
989 unsigned int i;
990
991 for (i = 0; i < udc->clknum; i++)
992 clk_enable(udc->clk[i]);
993 }
994
995 static void udc_clock_disable(struct mv_udc *udc)
996 {
997 unsigned int i;
998
999 for (i = 0; i < udc->clknum; i++)
1000 clk_disable(udc->clk[i]);
1001 }
1002
1003 static void udc_stop(struct mv_udc *udc)
1004 {
1005 u32 tmp;
1006
1007 /* Disable interrupts */
1008 tmp = readl(&udc->op_regs->usbintr);
1009 tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
1010 USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
1011 writel(tmp, &udc->op_regs->usbintr);
1012
1013 udc->stopped = 1;
1014
1015 /* Clear the Run bit in the command register to stop VUSB */
1016 tmp = readl(&udc->op_regs->usbcmd);
1017 tmp &= ~USBCMD_RUN_STOP;
1018 writel(tmp, &udc->op_regs->usbcmd);
1019 }
1020
1021 static void udc_start(struct mv_udc *udc)
1022 {
1023 u32 usbintr;
1024
1025 usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
1026 | USBINTR_PORT_CHANGE_DETECT_EN
1027 | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
1028 /* Enable interrupts */
1029 writel(usbintr, &udc->op_regs->usbintr);
1030
1031 udc->stopped = 0;
1032
1033 /* Set the Run bit in the command register */
1034 writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
1035 }
1036
1037 static int udc_reset(struct mv_udc *udc)
1038 {
1039 unsigned int loops;
1040 u32 tmp, portsc;
1041
1042 /* Stop the controller */
1043 tmp = readl(&udc->op_regs->usbcmd);
1044 tmp &= ~USBCMD_RUN_STOP;
1045 writel(tmp, &udc->op_regs->usbcmd);
1046
1047 /* Reset the controller to get default values */
1048 writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
1049
1050 /* wait for reset to complete */
1051 loops = LOOPS(RESET_TIMEOUT);
1052 while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
1053 if (loops == 0) {
1054 dev_err(&udc->dev->dev,
1055 "Wait for RESET completed TIMEOUT\n");
1056 return -ETIMEDOUT;
1057 }
1058 loops--;
1059 udelay(LOOPS_USEC);
1060 }
1061
1062 /* set controller to device mode */
1063 tmp = readl(&udc->op_regs->usbmode);
1064 tmp |= USBMODE_CTRL_MODE_DEVICE;
1065
1066 /* turn setup lockout off, require setup tripwire in usbcmd */
1067 tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;
1068
1069 writel(tmp, &udc->op_regs->usbmode);
1070
1071 writel(0x0, &udc->op_regs->epsetupstat);
1072
1073 /* Configure the Endpoint List Address */
1074 writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
1075 &udc->op_regs->eplistaddr);
1076
1077 portsc = readl(&udc->op_regs->portsc[0]);
1078 if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
1079 portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);
1080
1081 if (udc->force_fs)
1082 portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
1083 else
1084 portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
1085
1086 writel(portsc, &udc->op_regs->portsc[0]);
1087
1088 tmp = readl(&udc->op_regs->epctrlx[0]);
1089 tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
1090 writel(tmp, &udc->op_regs->epctrlx[0]);
1091
1092 return 0;
1093 }
1094
1095 static int mv_udc_enable_internal(struct mv_udc *udc)
1096 {
1097 int retval;
1098
1099 if (udc->active)
1100 return 0;
1101
1102 dev_dbg(&udc->dev->dev, "enable udc\n");
1103 udc_clock_enable(udc);
1104 if (udc->pdata->phy_init) {
1105 retval = udc->pdata->phy_init(udc->phy_regs);
1106 if (retval) {
1107 dev_err(&udc->dev->dev,
1108 "init phy error %d\n", retval);
1109 udc_clock_disable(udc);
1110 return retval;
1111 }
1112 }
1113 udc->active = 1;
1114
1115 return 0;
1116 }
1117
1118 static int mv_udc_enable(struct mv_udc *udc)
1119 {
1120 if (udc->clock_gating)
1121 return mv_udc_enable_internal(udc);
1122
1123 return 0;
1124 }
1125
1126 static void mv_udc_disable_internal(struct mv_udc *udc)
1127 {
1128 if (udc->active) {
1129 dev_dbg(&udc->dev->dev, "disable udc\n");
1130 if (udc->pdata->phy_deinit)
1131 udc->pdata->phy_deinit(udc->phy_regs);
1132 udc_clock_disable(udc);
1133 udc->active = 0;
1134 }
1135 }
1136
1137 static void mv_udc_disable(struct mv_udc *udc)
1138 {
1139 if (udc->clock_gating)
1140 mv_udc_disable_internal(udc);
1141 }
1142
1143 static int mv_udc_get_frame(struct usb_gadget *gadget)
1144 {
1145 struct mv_udc *udc;
1146 u16 retval;
1147
1148 if (!gadget)
1149 return -ENODEV;
1150
1151 udc = container_of(gadget, struct mv_udc, gadget);
1152
1153 retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1154
1155 return retval;
1156 }
1157
1158 /* Tries to wake up the host connected to this gadget */
1159 static int mv_udc_wakeup(struct usb_gadget *gadget)
1160 {
1161 struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1162 u32 portsc;
1163
1164 /* Remote wakeup feature not enabled by host */
1165 if (!udc->remote_wakeup)
1166 return -ENOTSUPP;
1167
1168 portsc = readl(&udc->op_regs->portsc);
1169 /* not suspended? */
1170 if (!(portsc & PORTSCX_PORT_SUSPEND))
1171 return 0;
1172 /* trigger force resume */
1173 portsc |= PORTSCX_PORT_FORCE_RESUME;
1174 writel(portsc, &udc->op_regs->portsc[0]);
1175 return 0;
1176 }
1177
1178 static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
1179 {
1180 struct mv_udc *udc;
1181 unsigned long flags;
1182 int retval = 0;
1183
1184 udc = container_of(gadget, struct mv_udc, gadget);
1185 spin_lock_irqsave(&udc->lock, flags);
1186
1187 udc->vbus_active = (is_active != 0);
1188
1189 dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1190 __func__, udc->softconnect, udc->vbus_active);
1191
1192 if (udc->driver && udc->softconnect && udc->vbus_active) {
1193 retval = mv_udc_enable(udc);
1194 if (retval == 0) {
1195 /* Clock was disabled; need to re-init registers */
1196 udc_reset(udc);
1197 ep0_reset(udc);
1198 udc_start(udc);
1199 }
1200 } else if (udc->driver && udc->softconnect) {
1201 /* stop all the transfers in the queue */
1202 stop_activity(udc, udc->driver);
1203 udc_stop(udc);
1204 mv_udc_disable(udc);
1205 }
1206
1207 spin_unlock_irqrestore(&udc->lock, flags);
1208 return retval;
1209 }
1210
1211 static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
1212 {
1213 struct mv_udc *udc;
1214 unsigned long flags;
1215 int retval = 0;
1216
1217 udc = container_of(gadget, struct mv_udc, gadget);
1218 spin_lock_irqsave(&udc->lock, flags);
1219
1220 udc->softconnect = (is_on != 0);
1221
1222 dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1223 __func__, udc->softconnect, udc->vbus_active);
1224
1225 if (udc->driver && udc->softconnect && udc->vbus_active) {
1226 retval = mv_udc_enable(udc);
1227 if (retval == 0) {
1228 /* Clock was disabled; need to re-init registers */
1229 udc_reset(udc);
1230 ep0_reset(udc);
1231 udc_start(udc);
1232 }
1233 } else if (udc->driver && udc->vbus_active) {
1234 /* stop all the transfers in the queue */
1235 stop_activity(udc, udc->driver);
1236 udc_stop(udc);
1237 mv_udc_disable(udc);
1238 }
1239
1240 spin_unlock_irqrestore(&udc->lock, flags);
1241 return retval;
1242 }
1243
1244 static int mv_udc_start(struct usb_gadget_driver *driver,
1245 int (*bind)(struct usb_gadget *));
1246 static int mv_udc_stop(struct usb_gadget_driver *driver);
1247 /* device controller usb_gadget_ops structure */
1248 static const struct usb_gadget_ops mv_ops = {
1249
1250 /* returns the current frame number */
1251 .get_frame = mv_udc_get_frame,
1252
1253 /* tries to wake up the host connected to this gadget */
1254 .wakeup = mv_udc_wakeup,
1255
1256 /* notify controller that VBUS is powered or not */
1257 .vbus_session = mv_udc_vbus_session,
1258
1259 /* D+ pullup, software-controlled connect/disconnect to USB host */
1260 .pullup = mv_udc_pullup,
1261 .start = mv_udc_start,
1262 .stop = mv_udc_stop,
1263 };
1264
1265 static int eps_init(struct mv_udc *udc)
1266 {
1267 struct mv_ep *ep;
1268 char name[14];
1269 int i;
1270
1271 /* initialize ep0 */
1272 ep = &udc->eps[0];
1273 ep->udc = udc;
1274 strncpy(ep->name, "ep0", sizeof(ep->name));
1275 ep->ep.name = ep->name;
1276 ep->ep.ops = &mv_ep_ops;
1277 ep->wedge = 0;
1278 ep->stopped = 0;
1279 ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
1280 ep->ep_num = 0;
1281 ep->ep.desc = &mv_ep0_desc;
1282 INIT_LIST_HEAD(&ep->queue);
1283
1284 ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1285
1286 /* initialize other endpoints */
1287 for (i = 2; i < udc->max_eps * 2; i++) {
1288 ep = &udc->eps[i];
1289 if (i % 2) {
1290 snprintf(name, sizeof(name), "ep%din", i / 2);
1291 ep->direction = EP_DIR_IN;
1292 } else {
1293 snprintf(name, sizeof(name), "ep%dout", i / 2);
1294 ep->direction = EP_DIR_OUT;
1295 }
1296 ep->udc = udc;
1297 strncpy(ep->name, name, sizeof(ep->name));
1298 ep->ep.name = ep->name;
1299
1300 ep->ep.ops = &mv_ep_ops;
1301 ep->stopped = 0;
1302 ep->ep.maxpacket = (unsigned short) ~0;
1303 ep->ep_num = i / 2;
1304
1305 INIT_LIST_HEAD(&ep->queue);
1306 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1307
1308 ep->dqh = &udc->ep_dqh[i];
1309 }
1310
1311 return 0;
1312 }
1313
1314 /* delete all endpoint requests, called with spinlock held */
1315 static void nuke(struct mv_ep *ep, int status)
1316 {
1317 /* called with spinlock held */
1318 ep->stopped = 1;
1319
1320 /* endpoint fifo flush */
1321 mv_ep_fifo_flush(&ep->ep);
1322
1323 while (!list_empty(&ep->queue)) {
1324 struct mv_req *req = NULL;
1325 req = list_entry(ep->queue.next, struct mv_req, queue);
1326 done(ep, req, status);
1327 }
1328 }
1329
1330 /* stop all USB activities */
1331 static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
1332 {
1333 struct mv_ep *ep;
1334
1335 nuke(&udc->eps[0], -ESHUTDOWN);
1336
1337 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1338 nuke(ep, -ESHUTDOWN);
1339 }
1340
1341 /* report disconnect; the driver is already quiesced */
1342 if (driver) {
1343 spin_unlock(&udc->lock);
1344 driver->disconnect(&udc->gadget);
1345 spin_lock(&udc->lock);
1346 }
1347 }
1348
1349 static int mv_udc_start(struct usb_gadget_driver *driver,
1350 int (*bind)(struct usb_gadget *))
1351 {
1352 struct mv_udc *udc = the_controller;
1353 int retval = 0;
1354 unsigned long flags;
1355
1356 if (!udc)
1357 return -ENODEV;
1358
1359 if (udc->driver)
1360 return -EBUSY;
1361
1362 spin_lock_irqsave(&udc->lock, flags);
1363
1364 /* hook up the driver ... */
1365 driver->driver.bus = NULL;
1366 udc->driver = driver;
1367 udc->gadget.dev.driver = &driver->driver;
1368
1369 udc->usb_state = USB_STATE_ATTACHED;
1370 udc->ep0_state = WAIT_FOR_SETUP;
1371 udc->ep0_dir = EP_DIR_OUT;
1372
1373 spin_unlock_irqrestore(&udc->lock, flags);
1374
1375 retval = bind(&udc->gadget);
1376 if (retval) {
1377 dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
1378 driver->driver.name, retval);
1379 udc->driver = NULL;
1380 udc->gadget.dev.driver = NULL;
1381 return retval;
1382 }
1383
1384 if (udc->transceiver) {
1385 retval = otg_set_peripheral(udc->transceiver->otg,
1386 &udc->gadget);
1387 if (retval) {
1388 dev_err(&udc->dev->dev,
1389 "unable to register peripheral to otg\n");
1390 if (driver->unbind) {
1391 driver->unbind(&udc->gadget);
1392 udc->gadget.dev.driver = NULL;
1393 udc->driver = NULL;
1394 }
1395 return retval;
1396 }
1397 }
1398
1399 /* pullup is always on */
1400 mv_udc_pullup(&udc->gadget, 1);
1401
1402 /* When booting with the cable attached, no vbus irq will occur */
1403 if (udc->qwork)
1404 queue_work(udc->qwork, &udc->vbus_work);
1405
1406 return 0;
1407 }
1408
1409 static int mv_udc_stop(struct usb_gadget_driver *driver)
1410 {
1411 struct mv_udc *udc = the_controller;
1412 unsigned long flags;
1413
1414 if (!udc)
1415 return -ENODEV;
1416
1417 spin_lock_irqsave(&udc->lock, flags);
1418
1419 mv_udc_enable(udc);
1420 udc_stop(udc);
1421
1422 /* stop all usb activities */
1423 udc->gadget.speed = USB_SPEED_UNKNOWN;
1424 stop_activity(udc, driver);
1425 mv_udc_disable(udc);
1426
1427 spin_unlock_irqrestore(&udc->lock, flags);
1428
1429 /* unbind gadget driver */
1430 driver->unbind(&udc->gadget);
1431 udc->gadget.dev.driver = NULL;
1432 udc->driver = NULL;
1433
1434 return 0;
1435 }
1436
1437 static void mv_set_ptc(struct mv_udc *udc, u32 mode)
1438 {
1439 u32 portsc;
1440
1441 portsc = readl(&udc->op_regs->portsc[0]);
1442 portsc |= mode << 16;
1443 writel(portsc, &udc->op_regs->portsc[0]);
1444 }
1445
1446 static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
1447 {
1448 struct mv_udc *udc = the_controller;
1449 struct mv_req *req = container_of(_req, struct mv_req, req);
1450 unsigned long flags;
1451
1452 dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
1453
1454 spin_lock_irqsave(&udc->lock, flags);
1455 if (req->test_mode) {
1456 mv_set_ptc(udc, req->test_mode);
1457 req->test_mode = 0;
1458 }
1459 spin_unlock_irqrestore(&udc->lock, flags);
1460 }
1461
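/*
 * Queue the ep0 handshake: either a zero-length status packet (when
 * @empty is true) or a two-byte response such as GET_STATUS data,
 * sent in @direction.
 */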
1462 static int
1463 udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
1464 {
1465 int retval = 0;
1466 struct mv_req *req;
1467 struct mv_ep *ep;
1468
1469 ep = &udc->eps[0];
1470 udc->ep0_dir = direction;
1471 udc->ep0_state = WAIT_FOR_OUT_STATUS;
1472
1473 req = udc->status_req;
1474
1475 /* fill in the request structure */
1476 if (empty == false) {
1477 *((u16 *) req->req.buf) = cpu_to_le16(status);
1478 req->req.length = 2;
1479 } else
1480 req->req.length = 0;
1481
1482 req->ep = ep;
1483 req->req.status = -EINPROGRESS;
1484 req->req.actual = 0;
1485 if (udc->test_mode) {
1486 req->req.complete = prime_status_complete;
1487 req->test_mode = udc->test_mode;
1488 udc->test_mode = 0;
1489 } else
1490 req->req.complete = NULL;
1491 req->dtd_count = 0;
1492
1493 if (req->req.dma == DMA_ADDR_INVALID) {
1494 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1495 req->req.buf, req->req.length,
1496 ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1497 req->mapped = 1;
1498 }
1499
1500 /* prime the data phase */
1501 if (!req_to_dtd(req))
1502 retval = queue_dtd(ep, req);
1503 else { /* no mem */
1504 retval = -ENOMEM;
1505 goto out;
1506 }
1507
1508 if (retval) {
1509 dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");
1510 goto out;
1511 }
1512
1513 list_add_tail(&req->queue, &ep->queue);
1514
1515 return 0;
1516 out:
1517 return retval;
1518 }
1519
1520 static void mv_udc_testmode(struct mv_udc *udc, u16 index)
1521 {
1522 if (index <= TEST_FORCE_EN) {
1523 udc->test_mode = index;
1524 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1525 ep0_stall(udc);
1526 } else
1527 dev_err(&udc->dev->dev,
1528 "This test mode(%d) is not supported\n", index);
1529 }
1530
1531 static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1532 {
1533 udc->dev_addr = (u8)setup->wValue;
1534
1535 /* update usb state */
1536 udc->usb_state = USB_STATE_ADDRESS;
1537
1538 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1539 ep0_stall(udc);
1540 }
1541
1542 static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1543 struct usb_ctrlrequest *setup)
1544 {
1545 u16 status = 0;
1546 int retval;
1547
1548 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1549 != (USB_DIR_IN | USB_TYPE_STANDARD))
1550 return;
1551
1552 if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1553 status = 1 << USB_DEVICE_SELF_POWERED;
1554 status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1555 } else if ((setup->bRequestType & USB_RECIP_MASK)
1556 == USB_RECIP_INTERFACE) {
1557 /* get interface status */
1558 status = 0;
1559 } else if ((setup->bRequestType & USB_RECIP_MASK)
1560 == USB_RECIP_ENDPOINT) {
1561 u8 ep_num, direction;
1562
1563 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1564 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1565 ? EP_DIR_IN : EP_DIR_OUT;
1566 status = ep_is_stall(udc, ep_num, direction)
1567 << USB_ENDPOINT_HALT;
1568 }
1569
1570 retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1571 if (retval)
1572 ep0_stall(udc);
1573 else
1574 udc->ep0_state = DATA_STATE_XMIT;
1575 }
1576
1577 static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1578 {
1579 u8 ep_num;
1580 u8 direction;
1581 struct mv_ep *ep;
1582
1583 if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1584 == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1585 switch (setup->wValue) {
1586 case USB_DEVICE_REMOTE_WAKEUP:
1587 udc->remote_wakeup = 0;
1588 break;
1589 default:
1590 goto out;
1591 }
1592 } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1593 == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1594 switch (setup->wValue) {
1595 case USB_ENDPOINT_HALT:
1596 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1597 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1598 ? EP_DIR_IN : EP_DIR_OUT;
1599 if (setup->wValue != 0 || setup->wLength != 0
1600 || ep_num > udc->max_eps)
1601 goto out;
1602 ep = &udc->eps[ep_num * 2 + direction];
1603 if (ep->wedge == 1)
1604 break;
1605 spin_unlock(&udc->lock);
1606 ep_set_stall(udc, ep_num, direction, 0);
1607 spin_lock(&udc->lock);
1608 break;
1609 default:
1610 goto out;
1611 }
1612 } else
1613 goto out;
1614
1615 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1616 ep0_stall(udc);
1617 out:
1618 return;
1619 }
1620
1621 static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1622 {
1623 u8 ep_num;
1624 u8 direction;
1625
1626 if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1627 == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1628 switch (setup->wValue) {
1629 case USB_DEVICE_REMOTE_WAKEUP:
1630 udc->remote_wakeup = 1;
1631 break;
1632 case USB_DEVICE_TEST_MODE:
1633 if (setup->wIndex & 0xFF
1634 || udc->gadget.speed != USB_SPEED_HIGH)
1635 ep0_stall(udc);
1636
1637 if (udc->usb_state != USB_STATE_CONFIGURED
1638 && udc->usb_state != USB_STATE_ADDRESS
1639 && udc->usb_state != USB_STATE_DEFAULT)
1640 ep0_stall(udc);
1641
1642 mv_udc_testmode(udc, (setup->wIndex >> 8));
1643 goto out;
1644 default:
1645 goto out;
1646 }
1647 } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1648 == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1649 switch (setup->wValue) {
1650 case USB_ENDPOINT_HALT:
1651 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1652 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1653 ? EP_DIR_IN : EP_DIR_OUT;
1654 if (setup->wValue != 0 || setup->wLength != 0
1655 || ep_num > udc->max_eps)
1656 goto out;
1657 spin_unlock(&udc->lock);
1658 ep_set_stall(udc, ep_num, direction, 1);
1659 spin_lock(&udc->lock);
1660 break;
1661 default:
1662 goto out;
1663 }
1664 } else
1665 goto out;
1666
1667 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1668 ep0_stall(udc);
1669 out:
1670 return;
1671 }
1672
1673 static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
1674 struct usb_ctrlrequest *setup)
1675 {
1676 bool delegate = false;
1677
1678 nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);
1679
1680 dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1681 setup->bRequestType, setup->bRequest,
1682 setup->wValue, setup->wIndex, setup->wLength);
1683 /* We process some standard setup requests here */
1684 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1685 switch (setup->bRequest) {
1686 case USB_REQ_GET_STATUS:
1687 ch9getstatus(udc, ep_num, setup);
1688 break;
1689
1690 case USB_REQ_SET_ADDRESS:
1691 ch9setaddress(udc, setup);
1692 break;
1693
1694 case USB_REQ_CLEAR_FEATURE:
1695 ch9clearfeature(udc, setup);
1696 break;
1697
1698 case USB_REQ_SET_FEATURE:
1699 ch9setfeature(udc, setup);
1700 break;
1701
1702 default:
1703 delegate = true;
1704 }
1705 } else
1706 delegate = true;
1707
1708 /* delegate the remaining requests to the gadget driver */
1709 if (delegate == true) {
1710 /* USB requests handled by gadget */
1711 if (setup->wLength) {
1712 /* DATA phase from gadget, STATUS phase from udc */
1713 udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
1714 ? EP_DIR_IN : EP_DIR_OUT;
1715 spin_unlock(&udc->lock);
1716 if (udc->driver->setup(&udc->gadget,
1717 &udc->local_setup_buff) < 0)
1718 ep0_stall(udc);
1719 spin_lock(&udc->lock);
1720 udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
1721 ? DATA_STATE_XMIT : DATA_STATE_RECV;
1722 } else {
1723 /* no DATA phase, IN STATUS phase from gadget */
1724 udc->ep0_dir = EP_DIR_IN;
1725 spin_unlock(&udc->lock);
1726 if (udc->driver->setup(&udc->gadget,
1727 &udc->local_setup_buff) < 0)
1728 ep0_stall(udc);
1729 spin_lock(&udc->lock);
1730 udc->ep0_state = WAIT_FOR_OUT_STATUS;
1731 }
1732 }
1733 }
1734
1735 /* complete the DATA or STATUS phase of ep0; prime the status phase if needed */
1736 static void ep0_req_complete(struct mv_udc *udc,
1737 struct mv_ep *ep0, struct mv_req *req)
1738 {
1739 u32 new_addr;
1740
1741 if (udc->usb_state == USB_STATE_ADDRESS) {
1742 /* set the new address */
1743 new_addr = (u32)udc->dev_addr;
1744 writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
1745 &udc->op_regs->deviceaddr);
1746 }
1747
1748 done(ep0, req, 0);
1749
1750 switch (udc->ep0_state) {
1751 case DATA_STATE_XMIT:
1752 /* receive status phase */
1753 if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
1754 ep0_stall(udc);
1755 break;
1756 case DATA_STATE_RECV:
1757 /* send status phase */
1758 if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
1759 ep0_stall(udc);
1760 break;
1761 case WAIT_FOR_OUT_STATUS:
1762 udc->ep0_state = WAIT_FOR_SETUP;
1763 break;
1764 case WAIT_FOR_SETUP:
1765 dev_err(&udc->dev->dev, "unexpected ep0 packets\n");
1766 break;
1767 default:
1768 ep0_stall(udc);
1769 break;
1770 }
1771 }
1772
1773 static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
1774 {
1775 u32 temp;
1776 struct mv_dqh *dqh;
1777
1778 dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
1779
1780 /* Clear bit in ENDPTSETUPSTAT */
1781 writel((1 << ep_num), &udc->op_regs->epsetupstat);
1782
1783 /* retry while a hazard exists, i.e. a new setup packet arrives during the copy */
1784 do {
1785 /* Set Setup Tripwire */
1786 temp = readl(&udc->op_regs->usbcmd);
1787 writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
1788
1789 /* Copy the setup packet to local buffer */
1790 memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
1791 } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));
1792
1793 /* Clear Setup Tripwire */
1794 temp = readl(&udc->op_regs->usbcmd);
1795 writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
1796 }
1797
1798 static void irq_process_tr_complete(struct mv_udc *udc)
1799 {
1800 u32 tmp, bit_pos;
1801 int i, ep_num = 0, direction = 0;
1802 struct mv_ep *curr_ep;
1803 struct mv_req *curr_req, *temp_req;
1804 int status;
1805
1806 /*
1807 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
1808 * because the setup packets are to be read ASAP
1809 */
1810
1811 /* Process all Setup packet received interrupts */
1812 tmp = readl(&udc->op_regs->epsetupstat);
1813
1814 if (tmp) {
1815 for (i = 0; i < udc->max_eps; i++) {
1816 if (tmp & (1 << i)) {
1817 get_setup_data(udc, i,
1818 (u8 *)(&udc->local_setup_buff));
1819 handle_setup_packet(udc, i,
1820 &udc->local_setup_buff);
1821 }
1822 }
1823 }
1824
1825 /* Don't clear the endpoint setup status register here.
1826 * It is cleared as a setup packet is read out of the buffer
1827 */
1828
1829 /* Process non-setup transaction complete interrupts */
1830 tmp = readl(&udc->op_regs->epcomplete);
1831
1832 if (!tmp)
1833 return;
1834
1835 writel(tmp, &udc->op_regs->epcomplete);
1836
1837 for (i = 0; i < udc->max_eps * 2; i++) {
1838 ep_num = i >> 1;
1839 direction = i % 2;
1840
1841 bit_pos = 1 << (ep_num + 16 * direction);
1842
1843 if (!(bit_pos & tmp))
1844 continue;
1845
1846 if (i == 1) /* ep0 IN completions are handled on eps[0] */
1847 curr_ep = &udc->eps[0];
1848 else
1849 curr_ep = &udc->eps[i];
1850 /* process the req queue until an incomplete request is found */
1851 list_for_each_entry_safe(curr_req, temp_req,
1852 &curr_ep->queue, queue) {
1853 status = process_ep_req(udc, i, curr_req);
1854 if (status)
1855 break;
1856
1857 /* write back status to req */
1858 curr_req->req.status = status;
1859
1860 /* ep0 request completion */
1861 if (ep_num == 0) {
1862 ep0_req_complete(udc, curr_ep, curr_req);
1863 break;
1864 } else {
1865 done(curr_ep, curr_req, status);
1866 }
1867 }
1868 }
1869 }
1870
1871 void irq_process_reset(struct mv_udc *udc)
1872 {
1873 u32 tmp;
1874 unsigned int loops;
1875
1876 udc->ep0_dir = EP_DIR_OUT;
1877 udc->ep0_state = WAIT_FOR_SETUP;
1878 udc->remote_wakeup = 0; /* default to 0 on reset */
1879
1880 /* The device address occupies bits 25-31 of deviceaddr; clear it */
1881 tmp = readl(&udc->op_regs->deviceaddr);
1882 tmp &= ~(USB_DEVICE_ADDRESS_MASK);
1883 writel(tmp, &udc->op_regs->deviceaddr);
1884
1885 /* Clear all the setup token semaphores */
1886 tmp = readl(&udc->op_regs->epsetupstat);
1887 writel(tmp, &udc->op_regs->epsetupstat);
1888
1889 /* Clear all the endpoint complete status bits */
1890 tmp = readl(&udc->op_regs->epcomplete);
1891 writel(tmp, &udc->op_regs->epcomplete);
1892
1893 /* wait until all endptprime bits cleared */
1894 loops = LOOPS(PRIME_TIMEOUT);
1895 while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
1896 if (loops == 0) {
1897 dev_err(&udc->dev->dev,
1898 "Timeout for ENDPTPRIME = 0x%x\n",
1899 readl(&udc->op_regs->epprime));
1900 break;
1901 }
1902 loops--;
1903 udelay(LOOPS_USEC);
1904 }
1905
1906 /* Write 1s to the Flush register */
1907 writel((u32)~0, &udc->op_regs->epflush);
1908
1909 if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
1910 dev_info(&udc->dev->dev, "usb bus reset\n");
1911 udc->usb_state = USB_STATE_DEFAULT;
1912 /* reset all the queues, stop all USB activities */
1913 stop_activity(udc, udc->driver);
1914 } else {
1915 dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
1916 readl(&udc->op_regs->portsc));
1917
1918 /*
1919 * re-initialize
1920 * controller reset
1921 */
1922 udc_reset(udc);
1923
1924 /* reset all the queues, stop all USB activities */
1925 stop_activity(udc, udc->driver);
1926
1927 /* reset ep0 dQH and endptctrl */
1928 ep0_reset(udc);
1929
1930 /* enable interrupt and set controller to run state */
1931 udc_start(udc);
1932
1933 udc->usb_state = USB_STATE_ATTACHED;
1934 }
1935 }
1936
1937 static void handle_bus_resume(struct mv_udc *udc)
1938 {
1939 udc->usb_state = udc->resume_state;
1940 udc->resume_state = 0;
1941
1942 /* report resume to the driver */
1943 if (udc->driver) {
1944 if (udc->driver->resume) {
1945 spin_unlock(&udc->lock);
1946 udc->driver->resume(&udc->gadget);
1947 spin_lock(&udc->lock);
1948 }
1949 }
1950 }
1951
1952 static void irq_process_suspend(struct mv_udc *udc)
1953 {
1954 udc->resume_state = udc->usb_state;
1955 udc->usb_state = USB_STATE_SUSPENDED;
1956
1957 if (udc->driver->suspend) {
1958 spin_unlock(&udc->lock);
1959 udc->driver->suspend(&udc->gadget);
1960 spin_lock(&udc->lock);
1961 }
1962 }
1963
1964 static void irq_process_port_change(struct mv_udc *udc)
1965 {
1966 u32 portsc;
1967
1968 portsc = readl(&udc->op_regs->portsc[0]);
1969 if (!(portsc & PORTSCX_PORT_RESET)) {
1970 /* Get the speed */
1971 u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
1972 switch (speed) {
1973 case PORTSCX_PORT_SPEED_HIGH:
1974 udc->gadget.speed = USB_SPEED_HIGH;
1975 break;
1976 case PORTSCX_PORT_SPEED_FULL:
1977 udc->gadget.speed = USB_SPEED_FULL;
1978 break;
1979 case PORTSCX_PORT_SPEED_LOW:
1980 udc->gadget.speed = USB_SPEED_LOW;
1981 break;
1982 default:
1983 udc->gadget.speed = USB_SPEED_UNKNOWN;
1984 break;
1985 }
1986 }
1987
1988 if (portsc & PORTSCX_PORT_SUSPEND) {
1989 udc->resume_state = udc->usb_state;
1990 udc->usb_state = USB_STATE_SUSPENDED;
1991 if (udc->driver->suspend) {
1992 spin_unlock(&udc->lock);
1993 udc->driver->suspend(&udc->gadget);
1994 spin_lock(&udc->lock);
1995 }
1996 }
1997
1998 if (!(portsc & PORTSCX_PORT_SUSPEND)
1999 && udc->usb_state == USB_STATE_SUSPENDED) {
2000 handle_bus_resume(udc);
2001 }
2002
2003 if (!udc->resume_state)
2004 udc->usb_state = USB_STATE_DEFAULT;
2005 }
2006
2007 static void irq_process_error(struct mv_udc *udc)
2008 {
2009 /* Increment the error count */
2010 udc->errors++;
2011 }
2012
2013 static irqreturn_t mv_udc_irq(int irq, void *dev)
2014 {
2015 struct mv_udc *udc = (struct mv_udc *)dev;
2016 u32 status, intr;
2017
2018 /* Disable ISR when stopped bit is set */
2019 if (udc->stopped)
2020 return IRQ_NONE;
2021
2022 spin_lock(&udc->lock);
2023
2024 status = readl(&udc->op_regs->usbsts);
2025 intr = readl(&udc->op_regs->usbintr);
2026 status &= intr;
2027
2028 if (status == 0) {
2029 spin_unlock(&udc->lock);
2030 return IRQ_NONE;
2031 }
2032
2033 /* Clear all the interrupts occurred */
2034 writel(status, &udc->op_regs->usbsts);
2035
2036 if (status & USBSTS_ERR)
2037 irq_process_error(udc);
2038
2039 if (status & USBSTS_RESET)
2040 irq_process_reset(udc);
2041
2042 if (status & USBSTS_PORT_CHANGE)
2043 irq_process_port_change(udc);
2044
2045 if (status & USBSTS_INT)
2046 irq_process_tr_complete(udc);
2047
2048 if (status & USBSTS_SUSPEND)
2049 irq_process_suspend(udc);
2050
2051 spin_unlock(&udc->lock);
2052
2053 return IRQ_HANDLED;
2054 }
2055
2056 static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2057 {
2058 struct mv_udc *udc = (struct mv_udc *)dev;
2059
2060 /* polling VBUS and initializing the phy may take too much time */
2061 if (udc->qwork)
2062 queue_work(udc->qwork, &udc->vbus_work);
2063
2064 return IRQ_HANDLED;
2065 }
2066
2067 static void mv_udc_vbus_work(struct work_struct *work)
2068 {
2069 struct mv_udc *udc;
2070 unsigned int vbus;
2071
2072 udc = container_of(work, struct mv_udc, vbus_work);
2073 if (!udc->pdata->vbus)
2074 return;
2075
2076 vbus = udc->pdata->vbus->poll();
2077 dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2078
2079 if (vbus == VBUS_HIGH)
2080 mv_udc_vbus_session(&udc->gadget, 1);
2081 else if (vbus == VBUS_LOW)
2082 mv_udc_vbus_session(&udc->gadget, 0);
2083 }
2084
2085 /* release device structure */
2086 static void gadget_release(struct device *_dev)
2087 {
2088 struct mv_udc *udc = the_controller;
2089
2090 complete(udc->done);
2091 }
2092
2093 static int __devexit mv_udc_remove(struct platform_device *dev)
2094 {
2095 struct mv_udc *udc = the_controller;
2096 int clk_i;
2097
2098 usb_del_gadget_udc(&udc->gadget);
2099
2100 if (udc->qwork) {
2101 flush_workqueue(udc->qwork);
2102 destroy_workqueue(udc->qwork);
2103 }
2104
2105 /*
2106 * If a transceiver has been initialized,
2107 * the vbus irq will not have been requested in the udc driver.
2108 */
2109 if (udc->pdata && udc->pdata->vbus
2110 && udc->clock_gating && udc->transceiver == NULL)
2111 free_irq(udc->pdata->vbus->irq, &dev->dev);
2112
2113 /* free memory allocated in probe */
2114 if (udc->dtd_pool)
2115 dma_pool_destroy(udc->dtd_pool);
2116
2117 if (udc->ep_dqh)
2118 dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2119 udc->ep_dqh, udc->ep_dqh_dma);
2120
2121 kfree(udc->eps);
2122
2123 if (udc->irq)
2124 free_irq(udc->irq, udc);
2125
2126 mv_udc_disable(udc);
2127
2128 if (udc->cap_regs)
2129 iounmap(udc->cap_regs);
2130
2131 if (udc->phy_regs)
2132 iounmap(udc->phy_regs);
2133
2134 if (udc->status_req) {
2135 kfree(udc->status_req->req.buf);
2136 kfree(udc->status_req);
2137 }
2138
2139 for (clk_i = 0; clk_i < udc->clknum; clk_i++)
2140 clk_put(udc->clk[clk_i]);
2141
2142 device_unregister(&udc->gadget.dev);
2143
2144 /* free dev, wait for the release() finished */
2145 wait_for_completion(udc->done);
2146 kfree(udc);
2147
2148 the_controller = NULL;
2149
2150 return 0;
2151 }
2152
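/*
 * mv_udc_probe() below expects the board to have registered an "mv-udc"
 * platform device with struct mv_usb_platform_data attached (declared in
 * <linux/platform_data/mv_usb.h>, the vbus hook being the addon-irq type
 * from that header).  A minimal sketch of such board data, showing only
 * the clock and VBUS-detection fields used by this driver; the clock
 * name, IRQ number and GPIO are hypothetical placeholders, and .mode/OTG
 * setup is omitted:
 *
 *	static char *board_udc_clks[] = { "u2o-clk" };
 *
 *	static unsigned int board_vbus_poll(void)
 *	{
 *		return gpio_get_value(BOARD_VBUS_GPIO) ? VBUS_HIGH : VBUS_LOW;
 *	}
 *
 *	static struct mv_usb_addon_irq board_vbus = {
 *		.irq	= BOARD_VBUS_IRQ,
 *		.poll	= board_vbus_poll,
 *	};
 *
 *	static struct mv_usb_platform_data board_udc_pdata = {
 *		.clknum		= ARRAY_SIZE(board_udc_clks),
 *		.clkname	= board_udc_clks,
 *		.vbus		= &board_vbus,
 *	};
 */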
2153 static int __devinit mv_udc_probe(struct platform_device *dev)
2154 {
2155 struct mv_usb_platform_data *pdata = dev->dev.platform_data;
2156 struct mv_udc *udc;
2157 int retval = 0;
2158 int clk_i = 0;
2159 struct resource *r;
2160 size_t size;
2161
2162 if (pdata == NULL) {
2163 dev_err(&dev->dev, "missing platform_data\n");
2164 return -ENODEV;
2165 }
2166
2167 size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
2168 udc = kzalloc(size, GFP_KERNEL);
2169 if (udc == NULL) {
2170 dev_err(&dev->dev, "failed to allocate memory for udc\n");
2171 return -ENOMEM;
2172 }
2173
2174 the_controller = udc;
2175 udc->done = &release_done;
2176 udc->pdata = dev->dev.platform_data;
2177 spin_lock_init(&udc->lock);
2178
2179 udc->dev = dev;
2180
2181 #ifdef CONFIG_USB_OTG_UTILS
2182 if (pdata->mode == MV_USB_MODE_OTG)
2183 udc->transceiver = usb_get_transceiver();
2184 #endif
2185
2186 udc->clknum = pdata->clknum;
2187 for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
2188 udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
2189 if (IS_ERR(udc->clk[clk_i])) {
2190 retval = PTR_ERR(udc->clk[clk_i]);
2191 goto err_put_clk;
2192 }
2193 }
2194
2195 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
2196 if (r == NULL) {
2197 dev_err(&dev->dev, "no I/O memory resource defined\n");
2198 retval = -ENODEV;
2199 goto err_put_clk;
2200 }
2201
2202 udc->cap_regs = (struct mv_cap_regs __iomem *)
2203 ioremap(r->start, resource_size(r));
2204 if (udc->cap_regs == NULL) {
2205 dev_err(&dev->dev, "failed to map I/O memory\n");
2206 retval = -EBUSY;
2207 goto err_put_clk;
2208 }
2209
2210 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
2211 if (r == NULL) {
2212 dev_err(&dev->dev, "no phy I/O memory resource defined\n");
2213 retval = -ENODEV;
2214 goto err_iounmap_capreg;
2215 }
2216
2217 udc->phy_regs = ioremap(r->start, resource_size(r));
2218 if (udc->phy_regs == NULL) {
2219 dev_err(&dev->dev, "failed to map phy I/O memory\n");
2220 retval = -EBUSY;
2221 goto err_iounmap_capreg;
2222 }
2223
2224 /* we will access the controller registers, so enable the clock */
2225 retval = mv_udc_enable_internal(udc);
2226 if (retval)
2227 goto err_iounmap_phyreg;
2228
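/* the operational register block starts CAPLENGTH bytes past the capability register base */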
2229 udc->op_regs =
2230 (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
2231 + (readl(&udc->cap_regs->caplength_hciversion)
2232 & CAPLENGTH_MASK));
2233 udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2234
2235 /*
2236 * Some platforms use USB to download the boot image and may not disconnect
2237 * the USB gadget before loading the kernel, so stop the udc first.
2238 */
2239 udc_stop(udc);
2240 writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2241
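/* one dQH per endpoint per direction; round the total size up to DQH_ALIGNMENT */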
2242 size = udc->max_eps * sizeof(struct mv_dqh) * 2;
2243 size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2244 udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
2245 &udc->ep_dqh_dma, GFP_KERNEL);
2246
2247 if (udc->ep_dqh == NULL) {
2248 dev_err(&dev->dev, "allocate dQH memory failed\n");
2249 retval = -ENOMEM;
2250 goto err_disable_clock;
2251 }
2252 udc->ep_dqh_size = size;
2253
2254 /* create dTD dma_pool resource */
2255 udc->dtd_pool = dma_pool_create("mv_dtd",
2256 &dev->dev,
2257 sizeof(struct mv_dtd),
2258 DTD_ALIGNMENT,
2259 DMA_BOUNDARY);
2260
2261 if (!udc->dtd_pool) {
2262 retval = -ENOMEM;
2263 goto err_free_dma;
2264 }
2265
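/* two struct mv_ep entries (OUT and IN) per hardware endpoint */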
2266 size = udc->max_eps * sizeof(struct mv_ep) * 2;
2267 udc->eps = kzalloc(size, GFP_KERNEL);
2268 if (udc->eps == NULL) {
2269 dev_err(&dev->dev, "allocate ep memory failed\n");
2270 retval = -ENOMEM;
2271 goto err_destroy_dma;
2272 }
2273
2274 /* initialize ep0 status request structure */
2275 udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
2276 if (!udc->status_req) {
2277 dev_err(&dev->dev, "allocate status_req memory failed\n");
2278 retval = -ENOMEM;
2279 goto err_free_eps;
2280 }
2281 INIT_LIST_HEAD(&udc->status_req->queue);
2282
2283 /* allocate a small buffer so the ep0 status request has a valid buffer address */
2284 udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
2285 udc->status_req->req.dma = DMA_ADDR_INVALID;
2286
2287 udc->resume_state = USB_STATE_NOTATTACHED;
2288 udc->usb_state = USB_STATE_POWERED;
2289 udc->ep0_dir = EP_DIR_OUT;
2290 udc->remote_wakeup = 0;
2291
2292 r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2293 if (r == NULL) {
2294 dev_err(&dev->dev, "no IRQ resource defined\n");
2295 retval = -ENODEV;
2296 goto err_free_status_req;
2297 }
2298 udc->irq = r->start;
2299 if (request_irq(udc->irq, mv_udc_irq,
2300 IRQF_SHARED, driver_name, udc)) {
2301 dev_err(&dev->dev, "Request irq %d for UDC failed\n",
2302 udc->irq);
2303 retval = -ENODEV;
2304 goto err_free_status_req;
2305 }
2306
2307 /* initialize gadget structure */
2308 udc->gadget.ops = &mv_ops; /* usb_gadget_ops */
2309 udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
2310 INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
2311 udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
2312 udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
2313
2314 /* the "gadget" abstracts/virtualizes the controller */
2315 dev_set_name(&udc->gadget.dev, "gadget");
2316 udc->gadget.dev.parent = &dev->dev;
2317 udc->gadget.dev.dma_mask = dev->dev.dma_mask;
2318 udc->gadget.dev.release = gadget_release;
2319 udc->gadget.name = driver_name; /* gadget name */
2320
2321 retval = device_register(&udc->gadget.dev);
2322 if (retval)
2323 goto err_free_irq;
2324
2325 eps_init(udc);
2326
2327 /* VBUS detection lets us disable/enable the clock on demand */
2328 if (udc->transceiver)
2329 udc->clock_gating = 1;
2330 else if (pdata->vbus) {
2331 udc->clock_gating = 1;
2332 retval = request_threaded_irq(pdata->vbus->irq, NULL,
2333 mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
2334 if (retval) {
2335 dev_info(&dev->dev,
2336 "Cannot request IRQ for VBUS, "
2337 "disabling clock gating\n");
2338 udc->clock_gating = 0;
2339 }
2340
2341 udc->qwork = create_singlethread_workqueue("mv_udc_queue");
2342 if (!udc->qwork) {
2343 dev_err(&dev->dev, "cannot create workqueue\n");
2344 retval = -ENOMEM;
2345 goto err_unregister;
2346 }
2347
2348 INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
2349 }
2350
2351 /*
2352 * When clock gating is supported, the clock and PHY can be disabled
2353 * here.  If not, VBUS detection is unavailable and vbus_active must
2354 * stay set so the controller keeps working.
2355 */
2356 if (udc->clock_gating)
2357 mv_udc_disable_internal(udc);
2358 else
2359 udc->vbus_active = 1;
2360
2361 retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
2362 if (retval)
2363 goto err_unregister;
2364
2365 dev_info(&dev->dev, "successfully probed UDC device, %s clock gating\n",
2366 udc->clock_gating ? "with" : "without");
2367
2368 return 0;
2369
2370 err_unregister:
2371 if (udc->pdata && udc->pdata->vbus
2372 && udc->clock_gating && udc->transceiver == NULL)
2373 free_irq(pdata->vbus->irq, udc);
2374 device_unregister(&udc->gadget.dev);
2375 err_free_irq:
2376 free_irq(udc->irq, udc);
2377 err_free_status_req:
2378 kfree(udc->status_req->req.buf);
2379 kfree(udc->status_req);
2380 err_free_eps:
2381 kfree(udc->eps);
2382 err_destroy_dma:
2383 dma_pool_destroy(udc->dtd_pool);
2384 err_free_dma:
2385 dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2386 udc->ep_dqh, udc->ep_dqh_dma);
2387 err_disable_clock:
2388 mv_udc_disable_internal(udc);
2389 err_iounmap_phyreg:
2390 iounmap(udc->phy_regs);
2391 err_iounmap_capreg:
2392 iounmap(udc->cap_regs);
2393 err_put_clk:
2394 for (clk_i--; clk_i >= 0; clk_i--)
2395 clk_put(udc->clk[clk_i]);
2396 the_controller = NULL;
2397 kfree(udc);
2398 return retval;
2399 }
2400
2401 #ifdef CONFIG_PM
2402 static int mv_udc_suspend(struct device *_dev)
2403 {
2404 struct mv_udc *udc = the_controller;
2405
2406 /* if OTG is enabled, suspend handling is done by the OTG driver */
2407 if (udc->transceiver)
2408 return 0;
2409
2410 if (udc->pdata->vbus && udc->pdata->vbus->poll)
2411 if (udc->pdata->vbus->poll() == VBUS_HIGH) {
2412 dev_info(&udc->dev->dev, "USB cable is connected!\n");
2413 return -EAGAIN;
2414 }
2415
2416 /*
2417 * The udc can only suspend once the cable is unplugged, so the
2418 * clock_gating == 1 case needs no extra handling here.
2419 */
2420 if (!udc->clock_gating) {
2421 udc_stop(udc);
2422
2423 spin_lock_irq(&udc->lock);
2424 /* stop all usb activities */
2425 stop_activity(udc, udc->driver);
2426 spin_unlock_irq(&udc->lock);
2427
2428 mv_udc_disable_internal(udc);
2429 }
2430
2431 return 0;
2432 }
2433
2434 static int mv_udc_resume(struct device *_dev)
2435 {
2436 struct mv_udc *udc = the_controller;
2437 int retval;
2438
2439 /* if OTG is enabled, resume handling is done by the OTG driver */
2440 if (udc->transceiver)
2441 return 0;
2442
2443 if (!udc->clock_gating) {
2444 retval = mv_udc_enable_internal(udc);
2445 if (retval)
2446 return retval;
2447
2448 if (udc->driver && udc->softconnect) {
2449 udc_reset(udc);
2450 ep0_reset(udc);
2451 udc_start(udc);
2452 }
2453 }
2454
2455 return 0;
2456 }
2457
2458 static const struct dev_pm_ops mv_udc_pm_ops = {
2459 .suspend = mv_udc_suspend,
2460 .resume = mv_udc_resume,
2461 };
2462 #endif
2463
2464 static void mv_udc_shutdown(struct platform_device *dev)
2465 {
2466 struct mv_udc *udc = the_controller;
2467 u32 mode;
2468
2469 /* reset controller mode to IDLE */
2470 mode = readl(&udc->op_regs->usbmode);
2471 mode &= ~3;
2472 writel(mode, &udc->op_regs->usbmode);
2473 }
2474
2475 static struct platform_driver udc_driver = {
2476 .probe = mv_udc_probe,
2477 .remove = __devexit_p(mv_udc_remove),
2478 .shutdown = mv_udc_shutdown,
2479 .driver = {
2480 .owner = THIS_MODULE,
2481 .name = "mv-udc",
2482 #ifdef CONFIG_PM
2483 .pm = &mv_udc_pm_ops,
2484 #endif
2485 },
2486 };
2487
2488 module_platform_driver(udc_driver);
2489 MODULE_ALIAS("platform:mv-udc");
2490 MODULE_DESCRIPTION(DRIVER_DESC);
2491 MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
2492 MODULE_VERSION(DRIVER_VERSION);
2493 MODULE_LICENSE("GPL");