usb: s3c-hsotg: Use devm_* functions in s3c-hsotg.c file
[deliverable/linux.git] / drivers / usb / gadget / s3c-hsotg.c
CommitLineData
8b9bc460
LM
1/**
2 * linux/drivers/usb/gadget/s3c-hsotg.c
dfbc6fa3
AT
3 *
4 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
5b7d70c6
BD
6 *
7 * Copyright 2008 Openmoko, Inc.
8 * Copyright 2008 Simtec Electronics
9 * Ben Dooks <ben@simtec.co.uk>
10 * http://armlinux.simtec.co.uk/
11 *
12 * S3C USB2.0 High-speed / OtG driver
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
8b9bc460 17 */
5b7d70c6
BD
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/spinlock.h>
22#include <linux/interrupt.h>
23#include <linux/platform_device.h>
24#include <linux/dma-mapping.h>
25#include <linux/debugfs.h>
26#include <linux/seq_file.h>
27#include <linux/delay.h>
28#include <linux/io.h>
5a0e3ad6 29#include <linux/slab.h>
e50bf385 30#include <linux/clk.h>
fc9a731e 31#include <linux/regulator/consumer.h>
5b7d70c6
BD
32
33#include <linux/usb/ch9.h>
34#include <linux/usb/gadget.h>
126625e1 35#include <linux/platform_data/s3c-hsotg.h>
5b7d70c6
BD
36
37#include <mach/map.h>
38
127d42ae 39#include "s3c-hsotg.h"
5b7d70c6
BD
40
41#define DMA_ADDR_INVALID (~((dma_addr_t)0))
42
fc9a731e
LM
/* Regulator names this driver bulk-requests, in supplies[] order. */
static const char * const s3c_hsotg_supply_names[] = {
	"vusb_d",		/* digital USB supply, 1.2V */
	"vusb_a",		/* analog USB supply, 1.1V */
};
47
8b9bc460
LM
48/*
49 * EP0_MPS_LIMIT
5b7d70c6
BD
50 *
51 * Unfortunately there seems to be a limit of the amount of data that can
25985edc
LDM
52 * be transferred by IN transactions on EP0. This is either 127 bytes or 3
53 * packets (which practically means 1 packet and 63 bytes of data) when the
5b7d70c6
BD
54 * MPS is set to 64.
55 *
56 * This means if we are wanting to move >127 bytes of data, we need to
57 * split the transactions up, but just doing one packet at a time does
58 * not work (this may be an implicit DATA0 PID on first packet of the
59 * transaction) and doing 2 packets is outside the controller's limits.
60 *
61 * If we try to lower the MPS size for EP0, then no transfers work properly
62 * for EP0, and the system will fail basic enumeration. As no cause for this
63 * has currently been found, we cannot support any large IN transfers for
64 * EP0.
65 */
66#define EP0_MPS_LIMIT 64
67
68struct s3c_hsotg;
69struct s3c_hsotg_req;
70
71/**
72 * struct s3c_hsotg_ep - driver endpoint definition.
73 * @ep: The gadget layer representation of the endpoint.
74 * @name: The driver generated name for the endpoint.
75 * @queue: Queue of requests for this endpoint.
76 * @parent: Reference back to the parent device structure.
77 * @req: The current request that the endpoint is processing. This is
78 * used to indicate an request has been loaded onto the endpoint
79 * and has yet to be completed (maybe due to data move, or simply
80 * awaiting an ack from the core all the data has been completed).
81 * @debugfs: File entry for debugfs file for this endpoint.
82 * @lock: State lock to protect contents of endpoint.
83 * @dir_in: Set to true if this endpoint is of the IN direction, which
84 * means that it is sending data to the Host.
85 * @index: The index for the endpoint registers.
86 * @name: The name array passed to the USB core.
87 * @halted: Set if the endpoint has been halted.
88 * @periodic: Set if this is a periodic ep, such as Interrupt
89 * @sent_zlp: Set if we've sent a zero-length packet.
90 * @total_data: The total number of data bytes done.
91 * @fifo_size: The size of the FIFO (for periodic IN endpoints)
92 * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
93 * @last_load: The offset of data for the last start of request.
94 * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
95 *
96 * This is the driver's state for each registered enpoint, allowing it
97 * to keep track of transactions that need doing. Each endpoint has a
98 * lock to protect the state, to try and avoid using an overall lock
99 * for the host controller as much as possible.
100 *
101 * For periodic IN endpoints, we have fifo_size and fifo_load to try
102 * and keep track of the amount of data in the periodic FIFO for each
103 * of these as we don't have a status register that tells us how much
e7a9ff54
BD
104 * is in each of them. (note, this may actually be useless information
105 * as in shared-fifo mode periodic in acts like a single-frame packet
106 * buffer than a fifo)
5b7d70c6
BD
107 */
108struct s3c_hsotg_ep {
109 struct usb_ep ep;
110 struct list_head queue;
111 struct s3c_hsotg *parent;
112 struct s3c_hsotg_req *req;
113 struct dentry *debugfs;
114
115 spinlock_t lock;
116
117 unsigned long total_data;
118 unsigned int size_loaded;
119 unsigned int last_load;
120 unsigned int fifo_load;
121 unsigned short fifo_size;
122
123 unsigned char dir_in;
124 unsigned char index;
125
126 unsigned int halted:1;
127 unsigned int periodic:1;
128 unsigned int sent_zlp:1;
129
130 char name[10];
131};
132
5b7d70c6
BD
133/**
134 * struct s3c_hsotg - driver state.
135 * @dev: The parent device supplied to the probe function
136 * @driver: USB gadget driver
137 * @plat: The platform specific configuration data.
138 * @regs: The memory area mapped for accessing registers.
5b7d70c6 139 * @irq: The IRQ number we are using
fc9a731e 140 * @supplies: Definition of USB power supplies
10aebc77 141 * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
b3f489b2 142 * @num_of_eps: Number of available EPs (excluding EP0)
5b7d70c6
BD
143 * @debug_root: root directrory for debugfs.
144 * @debug_file: main status file for debugfs.
145 * @debug_fifo: FIFO status file for debugfs.
146 * @ep0_reply: Request used for ep0 reply.
147 * @ep0_buff: Buffer for EP0 reply data, if needed.
148 * @ctrl_buff: Buffer for EP0 control requests.
149 * @ctrl_req: Request for EP0 control packets.
71225bee 150 * @setup: NAK management for EP0 SETUP
12a1f4dc 151 * @last_rst: Time of last reset
5b7d70c6
BD
152 * @eps: The endpoints being supplied to the gadget framework
153 */
154struct s3c_hsotg {
155 struct device *dev;
156 struct usb_gadget_driver *driver;
157 struct s3c_hsotg_plat *plat;
158
159 void __iomem *regs;
5b7d70c6 160 int irq;
31ee04de 161 struct clk *clk;
5b7d70c6 162
fc9a731e
LM
163 struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsotg_supply_names)];
164
10aebc77 165 unsigned int dedicated_fifos:1;
b3f489b2 166 unsigned char num_of_eps;
10aebc77 167
5b7d70c6
BD
168 struct dentry *debug_root;
169 struct dentry *debug_file;
170 struct dentry *debug_fifo;
171
172 struct usb_request *ep0_reply;
173 struct usb_request *ctrl_req;
174 u8 ep0_buff[8];
175 u8 ctrl_buff[8];
176
177 struct usb_gadget gadget;
71225bee 178 unsigned int setup;
12a1f4dc 179 unsigned long last_rst;
b3f489b2 180 struct s3c_hsotg_ep *eps;
5b7d70c6
BD
181};
182
183/**
184 * struct s3c_hsotg_req - data transfer request
185 * @req: The USB gadget request
186 * @queue: The list of requests for the endpoint this is queued for.
187 * @in_progress: Has already had size/packets written to core
188 * @mapped: DMA buffer for this request has been mapped via dma_map_single().
189 */
190struct s3c_hsotg_req {
191 struct usb_request req;
192 struct list_head queue;
193 unsigned char in_progress;
194 unsigned char mapped;
195};
196
197/* conversion functions */
198static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
199{
200 return container_of(req, struct s3c_hsotg_req, req);
201}
202
203static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
204{
205 return container_of(ep, struct s3c_hsotg_ep, ep);
206}
207
208static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)
209{
210 return container_of(gadget, struct s3c_hsotg, gadget);
211}
212
213static inline void __orr32(void __iomem *ptr, u32 val)
214{
215 writel(readl(ptr) | val, ptr);
216}
217
218static inline void __bic32(void __iomem *ptr, u32 val)
219{
220 writel(readl(ptr) & ~val, ptr);
221}
222
223/* forward decleration of functions */
224static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);
225
226/**
227 * using_dma - return the DMA status of the driver.
228 * @hsotg: The driver state.
229 *
230 * Return true if we're using DMA.
231 *
232 * Currently, we have the DMA support code worked into everywhere
233 * that needs it, but the AMBA DMA implementation in the hardware can
234 * only DMA from 32bit aligned addresses. This means that gadgets such
235 * as the CDC Ethernet cannot work as they often pass packets which are
236 * not 32bit aligned.
237 *
238 * Unfortunately the choice to use DMA or not is global to the controller
239 * and seems to be only settable when the controller is being put through
240 * a core reset. This means we either need to fix the gadgets to take
241 * account of DMA alignment, or add bounce buffers (yuerk).
242 *
243 * Until this issue is sorted out, we always return 'false'.
244 */
245static inline bool using_dma(struct s3c_hsotg *hsotg)
246{
247 return false; /* support is not complete */
248}
249
250/**
251 * s3c_hsotg_en_gsint - enable one or more of the general interrupt
252 * @hsotg: The device state
253 * @ints: A bitmask of the interrupts to enable
254 */
255static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)
256{
94cb8fd6 257 u32 gsintmsk = readl(hsotg->regs + GINTMSK);
5b7d70c6
BD
258 u32 new_gsintmsk;
259
260 new_gsintmsk = gsintmsk | ints;
261
262 if (new_gsintmsk != gsintmsk) {
263 dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
94cb8fd6 264 writel(new_gsintmsk, hsotg->regs + GINTMSK);
5b7d70c6
BD
265 }
266}
267
268/**
269 * s3c_hsotg_disable_gsint - disable one or more of the general interrupt
270 * @hsotg: The device state
271 * @ints: A bitmask of the interrupts to enable
272 */
273static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)
274{
94cb8fd6 275 u32 gsintmsk = readl(hsotg->regs + GINTMSK);
5b7d70c6
BD
276 u32 new_gsintmsk;
277
278 new_gsintmsk = gsintmsk & ~ints;
279
280 if (new_gsintmsk != gsintmsk)
94cb8fd6 281 writel(new_gsintmsk, hsotg->regs + GINTMSK);
5b7d70c6
BD
282}
283
284/**
285 * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq
286 * @hsotg: The device state
287 * @ep: The endpoint index
288 * @dir_in: True if direction is in.
289 * @en: The enable value, true to enable
290 *
291 * Set or clear the mask for an individual endpoint's interrupt
292 * request.
293 */
294static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,
295 unsigned int ep, unsigned int dir_in,
296 unsigned int en)
297{
298 unsigned long flags;
299 u32 bit = 1 << ep;
300 u32 daint;
301
302 if (!dir_in)
303 bit <<= 16;
304
305 local_irq_save(flags);
94cb8fd6 306 daint = readl(hsotg->regs + DAINTMSK);
5b7d70c6
BD
307 if (en)
308 daint |= bit;
309 else
310 daint &= ~bit;
94cb8fd6 311 writel(daint, hsotg->regs + DAINTMSK);
5b7d70c6
BD
312 local_irq_restore(flags);
313}
314
315/**
316 * s3c_hsotg_init_fifo - initialise non-periodic FIFOs
317 * @hsotg: The device instance.
318 */
319static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
320{
0f002d20
BD
321 unsigned int ep;
322 unsigned int addr;
323 unsigned int size;
1703a6d3 324 int timeout;
0f002d20
BD
325 u32 val;
326
6d091ee7 327 /* set FIFO sizes to 2048/1024 */
5b7d70c6 328
94cb8fd6
LM
329 writel(2048, hsotg->regs + GRXFSIZ);
330 writel(GNPTXFSIZ_NPTxFStAddr(2048) |
331 GNPTXFSIZ_NPTxFDep(1024),
332 hsotg->regs + GNPTXFSIZ);
0f002d20 333
8b9bc460
LM
334 /*
335 * arange all the rest of the TX FIFOs, as some versions of this
0f002d20
BD
336 * block have overlapping default addresses. This also ensures
337 * that if the settings have been changed, then they are set to
8b9bc460
LM
338 * known values.
339 */
0f002d20
BD
340
341 /* start at the end of the GNPTXFSIZ, rounded up */
342 addr = 2048 + 1024;
343 size = 768;
344
8b9bc460
LM
345 /*
346 * currently we allocate TX FIFOs for all possible endpoints,
347 * and assume that they are all the same size.
348 */
0f002d20 349
f7a83fe1 350 for (ep = 1; ep <= 15; ep++) {
0f002d20 351 val = addr;
94cb8fd6 352 val |= size << DPTXFSIZn_DPTxFSize_SHIFT;
0f002d20
BD
353 addr += size;
354
94cb8fd6 355 writel(val, hsotg->regs + DPTXFSIZn(ep));
0f002d20 356 }
1703a6d3 357
8b9bc460
LM
358 /*
359 * according to p428 of the design guide, we need to ensure that
360 * all fifos are flushed before continuing
361 */
1703a6d3 362
94cb8fd6
LM
363 writel(GRSTCTL_TxFNum(0x10) | GRSTCTL_TxFFlsh |
364 GRSTCTL_RxFFlsh, hsotg->regs + GRSTCTL);
1703a6d3
BD
365
366 /* wait until the fifos are both flushed */
367 timeout = 100;
368 while (1) {
94cb8fd6 369 val = readl(hsotg->regs + GRSTCTL);
1703a6d3 370
94cb8fd6 371 if ((val & (GRSTCTL_TxFFlsh | GRSTCTL_RxFFlsh)) == 0)
1703a6d3
BD
372 break;
373
374 if (--timeout == 0) {
375 dev_err(hsotg->dev,
376 "%s: timeout flushing fifos (GRSTCTL=%08x)\n",
377 __func__, val);
378 }
379
380 udelay(1);
381 }
382
383 dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
5b7d70c6
BD
384}
385
386/**
387 * @ep: USB endpoint to allocate request for.
388 * @flags: Allocation flags
389 *
390 * Allocate a new USB request structure appropriate for the specified endpoint
391 */
0978f8c5
MB
392static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
393 gfp_t flags)
5b7d70c6
BD
394{
395 struct s3c_hsotg_req *req;
396
397 req = kzalloc(sizeof(struct s3c_hsotg_req), flags);
398 if (!req)
399 return NULL;
400
401 INIT_LIST_HEAD(&req->queue);
402
403 req->req.dma = DMA_ADDR_INVALID;
404 return &req->req;
405}
406
407/**
408 * is_ep_periodic - return true if the endpoint is in periodic mode.
409 * @hs_ep: The endpoint to query.
410 *
411 * Returns true if the endpoint is in periodic mode, meaning it is being
412 * used for an Interrupt or ISO transfer.
413 */
414static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
415{
416 return hs_ep->periodic;
417}
418
419/**
420 * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request
421 * @hsotg: The device state.
422 * @hs_ep: The endpoint for the request
423 * @hs_req: The request being processed.
424 *
425 * This is the reverse of s3c_hsotg_map_dma(), called for the completion
426 * of a request to ensure the buffer is ready for access by the caller.
8b9bc460 427 */
5b7d70c6
BD
428static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
429 struct s3c_hsotg_ep *hs_ep,
430 struct s3c_hsotg_req *hs_req)
431{
432 struct usb_request *req = &hs_req->req;
433 enum dma_data_direction dir;
434
435 dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
436
437 /* ignore this if we're not moving any data */
438 if (hs_req->req.length == 0)
439 return;
440
441 if (hs_req->mapped) {
442 /* we mapped this, so unmap and remove the dma */
443
444 dma_unmap_single(hsotg->dev, req->dma, req->length, dir);
445
446 req->dma = DMA_ADDR_INVALID;
447 hs_req->mapped = 0;
448 } else {
5b520259 449 dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
5b7d70c6
BD
450 }
451}
452
453/**
454 * s3c_hsotg_write_fifo - write packet Data to the TxFIFO
455 * @hsotg: The controller state.
456 * @hs_ep: The endpoint we're going to write for.
457 * @hs_req: The request to write data for.
458 *
459 * This is called when the TxFIFO has some space in it to hold a new
460 * transmission and we have something to give it. The actual setup of
461 * the data size is done elsewhere, so all we have to do is to actually
462 * write the data.
463 *
464 * The return value is zero if there is more space (or nothing was done)
465 * otherwise -ENOSPC is returned if the FIFO space was used up.
466 *
467 * This routine is only needed for PIO
8b9bc460 468 */
5b7d70c6
BD
469static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
470 struct s3c_hsotg_ep *hs_ep,
471 struct s3c_hsotg_req *hs_req)
472{
473 bool periodic = is_ep_periodic(hs_ep);
94cb8fd6 474 u32 gnptxsts = readl(hsotg->regs + GNPTXSTS);
5b7d70c6
BD
475 int buf_pos = hs_req->req.actual;
476 int to_write = hs_ep->size_loaded;
477 void *data;
478 int can_write;
479 int pkt_round;
480
481 to_write -= (buf_pos - hs_ep->last_load);
482
483 /* if there's nothing to write, get out early */
484 if (to_write == 0)
485 return 0;
486
10aebc77 487 if (periodic && !hsotg->dedicated_fifos) {
94cb8fd6 488 u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
5b7d70c6
BD
489 int size_left;
490 int size_done;
491
8b9bc460
LM
492 /*
493 * work out how much data was loaded so we can calculate
494 * how much data is left in the fifo.
495 */
5b7d70c6 496
94cb8fd6 497 size_left = DxEPTSIZ_XferSize_GET(epsize);
5b7d70c6 498
8b9bc460
LM
499 /*
500 * if shared fifo, we cannot write anything until the
e7a9ff54
BD
501 * previous data has been completely sent.
502 */
503 if (hs_ep->fifo_load != 0) {
94cb8fd6 504 s3c_hsotg_en_gsint(hsotg, GINTSTS_PTxFEmp);
e7a9ff54
BD
505 return -ENOSPC;
506 }
507
5b7d70c6
BD
508 dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
509 __func__, size_left,
510 hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
511
512 /* how much of the data has moved */
513 size_done = hs_ep->size_loaded - size_left;
514
515 /* how much data is left in the fifo */
516 can_write = hs_ep->fifo_load - size_done;
517 dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
518 __func__, can_write);
519
520 can_write = hs_ep->fifo_size - can_write;
521 dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
522 __func__, can_write);
523
524 if (can_write <= 0) {
94cb8fd6 525 s3c_hsotg_en_gsint(hsotg, GINTSTS_PTxFEmp);
5b7d70c6
BD
526 return -ENOSPC;
527 }
10aebc77 528 } else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
94cb8fd6 529 can_write = readl(hsotg->regs + DTXFSTS(hs_ep->index));
10aebc77
BD
530
531 can_write &= 0xffff;
532 can_write *= 4;
5b7d70c6 533 } else {
94cb8fd6 534 if (GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {
5b7d70c6
BD
535 dev_dbg(hsotg->dev,
536 "%s: no queue slots available (0x%08x)\n",
537 __func__, gnptxsts);
538
94cb8fd6 539 s3c_hsotg_en_gsint(hsotg, GINTSTS_NPTxFEmp);
5b7d70c6
BD
540 return -ENOSPC;
541 }
542
94cb8fd6 543 can_write = GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);
679f9b7c 544 can_write *= 4; /* fifo size is in 32bit quantities. */
5b7d70c6
BD
545 }
546
547 dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",
548 __func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket);
549
8b9bc460
LM
550 /*
551 * limit to 512 bytes of data, it seems at least on the non-periodic
5b7d70c6
BD
552 * FIFO, requests of >512 cause the endpoint to get stuck with a
553 * fragment of the end of the transfer in it.
554 */
555 if (can_write > 512)
556 can_write = 512;
557
8b9bc460
LM
558 /*
559 * limit the write to one max-packet size worth of data, but allow
03e10e5a 560 * the transfer to return that it did not run out of fifo space
8b9bc460
LM
561 * doing it.
562 */
03e10e5a
BD
563 if (to_write > hs_ep->ep.maxpacket) {
564 to_write = hs_ep->ep.maxpacket;
565
566 s3c_hsotg_en_gsint(hsotg,
94cb8fd6
LM
567 periodic ? GINTSTS_PTxFEmp :
568 GINTSTS_NPTxFEmp);
03e10e5a
BD
569 }
570
5b7d70c6
BD
571 /* see if we can write data */
572
573 if (to_write > can_write) {
574 to_write = can_write;
575 pkt_round = to_write % hs_ep->ep.maxpacket;
576
8b9bc460
LM
577 /*
578 * Round the write down to an
5b7d70c6
BD
579 * exact number of packets.
580 *
581 * Note, we do not currently check to see if we can ever
582 * write a full packet or not to the FIFO.
583 */
584
585 if (pkt_round)
586 to_write -= pkt_round;
587
8b9bc460
LM
588 /*
589 * enable correct FIFO interrupt to alert us when there
590 * is more room left.
591 */
5b7d70c6
BD
592
593 s3c_hsotg_en_gsint(hsotg,
94cb8fd6
LM
594 periodic ? GINTSTS_PTxFEmp :
595 GINTSTS_NPTxFEmp);
5b7d70c6
BD
596 }
597
598 dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
599 to_write, hs_req->req.length, can_write, buf_pos);
600
601 if (to_write <= 0)
602 return -ENOSPC;
603
604 hs_req->req.actual = buf_pos + to_write;
605 hs_ep->total_data += to_write;
606
607 if (periodic)
608 hs_ep->fifo_load += to_write;
609
610 to_write = DIV_ROUND_UP(to_write, 4);
611 data = hs_req->req.buf + buf_pos;
612
94cb8fd6 613 writesl(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);
5b7d70c6
BD
614
615 return (to_write >= can_write) ? -ENOSPC : 0;
616}
617
618/**
619 * get_ep_limit - get the maximum data legnth for this endpoint
620 * @hs_ep: The endpoint
621 *
622 * Return the maximum data that can be queued in one go on a given endpoint
623 * so that transfers that are too long can be split.
624 */
625static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
626{
627 int index = hs_ep->index;
628 unsigned maxsize;
629 unsigned maxpkt;
630
631 if (index != 0) {
94cb8fd6
LM
632 maxsize = DxEPTSIZ_XferSize_LIMIT + 1;
633 maxpkt = DxEPTSIZ_PktCnt_LIMIT + 1;
5b7d70c6 634 } else {
b05ca580 635 maxsize = 64+64;
66e5c643 636 if (hs_ep->dir_in)
94cb8fd6 637 maxpkt = DIEPTSIZ0_PktCnt_LIMIT + 1;
66e5c643 638 else
5b7d70c6 639 maxpkt = 2;
5b7d70c6
BD
640 }
641
642 /* we made the constant loading easier above by using +1 */
643 maxpkt--;
644 maxsize--;
645
8b9bc460
LM
646 /*
647 * constrain by packet count if maxpkts*pktsize is greater
648 * than the length register size.
649 */
5b7d70c6
BD
650
651 if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
652 maxsize = maxpkt * hs_ep->ep.maxpacket;
653
654 return maxsize;
655}
656
657/**
658 * s3c_hsotg_start_req - start a USB request from an endpoint's queue
659 * @hsotg: The controller state.
660 * @hs_ep: The endpoint to process a request for
661 * @hs_req: The request to start.
662 * @continuing: True if we are doing more for the current request.
663 *
664 * Start the given request running by setting the endpoint registers
665 * appropriately, and writing any data to the FIFOs.
666 */
667static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
668 struct s3c_hsotg_ep *hs_ep,
669 struct s3c_hsotg_req *hs_req,
670 bool continuing)
671{
672 struct usb_request *ureq = &hs_req->req;
673 int index = hs_ep->index;
674 int dir_in = hs_ep->dir_in;
675 u32 epctrl_reg;
676 u32 epsize_reg;
677 u32 epsize;
678 u32 ctrl;
679 unsigned length;
680 unsigned packets;
681 unsigned maxreq;
682
683 if (index != 0) {
684 if (hs_ep->req && !continuing) {
685 dev_err(hsotg->dev, "%s: active request\n", __func__);
686 WARN_ON(1);
687 return;
688 } else if (hs_ep->req != hs_req && continuing) {
689 dev_err(hsotg->dev,
690 "%s: continue different req\n", __func__);
691 WARN_ON(1);
692 return;
693 }
694 }
695
94cb8fd6
LM
696 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
697 epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
5b7d70c6
BD
698
699 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
700 __func__, readl(hsotg->regs + epctrl_reg), index,
701 hs_ep->dir_in ? "in" : "out");
702
9c39ddc6
AT
703 /* If endpoint is stalled, we will restart request later */
704 ctrl = readl(hsotg->regs + epctrl_reg);
705
94cb8fd6 706 if (ctrl & DxEPCTL_Stall) {
9c39ddc6
AT
707 dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
708 return;
709 }
710
5b7d70c6 711 length = ureq->length - ureq->actual;
71225bee
LM
712 dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
713 ureq->length, ureq->actual);
5b7d70c6
BD
714 if (0)
715 dev_dbg(hsotg->dev,
716 "REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
717 ureq->buf, length, ureq->dma,
718 ureq->no_interrupt, ureq->zero, ureq->short_not_ok);
719
720 maxreq = get_ep_limit(hs_ep);
721 if (length > maxreq) {
722 int round = maxreq % hs_ep->ep.maxpacket;
723
724 dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
725 __func__, length, maxreq, round);
726
727 /* round down to multiple of packets */
728 if (round)
729 maxreq -= round;
730
731 length = maxreq;
732 }
733
734 if (length)
735 packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
736 else
737 packets = 1; /* send one packet if length is zero. */
738
739 if (dir_in && index != 0)
94cb8fd6 740 epsize = DxEPTSIZ_MC(1);
5b7d70c6
BD
741 else
742 epsize = 0;
743
744 if (index != 0 && ureq->zero) {
8b9bc460
LM
745 /*
746 * test for the packets being exactly right for the
747 * transfer
748 */
5b7d70c6
BD
749
750 if (length == (packets * hs_ep->ep.maxpacket))
751 packets++;
752 }
753
94cb8fd6
LM
754 epsize |= DxEPTSIZ_PktCnt(packets);
755 epsize |= DxEPTSIZ_XferSize(length);
5b7d70c6
BD
756
757 dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
758 __func__, packets, length, ureq->length, epsize, epsize_reg);
759
760 /* store the request as the current one we're doing */
761 hs_ep->req = hs_req;
762
763 /* write size / packets */
764 writel(epsize, hsotg->regs + epsize_reg);
765
db1d8ba3 766 if (using_dma(hsotg) && !continuing) {
5b7d70c6
BD
767 unsigned int dma_reg;
768
8b9bc460
LM
769 /*
770 * write DMA address to control register, buffer already
771 * synced by s3c_hsotg_ep_queue().
772 */
5b7d70c6 773
94cb8fd6 774 dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
5b7d70c6
BD
775 writel(ureq->dma, hsotg->regs + dma_reg);
776
777 dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
778 __func__, ureq->dma, dma_reg);
779 }
780
94cb8fd6
LM
781 ctrl |= DxEPCTL_EPEna; /* ensure ep enabled */
782 ctrl |= DxEPCTL_USBActEp;
71225bee
LM
783
784 dev_dbg(hsotg->dev, "setup req:%d\n", hsotg->setup);
785
786 /* For Setup request do not clear NAK */
787 if (hsotg->setup && index == 0)
788 hsotg->setup = 0;
789 else
94cb8fd6 790 ctrl |= DxEPCTL_CNAK; /* clear NAK set by core */
71225bee 791
5b7d70c6
BD
792
793 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
794 writel(ctrl, hsotg->regs + epctrl_reg);
795
8b9bc460
LM
796 /*
797 * set these, it seems that DMA support increments past the end
5b7d70c6 798 * of the packet buffer so we need to calculate the length from
8b9bc460
LM
799 * this information.
800 */
5b7d70c6
BD
801 hs_ep->size_loaded = length;
802 hs_ep->last_load = ureq->actual;
803
804 if (dir_in && !using_dma(hsotg)) {
805 /* set these anyway, we may need them for non-periodic in */
806 hs_ep->fifo_load = 0;
807
808 s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
809 }
810
8b9bc460
LM
811 /*
812 * clear the INTknTXFEmpMsk when we start request, more as a aide
813 * to debugging to see what is going on.
814 */
5b7d70c6 815 if (dir_in)
94cb8fd6
LM
816 writel(DIEPMSK_INTknTXFEmpMsk,
817 hsotg->regs + DIEPINT(index));
5b7d70c6 818
8b9bc460
LM
819 /*
820 * Note, trying to clear the NAK here causes problems with transmit
821 * on the S3C6400 ending up with the TXFIFO becoming full.
822 */
5b7d70c6
BD
823
824 /* check ep is enabled */
94cb8fd6 825 if (!(readl(hsotg->regs + epctrl_reg) & DxEPCTL_EPEna))
5b7d70c6
BD
826 dev_warn(hsotg->dev,
827 "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n",
828 index, readl(hsotg->regs + epctrl_reg));
829
830 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",
831 __func__, readl(hsotg->regs + epctrl_reg));
832}
833
834/**
835 * s3c_hsotg_map_dma - map the DMA memory being used for the request
836 * @hsotg: The device state.
837 * @hs_ep: The endpoint the request is on.
838 * @req: The request being processed.
839 *
840 * We've been asked to queue a request, so ensure that the memory buffer
841 * is correctly setup for DMA. If we've been passed an extant DMA address
842 * then ensure the buffer has been synced to memory. If our buffer has no
843 * DMA memory, then we map the memory and mark our request to allow us to
844 * cleanup on completion.
8b9bc460 845 */
5b7d70c6
BD
846static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
847 struct s3c_hsotg_ep *hs_ep,
848 struct usb_request *req)
849{
850 enum dma_data_direction dir;
851 struct s3c_hsotg_req *hs_req = our_req(req);
852
853 dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
854
855 /* if the length is zero, ignore the DMA data */
856 if (hs_req->req.length == 0)
857 return 0;
858
859 if (req->dma == DMA_ADDR_INVALID) {
860 dma_addr_t dma;
861
862 dma = dma_map_single(hsotg->dev, req->buf, req->length, dir);
863
864 if (unlikely(dma_mapping_error(hsotg->dev, dma)))
865 goto dma_error;
866
867 if (dma & 3) {
868 dev_err(hsotg->dev, "%s: unaligned dma buffer\n",
869 __func__);
870
871 dma_unmap_single(hsotg->dev, dma, req->length, dir);
872 return -EINVAL;
873 }
874
875 hs_req->mapped = 1;
876 req->dma = dma;
877 } else {
5b520259 878 dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
5b7d70c6
BD
879 hs_req->mapped = 0;
880 }
881
882 return 0;
883
884dma_error:
885 dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
886 __func__, req->buf, req->length);
887
888 return -EIO;
889}
890
891static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
892 gfp_t gfp_flags)
893{
894 struct s3c_hsotg_req *hs_req = our_req(req);
895 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
896 struct s3c_hsotg *hs = hs_ep->parent;
897 unsigned long irqflags;
898 bool first;
899
900 dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
901 ep->name, req, req->length, req->buf, req->no_interrupt,
902 req->zero, req->short_not_ok);
903
904 /* initialise status of the request */
905 INIT_LIST_HEAD(&hs_req->queue);
906 req->actual = 0;
907 req->status = -EINPROGRESS;
908
909 /* if we're using DMA, sync the buffers as necessary */
910 if (using_dma(hs)) {
911 int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
912 if (ret)
913 return ret;
914 }
915
916 spin_lock_irqsave(&hs_ep->lock, irqflags);
917
918 first = list_empty(&hs_ep->queue);
919 list_add_tail(&hs_req->queue, &hs_ep->queue);
920
921 if (first)
922 s3c_hsotg_start_req(hs, hs_ep, hs_req, false);
923
924 spin_unlock_irqrestore(&hs_ep->lock, irqflags);
925
926 return 0;
927}
928
/* Release a request previously allocated by s3c_hsotg_ep_alloc_request(). */
static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
				      struct usb_request *req)
{
	kfree(our_req(req));
}
936
937/**
938 * s3c_hsotg_complete_oursetup - setup completion callback
939 * @ep: The endpoint the request was on.
940 * @req: The request completed.
941 *
942 * Called on completion of any requests the driver itself
943 * submitted that need cleaning up.
944 */
945static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
946 struct usb_request *req)
947{
948 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
949 struct s3c_hsotg *hsotg = hs_ep->parent;
950
951 dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
952
953 s3c_hsotg_ep_free_request(ep, req);
954}
955
956/**
957 * ep_from_windex - convert control wIndex value to endpoint
958 * @hsotg: The driver state.
959 * @windex: The control request wIndex field (in host order).
960 *
961 * Convert the given wIndex into a pointer to an driver endpoint
962 * structure, or return NULL if it is not a valid endpoint.
8b9bc460 963 */
5b7d70c6
BD
964static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,
965 u32 windex)
966{
967 struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
968 int dir = (windex & USB_DIR_IN) ? 1 : 0;
969 int idx = windex & 0x7F;
970
971 if (windex >= 0x100)
972 return NULL;
973
b3f489b2 974 if (idx > hsotg->num_of_eps)
5b7d70c6
BD
975 return NULL;
976
977 if (idx && ep->dir_in != dir)
978 return NULL;
979
980 return ep;
981}
982
/**
 * s3c_hsotg_send_reply - send reply to control request
 * @hsotg: The device state
 * @ep: Endpoint 0
 * @buff: Buffer for request
 * @length: Length of reply.
 *
 * Create a request and queue it on the given endpoint. This is useful as
 * an internal method of sending replies to certain control requests, etc.
 *
 * The reply data is copied into the driver-owned ep0_buff, so @buff may
 * live on the caller's stack. The request is freed by its completion
 * callback (s3c_hsotg_complete_oursetup).
 *
 * Returns 0 on success or a negative errno on allocation/queue failure.
 */
static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,
				struct s3c_hsotg_ep *ep,
				void *buff,
				int length)
{
	struct usb_request *req;
	int ret;

	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);

	req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
	/* record the in-flight reply (NULL on failure) before checking */
	hsotg->ep0_reply = req;
	if (!req) {
		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
		return -ENOMEM;
	}

	req->buf = hsotg->ep0_buff;
	req->length = length;
	req->zero = 1; /* always do zero-length final transfer */
	req->complete = s3c_hsotg_complete_oursetup;

	if (length)
		memcpy(req->buf, buff, length);
	else
		ep->sent_zlp = 1;

	ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
	if (ret) {
		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
		return ret;
	}

	return 0;
}
1028
/**
 * s3c_hsotg_process_req_status - process request GET_STATUS
 * @hsotg: The device state
 * @ctrl: USB control request
 *
 * Build the two-byte status word for the recipient addressed by @ctrl
 * (device, interface or endpoint) and queue it as the IN data stage on
 * endpoint 0.
 *
 * Returns 1 when handled here, 0 to pass the request to the gadget
 * driver, or a negative errno on failure.
 */
static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg,
					struct usb_ctrlrequest *ctrl)
{
	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
	struct s3c_hsotg_ep *ep;
	__le16 reply;
	int ret;

	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);

	/* GET_STATUS always has an IN data stage */
	if (!ep0->dir_in) {
		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
		return -EINVAL;
	}

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		reply = cpu_to_le16(0); /* bit 0 => self powered,
					 * bit 1 => remote wakeup */
		break;

	case USB_RECIP_INTERFACE:
		/* currently, the data result should be zero */
		reply = cpu_to_le16(0);
		break;

	case USB_RECIP_ENDPOINT:
		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
		if (!ep)
			return -ENOENT;

		reply = cpu_to_le16(ep->halted ? 1 : 0);
		break;

	default:
		/* unknown recipient: let the gadget driver handle it */
		return 0;
	}

	/* the status reply is always exactly two bytes */
	if (le16_to_cpu(ctrl->wLength) != 2)
		return -EINVAL;

	ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
		return ret;
	}

	return 1;
}
1083
1084static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);
1085
9c39ddc6
AT
1086/**
1087 * get_ep_head - return the first request on the endpoint
1088 * @hs_ep: The controller endpoint to get
1089 *
1090 * Get the first request on the endpoint.
1091 */
1092static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
1093{
1094 if (list_empty(&hs_ep->queue))
1095 return NULL;
1096
1097 return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);
1098}
1099
5b7d70c6
BD
/**
 * s3c_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
 * @hsotg: The device state
 * @ctrl: USB control request
 *
 * Handle SET_FEATURE/CLEAR_FEATURE for endpoint recipients (currently
 * only USB_ENDPOINT_HALT). Returns 1 when handled, or a negative errno
 * for unsupported recipients/features or reply failure.
 */
static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
					 struct usb_ctrlrequest *ctrl)
{
	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
	struct s3c_hsotg_req *hs_req;
	bool restart;
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	struct s3c_hsotg_ep *ep;
	int ret;

	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
		__func__, set ? "SET" : "CLEAR");

	if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
		if (!ep) {
			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
				__func__, le16_to_cpu(ctrl->wIndex));
			return -ENOENT;
		}

		switch (le16_to_cpu(ctrl->wValue)) {
		case USB_ENDPOINT_HALT:
			s3c_hsotg_ep_sethalt(&ep->ep, set);

			/* zero-length status stage acknowledges the request */
			ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
			if (ret) {
				dev_err(hsotg->dev,
					"%s: failed to send reply\n", __func__);
				return ret;
			}

			if (!set) {
				/*
				 * If we have request in progress,
				 * then complete it
				 */
				if (ep->req) {
					hs_req = ep->req;
					ep->req = NULL;
					list_del_init(&hs_req->queue);
					/* NOTE(review): completion called
					 * directly, without updating
					 * hs_req->req.status — confirm
					 * intended */
					hs_req->req.complete(&ep->ep,
							     &hs_req->req);
				}

				/* If we have pending request, then start it */
				restart = !list_empty(&ep->queue);
				if (restart) {
					hs_req = get_ep_head(ep);
					s3c_hsotg_start_req(hsotg, ep,
							    hs_req, false);
				}
			}

			break;

		default:
			return -ENOENT;
		}
	} else
		return -ENOENT;  /* currently only deal with endpoint */

	return 1;
}
1169
1170/**
1171 * s3c_hsotg_process_control - process a control request
1172 * @hsotg: The device state
1173 * @ctrl: The control request received
1174 *
1175 * The controller has received the SETUP phase of a control request, and
1176 * needs to work out what to do next (and whether to pass it on to the
1177 * gadget driver).
1178 */
1179static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
1180 struct usb_ctrlrequest *ctrl)
1181{
1182 struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
1183 int ret = 0;
1184 u32 dcfg;
1185
1186 ep0->sent_zlp = 0;
1187
1188 dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
1189 ctrl->bRequest, ctrl->bRequestType,
1190 ctrl->wValue, ctrl->wLength);
1191
8b9bc460
LM
1192 /*
1193 * record the direction of the request, for later use when enquing
1194 * packets onto EP0.
1195 */
5b7d70c6
BD
1196
1197 ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;
1198 dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);
1199
8b9bc460
LM
1200 /*
1201 * if we've no data with this request, then the last part of the
1202 * transaction is going to implicitly be IN.
1203 */
5b7d70c6
BD
1204 if (ctrl->wLength == 0)
1205 ep0->dir_in = 1;
1206
1207 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1208 switch (ctrl->bRequest) {
1209 case USB_REQ_SET_ADDRESS:
94cb8fd6
LM
1210 dcfg = readl(hsotg->regs + DCFG);
1211 dcfg &= ~DCFG_DevAddr_MASK;
1212 dcfg |= ctrl->wValue << DCFG_DevAddr_SHIFT;
1213 writel(dcfg, hsotg->regs + DCFG);
5b7d70c6
BD
1214
1215 dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
1216
1217 ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
1218 return;
1219
1220 case USB_REQ_GET_STATUS:
1221 ret = s3c_hsotg_process_req_status(hsotg, ctrl);
1222 break;
1223
1224 case USB_REQ_CLEAR_FEATURE:
1225 case USB_REQ_SET_FEATURE:
1226 ret = s3c_hsotg_process_req_feature(hsotg, ctrl);
1227 break;
1228 }
1229 }
1230
1231 /* as a fallback, try delivering it to the driver to deal with */
1232
1233 if (ret == 0 && hsotg->driver) {
1234 ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
1235 if (ret < 0)
1236 dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
1237 }
1238
8b9bc460
LM
1239 /*
1240 * the request is either unhandlable, or is not formatted correctly
5b7d70c6
BD
1241 * so respond with a STALL for the status stage to indicate failure.
1242 */
1243
1244 if (ret < 0) {
1245 u32 reg;
1246 u32 ctrl;
1247
1248 dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
94cb8fd6 1249 reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
5b7d70c6 1250
8b9bc460 1251 /*
94cb8fd6 1252 * DxEPCTL_Stall will be cleared by EP once it has
8b9bc460
LM
1253 * taken effect, so no need to clear later.
1254 */
5b7d70c6
BD
1255
1256 ctrl = readl(hsotg->regs + reg);
94cb8fd6
LM
1257 ctrl |= DxEPCTL_Stall;
1258 ctrl |= DxEPCTL_CNAK;
5b7d70c6
BD
1259 writel(ctrl, hsotg->regs + reg);
1260
1261 dev_dbg(hsotg->dev,
25985edc 1262 "written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
5b7d70c6
BD
1263 ctrl, reg, readl(hsotg->regs + reg));
1264
8b9bc460
LM
1265 /*
1266 * don't believe we need to anything more to get the EP
1267 * to reply with a STALL packet
1268 */
5b7d70c6
BD
1269 }
1270}
1271
1272static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
1273
1274/**
1275 * s3c_hsotg_complete_setup - completion of a setup transfer
1276 * @ep: The endpoint the request was on.
1277 * @req: The request completed.
1278 *
1279 * Called on completion of any requests the driver itself submitted for
1280 * EP0 setup packets
1281 */
1282static void s3c_hsotg_complete_setup(struct usb_ep *ep,
1283 struct usb_request *req)
1284{
1285 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
1286 struct s3c_hsotg *hsotg = hs_ep->parent;
1287
1288 if (req->status < 0) {
1289 dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
1290 return;
1291 }
1292
1293 if (req->actual == 0)
1294 s3c_hsotg_enqueue_setup(hsotg);
1295 else
1296 s3c_hsotg_process_control(hsotg, req->buf);
1297}
1298
/**
 * s3c_hsotg_enqueue_setup - start a request for EP0 packets
 * @hsotg: The device state.
 *
 * Enqueue a request on EP0 if necessary to received any SETUP packets
 * received from the host.
 */
static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
{
	struct usb_request *req = hsotg->ctrl_req;
	struct s3c_hsotg_req *hs_req = our_req(req);
	int ret;

	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);

	req->zero = 0;
	req->length = 8;	/* a USB SETUP packet is always 8 bytes */
	req->buf = hsotg->ctrl_buff;
	req->complete = s3c_hsotg_complete_setup;

	/* the control request is a singleton; never queue it twice */
	if (!list_empty(&hs_req->queue)) {
		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
		return;
	}

	/* SETUP always arrives on the OUT direction of EP0 */
	hsotg->eps[0].dir_in = 0;

	ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
	if (ret < 0) {
		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
		/*
		 * Don't think there's much we can do other than watch the
		 * driver fail.
		 */
	}
}
1335
/**
 * s3c_hsotg_complete_request - complete a request given to us
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request was on.
 * @hs_req: The request to complete.
 * @result: The result code (0 => Ok, otherwise errno)
 *
 * The given request has finished, so call the necessary completion
 * if it has one and then look to see if we can start a new request
 * on the endpoint.
 *
 * Note, expects the ep to already be locked as appropriate.
 */
static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
				       struct s3c_hsotg_ep *hs_ep,
				       struct s3c_hsotg_req *hs_req,
				       int result)
{
	bool restart;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
		return;
	}

	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);

	/*
	 * only replace the status if we've not already set an error
	 * from a previous transaction
	 */

	if (hs_req->req.status == -EINPROGRESS)
		hs_req->req.status = result;

	/* detach the request from the endpoint before calling back */
	hs_ep->req = NULL;
	list_del_init(&hs_req->queue);

	if (using_dma(hsotg))
		s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);

	/*
	 * call the complete request with the locks off, just in case the
	 * request tries to queue more work for this endpoint.
	 */

	if (hs_req->req.complete) {
		spin_unlock(&hs_ep->lock);
		hs_req->req.complete(&hs_ep->ep, &hs_req->req);
		spin_lock(&hs_ep->lock);
	}

	/*
	 * Look to see if there is anything else to do. Note, the completion
	 * of the previous request may have caused a new request to be started
	 * so be careful when doing this.
	 */

	if (!hs_ep->req && result >= 0) {
		restart = !list_empty(&hs_ep->queue);
		if (restart) {
			hs_req = get_ep_head(hs_ep);
			s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
		}
	}
}
1403
1404/**
1405 * s3c_hsotg_complete_request_lock - complete a request given to us (locked)
1406 * @hsotg: The device state.
1407 * @hs_ep: The endpoint the request was on.
1408 * @hs_req: The request to complete.
1409 * @result: The result code (0 => Ok, otherwise errno)
1410 *
1411 * See s3c_hsotg_complete_request(), but called with the endpoint's
1412 * lock held.
8b9bc460 1413 */
5b7d70c6
BD
1414static void s3c_hsotg_complete_request_lock(struct s3c_hsotg *hsotg,
1415 struct s3c_hsotg_ep *hs_ep,
1416 struct s3c_hsotg_req *hs_req,
1417 int result)
1418{
1419 unsigned long flags;
1420
1421 spin_lock_irqsave(&hs_ep->lock, flags);
1422 s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
1423 spin_unlock_irqrestore(&hs_ep->lock, flags);
1424}
1425
/**
 * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
 * @hsotg: The device state.
 * @ep_idx: The endpoint index for the data
 * @size: The size of data in the fifo, in bytes
 *
 * The FIFO status shows there is data to read from the FIFO for a given
 * endpoint, so sort out whether we need to read the data into a request
 * that has been made for that endpoint.
 */
static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	void __iomem *fifo = hsotg->regs + EPFIFO(ep_idx);
	int to_read;
	int max_req;
	int read_ptr;

	if (!hs_req) {
		u32 epctl = readl(hsotg->regs + DOEPCTL(ep_idx));
		int ptr;

		dev_warn(hsotg->dev,
			 "%s: FIFO %d bytes on ep%d but no req (DxEPCTl=0x%08x)\n",
			 __func__, size, ep_idx, epctl);

		/* dump the data from the FIFO, we've nothing we can do */
		for (ptr = 0; ptr < size; ptr += 4)
			(void)readl(fifo);

		return;
	}

	spin_lock(&hs_ep->lock);

	to_read = size;
	read_ptr = hs_req->req.actual;
	max_req = hs_req->req.length - read_ptr;

	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
		__func__, to_read, max_req, read_ptr, hs_req->req.length);

	if (to_read > max_req) {
		/*
		 * more data appeared than we where willing
		 * to deal with in this request.
		 */

		/* currently we don't deal this */
		WARN_ON_ONCE(1);
	}

	hs_ep->total_data += to_read;
	hs_req->req.actual += to_read;
	/* FIFO is read in 32-bit words; round the byte count up */
	to_read = DIV_ROUND_UP(to_read, 4);

	/*
	 * note, we might over-write the buffer end by 3 bytes depending on
	 * alignment of the data.
	 */
	readsl(fifo, hs_req->req.buf + read_ptr, to_read);

	spin_unlock(&hs_ep->lock);
}
1491
/**
 * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
 * @hsotg: The device instance
 * @req: The request currently on this endpoint
 *
 * Generate a zero-length IN packet request for terminating a SETUP
 * transaction.
 *
 * Note, since we don't write any data to the TxFIFO, then it is
 * currently believed that we do not need to wait for any space in
 * the TxFIFO.
 */
static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
			       struct s3c_hsotg_req *req)
{
	u32 ctrl;

	if (!req) {
		dev_warn(hsotg->dev, "%s: no request?\n", __func__);
		return;
	}

	if (req->req.length == 0) {
		/* request itself was zero-length; just re-arm for SETUP */
		hsotg->eps[0].sent_zlp = 1;
		s3c_hsotg_enqueue_setup(hsotg);
		return;
	}

	hsotg->eps[0].dir_in = 1;
	hsotg->eps[0].sent_zlp = 1;

	dev_dbg(hsotg->dev, "sending zero-length packet\n");

	/* issue a zero-sized packet to terminate this */
	writel(DxEPTSIZ_MC(1) | DxEPTSIZ_PktCnt(1) |
	       DxEPTSIZ_XferSize(0), hsotg->regs + DIEPTSIZ(0));

	ctrl = readl(hsotg->regs + DIEPCTL0);
	ctrl |= DxEPCTL_CNAK;	/* clear NAK set by core */
	ctrl |= DxEPCTL_EPEna;	/* ensure ep enabled */
	ctrl |= DxEPCTL_USBActEp;
	writel(ctrl, hsotg->regs + DIEPCTL0);
}
1535
/**
 * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
 * @hsotg: The device instance
 * @epnum: The endpoint received from
 * @was_setup: Set if processing a SetupDone event.
 *
 * The RXFIFO has delivered an OutDone event, which means that the data
 * transfer for an OUT endpoint has been completed, either by a short
 * packet or by the finish of a transfer.
 */
static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
				     int epnum, bool was_setup)
{
	u32 epsize = readl(hsotg->regs + DOEPTSIZ(epnum));
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	/* NOTE(review): &hs_req->req is computed before the NULL check
	 * below — address-only, but tidier to move after the check */
	struct usb_request *req = &hs_req->req;
	unsigned size_left = DxEPTSIZ_XferSize_GET(epsize);
	int result = 0;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
		return;
	}

	if (using_dma(hsotg)) {
		unsigned size_done;

		/*
		 * Calculate the size of the transfer by checking how much
		 * is left in the endpoint size register and then working it
		 * out from the amount we loaded for the transfer.
		 *
		 * We need to do this as DMA pointers are always 32bit aligned
		 * so may overshoot/undershoot the transfer.
		 */

		size_done = hs_ep->size_loaded - size_left;
		size_done += hs_ep->last_load;

		req->actual = size_done;
	}

	/* if there is more request to do, schedule new transfer */
	if (req->actual < req->length && size_left == 0) {
		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
		return;
	} else if (epnum == 0) {
		/*
		 * After was_setup = 1 =>
		 * set CNAK for non Setup requests
		 */
		hsotg->setup = was_setup ? 0 : 1;
	}

	if (req->actual < req->length && req->short_not_ok) {
		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
			__func__, req->actual, req->length);

		/*
		 * todo - what should we return here? there's no one else
		 * even bothering to check the status.
		 */
	}

	if (epnum == 0) {
		/*
		 * Condition req->complete != s3c_hsotg_complete_setup says:
		 * send ZLP when we have an asynchronous request from gadget
		 */
		if (!was_setup && req->complete != s3c_hsotg_complete_setup)
			s3c_hsotg_send_zlp(hsotg, hs_req);
	}

	s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, result);
}
1612
1613/**
1614 * s3c_hsotg_read_frameno - read current frame number
1615 * @hsotg: The device instance
1616 *
1617 * Return the current frame number
8b9bc460 1618 */
5b7d70c6
BD
1619static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
1620{
1621 u32 dsts;
1622
94cb8fd6
LM
1623 dsts = readl(hsotg->regs + DSTS);
1624 dsts &= DSTS_SOFFN_MASK;
1625 dsts >>= DSTS_SOFFN_SHIFT;
5b7d70c6
BD
1626
1627 return dsts;
1628}
1629
1630/**
1631 * s3c_hsotg_handle_rx - RX FIFO has data
1632 * @hsotg: The device instance
1633 *
1634 * The IRQ handler has detected that the RX FIFO has some data in it
1635 * that requires processing, so find out what is in there and do the
1636 * appropriate read.
1637 *
25985edc 1638 * The RXFIFO is a true FIFO, the packets coming out are still in packet
5b7d70c6
BD
1639 * chunks, so if you have x packets received on an endpoint you'll get x
1640 * FIFO events delivered, each with a packet's worth of data in it.
1641 *
1642 * When using DMA, we should not be processing events from the RXFIFO
1643 * as the actual data should be sent to the memory directly and we turn
1644 * on the completion interrupts to get notifications of transfer completion.
1645 */
0978f8c5 1646static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
5b7d70c6 1647{
94cb8fd6 1648 u32 grxstsr = readl(hsotg->regs + GRXSTSP);
5b7d70c6
BD
1649 u32 epnum, status, size;
1650
1651 WARN_ON(using_dma(hsotg));
1652
94cb8fd6
LM
1653 epnum = grxstsr & GRXSTS_EPNum_MASK;
1654 status = grxstsr & GRXSTS_PktSts_MASK;
5b7d70c6 1655
94cb8fd6
LM
1656 size = grxstsr & GRXSTS_ByteCnt_MASK;
1657 size >>= GRXSTS_ByteCnt_SHIFT;
5b7d70c6
BD
1658
1659 if (1)
1660 dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
1661 __func__, grxstsr, size, epnum);
1662
94cb8fd6 1663#define __status(x) ((x) >> GRXSTS_PktSts_SHIFT)
5b7d70c6 1664
94cb8fd6
LM
1665 switch (status >> GRXSTS_PktSts_SHIFT) {
1666 case __status(GRXSTS_PktSts_GlobalOutNAK):
5b7d70c6
BD
1667 dev_dbg(hsotg->dev, "GlobalOutNAK\n");
1668 break;
1669
94cb8fd6 1670 case __status(GRXSTS_PktSts_OutDone):
5b7d70c6
BD
1671 dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
1672 s3c_hsotg_read_frameno(hsotg));
1673
1674 if (!using_dma(hsotg))
1675 s3c_hsotg_handle_outdone(hsotg, epnum, false);
1676 break;
1677
94cb8fd6 1678 case __status(GRXSTS_PktSts_SetupDone):
5b7d70c6
BD
1679 dev_dbg(hsotg->dev,
1680 "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1681 s3c_hsotg_read_frameno(hsotg),
94cb8fd6 1682 readl(hsotg->regs + DOEPCTL(0)));
5b7d70c6
BD
1683
1684 s3c_hsotg_handle_outdone(hsotg, epnum, true);
1685 break;
1686
94cb8fd6 1687 case __status(GRXSTS_PktSts_OutRX):
5b7d70c6
BD
1688 s3c_hsotg_rx_data(hsotg, epnum, size);
1689 break;
1690
94cb8fd6 1691 case __status(GRXSTS_PktSts_SetupRX):
5b7d70c6
BD
1692 dev_dbg(hsotg->dev,
1693 "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1694 s3c_hsotg_read_frameno(hsotg),
94cb8fd6 1695 readl(hsotg->regs + DOEPCTL(0)));
5b7d70c6
BD
1696
1697 s3c_hsotg_rx_data(hsotg, epnum, size);
1698 break;
1699
1700 default:
1701 dev_warn(hsotg->dev, "%s: unknown status %08x\n",
1702 __func__, grxstsr);
1703
1704 s3c_hsotg_dump(hsotg);
1705 break;
1706 }
1707}
1708
1709/**
1710 * s3c_hsotg_ep0_mps - turn max packet size into register setting
1711 * @mps: The maximum packet size in bytes.
8b9bc460 1712 */
5b7d70c6
BD
1713static u32 s3c_hsotg_ep0_mps(unsigned int mps)
1714{
1715 switch (mps) {
1716 case 64:
94cb8fd6 1717 return D0EPCTL_MPS_64;
5b7d70c6 1718 case 32:
94cb8fd6 1719 return D0EPCTL_MPS_32;
5b7d70c6 1720 case 16:
94cb8fd6 1721 return D0EPCTL_MPS_16;
5b7d70c6 1722 case 8:
94cb8fd6 1723 return D0EPCTL_MPS_8;
5b7d70c6
BD
1724 }
1725
1726 /* bad max packet size, warn and return invalid result */
1727 WARN_ON(1);
1728 return (u32)-1;
1729}
1730
1731/**
1732 * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
1733 * @hsotg: The driver state.
1734 * @ep: The index number of the endpoint
1735 * @mps: The maximum packet size in bytes
1736 *
1737 * Configure the maximum packet size for the given endpoint, updating
1738 * the hardware control registers to reflect this.
1739 */
1740static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
1741 unsigned int ep, unsigned int mps)
1742{
1743 struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
1744 void __iomem *regs = hsotg->regs;
1745 u32 mpsval;
1746 u32 reg;
1747
1748 if (ep == 0) {
1749 /* EP0 is a special case */
1750 mpsval = s3c_hsotg_ep0_mps(mps);
1751 if (mpsval > 3)
1752 goto bad_mps;
1753 } else {
94cb8fd6 1754 if (mps >= DxEPCTL_MPS_LIMIT+1)
5b7d70c6
BD
1755 goto bad_mps;
1756
1757 mpsval = mps;
1758 }
1759
1760 hs_ep->ep.maxpacket = mps;
1761
8b9bc460
LM
1762 /*
1763 * update both the in and out endpoint controldir_ registers, even
1764 * if one of the directions may not be in use.
1765 */
5b7d70c6 1766
94cb8fd6
LM
1767 reg = readl(regs + DIEPCTL(ep));
1768 reg &= ~DxEPCTL_MPS_MASK;
5b7d70c6 1769 reg |= mpsval;
94cb8fd6 1770 writel(reg, regs + DIEPCTL(ep));
5b7d70c6 1771
659ad60c 1772 if (ep) {
94cb8fd6
LM
1773 reg = readl(regs + DOEPCTL(ep));
1774 reg &= ~DxEPCTL_MPS_MASK;
659ad60c 1775 reg |= mpsval;
94cb8fd6 1776 writel(reg, regs + DOEPCTL(ep));
659ad60c 1777 }
5b7d70c6
BD
1778
1779 return;
1780
1781bad_mps:
1782 dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
1783}
1784
9c39ddc6
AT
1785/**
1786 * s3c_hsotg_txfifo_flush - flush Tx FIFO
1787 * @hsotg: The driver state
1788 * @idx: The index for the endpoint (0..15)
1789 */
1790static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)
1791{
1792 int timeout;
1793 int val;
1794
94cb8fd6
LM
1795 writel(GRSTCTL_TxFNum(idx) | GRSTCTL_TxFFlsh,
1796 hsotg->regs + GRSTCTL);
9c39ddc6
AT
1797
1798 /* wait until the fifo is flushed */
1799 timeout = 100;
1800
1801 while (1) {
94cb8fd6 1802 val = readl(hsotg->regs + GRSTCTL);
9c39ddc6 1803
94cb8fd6 1804 if ((val & (GRSTCTL_TxFFlsh)) == 0)
9c39ddc6
AT
1805 break;
1806
1807 if (--timeout == 0) {
1808 dev_err(hsotg->dev,
1809 "%s: timeout flushing fifo (GRSTCTL=%08x)\n",
1810 __func__, val);
1811 }
1812
1813 udelay(1);
1814 }
1815}
5b7d70c6
BD
1816
1817/**
1818 * s3c_hsotg_trytx - check to see if anything needs transmitting
1819 * @hsotg: The driver state
1820 * @hs_ep: The driver endpoint to check.
1821 *
1822 * Check to see if there is a request that has data to send, and if so
1823 * make an attempt to write data into the FIFO.
1824 */
1825static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
1826 struct s3c_hsotg_ep *hs_ep)
1827{
1828 struct s3c_hsotg_req *hs_req = hs_ep->req;
1829
1830 if (!hs_ep->dir_in || !hs_req)
1831 return 0;
1832
1833 if (hs_req->req.actual < hs_req->req.length) {
1834 dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
1835 hs_ep->index);
1836 return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1837 }
1838
1839 return 0;
1840}
1841
/**
 * s3c_hsotg_complete_in - complete IN transfer
 * @hsotg: The device state.
 * @hs_ep: The endpoint that has just completed.
 *
 * An IN transfer has been completed, update the transfer's state and then
 * call the relevant completion routines.
 */
static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
				  struct s3c_hsotg_ep *hs_ep)
{
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
	int size_left, size_done;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "XferCompl but no req\n");
		return;
	}

	/* Finish ZLP handling for IN EP0 transactions */
	if (hsotg->eps[0].sent_zlp) {
		dev_dbg(hsotg->dev, "zlp packet received\n");
		s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);
		return;
	}

	/*
	 * Calculate the size of the transfer by checking how much is left
	 * in the endpoint size register and then working it out from
	 * the amount we loaded for the transfer.
	 *
	 * We do this even for DMA, as the transfer may have incremented
	 * past the end of the buffer (DMA transfers are always 32bit
	 * aligned).
	 */

	size_left = DxEPTSIZ_XferSize_GET(epsize);

	size_done = hs_ep->size_loaded - size_left;
	size_done += hs_ep->last_load;

	if (hs_req->req.actual != size_done)
		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
			__func__, hs_req->req.actual, size_done);

	hs_req->req.actual = size_done;
	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);

	/*
	 * Check if dealing with Maximum Packet Size(MPS) IN transfer at EP0
	 * When sent data is a multiple MPS size (e.g. 64B ,128B ,192B
	 * ,256B ... ), after last MPS sized packet send IN ZLP packet to
	 * inform the host that no more data is available.
	 * The state of req.zero member is checked to be sure that the value to
	 * send is smaller than wValue expected from host.
	 * Check req.length to NOT send another ZLP when the current one is
	 * under completion (the one for which this completion has been called).
	 */
	if (hs_req->req.length && hs_ep->index == 0 && hs_req->req.zero &&
	    hs_req->req.length == hs_req->req.actual &&
	    !(hs_req->req.length % hs_ep->ep.maxpacket)) {

		dev_dbg(hsotg->dev, "ep0 zlp IN packet sent\n");
		s3c_hsotg_send_zlp(hsotg, hs_req);

		return;
	}

	/* transfer not finished but hardware count exhausted: reload */
	if (!size_left && hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
	} else
		s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);
}
1918
/**
 * s3c_hsotg_epint - handle an in/out endpoint interrupt
 * @hsotg: The driver state
 * @idx: The index for the endpoint (0..15)
 * @dir_in: Set if this is an IN endpoint
 *
 * Process and clear any interrupt pending for an individual endpoint
 */
static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
			    int dir_in)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
	u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
	u32 ints;

	ints = readl(hsotg->regs + epint_reg);

	/* Clear endpoint interrupts */
	writel(ints, hsotg->regs + epint_reg);

	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
		__func__, idx, dir_in ? "in" : "out", ints);

	if (ints & DxEPINT_XferCompl) {
		dev_dbg(hsotg->dev,
			"%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
			__func__, readl(hsotg->regs + epctl_reg),
			readl(hsotg->regs + epsiz_reg));

		/*
		 * we get OutDone from the FIFO, so we only need to look
		 * at completing IN requests here
		 */
		if (dir_in) {
			s3c_hsotg_complete_in(hsotg, hs_ep);

			/* on EP0, re-arm for the next SETUP packet */
			if (idx == 0 && !hs_ep->req)
				s3c_hsotg_enqueue_setup(hsotg);
		} else if (using_dma(hsotg)) {
			/*
			 * We're using DMA, we need to fire an OutDone here
			 * as we ignore the RXFIFO.
			 */

			s3c_hsotg_handle_outdone(hsotg, idx, false);
		}
	}

	if (ints & DxEPINT_EPDisbld) {
		dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);

		if (dir_in) {
			int epctl = readl(hsotg->regs + epctl_reg);

			s3c_hsotg_txfifo_flush(hsotg, idx);

			/* stalled bulk IN endpoint: clear the global
			 * non-periodic IN NAK so traffic can resume */
			if ((epctl & DxEPCTL_Stall) &&
			    (epctl & DxEPCTL_EPType_Bulk)) {
				int dctl = readl(hsotg->regs + DCTL);

				dctl |= DCTL_CGNPInNAK;
				writel(dctl, hsotg->regs + DCTL);
			}
		}
	}

	if (ints & DxEPINT_AHBErr)
		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);

	if (ints & DxEPINT_Setup) {  /* Setup or Timeout */
		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);

		if (using_dma(hsotg) && idx == 0) {
			/*
			 * this is the notification we've received a
			 * setup packet. In non-DMA mode we'd get this
			 * from the RXFIFO, instead we need to process
			 * the setup here.
			 */

			if (dir_in)
				WARN_ON_ONCE(1);
			else
				s3c_hsotg_handle_outdone(hsotg, 0, true);
		}
	}

	if (ints & DxEPINT_Back2BackSetup)
		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);

	if (dir_in) {
		/* not sure if this is important, but we'll clear it anyway */
		if (ints & DIEPMSK_INTknTXFEmpMsk) {
			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
				__func__, idx);
		}

		/* this probably means something bad is happening */
		if (ints & DIEPMSK_INTknEPMisMsk) {
			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
				 __func__, idx);
		}

		/* FIFO has space or is empty (see GAHBCFG) */
		if (hsotg->dedicated_fifos &&
		    ints & DIEPMSK_TxFIFOEmpty) {
			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
				__func__, idx);
			/* in slave mode, push more data into the FIFO */
			if (!using_dma(hsotg))
				s3c_hsotg_trytx(hsotg, hs_ep);
		}
	}
}
2034
/**
 * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
 * @hsotg: The device state.
 *
 * Handle updating the device settings after the enumeration phase has
 * been completed.
 */
static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
{
	u32 dsts = readl(hsotg->regs + DSTS);
	int ep0_mps = 0, ep_mps;

	/*
	 * This should signal the finish of the enumeration phase
	 * of the USB handshaking, so we should now know what rate
	 * we connected at.
	 */

	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);

	/*
	 * note, since we're limited by the size of transfer on EP0, and
	 * it seems IN transfers must be a even number of packets we do
	 * not advertise a 64byte MPS on EP0.
	 */

	/* catch both EnumSpd_FS and EnumSpd_FS48 */
	switch (dsts & DSTS_EnumSpd_MASK) {
	case DSTS_EnumSpd_FS:
	case DSTS_EnumSpd_FS48:
		hsotg->gadget.speed = USB_SPEED_FULL;
		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 64;
		break;

	case DSTS_EnumSpd_HS:
		hsotg->gadget.speed = USB_SPEED_HIGH;
		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 512;
		break;

	case DSTS_EnumSpd_LS:
		hsotg->gadget.speed = USB_SPEED_LOW;
		/*
		 * note, we don't actually support LS in this driver at the
		 * moment, and the documentation seems to imply that it isn't
		 * supported by the PHYs on some of the devices.
		 *
		 * ep0_mps stays 0 here, so the maxpacket update below is
		 * skipped and ep_mps is never read uninitialised.
		 */
		break;
	}
	dev_info(hsotg->dev, "new device is %s\n",
		 usb_speed_string(hsotg->gadget.speed));

	/*
	 * we should now know the maximum packet size for an
	 * endpoint, so set the endpoints to a default value.
	 */

	if (ep0_mps) {
		int i;
		/* ep0 gets its own limit; all other endpoints share ep_mps */
		s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
		for (i = 1; i < hsotg->num_of_eps; i++)
			s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
	}

	/* ensure after enumeration our EP0 is active */

	s3c_hsotg_enqueue_setup(hsotg);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + DIEPCTL0),
		readl(hsotg->regs + DOEPCTL0));
}
2108
2109/**
2110 * kill_all_requests - remove all requests from the endpoint's queue
2111 * @hsotg: The device state.
2112 * @ep: The endpoint the requests may be on.
2113 * @result: The result code to use.
2114 * @force: Force removal of any current requests
2115 *
2116 * Go through the requests on the given endpoint and mark them
2117 * completed with the given result code.
2118 */
2119static void kill_all_requests(struct s3c_hsotg *hsotg,
2120 struct s3c_hsotg_ep *ep,
2121 int result, bool force)
2122{
2123 struct s3c_hsotg_req *req, *treq;
2124 unsigned long flags;
2125
2126 spin_lock_irqsave(&ep->lock, flags);
2127
2128 list_for_each_entry_safe(req, treq, &ep->queue, queue) {
8b9bc460
LM
2129 /*
2130 * currently, we can't do much about an already
2131 * running request on an in endpoint
2132 */
5b7d70c6
BD
2133
2134 if (ep->req == req && ep->dir_in && !force)
2135 continue;
2136
2137 s3c_hsotg_complete_request(hsotg, ep, req,
2138 result);
2139 }
2140
2141 spin_unlock_irqrestore(&ep->lock, flags);
2142}
2143
/*
 * call_gadget - invoke a gadget driver callback if it is safe to do so.
 *
 * Only calls the driver's _entry hook when a driver is bound, the hook
 * exists, and the link speed is known. Wrapped in do { } while (0) so
 * the macro behaves as a single statement (the previous bare `if' form
 * was a dangling-else hazard at call sites).
 */
#define call_gadget(_hs, _entry) \
do { \
	if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \
	    (_hs)->driver && (_hs)->driver->_entry) \
		(_hs)->driver->_entry(&(_hs)->gadget); \
} while (0)
2148
2149/**
5e891342 2150 * s3c_hsotg_disconnect - disconnect service
5b7d70c6
BD
2151 * @hsotg: The device state.
2152 *
5e891342
LM
2153 * The device has been disconnected. Remove all current
2154 * transactions and signal the gadget driver that this
2155 * has happened.
8b9bc460 2156 */
5e891342 2157static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg)
5b7d70c6
BD
2158{
2159 unsigned ep;
2160
b3f489b2 2161 for (ep = 0; ep < hsotg->num_of_eps; ep++)
5b7d70c6
BD
2162 kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);
2163
2164 call_gadget(hsotg, disconnect);
2165}
2166
2167/**
2168 * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
2169 * @hsotg: The device state:
2170 * @periodic: True if this is a periodic FIFO interrupt
2171 */
2172static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
2173{
2174 struct s3c_hsotg_ep *ep;
2175 int epno, ret;
2176
2177 /* look through for any more data to transmit */
2178
b3f489b2 2179 for (epno = 0; epno < hsotg->num_of_eps; epno++) {
5b7d70c6
BD
2180 ep = &hsotg->eps[epno];
2181
2182 if (!ep->dir_in)
2183 continue;
2184
2185 if ((periodic && !ep->periodic) ||
2186 (!periodic && ep->periodic))
2187 continue;
2188
2189 ret = s3c_hsotg_trytx(hsotg, ep);
2190 if (ret < 0)
2191 break;
2192 }
2193}
2194
5b7d70c6 2195/* IRQ flags which will trigger a retry around the IRQ loop */
94cb8fd6
LM
2196#define IRQ_RETRY_MASK (GINTSTS_NPTxFEmp | \
2197 GINTSTS_PTxFEmp | \
2198 GINTSTS_RxFLvl)
5b7d70c6 2199
308d734e
LM
2200/**
2201 * s3c_hsotg_corereset - issue softreset to the core
2202 * @hsotg: The device state
2203 *
2204 * Issue a soft reset to the core, and await the core finishing it.
8b9bc460 2205 */
308d734e
LM
2206static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
2207{
2208 int timeout;
2209 u32 grstctl;
2210
2211 dev_dbg(hsotg->dev, "resetting core\n");
2212
2213 /* issue soft reset */
94cb8fd6 2214 writel(GRSTCTL_CSftRst, hsotg->regs + GRSTCTL);
308d734e
LM
2215
2216 timeout = 1000;
2217 do {
94cb8fd6
LM
2218 grstctl = readl(hsotg->regs + GRSTCTL);
2219 } while ((grstctl & GRSTCTL_CSftRst) && timeout-- > 0);
308d734e 2220
94cb8fd6 2221 if (grstctl & GRSTCTL_CSftRst) {
308d734e
LM
2222 dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
2223 return -EINVAL;
2224 }
2225
2226 timeout = 1000;
2227
2228 while (1) {
94cb8fd6 2229 u32 grstctl = readl(hsotg->regs + GRSTCTL);
308d734e
LM
2230
2231 if (timeout-- < 0) {
2232 dev_info(hsotg->dev,
2233 "%s: reset failed, GRSTCTL=%08x\n",
2234 __func__, grstctl);
2235 return -ETIMEDOUT;
2236 }
2237
94cb8fd6 2238 if (!(grstctl & GRSTCTL_AHBIdle))
308d734e
LM
2239 continue;
2240
2241 break; /* reset done */
2242 }
2243
2244 dev_dbg(hsotg->dev, "reset successful\n");
2245 return 0;
2246}
2247
8b9bc460
LM
2248/**
2249 * s3c_hsotg_core_init - issue softreset to the core
2250 * @hsotg: The device state
2251 *
2252 * Issue a soft reset to the core, and await the core finishing it.
2253 */
308d734e
LM
2254static void s3c_hsotg_core_init(struct s3c_hsotg *hsotg)
2255{
2256 s3c_hsotg_corereset(hsotg);
2257
2258 /*
2259 * we must now enable ep0 ready for host detection and then
2260 * set configuration.
2261 */
2262
2263 /* set the PLL on, remove the HNP/SRP and set the PHY */
94cb8fd6
LM
2264 writel(GUSBCFG_PHYIf16 | GUSBCFG_TOutCal(7) |
2265 (0x5 << 10), hsotg->regs + GUSBCFG);
308d734e
LM
2266
2267 s3c_hsotg_init_fifo(hsotg);
2268
94cb8fd6 2269 __orr32(hsotg->regs + DCTL, DCTL_SftDiscon);
308d734e 2270
94cb8fd6 2271 writel(1 << 18 | DCFG_DevSpd_HS, hsotg->regs + DCFG);
308d734e
LM
2272
2273 /* Clear any pending OTG interrupts */
94cb8fd6 2274 writel(0xffffffff, hsotg->regs + GOTGINT);
308d734e
LM
2275
2276 /* Clear any pending interrupts */
94cb8fd6 2277 writel(0xffffffff, hsotg->regs + GINTSTS);
308d734e 2278
94cb8fd6
LM
2279 writel(GINTSTS_ErlySusp | GINTSTS_SessReqInt |
2280 GINTSTS_GOUTNakEff | GINTSTS_GINNakEff |
2281 GINTSTS_ConIDStsChng | GINTSTS_USBRst |
2282 GINTSTS_EnumDone | GINTSTS_OTGInt |
2283 GINTSTS_USBSusp | GINTSTS_WkUpInt,
2284 hsotg->regs + GINTMSK);
308d734e
LM
2285
2286 if (using_dma(hsotg))
94cb8fd6
LM
2287 writel(GAHBCFG_GlblIntrEn | GAHBCFG_DMAEn |
2288 GAHBCFG_HBstLen_Incr4,
2289 hsotg->regs + GAHBCFG);
308d734e 2290 else
94cb8fd6 2291 writel(GAHBCFG_GlblIntrEn, hsotg->regs + GAHBCFG);
308d734e
LM
2292
2293 /*
2294 * Enabling INTknTXFEmpMsk here seems to be a big mistake, we end
2295 * up being flooded with interrupts if the host is polling the
2296 * endpoint to try and read data.
2297 */
2298
94cb8fd6
LM
2299 writel(((hsotg->dedicated_fifos) ? DIEPMSK_TxFIFOEmpty : 0) |
2300 DIEPMSK_EPDisbldMsk | DIEPMSK_XferComplMsk |
2301 DIEPMSK_TimeOUTMsk | DIEPMSK_AHBErrMsk |
2302 DIEPMSK_INTknEPMisMsk,
2303 hsotg->regs + DIEPMSK);
308d734e
LM
2304
2305 /*
2306 * don't need XferCompl, we get that from RXFIFO in slave mode. In
2307 * DMA mode we may need this.
2308 */
94cb8fd6
LM
2309 writel((using_dma(hsotg) ? (DIEPMSK_XferComplMsk |
2310 DIEPMSK_TimeOUTMsk) : 0) |
2311 DOEPMSK_EPDisbldMsk | DOEPMSK_AHBErrMsk |
2312 DOEPMSK_SetupMsk,
2313 hsotg->regs + DOEPMSK);
308d734e 2314
94cb8fd6 2315 writel(0, hsotg->regs + DAINTMSK);
308d734e
LM
2316
2317 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
94cb8fd6
LM
2318 readl(hsotg->regs + DIEPCTL0),
2319 readl(hsotg->regs + DOEPCTL0));
308d734e
LM
2320
2321 /* enable in and out endpoint interrupts */
94cb8fd6 2322 s3c_hsotg_en_gsint(hsotg, GINTSTS_OEPInt | GINTSTS_IEPInt);
308d734e
LM
2323
2324 /*
2325 * Enable the RXFIFO when in slave mode, as this is how we collect
2326 * the data. In DMA mode, we get events from the FIFO but also
2327 * things we cannot process, so do not use it.
2328 */
2329 if (!using_dma(hsotg))
94cb8fd6 2330 s3c_hsotg_en_gsint(hsotg, GINTSTS_RxFLvl);
308d734e
LM
2331
2332 /* Enable interrupts for EP0 in and out */
2333 s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
2334 s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);
2335
94cb8fd6 2336 __orr32(hsotg->regs + DCTL, DCTL_PWROnPrgDone);
308d734e 2337 udelay(10); /* see openiboot */
94cb8fd6 2338 __bic32(hsotg->regs + DCTL, DCTL_PWROnPrgDone);
308d734e 2339
94cb8fd6 2340 dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + DCTL));
308d734e
LM
2341
2342 /*
94cb8fd6 2343 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
308d734e
LM
2344 * writing to the EPCTL register..
2345 */
2346
2347 /* set to read 1 8byte packet */
94cb8fd6
LM
2348 writel(DxEPTSIZ_MC(1) | DxEPTSIZ_PktCnt(1) |
2349 DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0);
308d734e
LM
2350
2351 writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
94cb8fd6
LM
2352 DxEPCTL_CNAK | DxEPCTL_EPEna |
2353 DxEPCTL_USBActEp,
2354 hsotg->regs + DOEPCTL0);
308d734e
LM
2355
2356 /* enable, but don't activate EP0in */
2357 writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
94cb8fd6 2358 DxEPCTL_USBActEp, hsotg->regs + DIEPCTL0);
308d734e
LM
2359
2360 s3c_hsotg_enqueue_setup(hsotg);
2361
2362 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
94cb8fd6
LM
2363 readl(hsotg->regs + DIEPCTL0),
2364 readl(hsotg->regs + DOEPCTL0));
308d734e
LM
2365
2366 /* clear global NAKs */
94cb8fd6
LM
2367 writel(DCTL_CGOUTNak | DCTL_CGNPInNAK,
2368 hsotg->regs + DCTL);
308d734e
LM
2369
2370 /* must be at-least 3ms to allow bus to see disconnect */
2371 mdelay(3);
2372
2373 /* remove the soft-disconnect and let's go */
94cb8fd6 2374 __bic32(hsotg->regs + DCTL, DCTL_SftDiscon);
308d734e
LM
2375}
2376
5b7d70c6
BD
/**
 * s3c_hsotg_irq - handle device interrupt
 * @irq: The IRQ number triggered
 * @pw: The pw value when registered the handler.
 *
 * Reads and dispatches the masked global interrupt status. FIFO-level
 * events may still be pending after one pass, so the handler loops a
 * bounded number of times (retry_count) before returning.
 */
static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
{
	struct s3c_hsotg *hsotg = pw;
	int retry_count = 8;	/* bound on IRQ_RETRY_MASK re-runs */
	u32 gintsts;
	u32 gintmsk;

irq_retry:
	gintsts = readl(hsotg->regs + GINTSTS);
	gintmsk = readl(hsotg->regs + GINTMSK);

	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);

	/* only service interrupts that are currently unmasked */
	gintsts &= gintmsk;

	if (gintsts & GINTSTS_OTGInt) {
		u32 otgint = readl(hsotg->regs + GOTGINT);

		dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);

		/* write-back clears whichever OTG events were set */
		writel(otgint, hsotg->regs + GOTGINT);
	}

	if (gintsts & GINTSTS_SessReqInt) {
		dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
		writel(GINTSTS_SessReqInt, hsotg->regs + GINTSTS);
	}

	if (gintsts & GINTSTS_EnumDone) {
		writel(GINTSTS_EnumDone, hsotg->regs + GINTSTS);

		s3c_hsotg_irq_enumdone(hsotg);
	}

	if (gintsts & GINTSTS_ConIDStsChng) {
		dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
			readl(hsotg->regs + DSTS),
			readl(hsotg->regs + GOTGCTL));

		writel(GINTSTS_ConIDStsChng, hsotg->regs + GINTSTS);
	}

	if (gintsts & (GINTSTS_OEPInt | GINTSTS_IEPInt)) {
		u32 daint = readl(hsotg->regs + DAINT);
		/* DAINT packs OUT endpoints in the high half, IN in the low */
		u32 daint_out = daint >> DAINT_OutEP_SHIFT;
		u32 daint_in = daint & ~(daint_out << DAINT_OutEP_SHIFT);
		int ep;

		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);

		for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
			if (daint_out & 1)
				s3c_hsotg_epint(hsotg, ep, 0);
		}

		for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
			if (daint_in & 1)
				s3c_hsotg_epint(hsotg, ep, 1);
		}
	}

	if (gintsts & GINTSTS_USBRst) {

		u32 usb_status = readl(hsotg->regs + GOTGCTL);

		dev_info(hsotg->dev, "%s: USBRst\n", __func__);
		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
			readl(hsotg->regs + GNPTXSTS));

		writel(GINTSTS_USBRst, hsotg->regs + GINTSTS);

		if (usb_status & GOTGCTL_BSESVLD) {
			/* debounce: only re-init the core if the last
			 * reset was more than 200ms ago */
			if (time_after(jiffies, hsotg->last_rst +
				       msecs_to_jiffies(200))) {

				kill_all_requests(hsotg, &hsotg->eps[0],
							  -ECONNRESET, true);

				s3c_hsotg_core_init(hsotg);
				hsotg->last_rst = jiffies;
			}
		}
	}

	/* check both FIFOs */

	if (gintsts & GINTSTS_NPTxFEmp) {
		dev_dbg(hsotg->dev, "NPTxFEmp\n");

		/*
		 * Disable the interrupt to stop it happening again
		 * unless one of these endpoint routines decides that
		 * it needs re-enabling
		 */

		s3c_hsotg_disable_gsint(hsotg, GINTSTS_NPTxFEmp);
		s3c_hsotg_irq_fifoempty(hsotg, false);
	}

	if (gintsts & GINTSTS_PTxFEmp) {
		dev_dbg(hsotg->dev, "PTxFEmp\n");

		/* See note in GINTSTS_NPTxFEmp */

		s3c_hsotg_disable_gsint(hsotg, GINTSTS_PTxFEmp);
		s3c_hsotg_irq_fifoempty(hsotg, true);
	}

	if (gintsts & GINTSTS_RxFLvl) {
		/*
		 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
		 * we need to retry s3c_hsotg_handle_rx if this is still
		 * set.
		 */

		s3c_hsotg_handle_rx(hsotg);
	}

	if (gintsts & GINTSTS_ModeMis) {
		dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
		writel(GINTSTS_ModeMis, hsotg->regs + GINTSTS);
	}

	if (gintsts & GINTSTS_USBSusp) {
		dev_info(hsotg->dev, "GINTSTS_USBSusp\n");
		writel(GINTSTS_USBSusp, hsotg->regs + GINTSTS);

		call_gadget(hsotg, suspend);
		s3c_hsotg_disconnect(hsotg);
	}

	if (gintsts & GINTSTS_WkUpInt) {
		dev_info(hsotg->dev, "GINTSTS_WkUpIn\n");
		writel(GINTSTS_WkUpInt, hsotg->regs + GINTSTS);

		call_gadget(hsotg, resume);
	}

	if (gintsts & GINTSTS_ErlySusp) {
		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
		writel(GINTSTS_ErlySusp, hsotg->regs + GINTSTS);

		s3c_hsotg_disconnect(hsotg);
	}

	/*
	 * these next two seem to crop-up occasionally causing the core
	 * to shutdown the USB transfer, so try clearing them and logging
	 * the occurrence.
	 */

	if (gintsts & GINTSTS_GOUTNakEff) {
		dev_info(hsotg->dev, "GOUTNakEff triggered\n");

		writel(DCTL_CGOUTNak, hsotg->regs + DCTL);

		s3c_hsotg_dump(hsotg);
	}

	if (gintsts & GINTSTS_GINNakEff) {
		dev_info(hsotg->dev, "GINNakEff triggered\n");

		writel(DCTL_CGNPInNAK, hsotg->regs + DCTL);

		s3c_hsotg_dump(hsotg);
	}

	/*
	 * if we've had fifo events, we should try and go around the
	 * loop again to see if there's any point in returning yet.
	 */

	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
		goto irq_retry;

	return IRQ_HANDLED;
}
2560
/**
 * s3c_hsotg_ep_enable - enable the given endpoint
 * @ep: The USB endpint to configure
 * @desc: The USB endpoint descriptor to configure with.
 *
 * This is called from the USB gadget code's usb_ep_enable().
 *
 * Programs the endpoint's DIEPCTL/DOEPCTL register with the transfer
 * type, max packet size and FIFO assignment from @desc, then enables
 * the endpoint's interrupt. Returns 0 on success, -EINVAL if the
 * descriptor direction disagrees with the endpoint or for ISOC
 * (unsupported) transfers.
 */
static int s3c_hsotg_ep_enable(struct usb_ep *ep,
			       const struct usb_endpoint_descriptor *desc)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hsotg = hs_ep->parent;
	unsigned long flags;
	int index = hs_ep->index;
	u32 epctrl_reg;
	u32 epctrl;
	u32 mps;
	int dir_in;
	int ret = 0;

	dev_dbg(hsotg->dev,
		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
		desc->wMaxPacketSize, desc->bInterval);

	/* not to be called for EP0 */
	WARN_ON(index == 0);

	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
	if (dir_in != hs_ep->dir_in) {
		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
		return -EINVAL;
	}

	mps = usb_endpoint_maxp(desc);

	/* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */

	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
	epctrl = readl(hsotg->regs + epctrl_reg);

	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
		__func__, epctrl, epctrl_reg);

	spin_lock_irqsave(&hs_ep->lock, flags);

	/* replace any previous type/MPS configuration */
	epctrl &= ~(DxEPCTL_EPType_MASK | DxEPCTL_MPS_MASK);
	epctrl |= DxEPCTL_MPS(mps);

	/*
	 * mark the endpoint as active, otherwise the core may ignore
	 * transactions entirely for this endpoint
	 */
	epctrl |= DxEPCTL_USBActEp;

	/*
	 * set the NAK status on the endpoint, otherwise we might try and
	 * do something with data that we've yet got a request to process
	 * since the RXFIFO will take data for an endpoint even if the
	 * size register hasn't been set.
	 */

	epctrl |= DxEPCTL_SNAK;

	/* update the endpoint state */
	hs_ep->ep.maxpacket = mps;

	/* default, set to non-periodic */
	hs_ep->periodic = 0;

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(hsotg->dev, "no current ISOC support\n");
		ret = -EINVAL;
		goto out;

	case USB_ENDPOINT_XFER_BULK:
		epctrl |= DxEPCTL_EPType_Bulk;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dir_in) {
			/*
			 * Allocate our TxFNum by simply using the index
			 * of the endpoint for the moment. We could do
			 * something better if the host indicates how
			 * many FIFOs we are expecting to use.
			 */

			hs_ep->periodic = 1;
			epctrl |= DxEPCTL_TxFNum(index);
		}

		epctrl |= DxEPCTL_EPType_Intterupt;
		break;

	case USB_ENDPOINT_XFER_CONTROL:
		epctrl |= DxEPCTL_EPType_Control;
		break;
	}

	/*
	 * if the hardware has dedicated fifos, we must give each IN EP
	 * a unique tx-fifo even if it is non-periodic.
	 */
	if (dir_in && hsotg->dedicated_fifos)
		epctrl |= DxEPCTL_TxFNum(index);

	/* for non control endpoints, set PID to D0 */
	if (index)
		epctrl |= DxEPCTL_SetD0PID;

	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
		__func__, epctrl);

	writel(epctrl, hsotg->regs + epctrl_reg);
	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
		__func__, readl(hsotg->regs + epctrl_reg));

	/* enable the endpoint interrupt */
	s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);

out:
	spin_unlock_irqrestore(&hs_ep->lock, flags);
	return ret;
}
2687
8b9bc460
LM
2688/**
2689 * s3c_hsotg_ep_disable - disable given endpoint
2690 * @ep: The endpoint to disable.
2691 */
5b7d70c6
BD
2692static int s3c_hsotg_ep_disable(struct usb_ep *ep)
2693{
2694 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2695 struct s3c_hsotg *hsotg = hs_ep->parent;
2696 int dir_in = hs_ep->dir_in;
2697 int index = hs_ep->index;
2698 unsigned long flags;
2699 u32 epctrl_reg;
2700 u32 ctrl;
2701
2702 dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);
2703
2704 if (ep == &hsotg->eps[0].ep) {
2705 dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
2706 return -EINVAL;
2707 }
2708
94cb8fd6 2709 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
5b7d70c6
BD
2710
2711 /* terminate all requests with shutdown */
2712 kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
2713
2714 spin_lock_irqsave(&hs_ep->lock, flags);
2715
2716 ctrl = readl(hsotg->regs + epctrl_reg);
94cb8fd6
LM
2717 ctrl &= ~DxEPCTL_EPEna;
2718 ctrl &= ~DxEPCTL_USBActEp;
2719 ctrl |= DxEPCTL_SNAK;
5b7d70c6
BD
2720
2721 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
2722 writel(ctrl, hsotg->regs + epctrl_reg);
2723
2724 /* disable endpoint interrupts */
2725 s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
2726
2727 spin_unlock_irqrestore(&hs_ep->lock, flags);
2728 return 0;
2729}
2730
2731/**
2732 * on_list - check request is on the given endpoint
2733 * @ep: The endpoint to check.
2734 * @test: The request to test if it is on the endpoint.
8b9bc460 2735 */
5b7d70c6
BD
2736static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
2737{
2738 struct s3c_hsotg_req *req, *treq;
2739
2740 list_for_each_entry_safe(req, treq, &ep->queue, queue) {
2741 if (req == test)
2742 return true;
2743 }
2744
2745 return false;
2746}
2747
8b9bc460
LM
2748/**
2749 * s3c_hsotg_ep_dequeue - dequeue given endpoint
2750 * @ep: The endpoint to dequeue.
2751 * @req: The request to be removed from a queue.
2752 */
5b7d70c6
BD
2753static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2754{
2755 struct s3c_hsotg_req *hs_req = our_req(req);
2756 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2757 struct s3c_hsotg *hs = hs_ep->parent;
2758 unsigned long flags;
2759
2760 dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
2761
5b7d70c6
BD
2762 spin_lock_irqsave(&hs_ep->lock, flags);
2763
2764 if (!on_list(hs_ep, hs_req)) {
2765 spin_unlock_irqrestore(&hs_ep->lock, flags);
2766 return -EINVAL;
2767 }
2768
2769 s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
2770 spin_unlock_irqrestore(&hs_ep->lock, flags);
2771
2772 return 0;
2773}
2774
8b9bc460
LM
2775/**
2776 * s3c_hsotg_ep_sethalt - set halt on a given endpoint
2777 * @ep: The endpoint to set halt.
2778 * @value: Set or unset the halt.
2779 */
5b7d70c6
BD
2780static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
2781{
2782 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2783 struct s3c_hsotg *hs = hs_ep->parent;
2784 int index = hs_ep->index;
2785 unsigned long irqflags;
2786 u32 epreg;
2787 u32 epctl;
9c39ddc6 2788 u32 xfertype;
5b7d70c6
BD
2789
2790 dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
2791
2792 spin_lock_irqsave(&hs_ep->lock, irqflags);
2793
2794 /* write both IN and OUT control registers */
2795
94cb8fd6 2796 epreg = DIEPCTL(index);
5b7d70c6
BD
2797 epctl = readl(hs->regs + epreg);
2798
9c39ddc6 2799 if (value) {
94cb8fd6
LM
2800 epctl |= DxEPCTL_Stall + DxEPCTL_SNAK;
2801 if (epctl & DxEPCTL_EPEna)
2802 epctl |= DxEPCTL_EPDis;
9c39ddc6 2803 } else {
94cb8fd6
LM
2804 epctl &= ~DxEPCTL_Stall;
2805 xfertype = epctl & DxEPCTL_EPType_MASK;
2806 if (xfertype == DxEPCTL_EPType_Bulk ||
2807 xfertype == DxEPCTL_EPType_Intterupt)
2808 epctl |= DxEPCTL_SetD0PID;
9c39ddc6 2809 }
5b7d70c6
BD
2810
2811 writel(epctl, hs->regs + epreg);
2812
94cb8fd6 2813 epreg = DOEPCTL(index);
5b7d70c6
BD
2814 epctl = readl(hs->regs + epreg);
2815
2816 if (value)
94cb8fd6 2817 epctl |= DxEPCTL_Stall;
9c39ddc6 2818 else {
94cb8fd6
LM
2819 epctl &= ~DxEPCTL_Stall;
2820 xfertype = epctl & DxEPCTL_EPType_MASK;
2821 if (xfertype == DxEPCTL_EPType_Bulk ||
2822 xfertype == DxEPCTL_EPType_Intterupt)
2823 epctl |= DxEPCTL_SetD0PID;
9c39ddc6 2824 }
5b7d70c6
BD
2825
2826 writel(epctl, hs->regs + epreg);
2827
2828 spin_unlock_irqrestore(&hs_ep->lock, irqflags);
2829
2830 return 0;
2831}
2832
/* endpoint operations handed to the gadget core via each usb_ep */
static struct usb_ep_ops s3c_hsotg_ep_ops = {
	.enable		= s3c_hsotg_ep_enable,
	.disable	= s3c_hsotg_ep_disable,
	.alloc_request	= s3c_hsotg_ep_alloc_request,
	.free_request	= s3c_hsotg_ep_free_request,
	.queue		= s3c_hsotg_ep_queue,
	.dequeue	= s3c_hsotg_ep_dequeue,
	.set_halt	= s3c_hsotg_ep_sethalt,
	/* note, don't believe we have any call for the fifo routines */
};
2843
41188786
LM
2844/**
2845 * s3c_hsotg_phy_enable - enable platform phy dev
8b9bc460 2846 * @hsotg: The driver state
41188786
LM
2847 *
2848 * A wrapper for platform code responsible for controlling
2849 * low-level USB code
2850 */
2851static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg)
2852{
2853 struct platform_device *pdev = to_platform_device(hsotg->dev);
2854
2855 dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);
2856 if (hsotg->plat->phy_init)
2857 hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
2858}
2859
2860/**
2861 * s3c_hsotg_phy_disable - disable platform phy dev
8b9bc460 2862 * @hsotg: The driver state
41188786
LM
2863 *
2864 * A wrapper for platform code responsible for controlling
2865 * low-level USB code
2866 */
2867static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg)
2868{
2869 struct platform_device *pdev = to_platform_device(hsotg->dev);
2870
2871 if (hsotg->plat->phy_exit)
2872 hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
2873}
2874
/**
 * s3c_hsotg_init - initalize the usb core
 * @hsotg: The driver state
 *
 * Basic probe-time core setup: unmask a minimal set of endpoint
 * interrupts, hold the soft-disconnect, and configure FIFOs, PHY
 * interface and AHB mode.
 */
static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
{
	/* unmask subset of endpoint interrupts */

	writel(DIEPMSK_TimeOUTMsk | DIEPMSK_AHBErrMsk |
	       DIEPMSK_EPDisbldMsk | DIEPMSK_XferComplMsk,
	       hsotg->regs + DIEPMSK);

	writel(DOEPMSK_SetupMsk | DOEPMSK_AHBErrMsk |
	       DOEPMSK_EPDisbldMsk | DOEPMSK_XferComplMsk,
	       hsotg->regs + DOEPMSK);

	/* no per-endpoint interrupt sources enabled yet */
	writel(0, hsotg->regs + DAINTMSK);

	/* Be in disconnected state until gadget is registered */
	__orr32(hsotg->regs + DCTL, DCTL_SftDiscon);

	/* NOTE(review): deliberately disabled code path kept by the
	 * original author — never executes */
	if (0) {
		/* post global nak until we're ready */
		writel(DCTL_SGNPInNAK | DCTL_SGOUTNak,
		       hsotg->regs + DCTL);
	}

	/* setup fifos */

	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		readl(hsotg->regs + GRXFSIZ),
		readl(hsotg->regs + GNPTXFSIZ));

	s3c_hsotg_init_fifo(hsotg);

	/* set the PLL on, remove the HNP/SRP and set the PHY */
	/* (0x5 << 10): magic field value from bring-up — presumably USB
	 * turnaround time; TODO confirm against the databook */
	writel(GUSBCFG_PHYIf16 | GUSBCFG_TOutCal(7) | (0x5 << 10),
	       hsotg->regs + GUSBCFG);

	writel(using_dma(hsotg) ? GAHBCFG_DMAEn : 0x0,
	       hsotg->regs + GAHBCFG);
}
2917
8b9bc460
LM
2918/**
2919 * s3c_hsotg_udc_start - prepare the udc for work
2920 * @gadget: The usb gadget state
2921 * @driver: The usb gadget driver
2922 *
2923 * Perform initialization to prepare udc device and driver
2924 * to work.
2925 */
f65f0f10
LM
2926static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
2927 struct usb_gadget_driver *driver)
5b7d70c6 2928{
f99b2bfe 2929 struct s3c_hsotg *hsotg = to_hsotg(gadget);
5b7d70c6
BD
2930 int ret;
2931
2932 if (!hsotg) {
2933 printk(KERN_ERR "%s: called with no device\n", __func__);
2934 return -ENODEV;
2935 }
2936
2937 if (!driver) {
2938 dev_err(hsotg->dev, "%s: no driver\n", __func__);
2939 return -EINVAL;
2940 }
2941
7177aed4 2942 if (driver->max_speed < USB_SPEED_FULL)
5b7d70c6 2943 dev_err(hsotg->dev, "%s: bad speed\n", __func__);
5b7d70c6 2944
f65f0f10 2945 if (!driver->setup) {
5b7d70c6
BD
2946 dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
2947 return -EINVAL;
2948 }
2949
2950 WARN_ON(hsotg->driver);
2951
2952 driver->driver.bus = NULL;
2953 hsotg->driver = driver;
2954 hsotg->gadget.dev.driver = &driver->driver;
2955 hsotg->gadget.dev.dma_mask = hsotg->dev->dma_mask;
2956 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
2957
f65f0f10
LM
2958 ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
2959 hsotg->supplies);
5b7d70c6 2960 if (ret) {
f65f0f10 2961 dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
5b7d70c6
BD
2962 goto err;
2963 }
2964
f65f0f10 2965 s3c_hsotg_phy_enable(hsotg);
5b7d70c6 2966
308d734e 2967 s3c_hsotg_core_init(hsotg);
12a1f4dc 2968 hsotg->last_rst = jiffies;
5b7d70c6
BD
2969 dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
2970 return 0;
2971
2972err:
2973 hsotg->driver = NULL;
2974 hsotg->gadget.dev.driver = NULL;
2975 return ret;
2976}
2977
8b9bc460
LM
2978/**
2979 * s3c_hsotg_udc_stop - stop the udc
2980 * @gadget: The usb gadget state
2981 * @driver: The usb gadget driver
2982 *
2983 * Stop udc hw block and stay tunned for future transmissions
2984 */
f65f0f10
LM
2985static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
2986 struct usb_gadget_driver *driver)
5b7d70c6 2987{
f99b2bfe 2988 struct s3c_hsotg *hsotg = to_hsotg(gadget);
5b7d70c6
BD
2989 int ep;
2990
2991 if (!hsotg)
2992 return -ENODEV;
2993
2994 if (!driver || driver != hsotg->driver || !driver->unbind)
2995 return -EINVAL;
2996
2997 /* all endpoints should be shutdown */
b3f489b2 2998 for (ep = 0; ep < hsotg->num_of_eps; ep++)
5b7d70c6
BD
2999 s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
3000
f65f0f10
LM
3001 s3c_hsotg_phy_disable(hsotg);
3002 regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
5b7d70c6 3003
5b7d70c6
BD
3004 hsotg->driver = NULL;
3005 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
f65f0f10 3006 hsotg->gadget.dev.driver = NULL;
5b7d70c6
BD
3007
3008 dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
3009 driver->driver.name);
3010
3011 return 0;
3012}
5b7d70c6 3013
/**
 * s3c_hsotg_gadget_getframe - read the frame number
 * @gadget: The usb gadget state
 *
 * Read the {micro} frame number
 */
static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
{
	struct s3c_hsotg *hsotg = to_hsotg(gadget);

	return s3c_hsotg_read_frameno(hsotg);
}
3024
/* top-level gadget operations: frame query plus UDC start/stop */
static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
	.get_frame	= s3c_hsotg_gadget_getframe,
	.udc_start	= s3c_hsotg_udc_start,
	.udc_stop	= s3c_hsotg_udc_stop,
};
3030
/**
 * s3c_hsotg_initep - initialise a single endpoint
 * @hsotg: The device state.
 * @hs_ep: The endpoint to be initialised.
 * @epnum: The endpoint number
 *
 * Initialise the given endpoint (as part of the probe and device state
 * creation) to give to the gadget driver. Setup the endpoint name, any
 * direction information and other state that may be required.
 */
static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,
				       struct s3c_hsotg_ep *hs_ep,
				       int epnum)
{
	u32 ptxfifo;
	char *dir;

	/* ep0 is bidirectional; otherwise even indices are OUT, odd IN */
	if (epnum == 0)
		dir = "";
	else if ((epnum % 2) == 0) {
		dir = "out";
	} else {
		dir = "in";
		hs_ep->dir_in = 1;
	}

	hs_ep->index = epnum;

	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);

	INIT_LIST_HEAD(&hs_ep->queue);
	INIT_LIST_HEAD(&hs_ep->ep.ep_list);

	spin_lock_init(&hs_ep->lock);

	/* add to the list of endpoints known by the gadget driver */
	if (epnum)
		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);

	hs_ep->parent = hsotg;
	hs_ep->ep.name = hs_ep->name;
	hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT;
	hs_ep->ep.ops = &s3c_hsotg_ep_ops;

	/*
	 * Read the FIFO size for the Periodic TX FIFO, even if we're
	 * an OUT endpoint, we may as well do this if in future the
	 * code is changed to make each endpoint's direction changeable.
	 */

	/* presumably the register reports the size in 32-bit words,
	 * hence the *4 to get bytes — TODO confirm against databook */
	ptxfifo = readl(hsotg->regs + DPTXFSIZn(epnum));
	hs_ep->fifo_size = DPTXFSIZn_DPTxFSize_GET(ptxfifo) * 4;

	/*
	 * if we're using dma, we need to set the next-endpoint pointer
	 * to be something valid.
	 */

	if (using_dma(hsotg)) {
		u32 next = DxEPCTL_NextEp((epnum + 1) % 15);
		writel(next, hsotg->regs + DIEPCTL(epnum));
		writel(next, hsotg->regs + DOEPCTL(epnum));
	}
}
3095
b3f489b2
LM
/**
 * s3c_hsotg_hw_cfg - read HW configuration registers
 * @param: The device state
 *
 * Read the USB core HW configuration registers and cache the values
 * (number of endpoints besides ep0, and whether the core has dedicated
 * periodic TX FIFOs) in the device state.
 */
static void s3c_hsotg_hw_cfg(struct s3c_hsotg *hsotg)
{
	u32 cfg2, cfg4;
	/* check hardware configuration */

	/*
	 * 0x48 is presumably the synopsys GHWCFG2 register (endpoint
	 * count in bits 13:10) - TODO confirm against the core databook
	 * and replace the magic offset with a named register define.
	 */
	cfg2 = readl(hsotg->regs + 0x48);
	hsotg->num_of_eps = (cfg2 >> 10) & 0xF;

	dev_info(hsotg->dev, "EPs:%d\n", hsotg->num_of_eps);

	/*
	 * 0x50 is presumably GHWCFG4; bit 25 flags dedicated periodic
	 * TX FIFOs - TODO confirm and use a named define.
	 */
	cfg4 = readl(hsotg->regs + 0x50);
	hsotg->dedicated_fifos = (cfg4 >> 25) & 1;

	dev_info(hsotg->dev, "%s fifos\n",
		 hsotg->dedicated_fifos ? "dedicated" : "shared");
}
3118
8b9bc460
LM
/**
 * s3c_hsotg_dump - dump state of the udc
 * @param: The device state
 *
 * Print the main control, FIFO-sizing and per-endpoint registers to
 * the kernel log. Compiles to an empty function unless DEBUG is
 * defined.
 */
static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
{
#ifdef DEBUG
	struct device *dev = hsotg->dev;
	void __iomem *regs = hsotg->regs;
	u32 val;
	int idx;

	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
		 readl(regs + DCFG), readl(regs + DCTL),
		 readl(regs + DIEPMSK));

	dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
		 readl(regs + GAHBCFG), readl(regs + 0x44));

	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		 readl(regs + GRXFSIZ), readl(regs + GNPTXFSIZ));

	/* show periodic fifo settings */

	for (idx = 1; idx <= 15; idx++) {
		val = readl(regs + DPTXFSIZn(idx));
		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
			 val >> DPTXFSIZn_DPTxFSize_SHIFT,
			 val & DPTXFSIZn_DPTxFStAddr_MASK);
	}

	/* per-endpoint control/size/DMA registers for both directions */
	for (idx = 0; idx < 15; idx++) {
		dev_info(dev,
			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
			 readl(regs + DIEPCTL(idx)),
			 readl(regs + DIEPTSIZ(idx)),
			 readl(regs + DIEPDMA(idx)));

		val = readl(regs + DOEPCTL(idx));
		dev_info(dev,
			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
			 idx, readl(regs + DOEPCTL(idx)),
			 readl(regs + DOEPTSIZ(idx)),
			 readl(regs + DOEPDMA(idx)));

	}

	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
		 readl(regs + DVBUSDIS), readl(regs + DVBUSPULSE));
#endif
}
3170
5b7d70c6
BD
/**
 * state_show - debugfs: show overall driver and device state.
 * @seq: The seq file to write to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows the overall state of the hardware and
 * some general information about each of the endpoints available
 * to the system.
 */
static int state_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg *hsotg = seq->private;
	void __iomem *regs = hsotg->regs;
	int idx;

	/* device configuration/control/status */
	seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
		 readl(regs + DCFG),
		 readl(regs + DCTL),
		 readl(regs + DSTS));

	seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
		   readl(regs + DIEPMSK), readl(regs + DOEPMSK));

	/* global interrupt mask/status */
	seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
		   readl(regs + GINTMSK),
		   readl(regs + GINTSTS));

	seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
		   readl(regs + DAINTMSK),
		   readl(regs + DAINT));

	seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
		   readl(regs + GNPTXSTS),
		   readl(regs + GRXSTSR));

	seq_printf(seq, "\nEndpoint status:\n");

	/* control and transfer-size registers for each endpoint pair */
	for (idx = 0; idx < 15; idx++) {
		u32 in, out;

		in = readl(regs + DIEPCTL(idx));
		out = readl(regs + DOEPCTL(idx));

		seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
			   idx, in, out);

		in = readl(regs + DIEPTSIZ(idx));
		out = readl(regs + DOEPTSIZ(idx));

		seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
			   in, out);

		seq_printf(seq, "\n");
	}

	return 0;
}
3228
3229static int state_open(struct inode *inode, struct file *file)
3230{
3231 return single_open(file, state_show, inode->i_private);
3232}
3233
/* file operations for the "state" debugfs entry (read-only seq_file) */
static const struct file_operations state_fops = {
	.owner = THIS_MODULE,
	.open = state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
3241
/**
 * fifo_show - debugfs: show the fifo information
 * @seq: The seq_file to write data to.
 * @v: Unused parameter.
 *
 * Show the FIFO information for the overall fifo and all the
 * periodic transmission FIFOs.
 */
static int fifo_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg *hsotg = seq->private;
	void __iomem *regs = hsotg->regs;
	u32 val;
	int idx;

	seq_printf(seq, "Non-periodic FIFOs:\n");
	seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + GRXFSIZ));

	/* non-periodic TX FIFO: depth and start address packed in one reg */
	val = readl(regs + GNPTXFSIZ);
	seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
		   val >> GNPTXFSIZ_NPTxFDep_SHIFT,
		   val & GNPTXFSIZ_NPTxFStAddr_MASK);

	seq_printf(seq, "\nPeriodic TXFIFOs:\n");

	/* periodic TX FIFOs are numbered 1..15 */
	for (idx = 1; idx <= 15; idx++) {
		val = readl(regs + DPTXFSIZn(idx));

		seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
			   val >> DPTXFSIZn_DPTxFSize_SHIFT,
			   val & DPTXFSIZn_DPTxFStAddr_MASK);
	}

	return 0;
}
3277
3278static int fifo_open(struct inode *inode, struct file *file)
3279{
3280 return single_open(file, fifo_show, inode->i_private);
3281}
3282
/* file operations for the "fifo" debugfs entry (read-only seq_file) */
static const struct file_operations fifo_fops = {
	.owner = THIS_MODULE,
	.open = fifo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
3290
3291
/* map an endpoint direction flag to its human-readable name */
static const char *decode_direction(int is_in)
{
	if (is_in)
		return "in";

	return "out";
}
3296
/**
 * ep_show - debugfs: show the state of an endpoint.
 * @seq: The seq_file to write data to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows the state of the given endpoint (one is
 * registered for each available). Dumps the endpoint's register set
 * and then walks its request queue (at most 15 entries) under the
 * endpoint lock.
 */
static int ep_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg_ep *ep = seq->private;
	struct s3c_hsotg *hsotg = ep->parent;
	struct s3c_hsotg_req *req;
	void __iomem *regs = hsotg->regs;
	int index = ep->index;
	int show_limit = 15;	/* cap queue dump to keep output bounded */
	unsigned long flags;

	seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
		   ep->index, ep->ep.name, decode_direction(ep->dir_in));

	/* first show the register state */

	seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
		   readl(regs + DIEPCTL(index)),
		   readl(regs + DOEPCTL(index)));

	seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
		   readl(regs + DIEPDMA(index)),
		   readl(regs + DOEPDMA(index)));

	seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
		   readl(regs + DIEPINT(index)),
		   readl(regs + DOEPINT(index)));

	seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
		   readl(regs + DIEPTSIZ(index)),
		   readl(regs + DOEPTSIZ(index)));

	seq_printf(seq, "\n");
	seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
	seq_printf(seq, "total_data=%ld\n", ep->total_data);

	seq_printf(seq, "request list (%p,%p):\n",
		   ep->queue.next, ep->queue.prev);

	/* hold the endpoint lock so the queue cannot change underneath us */
	spin_lock_irqsave(&ep->lock, flags);

	list_for_each_entry(req, &ep->queue, queue) {
		if (--show_limit < 0) {
			seq_printf(seq, "not showing more requests...\n");
			break;
		}

		/* '*' marks the request currently being processed */
		seq_printf(seq, "%c req %p: %d bytes @%p, ",
			   req == ep->req ? '*' : ' ',
			   req, req->req.length, req->req.buf);
		seq_printf(seq, "%d done, res %d\n",
			   req->req.actual, req->req.status);
	}

	spin_unlock_irqrestore(&ep->lock, flags);

	return 0;
}
3362
3363static int ep_open(struct inode *inode, struct file *file)
3364{
3365 return single_open(file, ep_show, inode->i_private);
3366}
3367
/* file operations for the per-endpoint debugfs entries (read-only) */
static const struct file_operations ep_fops = {
	.owner = THIS_MODULE,
	.open = ep_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
3375
3376/**
3377 * s3c_hsotg_create_debug - create debugfs directory and files
3378 * @hsotg: The driver state
3379 *
3380 * Create the debugfs files to allow the user to get information
3381 * about the state of the system. The directory name is created
3382 * with the same name as the device itself, in case we end up
3383 * with multiple blocks in future systems.
8b9bc460 3384 */
5b7d70c6
BD
3385static void __devinit s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
3386{
3387 struct dentry *root;
3388 unsigned epidx;
3389
3390 root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
3391 hsotg->debug_root = root;
3392 if (IS_ERR(root)) {
3393 dev_err(hsotg->dev, "cannot create debug root\n");
3394 return;
3395 }
3396
3397 /* create general state file */
3398
3399 hsotg->debug_file = debugfs_create_file("state", 0444, root,
3400 hsotg, &state_fops);
3401
3402 if (IS_ERR(hsotg->debug_file))
3403 dev_err(hsotg->dev, "%s: failed to create state\n", __func__);
3404
3405 hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
3406 hsotg, &fifo_fops);
3407
3408 if (IS_ERR(hsotg->debug_fifo))
3409 dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);
3410
3411 /* create one file for each endpoint */
3412
b3f489b2 3413 for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
5b7d70c6
BD
3414 struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
3415
3416 ep->debugfs = debugfs_create_file(ep->name, 0444,
3417 root, ep, &ep_fops);
3418
3419 if (IS_ERR(ep->debugfs))
3420 dev_err(hsotg->dev, "failed to create %s debug file\n",
3421 ep->name);
3422 }
3423}
3424
3425/**
3426 * s3c_hsotg_delete_debug - cleanup debugfs entries
3427 * @hsotg: The driver state
3428 *
3429 * Cleanup (remove) the debugfs files for use on module exit.
8b9bc460 3430 */
5b7d70c6
BD
3431static void __devexit s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
3432{
3433 unsigned epidx;
3434
b3f489b2 3435 for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
5b7d70c6
BD
3436 struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
3437 debugfs_remove(ep->debugfs);
3438 }
3439
3440 debugfs_remove(hsotg->debug_file);
3441 debugfs_remove(hsotg->debug_fifo);
3442 debugfs_remove(hsotg->debug_root);
3443}
3444
f026a52d
LM
/**
 * s3c_hsotg_release - release callback for hsotg device
 * @dev: Device to for which release is called
 *
 * Nothing to free here: the hsotg state is allocated with
 * devm_kzalloc() in the probe path, so its lifetime is managed by the
 * driver core. The previous kfree() of dev_get_drvdata() here would
 * have double-freed that devm-managed memory on device release.
 */
static void s3c_hsotg_release(struct device *dev)
{
	/* intentionally empty - state is devm-managed */
}
3455
8b9bc460
LM
3456/**
3457 * s3c_hsotg_probe - probe function for hsotg driver
3458 * @pdev: The platform information for the driver
3459 */
f026a52d 3460
5b7d70c6
BD
3461static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
3462{
3463 struct s3c_hsotg_plat *plat = pdev->dev.platform_data;
3464 struct device *dev = &pdev->dev;
b3f489b2 3465 struct s3c_hsotg_ep *eps;
5b7d70c6
BD
3466 struct s3c_hsotg *hsotg;
3467 struct resource *res;
3468 int epnum;
3469 int ret;
fc9a731e 3470 int i;
5b7d70c6 3471
41188786
LM
3472 plat = pdev->dev.platform_data;
3473 if (!plat) {
3474 dev_err(&pdev->dev, "no platform data defined\n");
3475 return -EINVAL;
3476 }
5b7d70c6 3477
338edabc 3478 hsotg = devm_kzalloc(&pdev->dev, sizeof(struct s3c_hsotg), GFP_KERNEL);
5b7d70c6
BD
3479 if (!hsotg) {
3480 dev_err(dev, "cannot get memory\n");
3481 return -ENOMEM;
3482 }
3483
3484 hsotg->dev = dev;
3485 hsotg->plat = plat;
3486
31ee04de
MS
3487 hsotg->clk = clk_get(&pdev->dev, "otg");
3488 if (IS_ERR(hsotg->clk)) {
3489 dev_err(dev, "cannot get otg clock\n");
338edabc 3490 return PTR_ERR(hsotg->clk);
31ee04de
MS
3491 }
3492
5b7d70c6
BD
3493 platform_set_drvdata(pdev, hsotg);
3494
3495 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5b7d70c6 3496
338edabc 3497 hsotg->regs = devm_request_and_ioremap(&pdev->dev, res);
5b7d70c6
BD
3498 if (!hsotg->regs) {
3499 dev_err(dev, "cannot map registers\n");
3500 ret = -ENXIO;
338edabc 3501 goto err_clk;
5b7d70c6
BD
3502 }
3503
3504 ret = platform_get_irq(pdev, 0);
3505 if (ret < 0) {
3506 dev_err(dev, "cannot find IRQ\n");
338edabc 3507 goto err_clk;
5b7d70c6
BD
3508 }
3509
3510 hsotg->irq = ret;
3511
338edabc
SK
3512 ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
3513 dev_name(dev), hsotg);
5b7d70c6
BD
3514 if (ret < 0) {
3515 dev_err(dev, "cannot claim IRQ\n");
338edabc 3516 goto err_clk;
5b7d70c6
BD
3517 }
3518
3519 dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
3520
3521 device_initialize(&hsotg->gadget.dev);
3522
3523 dev_set_name(&hsotg->gadget.dev, "gadget");
3524
d327ab5b 3525 hsotg->gadget.max_speed = USB_SPEED_HIGH;
5b7d70c6
BD
3526 hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
3527 hsotg->gadget.name = dev_name(dev);
3528
3529 hsotg->gadget.dev.parent = dev;
3530 hsotg->gadget.dev.dma_mask = dev->dma_mask;
f026a52d 3531 hsotg->gadget.dev.release = s3c_hsotg_release;
5b7d70c6 3532
5b7d70c6
BD
3533 /* reset the system */
3534
04b4a0fc 3535 clk_prepare_enable(hsotg->clk);
31ee04de 3536
fc9a731e
LM
3537 /* regulators */
3538
3539 for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
3540 hsotg->supplies[i].supply = s3c_hsotg_supply_names[i];
3541
3542 ret = regulator_bulk_get(dev, ARRAY_SIZE(hsotg->supplies),
3543 hsotg->supplies);
3544 if (ret) {
3545 dev_err(dev, "failed to request supplies: %d\n", ret);
338edabc 3546 goto err_clk;
fc9a731e
LM
3547 }
3548
3549 ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
3550 hsotg->supplies);
3551
3552 if (ret) {
3553 dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
3554 goto err_supplies;
3555 }
3556
41188786
LM
3557 /* usb phy enable */
3558 s3c_hsotg_phy_enable(hsotg);
5b7d70c6 3559
5b7d70c6
BD
3560 s3c_hsotg_corereset(hsotg);
3561 s3c_hsotg_init(hsotg);
b3f489b2
LM
3562 s3c_hsotg_hw_cfg(hsotg);
3563
3564 /* hsotg->num_of_eps holds number of EPs other than ep0 */
3565
3566 if (hsotg->num_of_eps == 0) {
3567 dev_err(dev, "wrong number of EPs (zero)\n");
3568 goto err_supplies;
3569 }
3570
3571 eps = kcalloc(hsotg->num_of_eps + 1, sizeof(struct s3c_hsotg_ep),
3572 GFP_KERNEL);
3573 if (!eps) {
3574 dev_err(dev, "cannot get memory\n");
3575 goto err_supplies;
3576 }
3577
3578 hsotg->eps = eps;
3579
3580 /* setup endpoint information */
3581
3582 INIT_LIST_HEAD(&hsotg->gadget.ep_list);
3583 hsotg->gadget.ep0 = &hsotg->eps[0].ep;
3584
3585 /* allocate EP0 request */
3586
3587 hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
3588 GFP_KERNEL);
3589 if (!hsotg->ctrl_req) {
3590 dev_err(dev, "failed to allocate ctrl req\n");
3591 goto err_ep_mem;
3592 }
5b7d70c6
BD
3593
3594 /* initialise the endpoints now the core has been initialised */
b3f489b2 3595 for (epnum = 0; epnum < hsotg->num_of_eps; epnum++)
5b7d70c6
BD
3596 s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
3597
f65f0f10
LM
3598 /* disable power and clock */
3599
3600 ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
3601 hsotg->supplies);
3602 if (ret) {
3603 dev_err(hsotg->dev, "failed to disable supplies: %d\n", ret);
3604 goto err_ep_mem;
3605 }
3606
3607 s3c_hsotg_phy_disable(hsotg);
3608
3609 ret = device_add(&hsotg->gadget.dev);
3610 if (ret) {
3611 put_device(&hsotg->gadget.dev);
3612 goto err_ep_mem;
3613 }
3614
0f91349b
SAS
3615 ret = usb_add_gadget_udc(&pdev->dev, &hsotg->gadget);
3616 if (ret)
b3f489b2 3617 goto err_ep_mem;
0f91349b 3618
5b7d70c6
BD
3619 s3c_hsotg_create_debug(hsotg);
3620
3621 s3c_hsotg_dump(hsotg);
3622
5b7d70c6
BD
3623 return 0;
3624
1d144c67 3625err_ep_mem:
b3f489b2 3626 kfree(eps);
fc9a731e 3627err_supplies:
41188786 3628 s3c_hsotg_phy_disable(hsotg);
fc9a731e 3629 regulator_bulk_free(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
338edabc 3630
31ee04de 3631err_clk:
1d144c67 3632 clk_disable_unprepare(hsotg->clk);
31ee04de 3633 clk_put(hsotg->clk);
338edabc 3634
5b7d70c6
BD
3635 return ret;
3636}
3637
8b9bc460
LM
/**
 * s3c_hsotg_remove - remove function for hsotg driver
 * @pdev: The platform information for the driver
 *
 * Tear down in reverse order of probe: unregister from the UDC core,
 * remove debugfs entries, disable the PHY and supplies, release the
 * clock and finally unregister the gadget device. Always returns 0.
 */
static int __devexit s3c_hsotg_remove(struct platform_device *pdev)
{
	struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&hsotg->gadget);

	s3c_hsotg_delete_debug(hsotg);

	if (hsotg->driver) {
		/* should have been done already by driver model core */
		usb_gadget_unregister_driver(hsotg->driver);
	}

	s3c_hsotg_phy_disable(hsotg);
	regulator_bulk_free(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);

	clk_disable_unprepare(hsotg->clk);
	clk_put(hsotg->clk);

	/* releases the gadget device; triggers s3c_hsotg_release() */
	device_unregister(&hsotg->gadget.dev);
	return 0;
}
3664
/*
 * Power management is not implemented: the #if 1 block below stubs the
 * suspend/resume callbacks out as NULL (presumably a placeholder kept
 * so real implementations can be dropped in later - see the #if 1).
 */
#if 1
#define s3c_hsotg_suspend NULL
#define s3c_hsotg_resume NULL
#endif

/* platform driver glue; bound by name to the "s3c-hsotg" device */
static struct platform_driver s3c_hsotg_driver = {
	.driver = {
		.name = "s3c-hsotg",
		.owner = THIS_MODULE,
	},
	.probe = s3c_hsotg_probe,
	.remove = __devexit_p(s3c_hsotg_remove),
	.suspend = s3c_hsotg_suspend,
	.resume = s3c_hsotg_resume,
};
3680
cc27c96c 3681module_platform_driver(s3c_hsotg_driver);
5b7d70c6
BD
3682
3683MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
3684MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
3685MODULE_LICENSE("GPL");
3686MODULE_ALIAS("platform:s3c-hsotg");
This page took 0.455441 seconds and 5 git commands to generate.