usb: gadget: net2280: print error in ep_ops error paths
1 /*
2 * Driver for the PLX NET2280 USB device controller.
3 * Specs and errata are available from <http://www.plxtech.com>.
4 *
5 * PLX Technology Inc. (formerly NetChip Technology) supported the
6 * development of this driver.
7 *
8 *
9 * CODE STATUS HIGHLIGHTS
10 *
11 * This driver should work well with most "gadget" drivers, including
12 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
13 * as well as Gadget Zero and Gadgetfs.
14 *
15 * DMA is enabled by default.
16 *
17 * MSI is enabled by default. The legacy IRQ is used if MSI couldn't
18 * be enabled.
19 *
20 * Note that almost all the errata workarounds here are only needed for
21 * rev1 chips. Rev1a silicon (0110) fixes almost all of them.
22 */
23
24 /*
25 * Copyright (C) 2003 David Brownell
26 * Copyright (C) 2003-2005 PLX Technology, Inc.
27 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
28 *
29 * Modified by Seth Levy 2005 PLX Technology, Inc. to provide compatibility
30 * with 2282 chip
31 *
32 * Modified by Ricardo Ribalda Qtechnology AS to provide compatibility
33 * with usb 338x chip. Based on PLX driver
34 *
35 * This program is free software; you can redistribute it and/or modify
36 * it under the terms of the GNU General Public License as published by
37 * the Free Software Foundation; either version 2 of the License, or
38 * (at your option) any later version.
39 */
40
41 #include <linux/module.h>
42 #include <linux/pci.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/kernel.h>
45 #include <linux/delay.h>
46 #include <linux/ioport.h>
47 #include <linux/slab.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/timer.h>
51 #include <linux/list.h>
52 #include <linux/interrupt.h>
53 #include <linux/moduleparam.h>
54 #include <linux/device.h>
55 #include <linux/usb/ch9.h>
56 #include <linux/usb/gadget.h>
57 #include <linux/prefetch.h>
58 #include <linux/io.h>
59
60 #include <asm/byteorder.h>
61 #include <asm/irq.h>
62 #include <asm/unaligned.h>
63
64 #define DRIVER_DESC "PLX NET228x/USB338x USB Peripheral Controller"
65 #define DRIVER_VERSION "2005 Sept 27/v3.0"
66
67 #define EP_DONTUSE 13 /* nonzero */
68
69 #define USE_RDK_LEDS /* GPIO pins control three LEDs */
70
71
72 static const char driver_name[] = "net2280";
73 static const char driver_desc[] = DRIVER_DESC;
74
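/* pciirqenb0 bit index for each endpoint on 338x parts; legacy (228x)
 * parts just use the endpoint number itself (see enable_pciirqenb())
 */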
75 static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
76 static const char ep0name[] = "ep0";
77 static const char *const ep_name[] = {
78 ep0name,
79 "ep-a", "ep-b", "ep-c", "ep-d",
80 "ep-e", "ep-f", "ep-g", "ep-h",
81 };
82
83 /* Endpoint names for usb3380 advanced mode */
84 static const char *const ep_name_adv[] = {
85 ep0name,
86 "ep1in", "ep2out", "ep3in", "ep4out",
87 "ep1out", "ep2in", "ep3out", "ep4in",
88 };
89
90 /* mode 0 == ep-{a,b,c,d} 1K fifo each
91 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
92 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
93 */
94 static ushort fifo_mode;
95
96 /* "modprobe net2280 fifo_mode=1" etc */
97 module_param(fifo_mode, ushort, 0644);
98
99 /* enable_suspend -- When enabled, the driver will respond to
100 * USB suspend requests by powering down the NET2280. Otherwise,
101 * USB suspend requests will be ignored. This is acceptable for
102 * self-powered devices.
103 */
104 static bool enable_suspend;
105
106 /* "modprobe net2280 enable_suspend=1" etc */
107 module_param(enable_suspend, bool, 0444);
108
109 #define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
110
111 static char *type_string(u8 bmAttributes)
112 {
113 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
114 case USB_ENDPOINT_XFER_BULK: return "bulk";
115 case USB_ENDPOINT_XFER_ISOC: return "iso";
116 case USB_ENDPOINT_XFER_INT: return "intr";
117 }
118 return "control";
119 }
120
121 #include "net2280.h"
122
123 #define valid_bit cpu_to_le32(BIT(VALID_BIT))
124 #define dma_done_ie cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))
125
126 /*-------------------------------------------------------------------------*/
127 static inline void enable_pciirqenb(struct net2280_ep *ep)
128 {
129 u32 tmp = readl(&ep->dev->regs->pciirqenb0);
130
131 if (ep->dev->quirks & PLX_LEGACY)
132 tmp |= BIT(ep->num);
133 else
134 tmp |= BIT(ep_bit[ep->num]);
135 writel(tmp, &ep->dev->regs->pciirqenb0);
138 }
139
140 static int
141 net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
142 {
143 struct net2280 *dev;
144 struct net2280_ep *ep;
145 u32 max, tmp;
146 unsigned long flags;
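/* nonzero entries seem to mark the slots that are OUT-only in 338x
 * advanced mode (compare ep_name_adv[]); used to reject IN enables there
 */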
147 static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };
148 int ret = 0;
149
150 ep = container_of(_ep, struct net2280_ep, ep);
151 if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
152 desc->bDescriptorType != USB_DT_ENDPOINT) {
153 pr_err("%s: failed at line=%d\n", __func__, __LINE__);
154 return -EINVAL;
155 }
156 dev = ep->dev;
157 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
158 ret = -ESHUTDOWN;
159 goto print_err;
160 }
161
162 /* erratum 0119 workaround ties up an endpoint number */
163 if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) {
164 ret = -EDOM;
165 goto print_err;
166 }
167
168 if (dev->quirks & PLX_SUPERSPEED) {
169 if ((desc->bEndpointAddress & 0x0f) >= 0x0c) {
170 ret = -EDOM;
171 goto print_err;
172 }
173 ep->is_in = !!usb_endpoint_dir_in(desc);
174 if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) {
175 ret = -EINVAL;
176 goto print_err;
177 }
178 }
179
180 /* sanity check ep-e/ep-f since their fifos are small */
181 max = usb_endpoint_maxp(desc) & 0x1fff;
182 if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) {
183 ret = -ERANGE;
184 goto print_err;
185 }
186
187 spin_lock_irqsave(&dev->lock, flags);
188 _ep->maxpacket = max & 0x7ff;
189 ep->desc = desc;
190
191 /* ep_reset() has already been called */
192 ep->stopped = 0;
193 ep->wedged = 0;
194 ep->out_overflow = 0;
195
196 /* set speed-dependent max packet; may kick in high bandwidth */
197 set_max_speed(ep, max);
198
199 /* set type, direction, address; reset fifo counters */
200 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
201 tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
202 if (tmp == USB_ENDPOINT_XFER_INT) {
203 /* erratum 0105 workaround prevents hs NYET */
204 if (dev->chiprev == 0x0100 &&
205 dev->gadget.speed == USB_SPEED_HIGH &&
206 !(desc->bEndpointAddress & USB_DIR_IN))
207 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
208 &ep->regs->ep_rsp);
209 } else if (tmp == USB_ENDPOINT_XFER_BULK) {
210 /* catch some particularly blatant driver bugs */
211 if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
212 (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
213 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
214 spin_unlock_irqrestore(&dev->lock, flags);
215 ret = -ERANGE;
216 goto print_err;
217 }
218 }
219 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
220 /* Enable this endpoint */
221 if (dev->quirks & PLX_LEGACY) {
222 tmp <<= ENDPOINT_TYPE;
223 tmp |= desc->bEndpointAddress;
224 /* default full fifo lines */
225 tmp |= (4 << ENDPOINT_BYTE_COUNT);
226 tmp |= BIT(ENDPOINT_ENABLE);
227 ep->is_in = (tmp & USB_DIR_IN) != 0;
228 } else {
229 /* In Legacy mode, only OUT endpoints are used */
230 if (dev->enhanced_mode && ep->is_in) {
231 tmp <<= IN_ENDPOINT_TYPE;
232 tmp |= BIT(IN_ENDPOINT_ENABLE);
233 /* Not applicable to Legacy */
234 tmp |= BIT(ENDPOINT_DIRECTION);
235 } else {
236 tmp <<= OUT_ENDPOINT_TYPE;
237 tmp |= BIT(OUT_ENDPOINT_ENABLE);
238 tmp |= (ep->is_in << ENDPOINT_DIRECTION);
239 }
240
241 tmp |= usb_endpoint_num(desc);
242 tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
243 }
244
245 /* Make sure all the registers are written before ep_rsp */
246 wmb();
247
248 /* for OUT transfers, block the rx fifo until a read is posted */
249 if (!ep->is_in)
250 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
251 else if (!(dev->quirks & PLX_2280)) {
252 /* Added for 2282: don't use NAK packets on an IN endpoint;
253 * this was ignored on the 2280
254 */
255 writel(BIT(CLEAR_NAK_OUT_PACKETS) |
256 BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
257 }
258
259 writel(tmp, &ep->cfg->ep_cfg);
260
261 /* enable irqs */
262 if (!ep->dma) { /* pio, per-packet */
263 enable_pciirqenb(ep);
264
265 tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
266 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
267 if (dev->quirks & PLX_2280)
268 tmp |= readl(&ep->regs->ep_irqenb);
269 writel(tmp, &ep->regs->ep_irqenb);
270 } else { /* dma, per-request */
271 tmp = BIT((8 + ep->num)); /* completion */
272 tmp |= readl(&dev->regs->pciirqenb1);
273 writel(tmp, &dev->regs->pciirqenb1);
274
275 /* for short OUT transfers, dma completions can't
276 * advance the queue; do it pio-style, by hand.
277 * NOTE erratum 0112 workaround #2
278 */
279 if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
280 tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
281 writel(tmp, &ep->regs->ep_irqenb);
282
283 enable_pciirqenb(ep);
284 }
285 }
286
287 tmp = desc->bEndpointAddress;
288 ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
289 _ep->name, tmp & 0x0f, DIR_STRING(tmp),
290 type_string(desc->bmAttributes),
291 ep->dma ? "dma" : "pio", max);
292
293 /* pci writes may still be posted */
294 spin_unlock_irqrestore(&dev->lock, flags);
295 return ret;
296
297 print_err:
298 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
299 return ret;
300 }
301
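/* busy-wait until (*ptr & mask) == done or 'usec' microseconds pass;
 * an all-ones readback means the PCI device has gone away
 */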
302 static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
303 {
304 u32 result;
305
306 do {
307 result = readl(ptr);
308 if (result == ~(u32)0) /* "device unplugged" */
309 return -ENODEV;
310 result &= mask;
311 if (result == done)
312 return 0;
313 udelay(1);
314 usec--;
315 } while (usec > 0);
316 return -ETIMEDOUT;
317 }
318
319 static const struct usb_ep_ops net2280_ep_ops;
320
321 static void ep_reset_228x(struct net2280_regs __iomem *regs,
322 struct net2280_ep *ep)
323 {
324 u32 tmp;
325
326 ep->desc = NULL;
327 INIT_LIST_HEAD(&ep->queue);
328
329 usb_ep_set_maxpacket_limit(&ep->ep, ~0);
330 ep->ep.ops = &net2280_ep_ops;
331
332 /* disable the dma, irqs, endpoint... */
333 if (ep->dma) {
334 writel(0, &ep->dma->dmactl);
335 writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
336 BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
337 BIT(DMA_ABORT),
338 &ep->dma->dmastat);
339
340 tmp = readl(&regs->pciirqenb0);
341 tmp &= ~BIT(ep->num);
342 writel(tmp, &regs->pciirqenb0);
343 } else {
344 tmp = readl(&regs->pciirqenb1);
345 tmp &= ~BIT((8 + ep->num)); /* completion */
346 writel(tmp, &regs->pciirqenb1);
347 }
348 writel(0, &ep->regs->ep_irqenb);
349
350 /* init to our chosen defaults, notably so that we NAK OUT
351 * packets until the driver queues a read (+note erratum 0112)
352 */
353 if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
354 tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
355 BIT(SET_NAK_OUT_PACKETS) |
356 BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
357 BIT(CLEAR_INTERRUPT_MODE);
358 } else {
359 /* added for 2282 */
360 tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
361 BIT(CLEAR_NAK_OUT_PACKETS) |
362 BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
363 BIT(CLEAR_INTERRUPT_MODE);
364 }
365
366 if (ep->num != 0) {
367 tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
368 BIT(CLEAR_ENDPOINT_HALT);
369 }
370 writel(tmp, &ep->regs->ep_rsp);
371
372 /* scrub most status bits, and flush any fifo state */
373 if (ep->dev->quirks & PLX_2280)
374 tmp = BIT(FIFO_OVERFLOW) |
375 BIT(FIFO_UNDERFLOW);
376 else
377 tmp = 0;
378
379 writel(tmp | BIT(TIMEOUT) |
380 BIT(USB_STALL_SENT) |
381 BIT(USB_IN_NAK_SENT) |
382 BIT(USB_IN_ACK_RCVD) |
383 BIT(USB_OUT_PING_NAK_SENT) |
384 BIT(USB_OUT_ACK_SENT) |
385 BIT(FIFO_FLUSH) |
386 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
387 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
388 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
389 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
390 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
391 BIT(DATA_IN_TOKEN_INTERRUPT),
392 &ep->regs->ep_stat);
393
394 /* fifo size is handled separately */
395 }
396
397 static void ep_reset_338x(struct net2280_regs __iomem *regs,
398 struct net2280_ep *ep)
399 {
400 u32 tmp, dmastat;
401
402 ep->desc = NULL;
403 INIT_LIST_HEAD(&ep->queue);
404
405 usb_ep_set_maxpacket_limit(&ep->ep, ~0);
406 ep->ep.ops = &net2280_ep_ops;
407
408 /* disable the dma, irqs, endpoint... */
409 if (ep->dma) {
410 writel(0, &ep->dma->dmactl);
411 writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
412 BIT(DMA_PAUSE_DONE_INTERRUPT) |
413 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
414 BIT(DMA_TRANSACTION_DONE_INTERRUPT),
415 /* | BIT(DMA_ABORT), */
416 &ep->dma->dmastat);
417
418 dmastat = readl(&ep->dma->dmastat);
419 if (dmastat == 0x5002) {
420 ep_warn(ep->dev, "dmastat returned %x!!\n",
421 dmastat);
422 writel(0x5a, &ep->dma->dmastat);
423 }
424
425 tmp = readl(&regs->pciirqenb0);
426 tmp &= ~BIT(ep_bit[ep->num]);
427 writel(tmp, &regs->pciirqenb0);
428 } else {
429 if (ep->num < 5) {
430 tmp = readl(&regs->pciirqenb1);
431 tmp &= ~BIT((8 + ep->num)); /* completion */
432 writel(tmp, &regs->pciirqenb1);
433 }
434 }
435 writel(0, &ep->regs->ep_irqenb);
436
437 writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
438 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
439 BIT(FIFO_OVERFLOW) |
440 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
441 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
442 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
443 BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
444 }
445
446 static void nuke(struct net2280_ep *);
447
448 static int net2280_disable(struct usb_ep *_ep)
449 {
450 struct net2280_ep *ep;
451 unsigned long flags;
452
453 ep = container_of(_ep, struct net2280_ep, ep);
454 if (!_ep || !ep->desc || _ep->name == ep0name) {
455 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
456 return -EINVAL;
457 }
458 spin_lock_irqsave(&ep->dev->lock, flags);
459 nuke(ep);
460
461 if (ep->dev->quirks & PLX_SUPERSPEED)
462 ep_reset_338x(ep->dev->regs, ep);
463 else
464 ep_reset_228x(ep->dev->regs, ep);
465
466 ep_vdbg(ep->dev, "disabled %s %s\n",
467 ep->dma ? "dma" : "pio", _ep->name);
468
469 /* synch memory views with the device */
470 (void)readl(&ep->cfg->ep_cfg);
471
472 if (!ep->dma && ep->num >= 1 && ep->num <= 4)
473 ep->dma = &ep->dev->dma[ep->num - 1];
474
475 spin_unlock_irqrestore(&ep->dev->lock, flags);
476 return 0;
477 }
478
479 /*-------------------------------------------------------------------------*/
480
481 static struct usb_request
482 *net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
483 {
484 struct net2280_ep *ep;
485 struct net2280_request *req;
486
487 if (!_ep) {
488 pr_err("%s: Invalid ep\n", __func__);
489 return NULL;
490 }
491 ep = container_of(_ep, struct net2280_ep, ep);
492
493 req = kzalloc(sizeof(*req), gfp_flags);
494 if (!req)
495 return NULL;
496
497 INIT_LIST_HEAD(&req->queue);
498
499 /* this dma descriptor may be swapped with the previous dummy */
500 if (ep->dma) {
501 struct net2280_dma *td;
502
503 td = pci_pool_alloc(ep->dev->requests, gfp_flags,
504 &req->td_dma);
505 if (!td) {
506 kfree(req);
507 return NULL;
508 }
509 td->dmacount = 0; /* not VALID */
510 td->dmadesc = td->dmaaddr;
511 req->td = td;
512 }
513 return &req->req;
514 }
515
516 static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
517 {
518 struct net2280_ep *ep;
519 struct net2280_request *req;
520
521 ep = container_of(_ep, struct net2280_ep, ep);
522 if (!_ep || !_req) {
523 pr_err("%s: Invalid ep=%p or req=%p\n",
524 __func__, _ep, _req);
525 return;
526 }
527
528 req = container_of(_req, struct net2280_request, req);
529 WARN_ON(!list_empty(&req->queue));
530 if (req->td)
531 pci_pool_free(ep->dev->requests, req->td, req->td_dma);
532 kfree(req);
533 }
534
535 /*-------------------------------------------------------------------------*/
536
537 /* load a packet into the fifo we use for usb IN transfers.
538 * works for all endpoints.
539 *
540 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
541 * at a time, but this code is simpler because it knows it only writes
542 * one packet. ep-a..ep-d should use dma instead.
543 */
544 static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
545 {
546 struct net2280_ep_regs __iomem *regs = ep->regs;
547 u8 *buf;
548 u32 tmp;
549 unsigned count, total;
550
551 /* INVARIANT: fifo is currently empty. (testable) */
552
553 if (req) {
554 buf = req->buf + req->actual;
555 prefetch(buf);
556 total = req->length - req->actual;
557 } else {
558 total = 0;
559 buf = NULL;
560 }
561
562 /* write just one packet at a time */
563 count = ep->ep.maxpacket;
564 if (count > total) /* min() cannot be used on a bitfield */
565 count = total;
566
567 ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
568 ep->ep.name, count,
569 (count != ep->ep.maxpacket) ? " (short)" : "",
570 req);
571 while (count >= 4) {
572 /* NOTE be careful if you try to align these. fifo lines
573 * should normally be full (4 bytes) and successive partial
574 * lines are ok only in certain cases.
575 */
576 tmp = get_unaligned((u32 *)buf);
577 cpu_to_le32s(&tmp);
578 writel(tmp, &regs->ep_data);
579 buf += 4;
580 count -= 4;
581 }
582
583 /* last fifo entry is "short" unless we wrote a full packet.
584 * also explicitly validate last word in (periodic) transfers
585 * when maxpacket is not a multiple of 4 bytes.
586 */
587 if (count || total < ep->ep.maxpacket) {
588 tmp = count ? get_unaligned((u32 *)buf) : count;
589 cpu_to_le32s(&tmp);
590 set_fifo_bytecount(ep, count & 0x03);
591 writel(tmp, &regs->ep_data);
592 }
593
594 /* pci writes may still be posted */
595 }
596
597 /* work around erratum 0106: PCI and USB race over the OUT fifo.
598 * caller guarantees chiprev 0100, out endpoint is NAKing, and
599 * there's no real data in the fifo.
600 *
601 * NOTE: also used in cases where that erratum doesn't apply:
602 * where the host wrote "too much" data to us.
603 */
604 static void out_flush(struct net2280_ep *ep)
605 {
606 u32 __iomem *statp;
607 u32 tmp;
608
609 statp = &ep->regs->ep_stat;
610
611 tmp = readl(statp);
612 if (tmp & BIT(NAK_OUT_PACKETS)) {
613 ep_dbg(ep->dev, "%s %s %08x !NAK\n",
614 ep->ep.name, __func__, tmp);
615 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
616 }
617
618 writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
619 BIT(DATA_PACKET_RECEIVED_INTERRUPT),
620 statp);
621 writel(BIT(FIFO_FLUSH), statp);
622 /* Make sure that statp is written */
623 mb();
624 tmp = readl(statp);
625 if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
626 /* high speed did bulk NYET; fifo isn't filling */
627 ep->dev->gadget.speed == USB_SPEED_FULL) {
628 unsigned usec;
629
630 usec = 50; /* 64 byte bulk/interrupt */
631 handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
632 BIT(USB_OUT_PING_NAK_SENT), usec);
633 /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
634 }
635 }
636
637 /* unload packet(s) from the fifo we use for usb OUT transfers.
638 * returns true iff the request completed, because of short packet
639 * or the request buffer having filled with full packets.
640 *
641 * for ep-a..ep-d this will read multiple packets out when they
642 * have been accepted.
643 */
644 static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
645 {
646 struct net2280_ep_regs __iomem *regs = ep->regs;
647 u8 *buf = req->req.buf + req->req.actual;
648 unsigned count, tmp, is_short;
649 unsigned cleanup = 0, prevent = 0;
650
651 /* erratum 0106 ... packets coming in during fifo reads might
652 * be incompletely rejected. not all cases have workarounds.
653 */
654 if (ep->dev->chiprev == 0x0100 &&
655 ep->dev->gadget.speed == USB_SPEED_FULL) {
656 udelay(1);
657 tmp = readl(&ep->regs->ep_stat);
658 if ((tmp & BIT(NAK_OUT_PACKETS)))
659 cleanup = 1;
660 else if ((tmp & BIT(FIFO_FULL))) {
661 start_out_naking(ep);
662 prevent = 1;
663 }
664 /* else: hope we don't see the problem */
665 }
666
667 /* never overflow the rx buffer. the fifo reads packets until
668 * it sees a short one; we might not be ready for them all.
669 */
670 prefetchw(buf);
671 count = readl(&regs->ep_avail);
672 if (unlikely(count == 0)) {
673 udelay(1);
674 tmp = readl(&ep->regs->ep_stat);
675 count = readl(&regs->ep_avail);
676 /* handled that data already? */
677 if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
678 return 0;
679 }
680
681 tmp = req->req.length - req->req.actual;
682 if (count > tmp) {
683 /* as with DMA, data overflow gets flushed */
684 if ((tmp % ep->ep.maxpacket) != 0) {
685 ep_err(ep->dev,
686 "%s out fifo %d bytes, expected %d\n",
687 ep->ep.name, count, tmp);
688 req->req.status = -EOVERFLOW;
689 cleanup = 1;
690 /* NAK_OUT_PACKETS will be set, so flushing is safe;
691 * the next read will start with the next packet
692 */
693 } /* else it's a ZLP, no worries */
694 count = tmp;
695 }
696 req->req.actual += count;
697
698 is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
699
700 ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
701 ep->ep.name, count, is_short ? " (short)" : "",
702 cleanup ? " flush" : "", prevent ? " nak" : "",
703 req, req->req.actual, req->req.length);
704
705 while (count >= 4) {
706 tmp = readl(&regs->ep_data);
707 cpu_to_le32s(&tmp);
708 put_unaligned(tmp, (u32 *)buf);
709 buf += 4;
710 count -= 4;
711 }
712 if (count) {
713 tmp = readl(&regs->ep_data);
714 /* LE conversion is implicit here: */
715 do {
716 *buf++ = (u8) tmp;
717 tmp >>= 8;
718 } while (--count);
719 }
720 if (cleanup)
721 out_flush(ep);
722 if (prevent) {
723 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
724 (void) readl(&ep->regs->ep_rsp);
725 }
726
727 return is_short || ((req->req.actual == req->req.length) &&
728 !req->req.zero);
729 }
730
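/* descriptor layout, as used below: dmacount packs the byte count with
 * the VALID_BIT, DMA_DONE_INTERRUPT_ENABLE, END_OF_CHAIN and
 * DMA_DIRECTION control bits; dmaaddr is the buffer, dmadesc the link
 * to the next descriptor
 */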
731 /* fill out dma descriptor to match a given request */
732 static void fill_dma_desc(struct net2280_ep *ep,
733 struct net2280_request *req, int valid)
734 {
735 struct net2280_dma *td = req->td;
736 u32 dmacount = req->req.length;
737
738 /* don't let DMA continue after a short OUT packet,
739 * so overruns can't affect the next transfer.
740 * in case of overruns on max-size packets, we can't
741 * stop the fifo from filling but we can flush it.
742 */
743 if (ep->is_in)
744 dmacount |= BIT(DMA_DIRECTION);
745 if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
746 !(ep->dev->quirks & PLX_2280))
747 dmacount |= BIT(END_OF_CHAIN);
748
749 req->valid = valid;
750 if (valid)
751 dmacount |= BIT(VALID_BIT);
752 dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);
753
754 /* td->dmadesc = previously set by caller */
755 td->dmaaddr = cpu_to_le32 (req->req.dma);
756
757 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
758 wmb();
759 td->dmacount = cpu_to_le32(dmacount);
760 }
761
762 static const u32 dmactl_default =
763 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
764 BIT(DMA_CLEAR_COUNT_ENABLE) |
765 /* erratum 0116 workaround part 1 (use POLLING) */
766 (POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
767 BIT(DMA_VALID_BIT_POLLING_ENABLE) |
768 BIT(DMA_VALID_BIT_ENABLE) |
769 BIT(DMA_SCATTER_GATHER_ENABLE) |
770 /* erratum 0116 workaround part 2 (no AUTOSTART) */
771 BIT(DMA_ENABLE);
772
773 static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
774 {
775 handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
776 }
777
778 static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
779 {
780 writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
781 spin_stop_dma(dma);
782 }
783
784 static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
785 {
786 struct net2280_dma_regs __iomem *dma = ep->dma;
787 unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);
788
789 if (!(ep->dev->quirks & PLX_2280))
790 tmp |= BIT(END_OF_CHAIN);
791
792 writel(tmp, &dma->dmacount);
793 writel(readl(&dma->dmastat), &dma->dmastat);
794
795 writel(td_dma, &dma->dmadesc);
796 if (ep->dev->quirks & PLX_SUPERSPEED)
797 dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
798 writel(dmactl, &dma->dmactl);
799
800 /* erratum 0116 workaround part 3: pci arbiter away from net2280 */
801 (void) readl(&ep->dev->pci->pcimstctl);
802
803 writel(BIT(DMA_START), &dma->dmastat);
804
805 if (!ep->is_in)
806 stop_out_naking(ep);
807 }
808
809 static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
810 {
811 u32 tmp;
812 struct net2280_dma_regs __iomem *dma = ep->dma;
813
814 /* FIXME can't use DMA for ZLPs */
815
816 /* on this path we "know" there's no dma active (yet) */
817 WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
818 writel(0, &ep->dma->dmactl);
819
820 /* previous OUT packet might have been short */
821 if (!ep->is_in && (readl(&ep->regs->ep_stat) &
822 BIT(NAK_OUT_PACKETS))) {
823 writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
824 &ep->regs->ep_stat);
825
826 tmp = readl(&ep->regs->ep_avail);
827 if (tmp) {
828 writel(readl(&dma->dmastat), &dma->dmastat);
829
830 /* transfer all/some fifo data */
831 writel(req->req.dma, &dma->dmaaddr);
832 tmp = min(tmp, req->req.length);
833
834 /* dma irq, faking scatterlist status */
835 req->td->dmacount = cpu_to_le32(req->req.length - tmp);
836 writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
837 &dma->dmacount);
838 req->td->dmadesc = 0;
839 req->valid = 1;
840
841 writel(BIT(DMA_ENABLE), &dma->dmactl);
842 writel(BIT(DMA_START), &dma->dmastat);
843 return;
844 }
845 }
846
847 tmp = dmactl_default;
848
849 /* force packet boundaries between dma requests, but prevent the
850 * controller from automagically writing a last "short" packet
851 * (zero length) unless the driver explicitly said to do that.
852 */
853 if (ep->is_in) {
854 if (likely((req->req.length % ep->ep.maxpacket) ||
855 req->req.zero)){
856 tmp |= BIT(DMA_FIFO_VALIDATE);
857 ep->in_fifo_validate = 1;
858 } else
859 ep->in_fifo_validate = 0;
860 }
861
862 /* init req->td, pointing to the current dummy */
863 req->td->dmadesc = cpu_to_le32 (ep->td_dma);
864 fill_dma_desc(ep, req, 1);
865
866 req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
867
868 start_queue(ep, tmp, req->td_dma);
869 }
870
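/* each endpoint keeps a spare "dummy" descriptor at the end of its chain;
 * a new request trades places with it (descriptor and DMA address both),
 * so the hardware never follows a link to a half-built descriptor
 */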
871 static inline void
872 queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
873 {
874 struct net2280_dma *end;
875 dma_addr_t tmp;
876
877 /* swap new dummy for old, link; fill and maybe activate */
878 end = ep->dummy;
879 ep->dummy = req->td;
880 req->td = end;
881
882 tmp = ep->td_dma;
883 ep->td_dma = req->td_dma;
884 req->td_dma = tmp;
885
886 end->dmadesc = cpu_to_le32 (ep->td_dma);
887
888 fill_dma_desc(ep, req, valid);
889 }
890
891 static void
892 done(struct net2280_ep *ep, struct net2280_request *req, int status)
893 {
894 struct net2280 *dev;
895 unsigned stopped = ep->stopped;
896
897 list_del_init(&req->queue);
898
899 if (req->req.status == -EINPROGRESS)
900 req->req.status = status;
901 else
902 status = req->req.status;
903
904 dev = ep->dev;
905 if (ep->dma)
906 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
907
908 if (status && status != -ESHUTDOWN)
909 ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
910 ep->ep.name, &req->req, status,
911 req->req.actual, req->req.length);
912
913 /* don't modify queue heads during completion callback */
914 ep->stopped = 1;
915 spin_unlock(&dev->lock);
916 usb_gadget_giveback_request(&ep->ep, &req->req);
917 spin_lock(&dev->lock);
918 ep->stopped = stopped;
919 }
920
921 /*-------------------------------------------------------------------------*/
922
923 static int
924 net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
925 {
926 struct net2280_request *req;
927 struct net2280_ep *ep;
928 struct net2280 *dev;
929 unsigned long flags;
930 int ret = 0;
931
932 /* we always require a cpu-view buffer, so that we can
933 * always use pio (as fallback or whatever).
934 */
935 ep = container_of(_ep, struct net2280_ep, ep);
936 if (!_ep || (!ep->desc && ep->num != 0)) {
937 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
938 return -EINVAL;
939 }
940 req = container_of(_req, struct net2280_request, req);
941 if (!_req || !_req->complete || !_req->buf ||
942 !list_empty(&req->queue)) {
943 ret = -EINVAL;
944 goto print_err;
945 }
946 if (_req->length > (~0 & DMA_BYTE_COUNT_MASK)) {
947 ret = -EDOM;
948 goto print_err;
949 }
950 dev = ep->dev;
951 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
952 ret = -ESHUTDOWN;
953 goto print_err;
954 }
955
956 /* FIXME implement PIO fallback for ZLPs with DMA */
957 if (ep->dma && _req->length == 0) {
958 ret = -EOPNOTSUPP;
959 goto print_err;
960 }
961
962 /* set up dma mapping in case the caller didn't */
963 if (ep->dma) {
964 ret = usb_gadget_map_request(&dev->gadget, _req,
965 ep->is_in);
966 if (ret)
967 goto print_err;
968 }
969
970 ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
971 _ep->name, _req, _req->length, _req->buf);
972
973 spin_lock_irqsave(&dev->lock, flags);
974
975 _req->status = -EINPROGRESS;
976 _req->actual = 0;
977
978 /* kickstart this i/o queue? */
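/* (on 338x parts, presumably not while the endpoint is halted with DMA
 * pending; the clear-halt path in net2280_set_halt_and_wedge() restarts
 * DMA instead)
 */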
979 if (list_empty(&ep->queue) && !ep->stopped &&
980 !((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
981 (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
982
983 /* use DMA if the endpoint supports it, else pio */
984 if (ep->dma)
985 start_dma(ep, req);
986 else {
987 /* maybe there's no control data, just status ack */
988 if (ep->num == 0 && _req->length == 0) {
989 allow_status(ep);
990 done(ep, req, 0);
991 ep_vdbg(dev, "%s status ack\n", ep->ep.name);
992 goto done;
993 }
994
995 /* PIO ... stuff the fifo, or unblock it. */
996 if (ep->is_in)
997 write_fifo(ep, _req);
998 else if (list_empty(&ep->queue)) {
999 u32 s;
1000
1001 /* OUT FIFO might have packet(s) buffered */
1002 s = readl(&ep->regs->ep_stat);
1003 if ((s & BIT(FIFO_EMPTY)) == 0) {
1004 /* note: _req->short_not_ok is
1005 * ignored here since PIO _always_
1006 * stops queue advance here, and
1007 * _req->status doesn't change for
1008 * short reads (only _req->actual)
1009 */
1010 if (read_fifo(ep, req) &&
1011 ep->num == 0) {
1012 done(ep, req, 0);
1013 allow_status(ep);
1014 /* don't queue it */
1015 req = NULL;
1016 } else if (read_fifo(ep, req) &&
1017 ep->num != 0) {
1018 done(ep, req, 0);
1019 req = NULL;
1020 } else
1021 s = readl(&ep->regs->ep_stat);
1022 }
1023
1024 /* don't NAK, let the fifo fill */
1025 if (req && (s & BIT(NAK_OUT_PACKETS)))
1026 writel(BIT(CLEAR_NAK_OUT_PACKETS),
1027 &ep->regs->ep_rsp);
1028 }
1029 }
1030
1031 } else if (ep->dma) {
1032 int valid = 1;
1033
1034 if (ep->is_in) {
1035 int expect;
1036
1037 /* preventing magic zlps is per-engine state, not
1038 * per-transfer; irq logic must recover hiccups.
1039 */
1040 expect = likely(req->req.zero ||
1041 (req->req.length % ep->ep.maxpacket));
1042 if (expect != ep->in_fifo_validate)
1043 valid = 0;
1044 }
1045 queue_dma(ep, req, valid);
1046
1047 } /* else the irq handler advances the queue. */
1048
1049 ep->responded = 1;
1050 if (req)
1051 list_add_tail(&req->queue, &ep->queue);
1052 done:
1053 spin_unlock_irqrestore(&dev->lock, flags);
1054
1055 /* pci writes may still be posted */
1056 return ret;
1057
1058 print_err:
1059 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
1060 return ret;
1061 }
1062
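/* the low bits of dmacount hold the residue; bytes actually
 * transferred = requested length - residue
 */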
1063 static inline void
1064 dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
1065 int status)
1066 {
1067 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
1068 done(ep, req, status);
1069 }
1070
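/* a descriptor counts as retired once the hardware has cleared VALID_BIT
 * in its dmacount; req->valid records that the driver armed it at all
 */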
1071 static void scan_dma_completions(struct net2280_ep *ep)
1072 {
1073 /* only look at descriptors that were "naturally" retired,
1074 * so fifo and list head state won't matter
1075 */
1076 while (!list_empty(&ep->queue)) {
1077 struct net2280_request *req;
1078 u32 tmp;
1079
1080 req = list_entry(ep->queue.next,
1081 struct net2280_request, queue);
1082 if (!req->valid)
1083 break;
1084 rmb();
1085 tmp = le32_to_cpup(&req->td->dmacount);
1086 if ((tmp & BIT(VALID_BIT)) != 0)
1087 break;
1088
1089 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
1090 * cases where DMA must be aborted; this code handles
1091 * all non-abort DMA completions.
1092 */
1093 if (unlikely(req->td->dmadesc == 0)) {
1094 /* paranoia */
1095 tmp = readl(&ep->dma->dmacount);
1096 if (tmp & DMA_BYTE_COUNT_MASK)
1097 break;
1098 /* single transfer mode */
1099 dma_done(ep, req, tmp, 0);
1100 break;
1101 } else if (!ep->is_in &&
1102 (req->req.length % ep->ep.maxpacket) &&
1103 !(ep->dev->quirks & PLX_SUPERSPEED)) {
1104 /* don't clobber tmp: dma_done() below still needs the dmacount read above */
1105 u32 const ep_stat = readl(&ep->regs->ep_stat);
1106 /* AVOID TROUBLE HERE by not issuing short reads from
1107 * your gadget driver. That helps avoid errata 0121,
1108 * 0122, and 0124; not all cases trigger the warning.
1109 */
1110 if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
1111 ep_warn(ep->dev, "%s lost packet sync!\n",
1112 ep->ep.name);
1113 req->req.status = -EOVERFLOW;
1114 } else {
1115 u32 const ep_avail = readl(&ep->regs->ep_avail);
1116 if (ep_avail) {
1117 /* fifo gets flushed later */
1118 ep->out_overflow = 1;
1119 ep_dbg(ep->dev,
1120 "%s dma, discard %d len %d\n",
1121 ep->ep.name, ep_avail,
1122 req->req.length);
1123 req->req.status = -EOVERFLOW;
1124 }
1125 }
1126 }
1127 dma_done(ep, req, tmp, 0);
1128 }
1129 }
1130
1131 static void restart_dma(struct net2280_ep *ep)
1132 {
1133 struct net2280_request *req;
1134
1135 if (ep->stopped)
1136 return;
1137 req = list_entry(ep->queue.next, struct net2280_request, queue);
1138
1139 start_dma(ep, req);
1140 }
1141
1142 static void abort_dma(struct net2280_ep *ep)
1143 {
1144 /* abort the current transfer */
1145 if (likely(!list_empty(&ep->queue))) {
1146 /* FIXME work around errata 0121, 0122, 0124 */
1147 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
1148 spin_stop_dma(ep->dma);
1149 } else
1150 stop_dma(ep->dma);
1151 scan_dma_completions(ep);
1152 }
1153
1154 /* dequeue ALL requests */
1155 static void nuke(struct net2280_ep *ep)
1156 {
1157 struct net2280_request *req;
1158
1159 /* called with spinlock held */
1160 ep->stopped = 1;
1161 if (ep->dma)
1162 abort_dma(ep);
1163 while (!list_empty(&ep->queue)) {
1164 req = list_entry(ep->queue.next,
1165 struct net2280_request,
1166 queue);
1167 done(ep, req, -ESHUTDOWN);
1168 }
1169 }
1170
1171 /* dequeue JUST ONE request */
1172 static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1173 {
1174 struct net2280_ep *ep;
1175 struct net2280_request *req;
1176 unsigned long flags;
1177 u32 dmactl;
1178 int stopped;
1179
1180 ep = container_of(_ep, struct net2280_ep, ep);
1181 if (!_ep || (!ep->desc && ep->num != 0) || !_req) {
1182 pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n",
1183 __func__, _ep, _req);
1184 return -EINVAL;
1185 }
1186
1187 spin_lock_irqsave(&ep->dev->lock, flags);
1188 stopped = ep->stopped;
1189
1190 /* quiesce dma while we patch the queue */
1191 dmactl = 0;
1192 ep->stopped = 1;
1193 if (ep->dma) {
1194 dmactl = readl(&ep->dma->dmactl);
1195 /* WARNING erratum 0127 may kick in ... */
1196 stop_dma(ep->dma);
1197 scan_dma_completions(ep);
1198 }
1199
1200 /* make sure it's still queued on this endpoint */
1201 list_for_each_entry(req, &ep->queue, queue) {
1202 if (&req->req == _req)
1203 break;
1204 }
1205 if (&req->req != _req) {
1206 spin_unlock_irqrestore(&ep->dev->lock, flags);
1207 dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
1208 __func__);
1209 return -EINVAL;
1210 }
1211
1212 /* queue head may be partially complete. */
1213 if (ep->queue.next == &req->queue) {
1214 if (ep->dma) {
1215 ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
1216 _req->status = -ECONNRESET;
1217 abort_dma(ep);
1218 if (likely(ep->queue.next == &req->queue)) {
1219 /* NOTE: misreports single-transfer mode */
1220 req->td->dmacount = 0; /* invalidate */
1221 dma_done(ep, req,
1222 readl(&ep->dma->dmacount),
1223 -ECONNRESET);
1224 }
1225 } else {
1226 ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
1227 done(ep, req, -ECONNRESET);
1228 }
1229 req = NULL;
1230 }
1231
1232 if (req)
1233 done(ep, req, -ECONNRESET);
1234 ep->stopped = stopped;
1235
1236 if (ep->dma) {
1237 /* turn off dma on inactive queues */
1238 if (list_empty(&ep->queue))
1239 stop_dma(ep->dma);
1240 else if (!ep->stopped) {
1241 /* resume current request, or start new one */
1242 if (req)
1243 writel(dmactl, &ep->dma->dmactl);
1244 else
1245 start_dma(ep, list_entry(ep->queue.next,
1246 struct net2280_request, queue));
1247 }
1248 }
1249
1250 spin_unlock_irqrestore(&ep->dev->lock, flags);
1251 return 0;
1252 }
1253
1254 /*-------------------------------------------------------------------------*/
1255
1256 static int net2280_fifo_status(struct usb_ep *_ep);
1257
1258 static int
1259 net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
1260 {
1261 struct net2280_ep *ep;
1262 unsigned long flags;
1263 int retval = 0;
1264
1265 ep = container_of(_ep, struct net2280_ep, ep);
1266 if (!_ep || (!ep->desc && ep->num != 0)) {
1267 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
1268 return -EINVAL;
1269 }
1270 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
1271 retval = -ESHUTDOWN;
1272 goto print_err;
1273 }
1274 if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
1275 == USB_ENDPOINT_XFER_ISOC) {
1276 retval = -EINVAL;
1277 goto print_err;
1278 }
1279
1280 spin_lock_irqsave(&ep->dev->lock, flags);
1281 if (!list_empty(&ep->queue)) {
1282 retval = -EAGAIN;
1283 goto print_unlock;
1284 } else if (ep->is_in && value && net2280_fifo_status(_ep) != 0) {
1285 retval = -EAGAIN;
1286 goto print_unlock;
1287 } else {
1288 ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
1289 value ? "set" : "clear",
1290 wedged ? "wedge" : "halt");
1291 /* set/clear, then synch memory views with the device */
1292 if (value) {
1293 if (ep->num == 0)
1294 ep->dev->protocol_stall = 1;
1295 else
1296 set_halt(ep);
1297 if (wedged)
1298 ep->wedged = 1;
1299 } else {
1300 clear_halt(ep);
1301 if (ep->dev->quirks & PLX_SUPERSPEED &&
1302 !list_empty(&ep->queue) && ep->td_dma)
1303 restart_dma(ep);
1304 ep->wedged = 0;
1305 }
1306 (void) readl(&ep->regs->ep_rsp);
1307 }
1308 spin_unlock_irqrestore(&ep->dev->lock, flags);
1309
1310 return retval;
1311
1312 print_unlock:
1313 spin_unlock_irqrestore(&ep->dev->lock, flags);
1314 print_err:
1315 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, retval);
1316 return retval;
1317 }
1318
1319 static int net2280_set_halt(struct usb_ep *_ep, int value)
1320 {
1321 return net2280_set_halt_and_wedge(_ep, value, 0);
1322 }
1323
1324 static int net2280_set_wedge(struct usb_ep *_ep)
1325 {
1326 if (!_ep || _ep->name == ep0name) {
1327 pr_err("%s: Invalid ep=%p or ep0\n", __func__, _ep);
1328 return -EINVAL;
1329 }
1330 return net2280_set_halt_and_wedge(_ep, 1, 1);
1331 }
1332
1333 static int net2280_fifo_status(struct usb_ep *_ep)
1334 {
1335 struct net2280_ep *ep;
1336 u32 avail;
1337
1338 ep = container_of(_ep, struct net2280_ep, ep);
1339 if (!_ep || (!ep->desc && ep->num != 0)) {
1340 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
1341 return -ENODEV;
1342 }
1343 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
1344 dev_err(&ep->dev->pdev->dev,
1345 "%s: Invalid driver=%p or speed=%d\n",
1346 __func__, ep->dev->driver, ep->dev->gadget.speed);
1347 return -ESHUTDOWN;
1348 }
1349
1350 avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
1351 if (avail > ep->fifo_size) {
1352 dev_err(&ep->dev->pdev->dev, "%s: Fifo overflow\n", __func__);
1353 return -EOVERFLOW;
1354 }
1355 if (ep->is_in)
1356 avail = ep->fifo_size - avail;
1357 return avail;
1358 }
1359
1360 static void net2280_fifo_flush(struct usb_ep *_ep)
1361 {
1362 struct net2280_ep *ep;
1363
1364 ep = container_of(_ep, struct net2280_ep, ep);
1365 if (!_ep || (!ep->desc && ep->num != 0)) {
1366 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
1367 return;
1368 }
1369 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
1370 dev_err(&ep->dev->pdev->dev,
1371 "%s: Invalid driver=%p or speed=%d\n",
1372 __func__, ep->dev->driver, ep->dev->gadget.speed);
1373 return;
1374 }
1375
1376 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
1377 (void) readl(&ep->regs->ep_rsp);
1378 }
1379
1380 static const struct usb_ep_ops net2280_ep_ops = {
1381 .enable = net2280_enable,
1382 .disable = net2280_disable,
1383
1384 .alloc_request = net2280_alloc_request,
1385 .free_request = net2280_free_request,
1386
1387 .queue = net2280_queue,
1388 .dequeue = net2280_dequeue,
1389
1390 .set_halt = net2280_set_halt,
1391 .set_wedge = net2280_set_wedge,
1392 .fifo_status = net2280_fifo_status,
1393 .fifo_flush = net2280_fifo_flush,
1394 };
1395
1396 /*-------------------------------------------------------------------------*/
1397
1398 static int net2280_get_frame(struct usb_gadget *_gadget)
1399 {
1400 struct net2280 *dev;
1401 unsigned long flags;
1402 u16 retval;
1403
1404 if (!_gadget)
1405 return -ENODEV;
1406 dev = container_of(_gadget, struct net2280, gadget);
1407 spin_lock_irqsave(&dev->lock, flags);
1408 retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
1409 spin_unlock_irqrestore(&dev->lock, flags);
1410 return retval;
1411 }
1412
1413 static int net2280_wakeup(struct usb_gadget *_gadget)
1414 {
1415 struct net2280 *dev;
1416 u32 tmp;
1417 unsigned long flags;
1418
1419 if (!_gadget)
1420 return 0;
1421 dev = container_of(_gadget, struct net2280, gadget);
1422
1423 spin_lock_irqsave(&dev->lock, flags);
1424 tmp = readl(&dev->usb->usbctl);
1425 if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
1426 writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
1427 spin_unlock_irqrestore(&dev->lock, flags);
1428
1429 /* pci writes may still be posted */
1430 return 0;
1431 }
1432
1433 static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
1434 {
1435 struct net2280 *dev;
1436 u32 tmp;
1437 unsigned long flags;
1438
1439 if (!_gadget)
1440 return 0;
1441 dev = container_of(_gadget, struct net2280, gadget);
1442
1443 spin_lock_irqsave(&dev->lock, flags);
1444 tmp = readl(&dev->usb->usbctl);
1445 if (value) {
1446 tmp |= BIT(SELF_POWERED_STATUS);
1447 _gadget->is_selfpowered = 1;
1448 } else {
1449 tmp &= ~BIT(SELF_POWERED_STATUS);
1450 _gadget->is_selfpowered = 0;
1451 }
1452 writel(tmp, &dev->usb->usbctl);
1453 spin_unlock_irqrestore(&dev->lock, flags);
1454
1455 return 0;
1456 }
1457
1458 static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1459 {
1460 struct net2280 *dev;
1461 u32 tmp;
1462 unsigned long flags;
1463
1464 if (!_gadget)
1465 return -ENODEV;
1466 dev = container_of(_gadget, struct net2280, gadget);
1467
1468 spin_lock_irqsave(&dev->lock, flags);
1469 tmp = readl(&dev->usb->usbctl);
1470 dev->softconnect = (is_on != 0);
1471 if (is_on)
1472 tmp |= BIT(USB_DETECT_ENABLE);
1473 else
1474 tmp &= ~BIT(USB_DETECT_ENABLE);
1475 writel(tmp, &dev->usb->usbctl);
1476 spin_unlock_irqrestore(&dev->lock, flags);
1477
1478 return 0;
1479 }
1480
1481 static int net2280_start(struct usb_gadget *_gadget,
1482 struct usb_gadget_driver *driver);
1483 static int net2280_stop(struct usb_gadget *_gadget);
1484
1485 static const struct usb_gadget_ops net2280_ops = {
1486 .get_frame = net2280_get_frame,
1487 .wakeup = net2280_wakeup,
1488 .set_selfpowered = net2280_set_selfpowered,
1489 .pullup = net2280_pullup,
1490 .udc_start = net2280_start,
1491 .udc_stop = net2280_stop,
1492 };
1493
1494 /*-------------------------------------------------------------------------*/
1495
1496 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1497
1498 /* FIXME move these into procfs, and use seq_file.
1499 * Sysfs _still_ doesn't behave for arbitrarily sized files,
1500 * and also doesn't help products using this with 2.4 kernels.
1501 */
1502
1503 /* "function" sysfs attribute */
1504 static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
1505 char *buf)
1506 {
1507 struct net2280 *dev = dev_get_drvdata(_dev);
1508
1509 if (!dev->driver || !dev->driver->function ||
1510 strlen(dev->driver->function) > PAGE_SIZE)
1511 return 0;
1512 return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
1513 }
1514 static DEVICE_ATTR_RO(function);
1515
1516 static ssize_t registers_show(struct device *_dev,
1517 struct device_attribute *attr, char *buf)
1518 {
1519 struct net2280 *dev;
1520 char *next;
1521 unsigned size, t;
1522 unsigned long flags;
1523 int i;
1524 u32 t1, t2;
1525 const char *s;
1526
1527 dev = dev_get_drvdata(_dev);
1528 next = buf;
1529 size = PAGE_SIZE;
1530 spin_lock_irqsave(&dev->lock, flags);
1531
1532 if (dev->driver)
1533 s = dev->driver->driver.name;
1534 else
1535 s = "(none)";
1536
1537 /* Main Control Registers */
1538 t = scnprintf(next, size, "%s version " DRIVER_VERSION
1539 ", chiprev %04x\n\n"
1540 "devinit %03x fifoctl %08x gadget '%s'\n"
1541 "pci irqenb0 %02x irqenb1 %08x "
1542 "irqstat0 %04x irqstat1 %08x\n",
1543 driver_name, dev->chiprev,
1544 readl(&dev->regs->devinit),
1545 readl(&dev->regs->fifoctl),
1546 s,
1547 readl(&dev->regs->pciirqenb0),
1548 readl(&dev->regs->pciirqenb1),
1549 readl(&dev->regs->irqstat0),
1550 readl(&dev->regs->irqstat1));
1551 size -= t;
1552 next += t;
1553
1554 /* USB Control Registers */
1555 t1 = readl(&dev->usb->usbctl);
1556 t2 = readl(&dev->usb->usbstat);
1557 if (t1 & BIT(VBUS_PIN)) {
1558 if (t2 & BIT(HIGH_SPEED))
1559 s = "high speed";
1560 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1561 s = "powered";
1562 else
1563 s = "full speed";
1564 /* full speed bit (6) not working?? */
1565 } else
1566 s = "not attached";
1567 t = scnprintf(next, size,
1568 "stdrsp %08x usbctl %08x usbstat %08x "
1569 "addr 0x%02x (%s)\n",
1570 readl(&dev->usb->stdrsp), t1, t2,
1571 readl(&dev->usb->ouraddr), s);
1572 size -= t;
1573 next += t;
1574
1575 /* PCI Master Control Registers */
1576
1577 /* DMA Control Registers */
1578
1579 /* Configurable EP Control Registers */
1580 for (i = 0; i < dev->n_ep; i++) {
1581 struct net2280_ep *ep;
1582
1583 ep = &dev->ep[i];
1584 if (i && !ep->desc)
1585 continue;
1586
1587 t1 = readl(&ep->cfg->ep_cfg);
1588 t2 = readl(&ep->regs->ep_rsp) & 0xff;
1589 t = scnprintf(next, size,
1590 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
1591 "irqenb %02x\n",
1592 ep->ep.name, t1, t2,
1593 (t2 & BIT(CLEAR_NAK_OUT_PACKETS))
1594 ? "NAK " : "",
1595 (t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
1596 ? "hide " : "",
1597 (t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
1598 ? "CRC " : "",
1599 (t2 & BIT(CLEAR_INTERRUPT_MODE))
1600 ? "interrupt " : "",
1601 (t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
1602 ? "status " : "",
1603 (t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
1604 ? "NAKmode " : "",
1605 (t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
1606 ? "DATA1 " : "DATA0 ",
1607 (t2 & BIT(CLEAR_ENDPOINT_HALT))
1608 ? "HALT " : "",
1609 readl(&ep->regs->ep_irqenb));
1610 size -= t;
1611 next += t;
1612
1613 t = scnprintf(next, size,
1614 "\tstat %08x avail %04x "
1615 "(ep%d%s-%s)%s\n",
1616 readl(&ep->regs->ep_stat),
1617 readl(&ep->regs->ep_avail),
1618 t1 & 0x0f, DIR_STRING(t1),
1619 type_string(t1 >> 8),
1620 ep->stopped ? "*" : "");
1621 size -= t;
1622 next += t;
1623
1624 if (!ep->dma)
1625 continue;
1626
1627 t = scnprintf(next, size,
1628 " dma\tctl %08x stat %08x count %08x\n"
1629 "\taddr %08x desc %08x\n",
1630 readl(&ep->dma->dmactl),
1631 readl(&ep->dma->dmastat),
1632 readl(&ep->dma->dmacount),
1633 readl(&ep->dma->dmaaddr),
1634 readl(&ep->dma->dmadesc));
1635 size -= t;
1636 next += t;
1637
1638 }
1639
1640 /* Indexed Registers (none yet) */
1641
1642 /* Statistics */
1643 t = scnprintf(next, size, "\nirqs: ");
1644 size -= t;
1645 next += t;
1646 for (i = 0; i < dev->n_ep; i++) {
1647 struct net2280_ep *ep;
1648
1649 ep = &dev->ep[i];
1650 if (i && !ep->irqs)
1651 continue;
1652 t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
1653 size -= t;
1654 next += t;
1655
1656 }
1657 t = scnprintf(next, size, "\n");
1658 size -= t;
1659 next += t;
1660
1661 spin_unlock_irqrestore(&dev->lock, flags);
1662
1663 return PAGE_SIZE - size;
1664 }
1665 static DEVICE_ATTR_RO(registers);
1666
1667 static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
1668 char *buf)
1669 {
1670 struct net2280 *dev;
1671 char *next;
1672 unsigned size;
1673 unsigned long flags;
1674 int i;
1675
1676 dev = dev_get_drvdata(_dev);
1677 next = buf;
1678 size = PAGE_SIZE;
1679 spin_lock_irqsave(&dev->lock, flags);
1680
1681 for (i = 0; i < dev->n_ep; i++) {
1682 struct net2280_ep *ep = &dev->ep[i];
1683 struct net2280_request *req;
1684 int t;
1685
1686 if (i != 0) {
1687 const struct usb_endpoint_descriptor *d;
1688
1689 d = ep->desc;
1690 if (!d)
1691 continue;
1692 t = d->bEndpointAddress;
1693 t = scnprintf(next, size,
1694 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
1695 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
1696 (t & USB_DIR_IN) ? "in" : "out",
1697 type_string(d->bmAttributes),
1698 usb_endpoint_maxp(d) & 0x1fff,
1699 ep->dma ? "dma" : "pio", ep->fifo_size
1700 );
1701 } else /* ep0 should only have one transfer queued */
1702 t = scnprintf(next, size, "ep0 max 64 pio %s\n",
1703 ep->is_in ? "in" : "out");
1704 if (t <= 0 || t > size)
1705 goto done;
1706 size -= t;
1707 next += t;
1708
1709 if (list_empty(&ep->queue)) {
1710 t = scnprintf(next, size, "\t(nothing queued)\n");
1711 if (t <= 0 || t > size)
1712 goto done;
1713 size -= t;
1714 next += t;
1715 continue;
1716 }
1717 list_for_each_entry(req, &ep->queue, queue) {
1718 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
1719 t = scnprintf(next, size,
1720 "\treq %p len %d/%d "
1721 "buf %p (dmacount %08x)\n",
1722 &req->req, req->req.actual,
1723 req->req.length, req->req.buf,
1724 readl(&ep->dma->dmacount));
1725 else
1726 t = scnprintf(next, size,
1727 "\treq %p len %d/%d buf %p\n",
1728 &req->req, req->req.actual,
1729 req->req.length, req->req.buf);
1730 if (t <= 0 || t > size)
1731 goto done;
1732 size -= t;
1733 next += t;
1734
1735 if (ep->dma) {
1736 struct net2280_dma *td;
1737
1738 td = req->td;
1739 t = scnprintf(next, size, "\t td %08x "
1740 " count %08x buf %08x desc %08x\n",
1741 (u32) req->td_dma,
1742 le32_to_cpu(td->dmacount),
1743 le32_to_cpu(td->dmaaddr),
1744 le32_to_cpu(td->dmadesc));
1745 if (t <= 0 || t > size)
1746 goto done;
1747 size -= t;
1748 next += t;
1749 }
1750 }
1751 }
1752
1753 done:
1754 spin_unlock_irqrestore(&dev->lock, flags);
1755 return PAGE_SIZE - size;
1756 }
1757 static DEVICE_ATTR_RO(queues);
1758
1759
1760 #else
1761
1762 #define device_create_file(a, b) (0)
1763 #define device_remove_file(a, b) do { } while (0)
1764
1765 #endif
1766
1767 /*-------------------------------------------------------------------------*/
1768
1769 /* another driver-specific mode might be a request type doing dma
1770 * to/from another device fifo instead of to/from memory.
1771 */
1772
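/* applies the fifo_mode module parameter (mode table near the top of
 * this file); only the 228x reset path uses this
 */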
1773 static void set_fifo_mode(struct net2280 *dev, int mode)
1774 {
1775 /* keeping high bits preserves BAR2 */
1776 writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
1777
1778 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
1779 INIT_LIST_HEAD(&dev->gadget.ep_list);
1780 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1781 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1782 switch (mode) {
1783 case 0:
1784 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1785 list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
1786 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1787 break;
1788 case 1:
1789 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
1790 break;
1791 case 2:
1792 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1793 dev->ep[1].fifo_size = 2048;
1794 dev->ep[2].fifo_size = 1024;
1795 break;
1796 }
1797 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
1798 list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
1799 list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
1800 }
1801
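/* the pl_* window appears to be indexed: the low five bits of pl_ep_ctrl
 * select which endpoint the other pl_ep_* registers address, which is how
 * the ep_sel loops below walk all endpoints
 */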
1802 static void defect7374_disable_data_eps(struct net2280 *dev)
1803 {
1804 /*
1805 * For Defect 7374, disable data EPs (and more):
1806 * - This phase undoes the earlier phase of the Defect 7374 workaround,
1807 * returning ep regs back to normal.
1808 */
1809 struct net2280_ep *ep;
1810 int i;
1811 unsigned char ep_sel;
1812 u32 tmp_reg;
1813
1814 for (i = 1; i < 5; i++) {
1815 ep = &dev->ep[i];
1816 writel(0, &ep->cfg->ep_cfg);
1817 }
1818
1819 /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
1820 for (i = 0; i < 6; i++)
1821 writel(0, &dev->dep[i].dep_cfg);
1822
1823 for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1824 /* Select an endpoint for subsequent operations: */
1825 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1826 writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);
1827
1828 if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
1829 ep_sel == 18 || ep_sel == 20)
1830 continue;
1831
1832 /* Change settings on some selected endpoints */
1833 tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
1834 tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
1835 writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
1836 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1837 tmp_reg |= BIT(EP_INITIALIZED);
1838 writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
1839 }
1840 }
1841
1842 static void defect7374_enable_data_eps_zero(struct net2280 *dev)
1843 {
1844 u32 tmp = 0, tmp_reg;
1845 u32 scratch;
1846 int i;
1847 unsigned char ep_sel;
1848
1849 scratch = get_idx_reg(dev->regs, SCRATCH);
1850
1851 WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
1852 == DEFECT7374_FSM_SS_CONTROL_READ);
1853
1854 scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
1855
1856 ep_warn(dev, "Operating Defect 7374 workaround in soft mode this time");
1857 ep_warn(dev, "It will also run on cold reboot and SS connect");
1858
1859 /* GPEPs: */
1860 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
1861 (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
1862 ((dev->enhanced_mode) ?
1863 BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
1864 BIT(IN_ENDPOINT_ENABLE));
1865
1866 for (i = 1; i < 5; i++)
1867 writel(tmp, &dev->ep[i].cfg->ep_cfg);
1868
1869 /* CSRIN, PCIIN, STATIN, RCIN */
1870 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
1871 writel(tmp, &dev->dep[1].dep_cfg);
1872 writel(tmp, &dev->dep[3].dep_cfg);
1873 writel(tmp, &dev->dep[4].dep_cfg);
1874 writel(tmp, &dev->dep[5].dep_cfg);
1875
1876 /* Implemented for development and debug.
1877 * Can be refined/tuned later. */
1878 for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1879 /* Select an endpoint for subsequent operations: */
1880 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1881 writel(((tmp_reg & ~0x1f) | ep_sel),
1882 &dev->plregs->pl_ep_ctrl);
1883
1884 if (ep_sel == 1) {
1885 tmp =
1886 (readl(&dev->plregs->pl_ep_ctrl) |
1887 BIT(CLEAR_ACK_ERROR_CODE) | 0);
1888 writel(tmp, &dev->plregs->pl_ep_ctrl);
1889 continue;
1890 }
1891
1892 if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
1893 ep_sel == 18 || ep_sel == 20)
1894 continue;
1895
1896 tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
1897 BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
1898 writel(tmp, &dev->plregs->pl_ep_cfg_4);
1899
1900 tmp = readl(&dev->plregs->pl_ep_ctrl) &
1901 ~BIT(EP_INITIALIZED);
1902 writel(tmp, &dev->plregs->pl_ep_ctrl);
1903
1904 }
1905
1906 /* Set FSM to focus on the first Control Read:
1907 * - Tip: Connection speed is known upon the first
1908 * setup request. */
1909 scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
1910 set_idx_reg(dev->regs, SCRATCH, scratch);
1911
1912 }
1913
1914 /* keeping it simple:
1915 * - one bus driver, initted first;
1916 * - one function driver, initted second
1917 *
1918 * most of the work to support multiple net2280 controllers would
1919 * be to associate this gadget driver (yes?) with all of them, or
1920 * perhaps to bind specific drivers to specific devices.
1921 */
1922
1923 static void usb_reset_228x(struct net2280 *dev)
1924 {
1925 u32 tmp;
1926
1927 dev->gadget.speed = USB_SPEED_UNKNOWN;
1928 (void) readl(&dev->usb->usbctl);
1929
1930 net2280_led_init(dev);
1931
1932 /* disable automatic responses, and irqs */
1933 writel(0, &dev->usb->stdrsp);
1934 writel(0, &dev->regs->pciirqenb0);
1935 writel(0, &dev->regs->pciirqenb1);
1936
1937 /* clear old dma and irq state */
1938 for (tmp = 0; tmp < 4; tmp++) {
1939 struct net2280_ep *ep = &dev->ep[tmp + 1];
1940 if (ep->dma)
1941 abort_dma(ep);
1942 }
1943
1944 writel(~0, &dev->regs->irqstat0);
1945 writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);
1946
1947 /* reset, and enable pci */
1948 tmp = readl(&dev->regs->devinit) |
1949 BIT(PCI_ENABLE) |
1950 BIT(FIFO_SOFT_RESET) |
1951 BIT(USB_SOFT_RESET) |
1952 BIT(M8051_RESET);
1953 writel(tmp, &dev->regs->devinit);
1954
1955 /* standard fifo and endpoint allocations */
1956 set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
1957 }
1958
1959 static void usb_reset_338x(struct net2280 *dev)
1960 {
1961 u32 tmp;
1962
1963 dev->gadget.speed = USB_SPEED_UNKNOWN;
1964 (void)readl(&dev->usb->usbctl);
1965
1966 net2280_led_init(dev);
1967
1968 if (dev->bug7734_patched) {
1969 /* disable automatic responses, and irqs */
1970 writel(0, &dev->usb->stdrsp);
1971 writel(0, &dev->regs->pciirqenb0);
1972 writel(0, &dev->regs->pciirqenb1);
1973 }
1974
1975 /* clear old dma and irq state */
1976 for (tmp = 0; tmp < 4; tmp++) {
1977 struct net2280_ep *ep = &dev->ep[tmp + 1];
1978
1979 if (ep->dma)
1980 abort_dma(ep);
1981 }
1982
1983 writel(~0, &dev->regs->irqstat0); writel(~0, &dev->regs->irqstat1);
1984
1985 if (dev->bug7734_patched) {
1986 /* reset, and enable pci */
1987 tmp = readl(&dev->regs->devinit) |
1988 BIT(PCI_ENABLE) |
1989 BIT(FIFO_SOFT_RESET) |
1990 BIT(USB_SOFT_RESET) |
1991 BIT(M8051_RESET);
1992
1993 writel(tmp, &dev->regs->devinit);
1994 }
1995
1996 /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
1997 INIT_LIST_HEAD(&dev->gadget.ep_list);
1998
1999 for (tmp = 1; tmp < dev->n_ep; tmp++)
2000 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);
2001
2002 }
2003
2004 static void usb_reset(struct net2280 *dev)
2005 {
2006 if (dev->quirks & PLX_LEGACY)
2007 return usb_reset_228x(dev);
2008 return usb_reset_338x(dev);
2009 }
2010
2011 static void usb_reinit_228x(struct net2280 *dev)
2012 {
2013 u32 tmp;
2014
2015 /* basic endpoint init */
2016 for (tmp = 0; tmp < 7; tmp++) {
2017 struct net2280_ep *ep = &dev->ep[tmp];
2018
2019 ep->ep.name = ep_name[tmp];
2020 ep->dev = dev;
2021 ep->num = tmp;
2022
2023 if (tmp > 0 && tmp <= 4) {
2024 ep->fifo_size = 1024;
2025 ep->dma = &dev->dma[tmp - 1];
2026 } else
2027 ep->fifo_size = 64;
2028 ep->regs = &dev->epregs[tmp];
2029 ep->cfg = &dev->epregs[tmp];
2030 ep_reset_228x(dev->regs, ep);
2031 }
2032 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
2033 usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
2034 usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);
2035
2036 dev->gadget.ep0 = &dev->ep[0].ep;
2037 dev->ep[0].stopped = 0;
2038 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2039
2040 /* we want to prevent lowlevel/insecure access from the USB host,
2041 * but erratum 0119 means this enable bit is ignored
2042 */
2043 for (tmp = 0; tmp < 5; tmp++)
2044 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
2045 }
2046
2047 static void usb_reinit_338x(struct net2280 *dev)
2048 {
2049 int i;
2050 u32 tmp, val;
2051 static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
2052 static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
2053 0x00, 0xC0, 0x00, 0xC0 };
2054
2055 /* basic endpoint init */
2056 for (i = 0; i < dev->n_ep; i++) {
2057 struct net2280_ep *ep = &dev->ep[i];
2058
2059 ep->ep.name = dev->enhanced_mode ? ep_name_adv[i] : ep_name[i];
2060 ep->dev = dev;
2061 ep->num = i;
2062
2063 if (i > 0 && i <= 4)
2064 ep->dma = &dev->dma[i - 1];
2065
2066 if (dev->enhanced_mode) {
2067 ep->cfg = &dev->epregs[ne[i]];
2068 ep->regs = (struct net2280_ep_regs __iomem *)
2069 (((void __iomem *)&dev->epregs[ne[i]]) +
2070 ep_reg_addr[i]);
2071 } else {
2072 ep->cfg = &dev->epregs[i];
2073 ep->regs = &dev->epregs[i];
2074 }
2075
2076 ep->fifo_size = (i != 0) ? 2048 : 512;
2077
2078 ep_reset_338x(dev->regs, ep);
2079 }
2080 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);
2081
2082 dev->gadget.ep0 = &dev->ep[0].ep;
2083 dev->ep[0].stopped = 0;
2084
2085 /* Link layer set up */
2086 if (dev->bug7734_patched) {
2087 tmp = readl(&dev->usb_ext->usbctl2) &
2088 ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
2089 writel(tmp, &dev->usb_ext->usbctl2);
2090 }
2091
2092 /* Hardware Defect and Workaround */
2093 val = readl(&dev->ll_lfps_regs->ll_lfps_5);
2094 val &= ~(0xf << TIMER_LFPS_6US);
2095 val |= 0x5 << TIMER_LFPS_6US;
2096 writel(val, &dev->ll_lfps_regs->ll_lfps_5);
2097
2098 val = readl(&dev->ll_lfps_regs->ll_lfps_6);
2099 val &= ~(0xffff << TIMER_LFPS_80US);
2100 val |= 0x0100 << TIMER_LFPS_80US;
2101 writel(val, &dev->ll_lfps_regs->ll_lfps_6);
2102
2103 /*
2104 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
2105 * Hot Reset Exit Handshake may Fail in Specific Case using
2106 * Default Register Settings. Workaround for Enumeration test.
2107 */
2108 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
2109 val &= ~(0x1f << HOT_TX_NORESET_TS2);
2110 val |= 0x10 << HOT_TX_NORESET_TS2;
2111 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);
2112
2113 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
2114 val &= ~(0x1f << HOT_RX_RESET_TS2);
2115 val |= 0x3 << HOT_RX_RESET_TS2;
2116 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);
2117
2118 /*
2119 * Set Recovery Idle to Recover bit:
2120 * - On SS connections, setting Recovery Idle to Recover Fmw improves
2121 * link robustness with various hosts and hubs.
2122 * - It is safe to set for all connection speeds; all chip revisions.
2123 * - R-M-W to leave other bits undisturbed.
2124 * - Reference PLX TT-7372
2125 */
2126 val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
2127 val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
2128 writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);
2129
2130 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2131
2132 /* disable dedicated endpoints */
2133 writel(0x0D, &dev->dep[0].dep_cfg);
2134 writel(0x0D, &dev->dep[1].dep_cfg);
2135 writel(0x0E, &dev->dep[2].dep_cfg);
2136 writel(0x0E, &dev->dep[3].dep_cfg);
2137 writel(0x0F, &dev->dep[4].dep_cfg);
2138 writel(0x0C, &dev->dep[5].dep_cfg);
2139 }
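#if 0
/*
 * Editor's sketch (illustrative only): in enhanced mode the eight
 * GPEPs share four physical register banks. ne[] maps the logical
 * endpoint to its bank and the 0x00/0xC0 offset selects the OUT or
 * IN half -- the same address math usb_reinit_338x() performs above.
 * The helper name is hypothetical.
 */
static struct net2280_ep_regs __iomem *ep_regs_338x(struct net2280 *dev,
						    int i)
{
	static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
	static const u32 off[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
				    0x00, 0xC0, 0x00, 0xC0 };

	return (struct net2280_ep_regs __iomem *)
		(((void __iomem *)&dev->epregs[ne[i]]) + off[i]);
}
#endif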
2140
2141 static void usb_reinit(struct net2280 *dev)
2142 {
2143 if (dev->quirks & PLX_LEGACY)
2144 return usb_reinit_228x(dev);
2145 return usb_reinit_338x(dev);
2146 }
2147
2148 static void ep0_start_228x(struct net2280 *dev)
2149 {
2150 writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
2151 BIT(CLEAR_NAK_OUT_PACKETS) |
2152 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
2153 &dev->epregs[0].ep_rsp);
2154
2155 /*
2156 * hardware optionally handles a bunch of standard requests
2157 * that the API hides from drivers anyway. have it do so.
2158 * endpoint status/features are handled in software, to
2159 * help pass tests for some dubious behavior.
2160 */
2161 writel(BIT(SET_TEST_MODE) |
2162 BIT(SET_ADDRESS) |
2163 BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
2164 BIT(GET_DEVICE_STATUS) |
2165 BIT(GET_INTERFACE_STATUS),
2166 &dev->usb->stdrsp);
2167 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
2168 BIT(SELF_POWERED_USB_DEVICE) |
2169 BIT(REMOTE_WAKEUP_SUPPORT) |
2170 (dev->softconnect << USB_DETECT_ENABLE) |
2171 BIT(SELF_POWERED_STATUS),
2172 &dev->usb->usbctl);
2173
2174 /* enable irqs so we can see ep0 and general operation */
2175 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
2176 BIT(ENDPOINT_0_INTERRUPT_ENABLE),
2177 &dev->regs->pciirqenb0);
2178 writel(BIT(PCI_INTERRUPT_ENABLE) |
2179 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
2180 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
2181 BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
2182 BIT(VBUS_INTERRUPT_ENABLE) |
2183 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
2184 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
2185 &dev->regs->pciirqenb1);
2186
2187 /* don't leave any writes posted */
2188 (void) readl(&dev->usb->usbctl);
2189 }
2190
2191 static void ep0_start_338x(struct net2280 *dev)
2192 {
2194 if (dev->bug7734_patched)
2195 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
2196 BIT(SET_EP_HIDE_STATUS_PHASE),
2197 &dev->epregs[0].ep_rsp);
2198
2199 /*
2200 * hardware optionally handles a bunch of standard requests
2201 * that the API hides from drivers anyway. have it do so.
2202 * endpoint status/features are handled in software, to
2203 * help pass tests for some dubious behavior.
2204 */
2205 writel(BIT(SET_ISOCHRONOUS_DELAY) |
2206 BIT(SET_SEL) |
2207 BIT(SET_TEST_MODE) |
2208 BIT(SET_ADDRESS) |
2209 BIT(GET_INTERFACE_STATUS) |
2210 BIT(GET_DEVICE_STATUS),
2211 &dev->usb->stdrsp);
2212 dev->wakeup_enable = 1;
2213 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
2214 (dev->softconnect << USB_DETECT_ENABLE) |
2215 BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
2216 &dev->usb->usbctl);
2217
2218 /* enable irqs so we can see ep0 and general operation */
2219 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
2220 BIT(ENDPOINT_0_INTERRUPT_ENABLE),
2221 &dev->regs->pciirqenb0);
2222 writel(BIT(PCI_INTERRUPT_ENABLE) |
2223 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
2224 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
2225 BIT(VBUS_INTERRUPT_ENABLE),
2226 &dev->regs->pciirqenb1);
2227
2228 /* don't leave any writes posted */
2229 (void)readl(&dev->usb->usbctl);
2230 }
2231
2232 static void ep0_start(struct net2280 *dev)
2233 {
2234 if (dev->quirks & PLX_LEGACY)
2235 return ep0_start_228x(dev);
2236 return ep0_start_338x(dev);
2237 }
2238
2239 /* when a driver is successfully registered, it will receive
2240 * control requests including set_configuration(), which enables
2241 * non-control requests. then usb traffic follows until a
2242 * disconnect is reported. then a host may connect again, or
2243 * the driver might get unbound.
2244 */
2245 static int net2280_start(struct usb_gadget *_gadget,
2246 struct usb_gadget_driver *driver)
2247 {
2248 struct net2280 *dev;
2249 int retval;
2250 unsigned i;
2251
2252 /* insist on high speed support from the driver, since
2253 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
2254 * "must not be used in normal operation"
2255 */
2256 if (!driver || driver->max_speed < USB_SPEED_HIGH ||
2257 !driver->setup)
2258 return -EINVAL;
2259
2260 dev = container_of(_gadget, struct net2280, gadget);
2261
2262 for (i = 0; i < dev->n_ep; i++)
2263 dev->ep[i].irqs = 0;
2264
2265 /* hook up the driver ... */
2266 dev->softconnect = 1;
2267 driver->driver.bus = NULL;
2268 dev->driver = driver;
2269
2270 retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
2271 if (retval)
2272 goto err_unbind;
2273 retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
2274 if (retval)
2275 goto err_func;
2276
2277 /* enable host detection and ep0; and we're ready
2278 * for set_configuration as well as eventual disconnect.
2279 */
2280 net2280_led_active(dev, 1);
2281
2282 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
2283 defect7374_enable_data_eps_zero(dev);
2284
2285 ep0_start(dev);
2286
2287 /* pci writes may still be posted */
2288 return 0;
2289
2290 err_func:
2291 device_remove_file(&dev->pdev->dev, &dev_attr_function);
2292 err_unbind:
2293 dev->driver = NULL;
2294 return retval;
2295 }
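#if 0
/*
 * Editor's sketch (illustrative only): the minimum a gadget driver
 * must provide to get past the checks in net2280_start() -- high
 * speed support and a setup() handler. All names here are
 * hypothetical, and a real driver registered through the gadget core
 * also needs bind()/unbind() and the other callbacks.
 */
static int example_setup(struct usb_gadget *gadget,
			 const struct usb_ctrlrequest *ctrl)
{
	return -EOPNOTSUPP;	/* stall every request; demo only */
}

static struct usb_gadget_driver example_gadget_driver = {
	.function = "example",
	.max_speed = USB_SPEED_HIGH,	/* anything lower is rejected */
	.setup = example_setup,
	.driver = {
		.name = "example_gadget",
	},
};
#endif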
2296
2297 static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
2298 {
2299 int i;
2300
2301 /* don't disconnect if it's not connected */
2302 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
2303 driver = NULL;
2304
2305 /* stop hardware; prevent new request submissions;
2306 * and kill any outstanding requests.
2307 */
2308 usb_reset(dev);
2309 for (i = 0; i < dev->n_ep; i++)
2310 nuke(&dev->ep[i]);
2311
2312 /* report disconnect; the driver is already quiesced */
2313 if (driver) {
2314 spin_unlock(&dev->lock);
2315 driver->disconnect(&dev->gadget);
2316 spin_lock(&dev->lock);
2317 }
2318
2319 usb_reinit(dev);
2320 }
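#if 0
/*
 * Editor's sketch (illustrative only): the unlock/callback/lock
 * pattern used in stop_activity() above. dev->lock must be dropped
 * around gadget-driver callbacks because they may re-enter the UDC
 * (e.g. to dequeue requests) and would otherwise deadlock. Caller
 * holds dev->lock; the helper name is hypothetical.
 */
static void notify_disconnect(struct net2280 *dev,
			      struct usb_gadget_driver *driver)
{
	spin_unlock(&dev->lock);
	driver->disconnect(&dev->gadget);
	spin_lock(&dev->lock);
}
#endif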
2321
2322 static int net2280_stop(struct usb_gadget *_gadget)
2323 {
2324 struct net2280 *dev;
2325 unsigned long flags;
2326
2327 dev = container_of(_gadget, struct net2280, gadget);
2328
2329 spin_lock_irqsave(&dev->lock, flags);
2330 stop_activity(dev, NULL);
2331 spin_unlock_irqrestore(&dev->lock, flags);
2332
2333 net2280_led_active(dev, 0);
2334
2335 device_remove_file(&dev->pdev->dev, &dev_attr_function);
2336 device_remove_file(&dev->pdev->dev, &dev_attr_queues);
2337
2338 dev->driver = NULL;
2339
2340 return 0;
2341 }
2342
2343 /*-------------------------------------------------------------------------*/
2344
2345 /* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2346 * also works for dma-capable endpoints, in pio mode or just
2347 * to manually advance the queue after short OUT transfers.
2348 */
2349 static void handle_ep_small(struct net2280_ep *ep)
2350 {
2351 struct net2280_request *req;
2352 u32 t;
2353 /* 0 error, 1 mid-data, 2 done */
2354 int mode = 1;
2355
2356 if (!list_empty(&ep->queue))
2357 req = list_entry(ep->queue.next,
2358 struct net2280_request, queue);
2359 else
2360 req = NULL;
2361
2362 /* ack all, and handle what we care about */
2363 t = readl(&ep->regs->ep_stat);
2364 ep->irqs++;
2365
2366 ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
2367 ep->ep.name, t, req ? &req->req : NULL);
2368
2369 if (!ep->is_in || (ep->dev->quirks & PLX_2280))
2370 writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
2371 else
2372 /* Added for 2282 */
2373 writel(t, &ep->regs->ep_stat);
2374
2375 /* for ep0, monitor token irqs to catch data stage length errors
2376 * and to synchronize on status.
2377 *
2378 * also, to defer reporting of protocol stalls ... here's where
2379 * data or status first appears, handling stalls here should never
2380 * cause trouble on the host side.
2381 *
2382 * control requests could be slightly faster without token synch for
2383 * status, but status can jam up that way.
2384 */
2385 if (unlikely(ep->num == 0)) {
2386 if (ep->is_in) {
2387 /* status; stop NAKing */
2388 if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
2389 if (ep->dev->protocol_stall) {
2390 ep->stopped = 1;
2391 set_halt(ep);
2392 }
2393 if (!req)
2394 allow_status(ep);
2395 mode = 2;
2396 /* reply to extra IN data tokens with a zlp */
2397 } else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
2398 if (ep->dev->protocol_stall) {
2399 ep->stopped = 1;
2400 set_halt(ep);
2401 mode = 2;
2402 } else if (ep->responded &&
2403 !req && !ep->stopped)
2404 write_fifo(ep, NULL);
2405 }
2406 } else {
2407 /* status; stop NAKing */
2408 if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
2409 if (ep->dev->protocol_stall) {
2410 ep->stopped = 1;
2411 set_halt(ep);
2412 }
2413 mode = 2;
2414 /* an extra OUT token is an error */
2415 } else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
2416 req &&
2417 req->req.actual == req->req.length) ||
2418 (ep->responded && !req)) {
2419 ep->dev->protocol_stall = 1;
2420 set_halt(ep);
2421 ep->stopped = 1;
2422 if (req)
2423 done(ep, req, -EOVERFLOW);
2424 req = NULL;
2425 }
2426 }
2427 }
2428
2429 if (unlikely(!req))
2430 return;
2431
2432 /* manual DMA queue advance after short OUT */
2433 if (likely(ep->dma)) {
2434 if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
2435 u32 count;
2436 int stopped = ep->stopped;
2437
2438 /* TRANSFERRED works around OUT_DONE erratum 0112.
2439 * we expect (N <= maxpacket) bytes; host wrote M.
2440 * iff (M < N) we won't ever see a DMA interrupt.
2441 */
2442 ep->stopped = 1;
2443 for (count = 0; ; t = readl(&ep->regs->ep_stat)) {
2444
2445 /* any preceding dma transfers must finish.
2446 * dma handles (M >= N), may empty the queue
2447 */
2448 scan_dma_completions(ep);
2449 if (unlikely(list_empty(&ep->queue) ||
2450 ep->out_overflow)) {
2451 req = NULL;
2452 break;
2453 }
2454 req = list_entry(ep->queue.next,
2455 struct net2280_request, queue);
2456
2457 /* here either (M < N), a "real" short rx;
2458 * or (M == N) and the queue didn't empty
2459 */
2460 if (likely(t & BIT(FIFO_EMPTY))) {
2461 count = readl(&ep->dma->dmacount);
2462 count &= DMA_BYTE_COUNT_MASK;
2463 if (readl(&ep->dma->dmadesc)
2464 != req->td_dma)
2465 req = NULL;
2466 break;
2467 }
2468 udelay(1);
2469 }
2470
2471 /* stop DMA, leave ep NAKing */
2472 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
2473 spin_stop_dma(ep->dma);
2474
2475 if (likely(req)) {
2476 req->td->dmacount = 0;
2477 t = readl(&ep->regs->ep_avail);
2478 dma_done(ep, req, count,
2479 (ep->out_overflow || t)
2480 ? -EOVERFLOW : 0);
2481 }
2482
2483 /* also flush to prevent erratum 0106 trouble */
2484 if (unlikely(ep->out_overflow ||
2485 (ep->dev->chiprev == 0x0100 &&
2486 ep->dev->gadget.speed
2487 == USB_SPEED_FULL))) {
2488 out_flush(ep);
2489 ep->out_overflow = 0;
2490 }
2491
2492 /* (re)start dma if needed, stop NAKing */
2493 ep->stopped = stopped;
2494 if (!list_empty(&ep->queue))
2495 restart_dma(ep);
2496 } else
2497 ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
2498 ep->ep.name, t);
2499 return;
2500
2501 /* data packet(s) received (in the fifo, OUT) */
2502 } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
2503 if (read_fifo(ep, req) && ep->num != 0)
2504 mode = 2;
2505
2506 /* data packet(s) transmitted (IN) */
2507 } else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
2508 unsigned len;
2509
2510 len = req->req.length - req->req.actual;
2511 if (len > ep->ep.maxpacket)
2512 len = ep->ep.maxpacket;
2513 req->req.actual += len;
2514
2515 /* if we wrote it all, we're usually done */
2516 /* send zlps until the status stage */
2517 if ((req->req.actual == req->req.length) &&
2518 (!req->req.zero || len != ep->ep.maxpacket) && ep->num)
2519 mode = 2;
2520
2521 /* there was nothing to do ... */
2522 } else if (mode == 1)
2523 return;
2524
2525 /* done */
2526 if (mode == 2) {
2527 /* stream endpoints often resubmit/unlink in completion */
2528 done(ep, req, 0);
2529
2530 /* maybe advance queue to next request */
2531 if (ep->num == 0) {
2532 /* NOTE: net2280 could let gadget driver start the
2533 * status stage later. since not all controllers let
2534 * them control that, the api doesn't (yet) allow it.
2535 */
2536 if (!ep->stopped)
2537 allow_status(ep);
2538 req = NULL;
2539 } else {
2540 if (!list_empty(&ep->queue) && !ep->stopped)
2541 req = list_entry(ep->queue.next,
2542 struct net2280_request, queue);
2543 else
2544 req = NULL;
2545 if (req && !ep->is_in)
2546 stop_out_naking(ep);
2547 }
2548 }
2549
2550 /* is there a buffer for the next packet?
2551 * for best streaming performance, make sure there is one.
2552 */
2553 if (req && !ep->stopped) {
2554
2555 /* load IN fifo with next packet (may be zlp) */
2556 if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
2557 write_fifo(ep, &req->req);
2558 }
2559 }
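#if 0
/*
 * Editor's sketch (illustrative only): the IN-completion test from
 * handle_ep_small() above, written out as a predicate. A request is
 * finished only when every byte went out and no trailing zero-length
 * packet is still owed (req->zero set with a full-size final packet).
 * The helper name is hypothetical.
 */
static bool in_transfer_done(struct net2280_request *req,
			     unsigned last_len, unsigned maxpacket)
{
	if (req->req.actual != req->req.length)
		return false;		/* more data to send */
	if (req->req.zero && last_len == maxpacket)
		return false;		/* short/zero packet still owed */
	return true;
}
#endif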
2560
2561 static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
2562 {
2563 struct net2280_ep *ep;
2564
2565 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2566 return &dev->ep[0];
2567 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
2568 u8 bEndpointAddress;
2569
2570 if (!ep->desc)
2571 continue;
2572 bEndpointAddress = ep->desc->bEndpointAddress;
2573 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2574 continue;
2575 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2576 return ep;
2577 }
2578 return NULL;
2579 }
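#if 0
/*
 * Editor's sketch (illustrative only): the matching rule used by
 * get_ep_by_addr() above. XORing the two addresses leaves USB_DIR_IN
 * set only when the directions differ, so wIndex 0x81 (IN ep 1)
 * matches bEndpointAddress 0x81 but not 0x01. The helper name is
 * hypothetical.
 */
static bool ep_addr_matches(u16 wIndex, u8 bEndpointAddress)
{
	if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
		return false;		/* direction mismatch */
	return (wIndex & 0x0f) == (bEndpointAddress & 0x0f);
}
#endif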
2580
2581 static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
2582 {
2583 u32 scratch, fsmvalue;
2584 u32 ack_wait_timeout, state;
2585
2586 /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
2587 scratch = get_idx_reg(dev->regs, SCRATCH);
2588 fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
2589 scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
2590
2591 if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
2592 (r.bRequestType & USB_DIR_IN)))
2593 return;
2594
2595 /* This is the first Control Read for this connection: */
2596 if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
2597 /*
2598 * Connection is NOT SS:
2599 * - Connection must be FS or HS.
2600 * - This FSM state should allow workaround software to
2601 * run after the next USB connection.
2602 */
2603 scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
2604 dev->bug7734_patched = 1;
2605 goto restore_data_eps;
2606 }
2607
2608 /* Connection is SS: */
2609 for (ack_wait_timeout = 0;
2610 ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
2611 ack_wait_timeout++) {
2612
2613 state = readl(&dev->plregs->pl_ep_status_1)
2614 & (0xff << STATE);
2615 if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
2616 (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
2617 scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
2618 dev->bug7734_patched = 1;
2619 break;
2620 }
2621
2622 /*
2623 * We have not yet received host's Data Phase ACK
2624 * - Wait and try again.
2625 */
2626 udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
2629 }
2630
2631
2632 if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
2633 ep_err(dev, "FAIL: Defect 7374 workaround waited but failed "
2634 "to detect SS host's data phase ACK\n");
2635 ep_err(dev, "PL_EP_STATUS_1(23:16): expected 0x11 to 0x16, "
2636 "got 0x%2.2x\n", state >> STATE);
2637 } else {
2638 ep_warn(dev, "INFO: Defect 7374 workaround waited about "
2639 "%d uSec for Control Read Data Phase ACK\n",
2640 DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
2641 }
2642
2643 restore_data_eps:
2644 /*
2645 * Restore data EPs to their pre-workaround settings (disabled,
2646 * initialized, and other details).
2647 */
2648 defect7374_disable_data_eps(dev);
2649
2650 set_idx_reg(dev->regs, SCRATCH, scratch);
2653 }
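#if 0
/*
 * Editor's sketch (illustrative only): the bounded poll from
 * defect7374_workaround() factored out. It spins at most
 * DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS times, waiting
 * DEFECT_7374_PROCESSOR_WAIT_TIME microseconds per pass, until the
 * link reports a good ACK for the control read data phase. The
 * helper name is hypothetical.
 */
static bool wait_for_data_phase_ack(struct net2280 *dev)
{
	u32 state, i;

	for (i = 0; i < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS; i++) {
		state = readl(&dev->plregs->pl_ep_status_1) &
			(0xff << STATE);
		if (state >= (ACK_GOOD_NORMAL << STATE) &&
		    state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))
			return true;
		udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
	}
	return false;
}
#endif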
2654
2655 static void ep_clear_seqnum(struct net2280_ep *ep)
2656 {
2657 struct net2280 *dev = ep->dev;
2658 u32 val;
2659 static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
2660
2661 val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
2662 val |= ep_pl[ep->num];
2663 writel(val, &dev->plregs->pl_ep_ctrl);
2664 val |= BIT(SEQUENCE_NUMBER_RESET);
2665 writel(val, &dev->plregs->pl_ep_ctrl);
2668 }
2669
2670 static void handle_stat0_irqs_superspeed(struct net2280 *dev,
2671 struct net2280_ep *ep, struct usb_ctrlrequest r)
2672 {
2673 int tmp = 0;
2674
2675 #define w_value le16_to_cpu(r.wValue)
2676 #define w_index le16_to_cpu(r.wIndex)
2677 #define w_length le16_to_cpu(r.wLength)
2678
2679 switch (r.bRequest) {
2680 struct net2280_ep *e;
2681 u16 status;
2682
2683 case USB_REQ_SET_CONFIGURATION:
2684 dev->addressed_state = !w_value;
2685 goto usb3_delegate;
2686
2687 case USB_REQ_GET_STATUS:
2688 switch (r.bRequestType) {
2689 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2690 status = dev->wakeup_enable ? 0x02 : 0x00;
2691 if (dev->gadget.is_selfpowered)
2692 status |= BIT(0);
2693 status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
2694 dev->ltm_enable << 4);
2695 writel(0, &dev->epregs[0].ep_irqenb);
2696 set_fifo_bytecount(ep, sizeof(status));
2697 writel((__force u32) status, &dev->epregs[0].ep_data);
2698 allow_status_338x(ep);
2699 break;
2700
2701 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2702 e = get_ep_by_addr(dev, w_index);
2703 if (!e)
2704 goto do_stall3;
2705 status = readl(&e->regs->ep_rsp) &
2706 BIT(CLEAR_ENDPOINT_HALT);
2707 writel(0, &dev->epregs[0].ep_irqenb);
2708 set_fifo_bytecount(ep, sizeof(status));
2709 writel((__force u32) status, &dev->epregs[0].ep_data);
2710 allow_status_338x(ep);
2711 break;
2712
2713 default:
2714 goto usb3_delegate;
2715 }
2716 break;
2717
2718 case USB_REQ_CLEAR_FEATURE:
2719 switch (r.bRequestType) {
2720 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2721 if (!dev->addressed_state) {
2722 switch (w_value) {
2723 case USB_DEVICE_U1_ENABLE:
2724 dev->u1_enable = 0;
2725 writel(readl(&dev->usb_ext->usbctl2) &
2726 ~BIT(U1_ENABLE),
2727 &dev->usb_ext->usbctl2);
2728 allow_status_338x(ep);
2729 goto next_endpoints3;
2730
2731 case USB_DEVICE_U2_ENABLE:
2732 dev->u2_enable = 0;
2733 writel(readl(&dev->usb_ext->usbctl2) &
2734 ~BIT(U2_ENABLE),
2735 &dev->usb_ext->usbctl2);
2736 allow_status_338x(ep);
2737 goto next_endpoints3;
2738
2739 case USB_DEVICE_LTM_ENABLE:
2740 dev->ltm_enable = 0;
2741 writel(readl(&dev->usb_ext->usbctl2) &
2742 ~BIT(LTM_ENABLE),
2743 &dev->usb_ext->usbctl2);
2744 allow_status_338x(ep);
2745 goto next_endpoints3;
2746
2747 default:
2748 break;
2749 }
2750 }
2751 if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
2752 dev->wakeup_enable = 0;
2753 writel(readl(&dev->usb->usbctl) &
2754 ~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
2755 &dev->usb->usbctl);
2756 allow_status_338x(ep);
2757 break;
2758 }
2759 goto usb3_delegate;
2760
2761 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2762 e = get_ep_by_addr(dev, w_index);
2763 if (!e)
2764 goto do_stall3;
2765 if (w_value != USB_ENDPOINT_HALT)
2766 goto do_stall3;
2767 ep_vdbg(dev, "%s clear halt\n", e->ep.name);
2768 /*
2769 * Workaround for SS SeqNum not cleared via
2770 * Endpoint Halt (Clear) bit. select endpoint
2771 */
2772 ep_clear_seqnum(e);
2773 clear_halt(e);
2774 if (!list_empty(&e->queue) && e->td_dma)
2775 restart_dma(e);
2776 allow_status(ep);
2777 ep->stopped = 1;
2778 break;
2779
2780 default:
2781 goto usb3_delegate;
2782 }
2783 break;
2784 case USB_REQ_SET_FEATURE:
2785 switch (r.bRequestType) {
2786 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2787 if (!dev->addressed_state) {
2788 switch (w_value) {
2789 case USB_DEVICE_U1_ENABLE:
2790 dev->u1_enable = 1;
2791 writel(readl(&dev->usb_ext->usbctl2) |
2792 BIT(U1_ENABLE),
2793 &dev->usb_ext->usbctl2);
2794 allow_status_338x(ep);
2795 goto next_endpoints3;
2796
2797 case USB_DEVICE_U2_ENABLE:
2798 dev->u2_enable = 1;
2799 writel(readl(&dev->usb_ext->usbctl2) |
2800 BIT(U2_ENABLE),
2801 &dev->usb_ext->usbctl2);
2802 allow_status_338x(ep);
2803 goto next_endpoints3;
2804
2805 case USB_DEVICE_LTM_ENABLE:
2806 dev->ltm_enable = 1;
2807 writel(readl(&dev->usb_ext->usbctl2) |
2808 BIT(LTM_ENABLE),
2809 &dev->usb_ext->usbctl2);
2810 allow_status_338x(ep);
2811 goto next_endpoints3;
2812 default:
2813 break;
2814 }
2815 }
2816
2817 if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
2818 dev->wakeup_enable = 1;
2819 writel(readl(&dev->usb->usbctl) |
2820 BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
2821 &dev->usb->usbctl);
2822 allow_status_338x(ep);
2823 break;
2824 }
2825 goto usb3_delegate;
2826
2827 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2828 e = get_ep_by_addr(dev, w_index);
2829 if (!e || (w_value != USB_ENDPOINT_HALT))
2830 goto do_stall3;
2831 ep->stopped = 1;
2832 if (ep->num == 0)
2833 ep->dev->protocol_stall = 1;
2834 else {
2835 if (ep->dma)
2836 abort_dma(ep);
2837 set_halt(ep);
2838 }
2839 allow_status_338x(ep);
2840 break;
2841
2842 default:
2843 goto usb3_delegate;
2844 }
2845
2846 break;
2847 default:
2848
2849 usb3_delegate:
2850 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
2851 r.bRequestType, r.bRequest,
2852 w_value, w_index, w_length,
2853 readl(&ep->cfg->ep_cfg));
2854
2855 ep->responded = 0;
2856 spin_unlock(&dev->lock);
2857 tmp = dev->driver->setup(&dev->gadget, &r);
2858 spin_lock(&dev->lock);
2859 }
2860 do_stall3:
2861 if (tmp < 0) {
2862 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
2863 r.bRequestType, r.bRequest, tmp);
2864 dev->protocol_stall = 1;
2865 /* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
2866 set_halt(ep);
2867 }
2868
2869 next_endpoints3:
2870
2871 #undef w_value
2872 #undef w_index
2873 #undef w_length
2874
2875 return;
2876 }
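#if 0
/*
 * Editor's sketch (illustrative only): the SuperSpeed GET_STATUS
 * device word assembled above -- bit 0 self-powered, bit 1 remote
 * wakeup, bits 2..4 the U1/U2/LTM enables. The helper name is
 * hypothetical.
 */
static u16 ss_device_status(struct net2280 *dev)
{
	u16 status = dev->wakeup_enable ? BIT(1) : 0;

	if (dev->gadget.is_selfpowered)
		status |= BIT(0);
	status |= dev->u1_enable << 2 | dev->u2_enable << 3 |
			dev->ltm_enable << 4;
	return status;
}
#endif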
2877
2878 static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
2879 {
2880 struct net2280_ep *ep;
2881 u32 num, scratch;
2882
2883 /* most of these don't need individual acks */
2884 stat &= ~BIT(INTA_ASSERTED);
2885 if (!stat)
2886 return;
2887 /* ep_dbg(dev, "irqstat0 %04x\n", stat); */
2888
2889 /* starting a control request? */
2890 if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
2891 union {
2892 u32 raw[2];
2893 struct usb_ctrlrequest r;
2894 } u;
2895 int tmp;
2896 struct net2280_request *req;
2897
2898 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
2899 u32 val = readl(&dev->usb->usbstat);
2900 if (val & BIT(SUPER_SPEED)) {
2901 dev->gadget.speed = USB_SPEED_SUPER;
2902 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2903 EP0_SS_MAX_PACKET_SIZE);
2904 } else if (val & BIT(HIGH_SPEED)) {
2905 dev->gadget.speed = USB_SPEED_HIGH;
2906 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2907 EP0_HS_MAX_PACKET_SIZE);
2908 } else {
2909 dev->gadget.speed = USB_SPEED_FULL;
2910 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2911 EP0_HS_MAX_PACKET_SIZE);
2912 }
2913 net2280_led_speed(dev, dev->gadget.speed);
2914 ep_dbg(dev, "%s\n",
2915 usb_speed_string(dev->gadget.speed));
2916 }
2917
2918 ep = &dev->ep[0];
2919 ep->irqs++;
2920
2921 /* make sure any leftover request state is cleared */
2922 stat &= ~BIT(ENDPOINT_0_INTERRUPT);
2923 while (!list_empty(&ep->queue)) {
2924 req = list_entry(ep->queue.next,
2925 struct net2280_request, queue);
2926 done(ep, req, (req->req.actual == req->req.length)
2927 ? 0 : -EPROTO);
2928 }
2929 ep->stopped = 0;
2930 dev->protocol_stall = 0;
2931 if (!(dev->quirks & PLX_SUPERSPEED)) {
2932 if (ep->dev->quirks & PLX_2280)
2933 tmp = BIT(FIFO_OVERFLOW) |
2934 BIT(FIFO_UNDERFLOW);
2935 else
2936 tmp = 0;
2937
2938 writel(tmp | BIT(TIMEOUT) |
2939 BIT(USB_STALL_SENT) |
2940 BIT(USB_IN_NAK_SENT) |
2941 BIT(USB_IN_ACK_RCVD) |
2942 BIT(USB_OUT_PING_NAK_SENT) |
2943 BIT(USB_OUT_ACK_SENT) |
2944 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
2945 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
2946 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
2947 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
2948 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
2949 BIT(DATA_IN_TOKEN_INTERRUPT),
2950 &ep->regs->ep_stat);
2951 }
2952 u.raw[0] = readl(&dev->usb->setup0123);
2953 u.raw[1] = readl(&dev->usb->setup4567);
2954
2955 cpu_to_le32s(&u.raw[0]);
2956 cpu_to_le32s(&u.raw[1]);
2957
2958 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
2959 defect7374_workaround(dev, u.r);
2960
2961 tmp = 0;
2962
2963 #define w_value le16_to_cpu(u.r.wValue)
2964 #define w_index le16_to_cpu(u.r.wIndex)
2965 #define w_length le16_to_cpu(u.r.wLength)
2966
2967 /* ack the irq */
2968 writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
2969 stat ^= BIT(SETUP_PACKET_INTERRUPT);
2970
2971 /* watch control traffic at the token level, and force
2972 * synchronization before letting the status stage happen.
2973 * FIXME ignore tokens we'll NAK, until driver responds.
2974 * that'll mean a lot less irqs for some drivers.
2975 */
2976 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
2977 if (ep->is_in) {
2978 scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
2979 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
2980 BIT(DATA_IN_TOKEN_INTERRUPT);
2981 stop_out_naking(ep);
2982 } else
2983 scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
2984 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
2985 BIT(DATA_IN_TOKEN_INTERRUPT);
2986 writel(scratch, &dev->epregs[0].ep_irqenb);
2987
2988 /* we made the hardware handle most lowlevel requests;
2989 * everything else goes uplevel to the gadget code.
2990 */
2991 ep->responded = 1;
2992
2993 if (dev->gadget.speed == USB_SPEED_SUPER) {
2994 handle_stat0_irqs_superspeed(dev, ep, u.r);
2995 goto next_endpoints;
2996 }
2997
2998 switch (u.r.bRequest) {
2999 case USB_REQ_GET_STATUS: {
3000 struct net2280_ep *e;
3001 __le32 status;
3002
3003 /* hw handles device and interface status */
3004 if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
3005 goto delegate;
3006 e = get_ep_by_addr(dev, w_index);
3007 if (!e || w_length > 2)
3008 goto do_stall;
3009
3010 if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
3011 status = cpu_to_le32(1);
3012 else
3013 status = cpu_to_le32(0);
3014
3015 /* don't bother with a request object! */
3016 writel(0, &dev->epregs[0].ep_irqenb);
3017 set_fifo_bytecount(ep, w_length);
3018 writel((__force u32)status, &dev->epregs[0].ep_data);
3019 allow_status(ep);
3020 ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status);
3021 goto next_endpoints;
3022 }
3023 break;
3024 case USB_REQ_CLEAR_FEATURE: {
3025 struct net2280_ep *e;
3026
3027 /* hw handles device features */
3028 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
3029 goto delegate;
3030 if (w_value != USB_ENDPOINT_HALT || w_length != 0)
3031 goto do_stall;
3032 e = get_ep_by_addr(dev, w_index);
3033 if (!e)
3034 goto do_stall;
3035 if (e->wedged) {
3036 ep_vdbg(dev, "%s wedged, halt not cleared\n",
3037 ep->ep.name);
3038 } else {
3039 ep_vdbg(dev, "%s clear halt\n", e->ep.name);
3040 clear_halt(e);
3041 if ((ep->dev->quirks & PLX_SUPERSPEED) &&
3042 !list_empty(&e->queue) && e->td_dma)
3043 restart_dma(e);
3044 }
3045 allow_status(ep);
3046 goto next_endpoints;
3047 }
3048 break;
3049 case USB_REQ_SET_FEATURE: {
3050 struct net2280_ep *e;
3051
3052 /* hw handles device features */
3053 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
3054 goto delegate;
3055 if (w_value != USB_ENDPOINT_HALT || w_length != 0)
3056 goto do_stall;
3057 e = get_ep_by_addr(dev, w_index);
3058 if (!e)
3059 goto do_stall;
3060 if (e->ep.name == ep0name)
3061 goto do_stall;
3062 set_halt(e);
3063 if ((dev->quirks & PLX_SUPERSPEED) && e->dma)
3064 abort_dma(e);
3065 allow_status(ep);
3066 ep_vdbg(dev, "%s set halt\n", ep->ep.name);
3067 goto next_endpoints;
3068 }
3069 break;
3070 default:
3071 delegate:
3072 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x "
3073 "ep_cfg %08x\n",
3074 u.r.bRequestType, u.r.bRequest,
3075 w_value, w_index, w_length,
3076 readl(&ep->cfg->ep_cfg));
3077 ep->responded = 0;
3078 spin_unlock(&dev->lock);
3079 tmp = dev->driver->setup(&dev->gadget, &u.r);
3080 spin_lock(&dev->lock);
3081 }
3082
3083 /* stall ep0 on error */
3084 if (tmp < 0) {
3085 do_stall:
3086 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
3087 u.r.bRequestType, u.r.bRequest, tmp);
3088 dev->protocol_stall = 1;
3089 }
3090
3091 /* some in/out token irq should follow; maybe stall then.
3092 * driver must queue a request (even zlp) or halt ep0
3093 * before the host times out.
3094 */
3095 }
3096
3097 #undef w_value
3098 #undef w_index
3099 #undef w_length
3100
3101 next_endpoints:
3102 /* endpoint data irq ? */
3103 scratch = stat & 0x7f;
3104 stat &= ~0x7f;
3105 for (num = 0; scratch; num++) {
3106 u32 t;
3107
3108 /* do this endpoint's FIFO and queue need tending? */
3109 t = BIT(num);
3110 if ((scratch & t) == 0)
3111 continue;
3112 scratch ^= t;
3113
3114 ep = &dev->ep[num];
3115 handle_ep_small(ep);
3116 }
3117
3118 if (stat)
3119 ep_dbg(dev, "unhandled irqstat0 %08x\n", stat);
3120 }
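#if 0
/*
 * Editor's sketch (illustrative only): how the 8-byte SETUP packet is
 * recovered in handle_stat0_irqs() above. The chip latches it in two
 * 32-bit registers; reading both and byte-swapping to little-endian
 * yields a struct usb_ctrlrequest. The helper name is hypothetical.
 */
static void read_setup_packet(struct net2280 *dev,
			      struct usb_ctrlrequest *r)
{
	union {
		u32 raw[2];
		struct usb_ctrlrequest r;
	} u;

	u.raw[0] = readl(&dev->usb->setup0123);
	u.raw[1] = readl(&dev->usb->setup4567);
	cpu_to_le32s(&u.raw[0]);
	cpu_to_le32s(&u.raw[1]);
	*r = u.r;
}
#endif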
3121
3122 #define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
3123 BIT(DMA_C_INTERRUPT) | \
3124 BIT(DMA_B_INTERRUPT) | \
3125 BIT(DMA_A_INTERRUPT))
3126 #define PCI_ERROR_INTERRUPTS ( \
3127 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
3128 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
3129 BIT(PCI_RETRY_ABORT_INTERRUPT))
3130
3131 static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3132 {
3133 struct net2280_ep *ep;
3134 u32 tmp, num, mask, scratch;
3135
3136 /* after disconnect there's nothing else to do! */
3137 tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
3138 mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);
3139
3140 /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
3141 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
3142 * SUPER_SPEED, HIGH_SPEED and FULL_SPEED all clear (as ROOT_PORT_RESET_INTERRUPT
3143 * only indicates a change in the reset state).
3144 */
3145 if (stat & tmp) {
3146 bool reset = false;
3147 bool disconnect = false;
3148
3149 /*
3150 * Ignore disconnects and resets if the speed hasn't been set.
3151 * VBUS can bounce and there's always an initial reset.
3152 */
3153 writel(tmp, &dev->regs->irqstat1);
3154 if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
3155 if ((stat & BIT(VBUS_INTERRUPT)) &&
3156 (readl(&dev->usb->usbctl) &
3157 BIT(VBUS_PIN)) == 0) {
3158 disconnect = true;
3159 ep_dbg(dev, "disconnect %s\n",
3160 dev->driver->driver.name);
3161 } else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
3162 (readl(&dev->usb->usbstat) & mask)
3163 == 0) {
3164 reset = true;
3165 ep_dbg(dev, "reset %s\n",
3166 dev->driver->driver.name);
3167 }
3168
3169 if (disconnect || reset) {
3170 stop_activity(dev, dev->driver);
3171 ep0_start(dev);
3172 spin_unlock(&dev->lock);
3173 if (reset)
3174 usb_gadget_udc_reset
3175 (&dev->gadget, dev->driver);
3176 else
3177 (dev->driver->disconnect)
3178 (&dev->gadget);
3179 spin_lock(&dev->lock);
3180 return;
3181 }
3182 }
3183 stat &= ~tmp;
3184
3185 /* vBUS can bounce ... one of many reasons to ignore the
3186 * notion of hotplug events on bus connect/disconnect!
3187 */
3188 if (!stat)
3189 return;
3190 }
3191
3192 /* NOTE: chip stays in PCI D0 state for now, but it could
3193 * enter D1 to save more power
3194 */
3195 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
3196 if (stat & tmp) {
3197 writel(tmp, &dev->regs->irqstat1);
3198 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
3199 if (dev->driver->suspend)
3200 dev->driver->suspend(&dev->gadget);
3201 if (!enable_suspend)
3202 stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
3203 } else {
3204 if (dev->driver->resume)
3205 dev->driver->resume(&dev->gadget);
3206 /* at high speed, note erratum 0133 */
3207 }
3208 stat &= ~tmp;
3209 }
3210
3211 /* clear any other status/irqs */
3212 if (stat)
3213 writel(stat, &dev->regs->irqstat1);
3214
3215 /* some status we can just ignore */
3216 if (dev->quirks & PLX_2280)
3217 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
3218 BIT(SUSPEND_REQUEST_INTERRUPT) |
3219 BIT(RESUME_INTERRUPT) |
3220 BIT(SOF_INTERRUPT));
3221 else
3222 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
3223 BIT(RESUME_INTERRUPT) |
3224 BIT(SOF_DOWN_INTERRUPT) |
3225 BIT(SOF_INTERRUPT));
3226
3227 if (!stat)
3228 return;
3229 /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/
3230
3231 /* DMA status, for ep-{a,b,c,d} */
3232 scratch = stat & DMA_INTERRUPTS;
3233 stat &= ~DMA_INTERRUPTS;
3234 scratch >>= 9;
3235 for (num = 0; scratch; num++) {
3236 struct net2280_dma_regs __iomem *dma;
3237
3238 tmp = BIT(num);
3239 if ((tmp & scratch) == 0)
3240 continue;
3241 scratch ^= tmp;
3242
3243 ep = &dev->ep[num + 1];
3244 dma = ep->dma;
3245
3246 if (!dma)
3247 continue;
3248
3249 /* clear ep's dma status */
3250 tmp = readl(&dma->dmastat);
3251 writel(tmp, &dma->dmastat);
3252
3253 /* dma sync*/
3254 if (dev->quirks & PLX_SUPERSPEED) {
3255 u32 r_dmacount = readl(&dma->dmacount);
3256 if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
3257 (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
3258 continue;
3259 }
3260
3261 if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
3262 ep_dbg(ep->dev, "%s no xact done? %08x\n",
3263 ep->ep.name, tmp);
3264 continue;
3265 }
3266 stop_dma(ep->dma);
3267
3268 /* OUT transfers terminate when the data from the
3269 * host is in our memory. Process whatever's done.
3270 * On this path, we know transfer's last packet wasn't
3271 * less than req->length. NAK_OUT_PACKETS may be set,
3272 * or the FIFO may already be holding new packets.
3273 *
3274 * IN transfers can linger in the FIFO for a very
3275 * long time ... we ignore that for now, accounting
3276 * precisely (like PIO does) needs per-packet irqs
3277 */
3278 scan_dma_completions(ep);
3279
3280 /* disable dma on inactive queues; else maybe restart */
3281 if (!list_empty(&ep->queue)) {
3282 tmp = readl(&dma->dmactl);
3283 restart_dma(ep);
3284 }
3285 ep->irqs++;
3286 }
3287
3288 /* NOTE: there are other PCI errors we might usefully notice.
3289 * if they appear very often, here's where to try recovering.
3290 */
3291 if (stat & PCI_ERROR_INTERRUPTS) {
3292 ep_err(dev, "pci dma error; stat %08x\n", stat);
3293 stat &= ~PCI_ERROR_INTERRUPTS;
3294 /* these are fatal errors, but "maybe" they won't
3295 * happen again ...
3296 */
3297 stop_activity(dev, dev->driver);
3298 ep0_start(dev);
3299 stat = 0;
3300 }
3301
3302 if (stat)
3303 ep_dbg(dev, "unhandled irqstat1 %08x\n", stat);
3304 }
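#if 0
/*
 * Editor's sketch (illustrative only): irqstat1 packs the DMA-done
 * bits for channels A..D starting at bit 9, which is why the loop in
 * handle_stat1_irqs() shifts the masked status right by 9; channel n
 * then serves ep[n + 1]. The helper name is hypothetical.
 */
static struct net2280_ep *first_dma_done_ep(struct net2280 *dev, u32 stat1)
{
	u32 chan = (stat1 & DMA_INTERRUPTS) >> 9;

	/* ffs() is 1-based, so channel 0 maps to ep[1], etc. */
	return chan ? &dev->ep[ffs(chan)] : NULL;
}
#endif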
3305
3306 static irqreturn_t net2280_irq(int irq, void *_dev)
3307 {
3308 struct net2280 *dev = _dev;
3309
3310 /* shared interrupt, not ours */
3311 if ((dev->quirks & PLX_LEGACY) &&
3312 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
3313 return IRQ_NONE;
3314
3315 spin_lock(&dev->lock);
3316
3317 /* handle disconnect, dma, and more */
3318 handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));
3319
3320 /* control requests and PIO */
3321 handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
3322
3323 if (dev->quirks & PLX_SUPERSPEED) {
3324 /* re-enable interrupt to trigger any possible new interrupt */
3325 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
3326 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
3327 writel(pciirqenb1, &dev->regs->pciirqenb1);
3328 }
3329
3330 spin_unlock(&dev->lock);
3331
3332 return IRQ_HANDLED;
3333 }
3334
3335 /*-------------------------------------------------------------------------*/
3336
3337 static void gadget_release(struct device *_dev)
3338 {
3339 struct net2280 *dev = dev_get_drvdata(_dev);
3340
3341 kfree(dev);
3342 }
3343
3344 /* tear down the binding between this driver and the pci device */
3345
3346 static void net2280_remove(struct pci_dev *pdev)
3347 {
3348 struct net2280 *dev = pci_get_drvdata(pdev);
3349
3350 usb_del_gadget_udc(&dev->gadget);
3351
3352 BUG_ON(dev->driver);
3353
3354 /* then clean up the resources we allocated during probe() */
3355 net2280_led_shutdown(dev);
3356 if (dev->requests) {
3357 int i;
3358 for (i = 1; i < 5; i++) {
3359 if (!dev->ep[i].dummy)
3360 continue;
3361 pci_pool_free(dev->requests, dev->ep[i].dummy,
3362 dev->ep[i].td_dma);
3363 }
3364 pci_pool_destroy(dev->requests);
3365 }
3366 if (dev->got_irq)
3367 free_irq(pdev->irq, dev);
3368 if (dev->quirks & PLX_SUPERSPEED)
3369 pci_disable_msi(pdev);
3370 if (dev->regs)
3371 iounmap(dev->regs);
3372 if (dev->region)
3373 release_mem_region(pci_resource_start(pdev, 0),
3374 pci_resource_len(pdev, 0));
3375 if (dev->enabled)
3376 pci_disable_device(pdev);
3377 device_remove_file(&pdev->dev, &dev_attr_registers);
3378
3379 ep_info(dev, "unbind\n");
3380 }
3381
3382 /* wrap this driver around the specified device, but
3383 * don't respond over USB until a gadget driver binds to us.
3384 */
3385
3386 static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3387 {
3388 struct net2280 *dev;
3389 unsigned long resource, len;
3390 void __iomem *base = NULL;
3391 int retval, i;
3392
3393 /* alloc, and start init */
3394 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3395 if (dev == NULL) {
3396 retval = -ENOMEM;
3397 goto done;
3398 }
3399
3400 pci_set_drvdata(pdev, dev);
3401 spin_lock_init(&dev->lock);
3402 dev->quirks = id->driver_data;
3403 dev->pdev = pdev;
3404 dev->gadget.ops = &net2280_ops;
3405 dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
3406 USB_SPEED_SUPER : USB_SPEED_HIGH;
3407
3408 /* the "gadget" abstracts/virtualizes the controller */
3409 dev->gadget.name = driver_name;
3410
3411 /* now all the pci goodies ... */
3412 if (pci_enable_device(pdev) < 0) {
3413 retval = -ENODEV;
3414 goto done;
3415 }
3416 dev->enabled = 1;
3417
3418 /* BAR 0 holds all the registers
3419 * BAR 1 is 8051 memory; unused here (note erratum 0103)
3420 * BAR 2 is fifo memory; unused here
3421 */
3422 resource = pci_resource_start(pdev, 0);
3423 len = pci_resource_len(pdev, 0);
3424 if (!request_mem_region(resource, len, driver_name)) {
3425 ep_dbg(dev, "controller already in use\n");
3426 retval = -EBUSY;
3427 goto done;
3428 }
3429 dev->region = 1;
3430
3431 /* FIXME provide firmware download interface to put
3432 * 8051 code into the chip, e.g. to turn on PCI PM.
3433 */
3434
3435 base = ioremap_nocache(resource, len);
3436 if (base == NULL) {
3437 ep_dbg(dev, "can't map memory\n");
3438 retval = -EFAULT;
3439 goto done;
3440 }
3441 dev->regs = (struct net2280_regs __iomem *) base;
3442 dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
3443 dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
3444 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
3445 dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
3446 dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
3447
3448 if (dev->quirks & PLX_SUPERSPEED) {
3449 u32 fsmvalue;
3450 u32 usbstat;
3451 dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
3452 (base + 0x00b4);
3453 dev->llregs = (struct usb338x_ll_regs __iomem *)
3454 (base + 0x0700);
3455 dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
3456 (base + 0x0748);
3457 dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
3458 (base + 0x077c);
3459 dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
3460 (base + 0x079c);
3461 dev->plregs = (struct usb338x_pl_regs __iomem *)
3462 (base + 0x0800);
3463 usbstat = readl(&dev->usb->usbstat);
3464 dev->enhanced_mode = !!(usbstat & BIT(11));
3465 dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
3466 /* put into initial config, link up all endpoints */
3467 fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
3468 (0xf << DEFECT7374_FSM_FIELD);
3469 /* See if firmware needs to set up for workaround: */
3470 if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
3471 dev->bug7734_patched = 1;
3472 writel(0, &dev->usb->usbctl);
3473 } else
3474 dev->bug7734_patched = 0;
3475 } else {
3476 dev->enhanced_mode = 0;
3477 dev->n_ep = 7;
3478 /* put into initial config, link up all endpoints */
3479 writel(0, &dev->usb->usbctl);
3480 }
3481
3482 usb_reset(dev);
3483 usb_reinit(dev);
3484
3485 /* irq setup after old hardware is cleaned up */
3486 if (!pdev->irq) {
3487 ep_err(dev, "No IRQ. Check PCI setup!\n");
3488 retval = -ENODEV;
3489 goto done;
3490 }
3491
3492 if (dev->quirks & PLX_SUPERSPEED)
3493 if (pci_enable_msi(pdev))
3494 ep_err(dev, "Failed to enable MSI mode\n");
3495
3496 if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
3497 driver_name, dev)) {
3498 ep_err(dev, "request interrupt %d failed\n", pdev->irq);
3499 retval = -EBUSY;
3500 goto done;
3501 }
3502 dev->got_irq = 1;
3503
3504 /* DMA setup */
3505 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
3506 dev->requests = pci_pool_create("requests", pdev,
3507 sizeof(struct net2280_dma),
3508 0 /* no alignment requirements */,
3509 0 /* or page-crossing issues */);
3510 if (!dev->requests) {
3511 ep_dbg(dev, "can't get request pool\n");
3512 retval = -ENOMEM;
3513 goto done;
3514 }
3515 for (i = 1; i < 5; i++) {
3516 struct net2280_dma *td;
3517
3518 td = pci_pool_alloc(dev->requests, GFP_KERNEL,
3519 &dev->ep[i].td_dma);
3520 if (!td) {
3521 ep_dbg(dev, "can't get dummy %d\n", i);
3522 retval = -ENOMEM;
3523 goto done;
3524 }
3525 td->dmacount = 0; /* not VALID */
3526 td->dmadesc = td->dmaaddr;
3527 dev->ep[i].dummy = td;
3528 }
3529
3530 /* enable lower-overhead pci memory bursts during DMA */
3531 if (dev->quirks & PLX_LEGACY)
3532 writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
3533 /*
3534 * 256 write retries may not be enough...
3535 BIT(PCI_RETRY_ABORT_ENABLE) |
3536 */
3537 BIT(DMA_READ_MULTIPLE_ENABLE) |
3538 BIT(DMA_READ_LINE_ENABLE),
3539 &dev->pci->pcimstctl);
3540 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
3541 pci_set_master(pdev);
3542 pci_try_set_mwi(pdev);
3543
3544 /* ... also flushes any posted pci writes */
3545 dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;
3546
3547 /* done */
3548 ep_info(dev, "%s\n", driver_desc);
3549 ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
3550 pdev->irq, base, dev->chiprev);
3551 ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
3552 dev->enhanced_mode ? "enhanced mode" : "legacy mode");
3553 retval = device_create_file(&pdev->dev, &dev_attr_registers);
3554 if (retval)
3555 goto done;
3556
3557 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
3558 gadget_release);
3559 if (retval)
3560 goto done;
3561 return 0;
3562
3563 done:
3564 if (dev)
3565 net2280_remove(pdev);
3566 return retval;
3567 }
3568
3569 /* make sure the board is quiescent; otherwise it will continue
3570 * generating IRQs across the upcoming reboot.
3571 */
3572
3573 static void net2280_shutdown(struct pci_dev *pdev)
3574 {
3575 struct net2280 *dev = pci_get_drvdata(pdev);
3576
3577 /* disable IRQs */
3578 writel(0, &dev->regs->pciirqenb0);
3579 writel(0, &dev->regs->pciirqenb1);
3580
3581 /* disable the pullup so the host will think we're gone */
3582 writel(0, &dev->usb->usbctl);
3583
3584 }
3585
3586
3587 /*-------------------------------------------------------------------------*/
3588
3589 static const struct pci_device_id pci_ids[] = { {
3590 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3591 .class_mask = ~0,
3592 .vendor = PCI_VENDOR_ID_PLX_LEGACY,
3593 .device = 0x2280,
3594 .subvendor = PCI_ANY_ID,
3595 .subdevice = PCI_ANY_ID,
3596 .driver_data = PLX_LEGACY | PLX_2280,
3597 }, {
3598 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3599 .class_mask = ~0,
3600 .vendor = PCI_VENDOR_ID_PLX_LEGACY,
3601 .device = 0x2282,
3602 .subvendor = PCI_ANY_ID,
3603 .subdevice = PCI_ANY_ID,
3604 .driver_data = PLX_LEGACY,
3605 },
3606 {
3607 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3608 .class_mask = ~0,
3609 .vendor = PCI_VENDOR_ID_PLX,
3610 .device = 0x3380,
3611 .subvendor = PCI_ANY_ID,
3612 .subdevice = PCI_ANY_ID,
3613 .driver_data = PLX_SUPERSPEED,
3614 },
3615 {
3616 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3617 .class_mask = ~0,
3618 .vendor = PCI_VENDOR_ID_PLX,
3619 .device = 0x3382,
3620 .subvendor = PCI_ANY_ID,
3621 .subdevice = PCI_ANY_ID,
3622 .driver_data = PLX_SUPERSPEED,
3623 },
3624 { /* end: all zeroes */ }
3625 };
3626 MODULE_DEVICE_TABLE(pci, pci_ids);
3627
3628 /* pci driver glue; this is a "new style" PCI driver module */
3629 static struct pci_driver net2280_pci_driver = {
3630 .name = (char *) driver_name,
3631 .id_table = pci_ids,
3632
3633 .probe = net2280_probe,
3634 .remove = net2280_remove,
3635 .shutdown = net2280_shutdown,
3636
3637 /* FIXME add power management support */
3638 };
3639
3640 module_pci_driver(net2280_pci_driver);
3641
3642 MODULE_DESCRIPTION(DRIVER_DESC);
3643 MODULE_AUTHOR("David Brownell");
3644 MODULE_LICENSE("GPL");