USB: net2272: driver for PLX NET2272 USB device controller
drivers/usb/gadget/net2272.c
1 /*
2 * Driver for PLX NET2272 USB device controller
3 *
4 * Copyright (C) 2005-2006 PLX Technology, Inc.
5 * Copyright (C) 2006-2011 Analog Devices, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/errno.h>
25 #include <linux/gpio.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/io.h>
29 #include <linux/ioport.h>
30 #include <linux/irq.h>
31 #include <linux/kernel.h>
32 #include <linux/list.h>
33 #include <linux/module.h>
34 #include <linux/moduleparam.h>
35 #include <linux/pci.h>
36 #include <linux/platform_device.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/timer.h>
40 #include <linux/usb.h>
41 #include <linux/usb/ch9.h>
42 #include <linux/usb/gadget.h>
43
44 #include <asm/byteorder.h>
45 #include <asm/system.h>
46 #include <asm/unaligned.h>
47
48 #include "net2272.h"
49
50 #define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
51
52 static const char driver_name[] = "net2272";
53 static const char driver_vers[] = "2006 October 17/mainline";
54 static const char driver_desc[] = DRIVER_DESC;
55
56 static const char ep0name[] = "ep0";
57 static const char * const ep_name[] = {
58 ep0name,
59 "ep-a", "ep-b", "ep-c",
60 };
61
62 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
63 #ifdef CONFIG_USB_GADGET_NET2272_DMA
64 /*
65 * use_dma: the NET2272 can use an external DMA controller.
66 * Note that since there is no generic DMA api, some functions,
67 * notably request_dma, start_dma, and cancel_dma will need to be
68 * modified for your platform's particular dma controller.
69 *
70 * If use_dma is disabled, pio will be used instead.
71 */
72 static bool use_dma;
73 module_param(use_dma, bool, 0644);
74
75 /*
76 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
77 * The NET2272 can only use dma for a single endpoint at a time.
78 * At some point this could be modified to allow either endpoint
79 * to take control of dma as it becomes available.
80 *
81 * Note that DMA should not be used on OUT endpoints unless it can
82 * be guaranteed that no short packets will arrive on an IN endpoint
83 * while the DMA operation is pending. Otherwise the OUT DMA will
84 * terminate prematurely (See NET2272 Errata 630-0213-0101)
85 */
86 static ushort dma_ep = 1;
87 module_param(dma_ep, ushort, 0644);
88
89 /*
90 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
91 * mode 0 == Slow DREQ mode
92 * mode 1 == Fast DREQ mode
93 * mode 2 == Burst mode
94 */
95 static ushort dma_mode = 2;
96 module_param(dma_mode, ushort, 0644);
97 #else
98 #define use_dma 0
99 #define dma_ep 1
100 #define dma_mode 2
101 #endif
102
103 /*
104 * fifo_mode: net2272 buffer configuration:
105 * mode 0 == ep-{a,b,c} 512db each
106 * mode 1 == ep-a 1k, ep-{b,c} 512db
107 * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
108 * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
109 */
110 static ushort fifo_mode = 0;
111 module_param(fifo_mode, ushort, 0644);
112
113 /*
114 * enable_suspend: When enabled, the driver will respond to
115 * USB suspend requests by powering down the NET2272. Otherwise,
116 * USB suspend requests will be ignored. This is acceptable for
117 * self-powered devices. For bus-powered devices, set this to 1.
118 */
119 static ushort enable_suspend = 0;
120 module_param(enable_suspend, ushort, 0644);
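/*
 * Example module load (a sketch only; assumes a modular build with
 * CONFIG_USB_GADGET_NET2272_DMA enabled so the dma parameters exist):
 *
 *   modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2 fifo_mode=1
 *
 * i.e. external DMA on ep-a in Burst mode, with ep-a given a 1k fifo.
 */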
121
122 static void assert_out_naking(struct net2272_ep *ep, const char *where)
123 {
124 u8 tmp;
125
126 #ifndef DEBUG
127 return;
128 #endif
129
130 tmp = net2272_ep_read(ep, EP_STAT0);
131 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
132 dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
133 ep->ep.name, where, tmp);
134 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
135 }
136 }
137 #define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
138
139 static void stop_out_naking(struct net2272_ep *ep)
140 {
141 u8 tmp = net2272_ep_read(ep, EP_STAT0);
142
143 if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
144 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
145 }
146
147 #define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
148
149 static char *type_string(u8 bmAttributes)
150 {
151 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
152 case USB_ENDPOINT_XFER_BULK: return "bulk";
153 case USB_ENDPOINT_XFER_ISOC: return "iso";
154 case USB_ENDPOINT_XFER_INT: return "intr";
155 default: return "control";
156 }
157 }
158
159 static char *buf_state_string(unsigned state)
160 {
161 switch (state) {
162 case BUFF_FREE: return "free";
163 case BUFF_VALID: return "valid";
164 case BUFF_LCL: return "local";
165 case BUFF_USB: return "usb";
166 default: return "unknown";
167 }
168 }
169
170 static char *dma_mode_string(void)
171 {
172 if (!use_dma)
173 return "PIO";
174 switch (dma_mode) {
175 case 0: return "SLOW DREQ";
176 case 1: return "FAST DREQ";
177 case 2: return "BURST";
178 default: return "invalid";
179 }
180 }
181
182 static void net2272_dequeue_all(struct net2272_ep *);
183 static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
184 static int net2272_fifo_status(struct usb_ep *);
185
186 static struct usb_ep_ops net2272_ep_ops;
187
188 /*---------------------------------------------------------------------------*/
189
190 static int
191 net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
192 {
193 struct net2272 *dev;
194 struct net2272_ep *ep;
195 u32 max;
196 u8 tmp;
197 unsigned long flags;
198
199 ep = container_of(_ep, struct net2272_ep, ep);
200 if (!_ep || !desc || ep->desc || _ep->name == ep0name
201 || desc->bDescriptorType != USB_DT_ENDPOINT)
202 return -EINVAL;
203 dev = ep->dev;
204 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
205 return -ESHUTDOWN;
206
207 max = le16_to_cpu(desc->wMaxPacketSize) & 0x1fff;
208
209 spin_lock_irqsave(&dev->lock, flags);
210 _ep->maxpacket = max & 0x7fff;
211 ep->desc = desc;
212
213 /* net2272_ep_reset() has already been called */
214 ep->stopped = 0;
215 ep->wedged = 0;
216
217 /* set speed-dependent max packet */
218 net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
219 net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
220
221 /* set type, direction, address; reset fifo counters */
222 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
223 tmp = usb_endpoint_type(desc);
224 if (usb_endpoint_xfer_bulk(desc)) {
225 /* catch some particularly blatant driver bugs */
226 if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
227 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
228 spin_unlock_irqrestore(&dev->lock, flags);
229 return -ERANGE;
230 }
231 }
232 ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
233 tmp <<= ENDPOINT_TYPE;
234 tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
235 tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
236 tmp |= (1 << ENDPOINT_ENABLE);
237
238 /* for OUT transfers, block the rx fifo until a read is posted */
239 ep->is_in = usb_endpoint_dir_in(desc);
240 if (!ep->is_in)
241 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
242
243 net2272_ep_write(ep, EP_CFG, tmp);
244
245 /* enable irqs */
246 tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
247 net2272_write(dev, IRQENB0, tmp);
248
249 tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
250 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
251 | net2272_ep_read(ep, EP_IRQENB);
252 net2272_ep_write(ep, EP_IRQENB, tmp);
253
254 tmp = desc->bEndpointAddress;
255 dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
256 _ep->name, tmp & 0x0f, PIPEDIR(tmp),
257 type_string(desc->bmAttributes), max,
258 net2272_ep_read(ep, EP_CFG));
259
260 spin_unlock_irqrestore(&dev->lock, flags);
261 return 0;
262 }
263
264 static void net2272_ep_reset(struct net2272_ep *ep)
265 {
266 u8 tmp;
267
268 ep->desc = NULL;
269 INIT_LIST_HEAD(&ep->queue);
270
271 ep->ep.maxpacket = ~0;
272 ep->ep.ops = &net2272_ep_ops;
273
274 /* disable irqs, endpoint */
275 net2272_ep_write(ep, EP_IRQENB, 0);
276
277 /* init to our chosen defaults, notably so that we NAK OUT
278 * packets until the driver queues a read.
279 */
280 tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
281 net2272_ep_write(ep, EP_RSPSET, tmp);
282
283 tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
284 if (ep->num != 0)
285 tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
286
287 net2272_ep_write(ep, EP_RSPCLR, tmp);
288
289 /* scrub most status bits, and flush any fifo state */
290 net2272_ep_write(ep, EP_STAT0,
291 (1 << DATA_IN_TOKEN_INTERRUPT)
292 | (1 << DATA_OUT_TOKEN_INTERRUPT)
293 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
294 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
295 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
296
297 net2272_ep_write(ep, EP_STAT1,
298 (1 << TIMEOUT)
299 | (1 << USB_OUT_ACK_SENT)
300 | (1 << USB_OUT_NAK_SENT)
301 | (1 << USB_IN_ACK_RCVD)
302 | (1 << USB_IN_NAK_SENT)
303 | (1 << USB_STALL_SENT)
304 | (1 << LOCAL_OUT_ZLP)
305 | (1 << BUFFER_FLUSH));
306
307 /* fifo size is handled separately */
308 }
309
310 static int net2272_disable(struct usb_ep *_ep)
311 {
312 struct net2272_ep *ep;
313 unsigned long flags;
314
315 ep = container_of(_ep, struct net2272_ep, ep);
316 if (!_ep || !ep->desc || _ep->name == ep0name)
317 return -EINVAL;
318
319 spin_lock_irqsave(&ep->dev->lock, flags);
320 net2272_dequeue_all(ep);
321 net2272_ep_reset(ep);
322
323 dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
324
325 spin_unlock_irqrestore(&ep->dev->lock, flags);
326 return 0;
327 }
328
329 /*---------------------------------------------------------------------------*/
330
331 static struct usb_request *
332 net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
333 {
334 struct net2272_ep *ep;
335 struct net2272_request *req;
336
337 if (!_ep)
338 return NULL;
339 ep = container_of(_ep, struct net2272_ep, ep);
340
341 req = kzalloc(sizeof(*req), gfp_flags);
342 if (!req)
343 return NULL;
344
345 req->req.dma = DMA_ADDR_INVALID;
346 INIT_LIST_HEAD(&req->queue);
347
348 return &req->req;
349 }
350
351 static void
352 net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
353 {
354 struct net2272_ep *ep;
355 struct net2272_request *req;
356
357 ep = container_of(_ep, struct net2272_ep, ep);
358 if (!_ep || !_req)
359 return;
360
361 req = container_of(_req, struct net2272_request, req);
362 WARN_ON(!list_empty(&req->queue));
363 kfree(req);
364 }
365
366 static void
367 net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
368 {
369 struct net2272 *dev;
370 unsigned stopped = ep->stopped;
371
372 if (ep->num == 0) {
373 if (ep->dev->protocol_stall) {
374 ep->stopped = 1;
375 set_halt(ep);
376 }
377 allow_status(ep);
378 }
379
380 list_del_init(&req->queue);
381
382 if (req->req.status == -EINPROGRESS)
383 req->req.status = status;
384 else
385 status = req->req.status;
386
387 dev = ep->dev;
388 if (use_dma && req->mapped) {
389 dma_unmap_single(dev->dev, req->req.dma, req->req.length,
390 ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
391 req->req.dma = DMA_ADDR_INVALID;
392 req->mapped = 0;
393 }
394
395 if (status && status != -ESHUTDOWN)
396 dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
397 ep->ep.name, &req->req, status,
398 req->req.actual, req->req.length, req->req.buf);
399
400 /* don't modify queue heads during completion callback */
401 ep->stopped = 1;
402 spin_unlock(&dev->lock);
403 req->req.complete(&ep->ep, &req->req);
404 spin_lock(&dev->lock);
405 ep->stopped = stopped;
406 }
407
408 static int
409 net2272_write_packet(struct net2272_ep *ep, u8 *buf,
410 struct net2272_request *req, unsigned max)
411 {
412 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
413 u16 *bufp;
414 unsigned length, count;
415 u8 tmp;
416
417 length = min(req->req.length - req->req.actual, max);
418 req->req.actual += length;
419
420 dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
421 ep->ep.name, req, max, length,
422 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
423
424 count = length;
425 bufp = (u16 *)buf;
426
427 while (likely(count >= 2)) {
428 /* no byte-swap required; chip endian set during init */
429 writew(*bufp++, ep_data);
430 count -= 2;
431 }
432 buf = (u8 *)bufp;
433
434 /* write final byte by placing the NET2272 into 8-bit mode */
435 if (unlikely(count)) {
436 tmp = net2272_read(ep->dev, LOCCTL);
437 net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
438 writeb(*buf, ep_data);
439 net2272_write(ep->dev, LOCCTL, tmp);
440 }
441 return length;
442 }
443
444 /* returns: 0: still running, 1: completed, negative: errno */
445 static int
446 net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
447 {
448 u8 *buf;
449 unsigned count, max;
450 int status;
451
452 dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
453 ep->ep.name, req->req.actual, req->req.length);
454
455 /*
456 * Keep loading the endpoint until the final packet is loaded,
457 * or the endpoint buffer is full.
458 */
459 top:
460 /*
461 * Clear interrupt status
462 * - Packet Transmitted interrupt will become set again when the
463 * host successfully takes another packet
464 */
465 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
466 while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
467 buf = req->req.buf + req->req.actual;
468 prefetch(buf);
469
470 /* force pagesel */
471 net2272_ep_read(ep, EP_STAT0);
472
473 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
474 (net2272_ep_read(ep, EP_AVAIL0));
475
476 if (max < ep->ep.maxpacket)
477 max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
478 | (net2272_ep_read(ep, EP_AVAIL0));
479
480 count = net2272_write_packet(ep, buf, req, max);
481 /* see if we are done */
482 if (req->req.length == req->req.actual) {
483 /* validate short or zlp packet */
484 if (count < ep->ep.maxpacket)
485 set_fifo_bytecount(ep, 0);
486 net2272_done(ep, req, 0);
487
488 if (!list_empty(&ep->queue)) {
489 req = list_entry(ep->queue.next,
490 struct net2272_request,
491 queue);
492 status = net2272_kick_dma(ep, req);
493
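/* dma could not take over (see net2272_kick_dma); if the endpoint
 * buffer is empty, go back and keep loading it with pio
 */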
494 if (status < 0)
495 if ((net2272_ep_read(ep, EP_STAT0)
496 & (1 << BUFFER_EMPTY)))
497 goto top;
498 }
499 return 1;
500 }
501 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
502 }
503 return 0;
504 }
505
506 static void
507 net2272_out_flush(struct net2272_ep *ep)
508 {
509 ASSERT_OUT_NAKING(ep);
510
511 net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
512 | (1 << DATA_PACKET_RECEIVED_INTERRUPT));
513 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
514 }
515
516 static int
517 net2272_read_packet(struct net2272_ep *ep, u8 *buf,
518 struct net2272_request *req, unsigned avail)
519 {
520 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
521 unsigned is_short;
522 u16 *bufp;
523
524 req->req.actual += avail;
525
526 dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
527 ep->ep.name, req, avail,
528 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
529
530 is_short = (avail < ep->ep.maxpacket);
531
532 if (unlikely(avail == 0)) {
533 /* remove any zlp from the buffer */
534 (void)readw(ep_data);
535 return is_short;
536 }
537
538 /* Ensure we get the final byte */
539 if (unlikely(avail % 2))
540 avail++;
541 bufp = (u16 *)buf;
542
543 do {
544 *bufp++ = readw(ep_data);
545 avail -= 2;
546 } while (avail);
547
548 /*
549 * To avoid a false endpoint-available race condition, EP_STAT0 must be
550 * read twice in the case of a short transfer
551 */
552 if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
553 net2272_ep_read(ep, EP_STAT0);
554
555 return is_short;
556 }
557
558 static int
559 net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
560 {
561 u8 *buf;
562 unsigned is_short;
563 int count;
564 int tmp;
565 int cleanup = 0;
566 int status = -1;
567
568 dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
569 ep->ep.name, req->req.actual, req->req.length);
570
571 top:
572 do {
573 buf = req->req.buf + req->req.actual;
574 prefetchw(buf);
575
576 count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
577 | net2272_ep_read(ep, EP_AVAIL0);
578
579 net2272_ep_write(ep, EP_STAT0,
580 (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
581 (1 << DATA_PACKET_RECEIVED_INTERRUPT));
582
583 tmp = req->req.length - req->req.actual;
584
585 if (count > tmp) {
586 if ((tmp % ep->ep.maxpacket) != 0) {
587 dev_err(ep->dev->dev,
588 "%s out fifo %d bytes, expected %d\n",
589 ep->ep.name, count, tmp);
590 cleanup = 1;
591 }
592 count = (tmp > 0) ? tmp : 0;
593 }
594
595 is_short = net2272_read_packet(ep, buf, req, count);
596
597 /* completion */
598 if (unlikely(cleanup || is_short ||
599 ((req->req.actual == req->req.length)
600 && !req->req.zero))) {
601
602 if (cleanup) {
603 net2272_out_flush(ep);
604 net2272_done(ep, req, -EOVERFLOW);
605 } else
606 net2272_done(ep, req, 0);
607
608 /* re-initialize endpoint transfer registers
609 * otherwise they may result in erroneous pre-validation
610 * for subsequent control reads
611 */
612 if (unlikely(ep->num == 0)) {
613 net2272_ep_write(ep, EP_TRANSFER2, 0);
614 net2272_ep_write(ep, EP_TRANSFER1, 0);
615 net2272_ep_write(ep, EP_TRANSFER0, 0);
616 }
617
618 if (!list_empty(&ep->queue)) {
619 req = list_entry(ep->queue.next,
620 struct net2272_request, queue);
621 status = net2272_kick_dma(ep, req);
622 if ((status < 0) &&
623 !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
624 goto top;
625 }
626 return 1;
627 }
628 } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
629
630 return 0;
631 }
632
633 static void
634 net2272_pio_advance(struct net2272_ep *ep)
635 {
636 struct net2272_request *req;
637
638 if (unlikely(list_empty(&ep->queue)))
639 return;
640
641 req = list_entry(ep->queue.next, struct net2272_request, queue);
642 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
643 }
644
645 /* returns 0 on success, else negative errno */
646 static int
647 net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
648 unsigned len, unsigned dir)
649 {
650 dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
651 ep, buf, len, dir);
652
653 /* The NET2272 only supports a single dma channel */
654 if (dev->dma_busy)
655 return -EBUSY;
656 /*
657 * EP_TRANSFER (used to determine the number of bytes received
658 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
659 */
660 if ((dir == 1) && (len > 0x1000000))
661 return -EINVAL;
662
663 dev->dma_busy = 1;
664
665 /* initialize platform's dma */
666 #ifdef CONFIG_PCI
667 /* NET2272 addr, buffer addr, length, etc. */
668 switch (dev->dev_id) {
669 case PCI_DEVICE_ID_RDK1:
670 /* Setup PLX 9054 DMA mode */
671 writel((1 << LOCAL_BUS_WIDTH) |
672 (1 << TA_READY_INPUT_ENABLE) |
673 (0 << LOCAL_BURST_ENABLE) |
674 (1 << DONE_INTERRUPT_ENABLE) |
675 (1 << LOCAL_ADDRESSING_MODE) |
676 (1 << DEMAND_MODE) |
677 (1 << DMA_EOT_ENABLE) |
678 (1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
679 (1 << DMA_CHANNEL_INTERRUPT_SELECT),
680 dev->rdk1.plx9054_base_addr + DMAMODE0);
681
682 writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
683 writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
684 writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
685 writel((dir << DIRECTION_OF_TRANSFER) |
686 (1 << INTERRUPT_AFTER_TERMINAL_COUNT),
687 dev->rdk1.plx9054_base_addr + DMADPR0);
688 writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
689 readl(dev->rdk1.plx9054_base_addr + INTCSR),
690 dev->rdk1.plx9054_base_addr + INTCSR);
691
692 break;
693 }
694 #endif
695
696 net2272_write(dev, DMAREQ,
697 (0 << DMA_BUFFER_VALID) |
698 (1 << DMA_REQUEST_ENABLE) |
699 (1 << DMA_CONTROL_DACK) |
700 (dev->dma_eot_polarity << EOT_POLARITY) |
701 (dev->dma_dack_polarity << DACK_POLARITY) |
702 (dev->dma_dreq_polarity << DREQ_POLARITY) |
703 ((ep >> 1) << DMA_ENDPOINT_SELECT));
704
705 (void) net2272_read(dev, SCRATCH);
706
707 return 0;
708 }
709
710 static void
711 net2272_start_dma(struct net2272 *dev)
712 {
713 /* start platform's dma controller */
714 #ifdef CONFIG_PCI
715 switch (dev->dev_id) {
716 case PCI_DEVICE_ID_RDK1:
717 writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
718 dev->rdk1.plx9054_base_addr + DMACSR0);
719 break;
720 }
721 #endif
722 }
723
724 /* returns 0 on success, else negative errno */
725 static int
726 net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
727 {
728 unsigned size;
729 u8 tmp;
730
731 if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
732 return -EINVAL;
733
734 /* don't use dma for odd-length transfers;
735 * otherwise, we'd need to deal with the last byte with pio
736 */
737 if (req->req.length & 1)
738 return -EINVAL;
739
740 dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08x\n",
741 ep->ep.name, req, req->req.dma);
742
743 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
744
745 /* The NET2272 can only use DMA on one endpoint at a time */
746 if (ep->dev->dma_busy)
747 return -EBUSY;
748
749 /* Make sure we only DMA an even number of bytes (we'll use
750 * pio to complete the transfer)
751 */
752 size = req->req.length;
753 size &= ~1;
754
755 /* device-to-host transfer */
756 if (ep->is_in) {
757 /* initialize platform's dma controller */
758 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
759 /* unable to obtain DMA channel; return error and use pio mode */
760 return -EBUSY;
761 req->req.actual += size;
762
763 /* host-to-device transfer */
764 } else {
765 tmp = net2272_ep_read(ep, EP_STAT0);
766
767 /* initialize platform's dma controller */
768 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
769 /* unable to obtain DMA channel; return error and use pio mode */
770 return -EBUSY;
771
772 if (!(tmp & (1 << BUFFER_EMPTY)))
773 ep->not_empty = 1;
774 else
775 ep->not_empty = 0;
776
777
778 /* allow the endpoint's buffer to fill */
779 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
780
781 /* this transfer completed and data's already in the fifo;
782 * return an error so pio gets used.
783 */
784 if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
785
786 /* deassert dreq */
787 net2272_write(ep->dev, DMAREQ,
788 (0 << DMA_BUFFER_VALID) |
789 (0 << DMA_REQUEST_ENABLE) |
790 (1 << DMA_CONTROL_DACK) |
791 (ep->dev->dma_eot_polarity << EOT_POLARITY) |
792 (ep->dev->dma_dack_polarity << DACK_POLARITY) |
793 (ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
794 ((ep->num >> 1) << DMA_ENDPOINT_SELECT));
795
796 return -EBUSY;
797 }
798 }
799
800 /* Don't use per-packet interrupts: use dma interrupts only */
801 net2272_ep_write(ep, EP_IRQENB, 0);
802
803 net2272_start_dma(ep->dev);
804
805 return 0;
806 }
807
808 static void net2272_cancel_dma(struct net2272 *dev)
809 {
810 #ifdef CONFIG_PCI
811 switch (dev->dev_id) {
812 case PCI_DEVICE_ID_RDK1:
813 writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
814 writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
815 while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
816 (1 << CHANNEL_DONE)))
817 continue; /* wait for dma to stabilize */
818
819 /* dma abort generates an interrupt */
820 writeb(1 << CHANNEL_CLEAR_INTERRUPT,
821 dev->rdk1.plx9054_base_addr + DMACSR0);
822 break;
823 }
824 #endif
825
826 dev->dma_busy = 0;
827 }
828
829 /*---------------------------------------------------------------------------*/
830
831 static int
832 net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
833 {
834 struct net2272_request *req;
835 struct net2272_ep *ep;
836 struct net2272 *dev;
837 unsigned long flags;
838 int status = -1;
839 u8 s;
840
841 req = container_of(_req, struct net2272_request, req);
842 if (!_req || !_req->complete || !_req->buf
843 || !list_empty(&req->queue))
844 return -EINVAL;
845 ep = container_of(_ep, struct net2272_ep, ep);
846 if (!_ep || (!ep->desc && ep->num != 0))
847 return -EINVAL;
848 dev = ep->dev;
849 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
850 return -ESHUTDOWN;
851
852 /* set up dma mapping in case the caller didn't */
853 if (use_dma && ep->dma && _req->dma == DMA_ADDR_INVALID) {
854 _req->dma = dma_map_single(dev->dev, _req->buf, _req->length,
855 ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
856 req->mapped = 1;
857 }
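/* the matching dma_unmap_single() for this mapping is done in net2272_done() */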
858
859 dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08x %s\n",
860 _ep->name, _req, _req->length, _req->buf,
861 _req->dma, _req->zero ? "zero" : "!zero");
862
863 spin_lock_irqsave(&dev->lock, flags);
864
865 _req->status = -EINPROGRESS;
866 _req->actual = 0;
867
868 /* kickstart this i/o queue? */
869 if (list_empty(&ep->queue) && !ep->stopped) {
870 /* maybe there's no control data, just status ack */
871 if (ep->num == 0 && _req->length == 0) {
872 net2272_done(ep, req, 0);
873 dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
874 goto done;
875 }
876
877 /* Return zlp, don't let it block subsequent packets */
878 s = net2272_ep_read(ep, EP_STAT0);
879 if (s & (1 << BUFFER_EMPTY)) {
880 /* Buffer is empty; check for a blocking zlp and handle it */
881 if ((s & (1 << NAK_OUT_PACKETS)) &&
882 net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
883 dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
884 /*
885 * Request is going to terminate with a short packet ...
886 * hope the client is ready for it!
887 */
888 status = net2272_read_fifo(ep, req);
889 /* clear short packet naking */
890 net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
891 goto done;
892 }
893 }
894
895 /* try dma first */
896 status = net2272_kick_dma(ep, req);
897
898 if (status < 0) {
899 /* dma failed (most likely in use by another endpoint)
900 * fallback to pio
901 */
902 status = 0;
903
904 if (ep->is_in)
905 status = net2272_write_fifo(ep, req);
906 else {
907 s = net2272_ep_read(ep, EP_STAT0);
908 if ((s & (1 << BUFFER_EMPTY)) == 0)
909 status = net2272_read_fifo(ep, req);
910 }
911
912 if (unlikely(status != 0)) {
913 if (status > 0)
914 status = 0;
915 req = NULL;
916 }
917 }
918 }
919 if (likely(req != 0))
920 list_add_tail(&req->queue, &ep->queue);
921
922 if (likely(!list_empty(&ep->queue)))
923 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
924 done:
925 spin_unlock_irqrestore(&dev->lock, flags);
926
927 return 0;
928 }
929
930 /* dequeue ALL requests */
931 static void
932 net2272_dequeue_all(struct net2272_ep *ep)
933 {
934 struct net2272_request *req;
935
936 /* called with spinlock held */
937 ep->stopped = 1;
938
939 while (!list_empty(&ep->queue)) {
940 req = list_entry(ep->queue.next,
941 struct net2272_request,
942 queue);
943 net2272_done(ep, req, -ESHUTDOWN);
944 }
945 }
946
947 /* dequeue JUST ONE request */
948 static int
949 net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
950 {
951 struct net2272_ep *ep;
952 struct net2272_request *req;
953 unsigned long flags;
954 int stopped;
955
956 ep = container_of(_ep, struct net2272_ep, ep);
957 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
958 return -EINVAL;
959
960 spin_lock_irqsave(&ep->dev->lock, flags);
961 stopped = ep->stopped;
962 ep->stopped = 1;
963
964 /* make sure it's still queued on this endpoint */
965 list_for_each_entry(req, &ep->queue, queue) {
966 if (&req->req == _req)
967 break;
968 }
969 if (&req->req != _req) {
970 spin_unlock_irqrestore(&ep->dev->lock, flags);
971 return -EINVAL;
972 }
973
974 /* queue head may be partially complete */
975 if (ep->queue.next == &req->queue) {
976 dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
977 net2272_done(ep, req, -ECONNRESET);
978 }
979 req = NULL;
980 ep->stopped = stopped;
981
982 spin_unlock_irqrestore(&ep->dev->lock, flags);
983 return 0;
984 }
985
986 /*---------------------------------------------------------------------------*/
987
988 static int
989 net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
990 {
991 struct net2272_ep *ep;
992 unsigned long flags;
993 int ret = 0;
994
995 ep = container_of(_ep, struct net2272_ep, ep);
996 if (!_ep || (!ep->desc && ep->num != 0))
997 return -EINVAL;
998 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
999 return -ESHUTDOWN;
1000 if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
1001 return -EINVAL;
1002
1003 spin_lock_irqsave(&ep->dev->lock, flags);
1004 if (!list_empty(&ep->queue))
1005 ret = -EAGAIN;
1006 else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
1007 ret = -EAGAIN;
1008 else {
1009 dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
1010 value ? "set" : "clear",
1011 wedged ? "wedge" : "halt");
1012 /* set/clear */
1013 if (value) {
1014 if (ep->num == 0)
1015 ep->dev->protocol_stall = 1;
1016 else
1017 set_halt(ep);
1018 if (wedged)
1019 ep->wedged = 1;
1020 } else {
1021 clear_halt(ep);
1022 ep->wedged = 0;
1023 }
1024 }
1025 spin_unlock_irqrestore(&ep->dev->lock, flags);
1026
1027 return ret;
1028 }
1029
1030 static int
1031 net2272_set_halt(struct usb_ep *_ep, int value)
1032 {
1033 return net2272_set_halt_and_wedge(_ep, value, 0);
1034 }
1035
1036 static int
1037 net2272_set_wedge(struct usb_ep *_ep)
1038 {
1039 if (!_ep || _ep->name == ep0name)
1040 return -EINVAL;
1041 return net2272_set_halt_and_wedge(_ep, 1, 1);
1042 }
1043
1044 static int
1045 net2272_fifo_status(struct usb_ep *_ep)
1046 {
1047 struct net2272_ep *ep;
1048 u16 avail;
1049
1050 ep = container_of(_ep, struct net2272_ep, ep);
1051 if (!_ep || (!ep->desc && ep->num != 0))
1052 return -ENODEV;
1053 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1054 return -ESHUTDOWN;
1055
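/* EP_AVAIL reports free space on an IN endpoint, so convert it below
 * to the number of bytes currently sitting in the fifo
 */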
1056 avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1057 avail |= net2272_ep_read(ep, EP_AVAIL0);
1058 if (avail > ep->fifo_size)
1059 return -EOVERFLOW;
1060 if (ep->is_in)
1061 avail = ep->fifo_size - avail;
1062 return avail;
1063 }
1064
1065 static void
1066 net2272_fifo_flush(struct usb_ep *_ep)
1067 {
1068 struct net2272_ep *ep;
1069
1070 ep = container_of(_ep, struct net2272_ep, ep);
1071 if (!_ep || (!ep->desc && ep->num != 0))
1072 return;
1073 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1074 return;
1075
1076 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1077 }
1078
1079 static struct usb_ep_ops net2272_ep_ops = {
1080 .enable = net2272_enable,
1081 .disable = net2272_disable,
1082
1083 .alloc_request = net2272_alloc_request,
1084 .free_request = net2272_free_request,
1085
1086 .queue = net2272_queue,
1087 .dequeue = net2272_dequeue,
1088
1089 .set_halt = net2272_set_halt,
1090 .set_wedge = net2272_set_wedge,
1091 .fifo_status = net2272_fifo_status,
1092 .fifo_flush = net2272_fifo_flush,
1093 };
1094
1095 /*---------------------------------------------------------------------------*/
1096
1097 static int
1098 net2272_get_frame(struct usb_gadget *_gadget)
1099 {
1100 struct net2272 *dev;
1101 unsigned long flags;
1102 u16 ret;
1103
1104 if (!_gadget)
1105 return -ENODEV;
1106 dev = container_of(_gadget, struct net2272, gadget);
1107 spin_lock_irqsave(&dev->lock, flags);
1108
1109 ret = net2272_read(dev, FRAME1) << 8;
1110 ret |= net2272_read(dev, FRAME0);
1111
1112 spin_unlock_irqrestore(&dev->lock, flags);
1113 return ret;
1114 }
1115
1116 static int
1117 net2272_wakeup(struct usb_gadget *_gadget)
1118 {
1119 struct net2272 *dev;
1120 u8 tmp;
1121 unsigned long flags;
1122
1123 if (!_gadget)
1124 return 0;
1125 dev = container_of(_gadget, struct net2272, gadget);
1126
1127 spin_lock_irqsave(&dev->lock, flags);
1128 tmp = net2272_read(dev, USBCTL0);
1129 if (tmp & (1 << IO_WAKEUP_ENABLE))
1130 net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1131
1132 spin_unlock_irqrestore(&dev->lock, flags);
1133
1134 return 0;
1135 }
1136
1137 static int
1138 net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1139 {
1140 struct net2272 *dev;
1141
1142 if (!_gadget)
1143 return -ENODEV;
1144 dev = container_of(_gadget, struct net2272, gadget);
1145
1146 dev->is_selfpowered = value;
1147
1148 return 0;
1149 }
1150
1151 static int
1152 net2272_pullup(struct usb_gadget *_gadget, int is_on)
1153 {
1154 struct net2272 *dev;
1155 u8 tmp;
1156 unsigned long flags;
1157
1158 if (!_gadget)
1159 return -ENODEV;
1160 dev = container_of(_gadget, struct net2272, gadget);
1161
1162 spin_lock_irqsave(&dev->lock, flags);
1163 tmp = net2272_read(dev, USBCTL0);
1164 dev->softconnect = (is_on != 0);
1165 if (is_on)
1166 tmp |= (1 << USB_DETECT_ENABLE);
1167 else
1168 tmp &= ~(1 << USB_DETECT_ENABLE);
1169 net2272_write(dev, USBCTL0, tmp);
1170 spin_unlock_irqrestore(&dev->lock, flags);
1171
1172 return 0;
1173 }
1174
1175 static const struct usb_gadget_ops net2272_ops = {
1176 .get_frame = net2272_get_frame,
1177 .wakeup = net2272_wakeup,
1178 .set_selfpowered = net2272_set_selfpowered,
1179 .pullup = net2272_pullup
1180 };
1181
1182 /*---------------------------------------------------------------------------*/
1183
1184 static ssize_t
1185 net2272_show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
1186 {
1187 struct net2272 *dev;
1188 char *next;
1189 unsigned size, t;
1190 unsigned long flags;
1191 u8 t1, t2;
1192 int i;
1193 const char *s;
1194
1195 dev = dev_get_drvdata(_dev);
1196 next = buf;
1197 size = PAGE_SIZE;
1198 spin_lock_irqsave(&dev->lock, flags);
1199
1200 if (dev->driver)
1201 s = dev->driver->driver.name;
1202 else
1203 s = "(none)";
1204
1205 /* Main Control Registers */
1206 t = scnprintf(next, size, "%s version %s, "
1207 "chiprev %02x, locctl %02x\n"
1208 "irqenb0 %02x irqenb1 %02x "
1209 "irqstat0 %02x irqstat1 %02x\n",
1210 driver_name, driver_vers, dev->chiprev,
1211 net2272_read(dev, LOCCTL),
1212 net2272_read(dev, IRQENB0),
1213 net2272_read(dev, IRQENB1),
1214 net2272_read(dev, IRQSTAT0),
1215 net2272_read(dev, IRQSTAT1));
1216 size -= t;
1217 next += t;
1218
1219 /* DMA */
1220 t1 = net2272_read(dev, DMAREQ);
1221 t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1222 t1, ep_name[(t1 & 0x01) + 1],
1223 t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1224 t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1225 t1 & (1 << DMA_REQUEST) ? "req " : "",
1226 t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1227 size -= t;
1228 next += t;
1229
1230 /* USB Control Registers */
1231 t1 = net2272_read(dev, USBCTL1);
1232 if (t1 & (1 << VBUS_PIN)) {
1233 if (t1 & (1 << USB_HIGH_SPEED))
1234 s = "high speed";
1235 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1236 s = "powered";
1237 else
1238 s = "full speed";
1239 } else
1240 s = "not attached";
1241 t = scnprintf(next, size,
1242 "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1243 net2272_read(dev, USBCTL0), t1,
1244 net2272_read(dev, OURADDR), s);
1245 size -= t;
1246 next += t;
1247
1248 /* Endpoint Registers */
1249 for (i = 0; i < 4; ++i) {
1250 struct net2272_ep *ep;
1251
1252 ep = &dev->ep[i];
1253 if (i && !ep->desc)
1254 continue;
1255
1256 t1 = net2272_ep_read(ep, EP_CFG);
1257 t2 = net2272_ep_read(ep, EP_RSPSET);
1258 t = scnprintf(next, size,
1259 "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1260 "irqenb %02x\n",
1261 ep->ep.name, t1, t2,
1262 (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1263 (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1264 (t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1265 (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1266 (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1267 (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1268 (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1269 (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1270 net2272_ep_read(ep, EP_IRQENB));
1271 size -= t;
1272 next += t;
1273
1274 t = scnprintf(next, size,
1275 "\tstat0 %02x stat1 %02x avail %04x "
1276 "(ep%d%s-%s)%s\n",
1277 net2272_ep_read(ep, EP_STAT0),
1278 net2272_ep_read(ep, EP_STAT1),
1279 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1280 t1 & 0x0f,
1281 ep->is_in ? "in" : "out",
1282 type_string(t1 >> 5),
1283 ep->stopped ? "*" : "");
1284 size -= t;
1285 next += t;
1286
1287 t = scnprintf(next, size,
1288 "\tep_transfer %06x\n",
1289 ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1290 ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1291 ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1292 size -= t;
1293 next += t;
1294
1295 t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1296 t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1297 t = scnprintf(next, size,
1298 "\tbuf-a %s buf-b %s\n",
1299 buf_state_string(t1),
1300 buf_state_string(t2));
1301 size -= t;
1302 next += t;
1303 }
1304
1305 spin_unlock_irqrestore(&dev->lock, flags);
1306
1307 return PAGE_SIZE - size;
1308 }
1309 static DEVICE_ATTR(registers, S_IRUGO, net2272_show_registers, NULL);
1310
1311 /*---------------------------------------------------------------------------*/
1312
1313 static void
1314 net2272_set_fifo_mode(struct net2272 *dev, int mode)
1315 {
1316 u8 tmp;
1317
1318 tmp = net2272_read(dev, LOCCTL) & 0x3f;
1319 tmp |= (mode << 6);
1320 net2272_write(dev, LOCCTL, tmp);
1321
1322 INIT_LIST_HEAD(&dev->gadget.ep_list);
1323
1324 /* always ep-a, ep-c ... maybe not ep-b */
1325 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1326
1327 switch (mode) {
1328 case 0:
1329 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1330 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1331 break;
1332 case 1:
1333 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1334 dev->ep[1].fifo_size = 1024;
1335 dev->ep[2].fifo_size = 512;
1336 break;
1337 case 2:
1338 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1339 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1340 break;
1341 case 3:
1342 dev->ep[1].fifo_size = 1024;
1343 break;
1344 }
1345
1346 /* ep-c always has two 512-byte buffers */
1347 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1348 dev->ep[3].fifo_size = 512;
1349 }
1350
1351 /*---------------------------------------------------------------------------*/
1352
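/* only one net2272 instance can be bound to the gadget layer at a time;
 * usb_gadget_probe_driver() and usb_gadget_unregister_driver() use this singleton
 */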
1353 static struct net2272 *the_controller;
1354
1355 static void
1356 net2272_usb_reset(struct net2272 *dev)
1357 {
1358 dev->gadget.speed = USB_SPEED_UNKNOWN;
1359
1360 net2272_cancel_dma(dev);
1361
1362 net2272_write(dev, IRQENB0, 0);
1363 net2272_write(dev, IRQENB1, 0);
1364
1365 /* clear irq state */
1366 net2272_write(dev, IRQSTAT0, 0xff);
1367 net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1368
1369 net2272_write(dev, DMAREQ,
1370 (0 << DMA_BUFFER_VALID) |
1371 (0 << DMA_REQUEST_ENABLE) |
1372 (1 << DMA_CONTROL_DACK) |
1373 (dev->dma_eot_polarity << EOT_POLARITY) |
1374 (dev->dma_dack_polarity << DACK_POLARITY) |
1375 (dev->dma_dreq_polarity << DREQ_POLARITY) |
1376 ((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1377
1378 net2272_cancel_dma(dev);
1379 net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1380
1381 /* Set the NET2272 ep fifo data width to 16-bit mode.
1382 * Note that the higher-level gadget drivers are expected to convert data to little endian;
1383 * if your local bus/cpu needs byte swapping, enable BYTE_SWAP in LOCCTL here.
1384 */
1385 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1386 net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1387 }
1388
1389 static void
1390 net2272_usb_reinit(struct net2272 *dev)
1391 {
1392 int i;
1393
1394 /* basic endpoint init */
1395 for (i = 0; i < 4; ++i) {
1396 struct net2272_ep *ep = &dev->ep[i];
1397
1398 ep->ep.name = ep_name[i];
1399 ep->dev = dev;
1400 ep->num = i;
1401 ep->not_empty = 0;
1402
1403 if (use_dma && ep->num == dma_ep)
1404 ep->dma = 1;
1405
1406 if (i > 0 && i <= 3)
1407 ep->fifo_size = 512;
1408 else
1409 ep->fifo_size = 64;
1410 net2272_ep_reset(ep);
1411 }
1412 dev->ep[0].ep.maxpacket = 64;
1413
1414 dev->gadget.ep0 = &dev->ep[0].ep;
1415 dev->ep[0].stopped = 0;
1416 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1417 }
1418
1419 static void
1420 net2272_ep0_start(struct net2272 *dev)
1421 {
1422 struct net2272_ep *ep0 = &dev->ep[0];
1423
1424 net2272_ep_write(ep0, EP_RSPSET,
1425 (1 << NAK_OUT_PACKETS_MODE) |
1426 (1 << ALT_NAK_OUT_PACKETS));
1427 net2272_ep_write(ep0, EP_RSPCLR,
1428 (1 << HIDE_STATUS_PHASE) |
1429 (1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1430 net2272_write(dev, USBCTL0,
1431 (dev->softconnect << USB_DETECT_ENABLE) |
1432 (1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1433 (1 << IO_WAKEUP_ENABLE));
1434 net2272_write(dev, IRQENB0,
1435 (1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1436 (1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1437 (1 << DMA_DONE_INTERRUPT_ENABLE));
1438 net2272_write(dev, IRQENB1,
1439 (1 << VBUS_INTERRUPT_ENABLE) |
1440 (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1441 (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1442 }
1443
1444 /* when a driver is successfully registered, it will receive
1445 * control requests including set_configuration(), which enables
1446 * non-control requests. then usb traffic follows until a
1447 * disconnect is reported. then a host may connect again, or
1448 * the driver might get unbound.
1449 */
1450 int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1451 int (*bind)(struct usb_gadget *))
1452 {
1453 struct net2272 *dev = the_controller;
1454 int ret;
1455 unsigned i;
1456
1457 if (!driver || !bind || !driver->unbind || !driver->setup ||
1458 driver->speed != USB_SPEED_HIGH)
1459 return -EINVAL;
1460 if (!dev)
1461 return -ENODEV;
1462 if (dev->driver)
1463 return -EBUSY;
1464
1465 for (i = 0; i < 4; ++i)
1466 dev->ep[i].irqs = 0;
1467 /* hook up the driver ... */
1468 dev->softconnect = 1;
1469 driver->driver.bus = NULL;
1470 dev->driver = driver;
1471 dev->gadget.dev.driver = &driver->driver;
1472 ret = bind(&dev->gadget);
1473 if (ret) {
1474 dev_dbg(dev->dev, "bind to driver %s --> %d\n",
1475 driver->driver.name, ret);
1476 dev->driver = NULL;
1477 dev->gadget.dev.driver = NULL;
1478 return ret;
1479 }
1480
1481 /* ... then enable host detection and ep0; and we're ready
1482 * for set_configuration as well as eventual disconnect.
1483 */
1484 net2272_ep0_start(dev);
1485
1486 dev_dbg(dev->dev, "%s ready\n", driver->driver.name);
1487
1488 return 0;
1489 }
1490 EXPORT_SYMBOL(usb_gadget_probe_driver);
1491
1492 static void
1493 stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1494 {
1495 int i;
1496
1497 /* don't disconnect if it's not connected */
1498 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1499 driver = NULL;
1500
1501 /* stop hardware; prevent new request submissions;
1502 * and kill any outstanding requests.
1503 */
1504 net2272_usb_reset(dev);
1505 for (i = 0; i < 4; ++i)
1506 net2272_dequeue_all(&dev->ep[i]);
1507
1508 /* report disconnect; the driver is already quiesced */
1509 if (driver) {
1510 spin_unlock(&dev->lock);
1511 driver->disconnect(&dev->gadget);
1512 spin_lock(&dev->lock);
1513
1514 }
1515 net2272_usb_reinit(dev);
1516 }
1517
1518 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1519 {
1520 struct net2272 *dev = the_controller;
1521 unsigned long flags;
1522
1523 if (!dev)
1524 return -ENODEV;
1525 if (!driver || driver != dev->driver)
1526 return -EINVAL;
1527
1528 spin_lock_irqsave(&dev->lock, flags);
1529 stop_activity(dev, driver);
1530 spin_unlock_irqrestore(&dev->lock, flags);
1531
1532 net2272_pullup(&dev->gadget, 0);
1533
1534 driver->unbind(&dev->gadget);
1535 dev->gadget.dev.driver = NULL;
1536 dev->driver = NULL;
1537
1538 dev_dbg(dev->dev, "unregistered driver '%s'\n", driver->driver.name);
1539 return 0;
1540 }
1541 EXPORT_SYMBOL(usb_gadget_unregister_driver);
1542
1543 /*---------------------------------------------------------------------------*/
1544 /* handle ep-a/ep-b dma completions */
1545 static void
1546 net2272_handle_dma(struct net2272_ep *ep)
1547 {
1548 struct net2272_request *req;
1549 unsigned len;
1550 int status;
1551
1552 if (!list_empty(&ep->queue))
1553 req = list_entry(ep->queue.next,
1554 struct net2272_request, queue);
1555 else
1556 req = NULL;
1557
1558 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1559
1560 /* Ensure DREQ is de-asserted */
1561 net2272_write(ep->dev, DMAREQ,
1562 (0 << DMA_BUFFER_VALID)
1563 | (0 << DMA_REQUEST_ENABLE)
1564 | (1 << DMA_CONTROL_DACK)
1565 | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1566 | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1567 | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1568 | ((ep->dma >> 1) << DMA_ENDPOINT_SELECT));
1569
1570 ep->dev->dma_busy = 0;
1571
1572 net2272_ep_write(ep, EP_IRQENB,
1573 (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1574 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1575 | net2272_ep_read(ep, EP_IRQENB));
1576
1577 /* device-to-host transfer completed */
1578 if (ep->is_in) {
1579 /* validate a short packet or zlp if necessary */
1580 if ((req->req.length % ep->ep.maxpacket != 0) ||
1581 req->req.zero)
1582 set_fifo_bytecount(ep, 0);
1583
1584 net2272_done(ep, req, 0);
1585 if (!list_empty(&ep->queue)) {
1586 req = list_entry(ep->queue.next,
1587 struct net2272_request, queue);
1588 status = net2272_kick_dma(ep, req);
1589 if (status < 0)
1590 net2272_pio_advance(ep);
1591 }
1592
1593 /* host-to-device transfer completed */
1594 } else {
1595 /* terminated with a short packet? */
1596 if (net2272_read(ep->dev, IRQSTAT0) &
1597 (1 << DMA_DONE_INTERRUPT)) {
1598 /* abort system dma */
1599 net2272_cancel_dma(ep->dev);
1600 }
1601
1602 /* EP_TRANSFER will contain the number of bytes
1603 * actually received.
1604 * NOTE: There is no overflow detection on EP_TRANSFER:
1605 * We can't deal with transfers larger than 2^24 bytes!
1606 */
1607 len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1608 | (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1609 | (net2272_ep_read(ep, EP_TRANSFER0));
1610
1611 if (ep->not_empty)
1612 len += 4;
1613
1614 req->req.actual += len;
1615
1616 /* get any remaining data */
1617 net2272_pio_advance(ep);
1618 }
1619 }
1620
1621 /*---------------------------------------------------------------------------*/
1622
1623 static void
1624 net2272_handle_ep(struct net2272_ep *ep)
1625 {
1626 struct net2272_request *req;
1627 u8 stat0, stat1;
1628
1629 if (!list_empty(&ep->queue))
1630 req = list_entry(ep->queue.next,
1631 struct net2272_request, queue);
1632 else
1633 req = NULL;
1634
1635 /* ack all, and handle what we care about */
1636 stat0 = net2272_ep_read(ep, EP_STAT0);
1637 stat1 = net2272_ep_read(ep, EP_STAT1);
1638 ep->irqs++;
1639
1640 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1641 ep->ep.name, stat0, stat1, req ? &req->req : 0);
1642
1643 net2272_ep_write(ep, EP_STAT0, stat0 &
1644 ~((1 << NAK_OUT_PACKETS)
1645 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1646 net2272_ep_write(ep, EP_STAT1, stat1);
1647
1648 /* data packet(s) received (in the fifo, OUT)
1649 * direction must be validated, otherwise control read status phase
1650 * could be interpreted as a valid packet
1651 */
1652 if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1653 net2272_pio_advance(ep);
1654 /* data packet(s) transmitted (IN) */
1655 else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1656 net2272_pio_advance(ep);
1657 }
1658
1659 static struct net2272_ep *
1660 net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1661 {
1662 struct net2272_ep *ep;
1663
1664 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1665 return &dev->ep[0];
1666
1667 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1668 u8 bEndpointAddress;
1669
1670 if (!ep->desc)
1671 continue;
1672 bEndpointAddress = ep->desc->bEndpointAddress;
1673 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1674 continue;
1675 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1676 return ep;
1677 }
1678 return NULL;
1679 }
1680
1681 /*
1682 * USB Test Packet:
1683 * JKJKJKJK * 9
1684 * JJKKJJKK * 8
1685 * JJJJKKKK * 8
1686 * JJJJJJJKKKKKKK * 8
1687 * JJJJJJJK * 8
1688 * {JKKKKKKK * 10}, JK
1689 */
1690 static const u8 net2272_test_packet[] = {
1691 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1692 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1693 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1694 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1695 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1696 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1697 };
1698
1699 static void
1700 net2272_set_test_mode(struct net2272 *dev, int mode)
1701 {
1702 int i;
1703
1704 /* Disable all net2272 interrupts:
1705 * Nothing but a power cycle should stop the test.
1706 */
1707 net2272_write(dev, IRQENB0, 0x00);
1708 net2272_write(dev, IRQENB1, 0x00);
1709
1710 /* Force transceiver to high-speed */
1711 net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1712
1713 net2272_write(dev, PAGESEL, 0);
1714 net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1715 net2272_write(dev, EP_RSPCLR,
1716 (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1717 | (1 << HIDE_STATUS_PHASE));
1718 net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1719 net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1720
1721 /* wait for status phase to complete */
1722 while (!(net2272_read(dev, EP_STAT0) &
1723 (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1724 ;
1725
1726 /* Enable test mode */
1727 net2272_write(dev, USBTEST, mode);
1728
1729 /* load test packet */
1730 if (mode == TEST_PACKET) {
1731 /* switch to 8 bit mode */
1732 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1733 ~(1 << DATA_WIDTH));
1734
1735 for (i = 0; i < sizeof(net2272_test_packet); ++i)
1736 net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1737
1738 /* Validate test packet */
1739 net2272_write(dev, EP_TRANSFER0, 0);
1740 }
1741 }
1742
1743 static void
1744 net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1745 {
1746 struct net2272_ep *ep;
1747 u8 num, scratch;
1748
1749 /* starting a control request? */
1750 if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1751 union {
1752 u8 raw[8];
1753 struct usb_ctrlrequest r;
1754 } u;
1755 int tmp = 0;
1756 struct net2272_request *req;
1757
1758 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1759 if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1760 dev->gadget.speed = USB_SPEED_HIGH;
1761 else
1762 dev->gadget.speed = USB_SPEED_FULL;
1763 dev_dbg(dev->dev, "%s speed\n",
1764 (dev->gadget.speed == USB_SPEED_HIGH) ? "high" : "full");
1765 }
1766
1767 ep = &dev->ep[0];
1768 ep->irqs++;
1769
1770 /* make sure any leftover interrupt state is cleared */
1771 stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1772 while (!list_empty(&ep->queue)) {
1773 req = list_entry(ep->queue.next,
1774 struct net2272_request, queue);
1775 net2272_done(ep, req,
1776 (req->req.actual == req->req.length) ? 0 : -EPROTO);
1777 }
1778 ep->stopped = 0;
1779 dev->protocol_stall = 0;
1780 net2272_ep_write(ep, EP_STAT0,
1781 (1 << DATA_IN_TOKEN_INTERRUPT)
1782 | (1 << DATA_OUT_TOKEN_INTERRUPT)
1783 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1784 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1785 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1786 net2272_ep_write(ep, EP_STAT1,
1787 (1 << TIMEOUT)
1788 | (1 << USB_OUT_ACK_SENT)
1789 | (1 << USB_OUT_NAK_SENT)
1790 | (1 << USB_IN_ACK_RCVD)
1791 | (1 << USB_IN_NAK_SENT)
1792 | (1 << USB_STALL_SENT)
1793 | (1 << LOCAL_OUT_ZLP));
1794
1795 /*
1796 * Ensure Control Read pre-validation setting is beyond maximum size
1797 * - Control Writes can leave non-zero values in EP_TRANSFER. If
1798 * an EP0 transfer following the Control Write is a Control Read,
1799 * the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1800 * pre-validation count.
1801 * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1802 * the pre-validation count cannot cause an unexpected validation
1803 */
1804 net2272_write(dev, PAGESEL, 0);
1805 net2272_write(dev, EP_TRANSFER2, 0xff);
1806 net2272_write(dev, EP_TRANSFER1, 0xff);
1807 net2272_write(dev, EP_TRANSFER0, 0xff);
1808
1809 u.raw[0] = net2272_read(dev, SETUP0);
1810 u.raw[1] = net2272_read(dev, SETUP1);
1811 u.raw[2] = net2272_read(dev, SETUP2);
1812 u.raw[3] = net2272_read(dev, SETUP3);
1813 u.raw[4] = net2272_read(dev, SETUP4);
1814 u.raw[5] = net2272_read(dev, SETUP5);
1815 u.raw[6] = net2272_read(dev, SETUP6);
1816 u.raw[7] = net2272_read(dev, SETUP7);
1817 /*
1818 * If you have a big endian cpu make sure le16_to_cpus
1819 * performs the proper byte swapping here...
1820 */
1821 le16_to_cpus(&u.r.wValue);
1822 le16_to_cpus(&u.r.wIndex);
1823 le16_to_cpus(&u.r.wLength);
1824
1825 /* ack the irq */
1826 net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1827 stat ^= (1 << SETUP_PACKET_INTERRUPT);
1828
1829 /* watch control traffic at the token level, and force
1830 * synchronization before letting the status phase happen.
1831 */
1832 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1833 if (ep->is_in) {
1834 scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1835 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1836 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1837 stop_out_naking(ep);
1838 } else
1839 scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1840 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1841 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1842 net2272_ep_write(ep, EP_IRQENB, scratch);
1843
1844 if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1845 goto delegate;
1846 switch (u.r.bRequest) {
1847 case USB_REQ_GET_STATUS: {
1848 struct net2272_ep *e;
1849 u16 status = 0;
1850
1851 switch (u.r.bRequestType & USB_RECIP_MASK) {
1852 case USB_RECIP_ENDPOINT:
1853 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1854 if (!e || u.r.wLength > 2)
1855 goto do_stall;
1856 if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1857 status = __constant_cpu_to_le16(1);
1858 else
1859 status = __constant_cpu_to_le16(0);
1860
1861 /* don't bother with a request object! */
1862 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1863 writew(status, net2272_reg_addr(dev, EP_DATA));
1864 set_fifo_bytecount(&dev->ep[0], 0);
1865 allow_status(ep);
1866 dev_vdbg(dev->dev, "%s stat %02x\n",
1867 ep->ep.name, status);
1868 goto next_endpoints;
1869 case USB_RECIP_DEVICE:
1870 if (u.r.wLength > 2)
1871 goto do_stall;
1872 if (dev->is_selfpowered)
1873 status = (1 << USB_DEVICE_SELF_POWERED);
1874
1875 /* don't bother with a request object! */
1876 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1877 writew(status, net2272_reg_addr(dev, EP_DATA));
1878 set_fifo_bytecount(&dev->ep[0], 0);
1879 allow_status(ep);
1880 dev_vdbg(dev->dev, "device stat %02x\n", status);
1881 goto next_endpoints;
1882 case USB_RECIP_INTERFACE:
1883 if (u.r.wLength > 2)
1884 goto do_stall;
1885
1886 /* don't bother with a request object! */
1887 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1888 writew(status, net2272_reg_addr(dev, EP_DATA));
1889 set_fifo_bytecount(&dev->ep[0], 0);
1890 allow_status(ep);
1891 dev_vdbg(dev->dev, "interface status %02x\n", status);
1892 goto next_endpoints;
1893 }
1894
1895 break;
1896 }
1897 case USB_REQ_CLEAR_FEATURE: {
1898 struct net2272_ep *e;
1899
1900 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1901 goto delegate;
1902 if (u.r.wValue != USB_ENDPOINT_HALT ||
1903 u.r.wLength != 0)
1904 goto do_stall;
1905 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1906 if (!e)
1907 goto do_stall;
1908 if (e->wedged) {
1909 dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1910 ep->ep.name);
1911 } else {
1912 dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1913 clear_halt(e);
1914 }
1915 allow_status(ep);
1916 goto next_endpoints;
1917 }
1918 case USB_REQ_SET_FEATURE: {
1919 struct net2272_ep *e;
1920
1921 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1922 if (u.r.wIndex != NORMAL_OPERATION)
1923 net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1924 allow_status(ep);
1925 dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1926 goto next_endpoints;
1927 } else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1928 goto delegate;
1929 if (u.r.wValue != USB_ENDPOINT_HALT ||
1930 u.r.wLength != 0)
1931 goto do_stall;
1932 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1933 if (!e)
1934 goto do_stall;
1935 set_halt(e);
1936 allow_status(ep);
1937 dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1938 goto next_endpoints;
1939 }
1940 case USB_REQ_SET_ADDRESS: {
1941 net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1942 allow_status(ep);
1943 break;
1944 }
1945 default:
1946 delegate:
1947 dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1948 "ep_cfg %08x\n",
1949 u.r.bRequestType, u.r.bRequest,
1950 u.r.wValue, u.r.wIndex,
1951 net2272_ep_read(ep, EP_CFG));
1952 spin_unlock(&dev->lock);
1953 tmp = dev->driver->setup(&dev->gadget, &u.r);
1954 spin_lock(&dev->lock);
1955 }
1956
1957 /* stall ep0 on error */
1958 if (tmp < 0) {
1959 do_stall:
1960 dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1961 u.r.bRequestType, u.r.bRequest, tmp);
1962 dev->protocol_stall = 1;
1963 }
1964 /* endpoint dma irq? */
1965 } else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1966 net2272_cancel_dma(dev);
1967 net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1968 stat &= ~(1 << DMA_DONE_INTERRUPT);
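/* DMAREQ's endpoint-select bit reports which endpoint owned the channel:
* ep-a (dev->ep[1]) or ep-b (dev->ep[2])
*/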
1969 num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1970 ? 2 : 1;
1971
1972 ep = &dev->ep[num];
1973 net2272_handle_dma(ep);
1974 }
1975
1976 next_endpoints:
1977 /* endpoint data irq? */
1978 scratch = stat & 0x0f;
1979 stat &= ~0x0f;
1980 for (num = 0; scratch; num++) {
1981 u8 t;
1982
1983 /* does this endpoint's FIFO and queue need tending? */
1984 t = 1 << num;
1985 if ((scratch & t) == 0)
1986 continue;
1987 scratch ^= t;
1988
1989 ep = &dev->ep[num];
1990 net2272_handle_ep(ep);
1991 }
1992
1993 /* some interrupts we can just ignore */
1994 stat &= ~(1 << SOF_INTERRUPT);
1995
1996 if (stat)
1997 dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1998 }
1999
2000 static void
2001 net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
2002 {
2003 u8 tmp, mask;
2004
2005 /* after disconnect there's nothing else to do! */
2006 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
2007 mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
2008
2009 if (stat & tmp) {
2010 net2272_write(dev, IRQSTAT1, tmp);
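/* treat it as a disconnect if a root-port reset arrives with neither
* speed bit set, or if the VBUS pin has dropped, provided a gadget
* was actually connected (speed != USB_SPEED_UNKNOWN)
*/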
2011 if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2012 ((net2272_read(dev, USBCTL1) & mask) == 0))
2013 || ((net2272_read(dev, USBCTL1) & (1 << VBUS_PIN))
2014 == 0))
2015 && (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
2016 dev_dbg(dev->dev, "disconnect %s\n",
2017 dev->driver->driver.name);
2018 stop_activity(dev, dev->driver);
2019 net2272_ep0_start(dev);
2020 return;
2021 }
2022 stat &= ~tmp;
2023
2024 if (!stat)
2025 return;
2026 }
2027
2028 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
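/* the change bit fires on both entry to and exit from suspend;
* SUSPEND_REQUEST_INTERRUPT distinguishes the two
*/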
2029 if (stat & tmp) {
2030 net2272_write(dev, IRQSTAT1, tmp);
2031 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2032 if (dev->driver->suspend)
2033 dev->driver->suspend(&dev->gadget);
2034 if (!enable_suspend) {
2035 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2036 dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2037 }
2038 } else {
2039 if (dev->driver->resume)
2040 dev->driver->resume(&dev->gadget);
2041 }
2042 stat &= ~tmp;
2043 }
2044
2045 /* clear any other status/irqs */
2046 if (stat)
2047 net2272_write(dev, IRQSTAT1, stat);
2048
2049 /* some status we can just ignore */
2050 stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2051 | (1 << SUSPEND_REQUEST_INTERRUPT)
2052 | (1 << RESUME_INTERRUPT));
2053 if (!stat)
2054 return;
2055 else
2056 dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2057 }
2058
2059 static irqreturn_t net2272_irq(int irq, void *_dev)
2060 {
2061 struct net2272 *dev = _dev;
2062 #if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2063 u32 intcsr;
2064 #endif
2065 #if defined(PLX_PCI_RDK)
2066 u8 dmareq;
2067 #endif
2068 spin_lock(&dev->lock);
2069 #if defined(PLX_PCI_RDK)
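/* RDK1: NET2272 interrupts arrive through the PLX 9054 bridge; mask the
* bridge's PCI interrupt while servicing IRQSTAT1/IRQSTAT0, then re-enable it
*/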
2070 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2071
2072 if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2073 writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2074 dev->rdk1.plx9054_base_addr + INTCSR);
2075 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2076 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2077 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2078 writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2079 dev->rdk1.plx9054_base_addr + INTCSR);
2080 }
2081 if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2082 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2083 dev->rdk1.plx9054_base_addr + DMACSR0);
2084
2085 dmareq = net2272_read(dev, DMAREQ);
2086 if (dmareq & 0x01)
2087 net2272_handle_dma(&dev->ep[2]);
2088 else
2089 net2272_handle_dma(&dev->ep[1]);
2090 }
2091 #endif
2092 #if defined(PLX_PCI_RDK2)
2093 /* see if PCI int for us by checking irqstat */
2094 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
		spin_unlock(&dev->lock);
		return IRQ_NONE;
	}
2097 /* check dma interrupts */
2098 #endif
2099 /* Platform/device interrupt handler */
2100 #if !defined(PLX_PCI_RDK)
2101 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2102 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2103 #endif
2104 spin_unlock(&dev->lock);
2105
2106 return IRQ_HANDLED;
2107 }
2108
2109 static int net2272_present(struct net2272 *dev)
2110 {
2111 /*
2112 * Quick test to see if CPU can communicate properly with the NET2272.
2113 * Verifies connection using writes and reads to write/read and
2114 * read-only registers.
2115 *
2116 * This routine is strongly recommended, especially during early bring-up
2117 * of new hardware. For designs that do not run Power On System
2118 * Tests (POST), it may be discarded (or at least minimized).
2119 */
2120 unsigned int ii;
2121 u8 val, refval;
2122
2123 /* Verify NET2272 write/read SCRATCH register can write and read */
2124 refval = net2272_read(dev, SCRATCH);
2125 for (ii = 0; ii < 0x100; ii += 7) {
2126 net2272_write(dev, SCRATCH, ii);
2127 val = net2272_read(dev, SCRATCH);
2128 if (val != ii) {
2129 dev_dbg(dev->dev,
2130 "%s: write/read SCRATCH register test failed: "
2131 "wrote:0x%2.2x, read:0x%2.2x\n",
2132 __func__, ii, val);
2133 return -EINVAL;
2134 }
2135 }
2136 /* To be nice, we write the original SCRATCH value back: */
2137 net2272_write(dev, SCRATCH, refval);
2138
2139 /* Verify NET2272 CHIPREV register is read-only: */
2140 refval = net2272_read(dev, CHIPREV_2272);
2141 for (ii = 0; ii < 0x100; ii += 7) {
2142 net2272_write(dev, CHIPREV_2272, ii);
2143 val = net2272_read(dev, CHIPREV_2272);
2144 if (val != refval) {
2145 dev_dbg(dev->dev,
2146 "%s: write/read CHIPREV register test failed: "
2147 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2148 __func__, ii, val, refval);
2149 return -EINVAL;
2150 }
2151 }
2152
2153 /*
2154 * Verify NET2272's "NET2270 legacy revision" register
2155 * - NET2272 has two revision registers. The NET2270 legacy revision
2156 * register should read the same value, regardless of the NET2272
2157 * silicon revision. The legacy register exists so that firmware
2158 * written for the NET2270 can also run on the NET2272.
2159 */
2160 val = net2272_read(dev, CHIPREV_LEGACY);
2161 if (val != NET2270_LEGACY_REV) {
2162 /*
2163 * Unexpected legacy revision value
2164 * - Perhaps the chip is a NET2270?
2165 */
2166 dev_dbg(dev->dev,
2167 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2168 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2169 __func__, NET2270_LEGACY_REV, val);
2170 return -EINVAL;
2171 }
2172
2173 /*
2174 * Verify NET2272 silicon revision
2175 * - This revision register is appropriate for the silicon version
2176 * of the NET2272
2177 */
2178 val = net2272_read(dev, CHIPREV_2272);
2179 switch (val) {
2180 case CHIPREV_NET2272_R1:
2181 /*
2182 * NET2272 Rev 1 has DMA related errata:
2183 * - Newer silicon (Rev 1A or better) required
2184 */
2185 dev_dbg(dev->dev,
2186 "%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2187 __func__);
2188 break;
2189 case CHIPREV_NET2272_R1A:
2190 break;
2191 default:
2192 /* NET2272 silicon version *may* not work with this firmware */
2193 dev_dbg(dev->dev,
2194 "%s: unexpected silicon revision register value: "
2195 " CHIPREV_2272: 0x%2.2x\n",
2196 __func__, val);
2197 /*
2198 * Return Success, even though the chip rev is not an expected value
2199 * - Older, pre-built firmware can attempt to operate on newer silicon
2200 * - Often, new silicon is perfectly compatible
2201 */
2202 }
2203
2204 /* Success: NET2272 checks out OK */
2205 return 0;
2206 }
2207
2208 static void
2209 net2272_gadget_release(struct device *_dev)
2210 {
2211 struct net2272 *dev = dev_get_drvdata(_dev);
2212 kfree(dev);
2213 }
2214
2215 /*---------------------------------------------------------------------------*/
2216
2217 static void __devexit
2218 net2272_remove(struct net2272 *dev)
2219 {
2220 /* start with the driver above us */
2221 if (dev->driver) {
2222 /* should have been done already by driver model core */
2223 dev_warn(dev->dev, "remove, driver '%s' is still registered\n",
2224 dev->driver->driver.name);
2225 usb_gadget_unregister_driver(dev->driver);
2226 }
2227
2228 free_irq(dev->irq, dev);
2229 iounmap(dev->base_addr);
2230
2231 device_unregister(&dev->gadget.dev);
2232 device_remove_file(dev->dev, &dev_attr_registers);
2233
2234 dev_info(dev->dev, "unbind\n");
2235 the_controller = NULL;
2236 }
2237
2238 static struct net2272 * __devinit
2239 net2272_probe_init(struct device *dev, unsigned int irq)
2240 {
2241 struct net2272 *ret;
2242
2243 if (the_controller) {
2244 dev_warn(dev, "ignoring\n");
2245 return ERR_PTR(-EBUSY);
2246 }
2247
2248 if (!irq) {
2249 dev_dbg(dev, "No IRQ!\n");
2250 return ERR_PTR(-ENODEV);
2251 }
2252
2253 /* alloc, and start init */
2254 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2255 if (!ret)
2256 return ERR_PTR(-ENOMEM);
2257
2258 spin_lock_init(&ret->lock);
2259 ret->irq = irq;
2260 ret->dev = dev;
2261 ret->gadget.ops = &net2272_ops;
2262 ret->gadget.is_dualspeed = 1;
2263
2264 /* the "gadget" abstracts/virtualizes the controller */
2265 dev_set_name(&ret->gadget.dev, "gadget");
2266 ret->gadget.dev.parent = dev;
2267 ret->gadget.dev.dma_mask = dev->dma_mask;
2268 ret->gadget.dev.release = net2272_gadget_release;
2269 ret->gadget.name = driver_name;
2270
2271 return ret;
2272 }
2273
2274 static int __devinit
2275 net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2276 {
2277 int ret;
2278
2279 /* See if the chip is actually there */
2280 if (net2272_present(dev)) {
2281 dev_warn(dev->dev, "2272 not found!\n");
2282 ret = -ENODEV;
2283 goto err;
2284 }
2285
2286 net2272_usb_reset(dev);
2287 net2272_usb_reinit(dev);
2288
2289 ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2290 if (ret) {
2291 dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2292 goto err;
2293 }
2294
2295 dev->chiprev = net2272_read(dev, CHIPREV_2272);
2296
2297 /* done */
2298 dev_info(dev->dev, "%s\n", driver_desc);
2299 dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2300 dev->irq, dev->base_addr, dev->chiprev,
2301 dma_mode_string());
2302 dev_info(dev->dev, "version: %s\n", driver_vers);
2303
2304 the_controller = dev;
2305
2306 ret = device_register(&dev->gadget.dev);
2307 if (ret)
2308 goto err_irq;
2309 ret = device_create_file(dev->dev, &dev_attr_registers);
2310 if (ret)
2311 goto err_dev_reg;
2312
2313 return 0;
2314
2315 err_dev_reg:
2316 device_unregister(&dev->gadget.dev);
2317 err_irq:
2318 free_irq(dev->irq, dev);
2319 err:
2320 return ret;
2321 }
2322
2323 #ifdef CONFIG_PCI
2324
2325 /*
2326 * wrap this driver around the specified device, but
2327 * don't respond over USB until a gadget driver binds to us
2328 */
2329
2330 static int __devinit
2331 net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2332 {
2333 unsigned long resource, len, tmp;
2334 void __iomem *mem_mapped_addr[4];
2335 int ret, i;
2336
2337 /*
2338 * BAR 0 holds PLX 9054 config registers
2339 * BAR 1 is i/o memory; unused here
2340 * BAR 2 holds EPLD config registers
2341 * BAR 3 holds NET2272 registers
2342 */
2343
2344 /* Find and map all address spaces */
2345 for (i = 0; i < 4; ++i) {
2346 if (i == 1)
2347 continue; /* BAR1 unused */
2348
2349 resource = pci_resource_start(pdev, i);
2350 len = pci_resource_len(pdev, i);
2351
2352 if (!request_mem_region(resource, len, driver_name)) {
2353 dev_dbg(dev->dev, "controller already in use\n");
2354 ret = -EBUSY;
2355 goto err;
2356 }
2357
2358 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2359 if (mem_mapped_addr[i] == NULL) {
2360 release_mem_region(resource, len);
2361 dev_dbg(dev->dev, "can't map memory\n");
2362 ret = -EFAULT;
2363 goto err;
2364 }
2365 }
2366
2367 dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2368 dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2369 dev->base_addr = mem_mapped_addr[3];
2370
2371 /* Set PLX 9054 bus width (16 bits) */
2372 tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2373 writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2374 dev->rdk1.plx9054_base_addr + LBRD1);
2375
2376 /* Enable PLX 9054 Interrupts */
2377 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2378 (1 << PCI_INTERRUPT_ENABLE) |
2379 (1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2380 dev->rdk1.plx9054_base_addr + INTCSR);
2381
2382 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2383 dev->rdk1.plx9054_base_addr + DMACSR0);
2384
2385 /* reset */
2386 writeb((1 << EPLD_DMA_ENABLE) |
2387 (1 << DMA_CTL_DACK) |
2388 (1 << DMA_TIMEOUT_ENABLE) |
2389 (1 << USER) |
2390 (0 << MPX_MODE) |
2391 (1 << BUSWIDTH) |
2392 (1 << NET2272_RESET),
2393 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2394
2395 mb();
2396 writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2397 ~(1 << NET2272_RESET),
2398 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2399 udelay(200);
2400
2401 return 0;
2402
2403 err:
2404 while (--i >= 0) {
2405 iounmap(mem_mapped_addr[i]);
2406 release_mem_region(pci_resource_start(pdev, i),
2407 pci_resource_len(pdev, i));
2408 }
2409
2410 return ret;
2411 }
2412
2413 static int __devinit
2414 net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2415 {
2416 unsigned long resource, len;
2417 void __iomem *mem_mapped_addr[2];
2418 int ret, i;
2419
2420 /*
2421 * BAR 0 holds FPGA config registers
2422 * BAR 1 holds NET2272 registers
2423 */
2424
2425 /* Find and map all address spaces, bar2-3 unused in rdk 2 */
2426 for (i = 0; i < 2; ++i) {
2427 resource = pci_resource_start(pdev, i);
2428 len = pci_resource_len(pdev, i);
2429
2430 if (!request_mem_region(resource, len, driver_name)) {
2431 dev_dbg(dev->dev, "controller already in use\n");
2432 ret = -EBUSY;
2433 goto err;
2434 }
2435
2436 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2437 if (mem_mapped_addr[i] == NULL) {
2438 release_mem_region(resource, len);
2439 dev_dbg(dev->dev, "can't map memory\n");
2440 ret = -EFAULT;
2441 goto err;
2442 }
2443 }
2444
2445 dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2446 dev->base_addr = mem_mapped_addr[1];
2447
2448 mb();
2449 /* Set 2272 bus width (16 bits) and reset */
2450 writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2451 udelay(200);
2452 writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2453 /* Print fpga version number */
2454 dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2455 readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2456 /* Enable FPGA Interrupts */
2457 writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2458
2459 return 0;
2460
2461 err:
2462 while (--i >= 0) {
2463 iounmap(mem_mapped_addr[i]);
2464 release_mem_region(pci_resource_start(pdev, i),
2465 pci_resource_len(pdev, i));
2466 }
2467
2468 return ret;
2469 }
2470
2471 static int __devinit
2472 net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2473 {
2474 struct net2272 *dev;
2475 int ret;
2476
2477 dev = net2272_probe_init(&pdev->dev, pdev->irq);
2478 if (IS_ERR(dev))
2479 return PTR_ERR(dev);
2480 dev->dev_id = pdev->device;
2481
2482 if (pci_enable_device(pdev) < 0) {
2483 ret = -ENODEV;
2484 goto err_free;
2485 }
2486
2487 pci_set_master(pdev);
2488
2489 switch (pdev->device) {
2490 case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2491 case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2492 default: BUG();
2493 }
2494 if (ret)
2495 goto err_pci;
2496
2497 ret = net2272_probe_fin(dev, 0);
2498 if (ret)
2499 goto err_pci;
2500
2501 pci_set_drvdata(pdev, dev);
2502
2503 return 0;
2504
2505 err_pci:
2506 pci_disable_device(pdev);
2507 err_free:
2508 kfree(dev);
2509
2510 return ret;
2511 }
2512
2513 static void __devexit
2514 net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2515 {
2516 int i;
2517
2518 /* disable PLX 9054 interrupts */
2519 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2520 ~(1 << PCI_INTERRUPT_ENABLE),
2521 dev->rdk1.plx9054_base_addr + INTCSR);
2522
2523 /* clean up resources allocated during probe() */
2524 iounmap(dev->rdk1.plx9054_base_addr);
2525 iounmap(dev->rdk1.epld_base_addr);
2526
2527 for (i = 0; i < 4; ++i) {
2528 if (i == 1)
2529 continue; /* BAR1 unused */
2530 release_mem_region(pci_resource_start(pdev, i),
2531 pci_resource_len(pdev, i));
2532 }
2533 }
2534
2535 static void __devexit
2536 net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2537 {
2538 int i;
2539
2540 /* disable fpga interrupts
2541 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2542 ~(1 << PCI_INTERRUPT_ENABLE),
2543 dev->rdk1.plx9054_base_addr + INTCSR);
2544 */
2545
2546 /* clean up resources allocated during probe() */
2547 iounmap(dev->rdk2.fpga_base_addr);
2548
2549 for (i = 0; i < 2; ++i)
2550 release_mem_region(pci_resource_start(pdev, i),
2551 pci_resource_len(pdev, i));
2552 }
2553
2554 static void __devexit
2555 net2272_pci_remove(struct pci_dev *pdev)
2556 {
2557 struct net2272 *dev = pci_get_drvdata(pdev);
2558
2559 net2272_remove(dev);
2560
2561 switch (pdev->device) {
2562 case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2563 case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2564 default: BUG();
2565 }
2566
2567 pci_disable_device(pdev);
2568
2569 kfree(dev);
2570 }
2571
2572 /* Table of matching PCI IDs */
2573 static struct pci_device_id __devinitdata pci_ids[] = {
2574 { /* RDK 1 card */
2575 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2576 .class_mask = 0,
2577 .vendor = PCI_VENDOR_ID_PLX,
2578 .device = PCI_DEVICE_ID_RDK1,
2579 .subvendor = PCI_ANY_ID,
2580 .subdevice = PCI_ANY_ID,
2581 },
2582 { /* RDK 2 card */
2583 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2584 .class_mask = 0,
2585 .vendor = PCI_VENDOR_ID_PLX,
2586 .device = PCI_DEVICE_ID_RDK2,
2587 .subvendor = PCI_ANY_ID,
2588 .subdevice = PCI_ANY_ID,
2589 },
2590 { }
2591 };
2592 MODULE_DEVICE_TABLE(pci, pci_ids);
2593
2594 static struct pci_driver net2272_pci_driver = {
2595 .name = driver_name,
2596 .id_table = pci_ids,
2597
2598 .probe = net2272_pci_probe,
2599 .remove = __devexit_p(net2272_pci_remove),
2600 };
2601
2602 #else
2603 # define pci_register_driver(x) 0
2604 # define pci_unregister_driver(x) do { } while (0)
2605 #endif
2606
2607 /*---------------------------------------------------------------------------*/
2608
2609 static int __devinit
2610 net2272_plat_probe(struct platform_device *pdev)
2611 {
2612 struct net2272 *dev;
2613 int ret;
2614 unsigned int irqflags;
2615 resource_size_t base, len;
2616 struct resource *iomem, *iomem_bus, *irq_res;
2617
2618 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2619 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2620 iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2621 if (!irq_res || !iomem) {
2622 dev_err(&pdev->dev, "must provide irq/base addr\n");
2623 return -EINVAL;
2624 }
2625
2626 dev = net2272_probe_init(&pdev->dev, irq_res->start);
2627 if (IS_ERR(dev))
2628 return PTR_ERR(dev);
2629
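/* translate the IRQ resource's trigger flags into request_irq() trigger types */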
2630 irqflags = 0;
2631 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2632 irqflags |= IRQF_TRIGGER_RISING;
2633 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2634 irqflags |= IRQF_TRIGGER_FALLING;
2635 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2636 irqflags |= IRQF_TRIGGER_HIGH;
2637 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2638 irqflags |= IRQF_TRIGGER_LOW;
2639
2640 base = iomem->start;
2641 len = resource_size(iomem);
2642 if (iomem_bus)
2643 dev->base_shift = iomem_bus->start;
2644
2645 if (!request_mem_region(base, len, driver_name)) {
2646 dev_dbg(dev->dev, "get request memory region!\n");
2647 ret = -EBUSY;
2648 goto err;
2649 }
2650 dev->base_addr = ioremap_nocache(base, len);
2651 if (!dev->base_addr) {
2652 dev_dbg(dev->dev, "can't map memory\n");
2653 ret = -EFAULT;
2654 goto err_req;
2655 }
2656
2657 ret = net2272_probe_fin(dev, irqflags);
2658 if (ret)
2659 goto err_io;
2660
2661 platform_set_drvdata(pdev, dev);
2662 dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2663 (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2664
2665 the_controller = dev;
2666
2667 return 0;
2668
2669 err_io:
2670 iounmap(dev->base_addr);
2671 err_req:
2672 release_mem_region(base, len);
2673 err:
2674 return ret;
2675 }
2676
2677 static int __devexit
2678 net2272_plat_remove(struct platform_device *pdev)
2679 {
2680 struct net2272 *dev = platform_get_drvdata(pdev);
2681
2682 net2272_remove(dev);
2683
2684 release_mem_region(pdev->resource[0].start,
2685 resource_size(&pdev->resource[0]));
2686
2687 kfree(dev);
2688
2689 return 0;
2690 }
2691
2692 static struct platform_driver net2272_plat_driver = {
2693 .probe = net2272_plat_probe,
2694 .remove = __devexit_p(net2272_plat_remove),
2695 .driver = {
2696 .name = driver_name,
2697 .owner = THIS_MODULE,
2698 },
2699 /* FIXME .suspend, .resume */
2700 };
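/*
* Example (hypothetical) board glue for the platform bus: this driver binds
* to a platform_device named "net2272" that provides the IORESOURCE_MEM and
* IORESOURCE_IRQ resources read by net2272_plat_probe() above.  The base
* address, window length and interrupt number shown here are placeholders,
* not values taken from this driver:
*
*	static struct resource board_net2272_resources[] = {
*		{
*			.start	= 0x20300000,
*			.end	= 0x20300000 + 0x100 - 1,
*			.flags	= IORESOURCE_MEM,
*		}, {
*			.start	= 64,
*			.end	= 64,
*			.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
*		},
*	};
*
*	static struct platform_device board_net2272_device = {
*		.name		= "net2272",
*		.id		= -1,
*		.resource	= board_net2272_resources,
*		.num_resources	= ARRAY_SIZE(board_net2272_resources),
*	};
*
*	platform_device_register(&board_net2272_device);
*
* An additional IORESOURCE_BUS resource may be supplied; its start value is
* used by the probe above to set dev->base_shift.
*/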
2701
static int __init net2272_init(void)
{
	int ret = pci_register_driver(&net2272_pci_driver);

	if (ret)
		return ret;
	ret = platform_driver_register(&net2272_plat_driver);
	if (ret)
		pci_unregister_driver(&net2272_pci_driver);
	return ret;
}
2707 module_init(net2272_init);
2708
2709 static void __exit net2272_cleanup(void)
2710 {
2711 pci_unregister_driver(&net2272_pci_driver);
2712 platform_driver_unregister(&net2272_plat_driver);
2713 }
2714 module_exit(net2272_cleanup);
2715
2716 MODULE_DESCRIPTION(DRIVER_DESC);
2717 MODULE_AUTHOR("PLX Technology, Inc.");
2718 MODULE_LICENSE("GPL");