1 /*
2 * USB Network driver infrastructure
3 * Copyright (C) 2000-2005 by David Brownell
4 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21 /*
22 * This is a generic "USB networking" framework that works with several
23 * kinds of full and high speed networking devices: host-to-host cables,
24 * smart usb peripherals, and actual Ethernet adapters.
25 *
26 * These devices usually differ in terms of control protocols (if they
27 * even have one!) and sometimes they define new framing to wrap or batch
28 * Ethernet packets. Otherwise, they talk to USB pretty much the same,
29 * so interface (un)binding, endpoint I/O queues, fault handling, and other
30 * issues can usefully be addressed by this framework.
31 */
32
33 // #define DEBUG // error path messages, extra info
34 // #define VERBOSE // more; success messages
35
36 #include <linux/module.h>
37 #include <linux/sched.h>
38 #include <linux/init.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/ethtool.h>
42 #include <linux/workqueue.h>
43 #include <linux/mii.h>
44 #include <linux/usb.h>
45
46 #include "usbnet.h"
47
48 #define DRIVER_VERSION "22-Aug-2005"
49
50
51 /*-------------------------------------------------------------------------*/
52
53 /*
54 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
55 * Several dozen bytes of IPv4 data can fit in two such transactions.
56 * One maximum size Ethernet packet takes twenty four of them.
57 * For high speed, each frame comfortably fits almost 36 max size
58 * Ethernet packets (so queues should be bigger).
59 *
60 * REVISIT qlens should be members of 'struct usbnet'; the goal is to
61 * let the USB host controller be busy for 5msec or more before an irq
62 * is required, under load. Jumbograms change the equation.
63 */
64 #define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? 60 : 4)
65 #define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? 60 : 4)
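/* Rough arithmetic behind the estimates above: full speed bulk moves at
 * most 19 * 64 = 1216 bytes per 1 msec frame, so a 1514 byte Ethernet
 * frame needs 24 such transactions (about 1.3 msec); high speed bulk
 * moves up to 13 * 512 bytes per 125 usec microframe, roughly 53 KB
 * per msec, or about 35 max size Ethernet frames.
 */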
66
67 // reawaken network queue this soon after stopping; else watchdog barks
68 #define TX_TIMEOUT_JIFFIES (5*HZ)
69
70 // throttle rx/tx briefly after some faults, so khubd might disconnect()
71 // us (it polls at HZ/4 usually) before we report too many false errors.
72 #define THROTTLE_JIFFIES (HZ/8)
73
74 // between wakeups
75 #define UNLINK_TIMEOUT_MS 3
76
77 /*-------------------------------------------------------------------------*/
78
79 // randomly generated ethernet address
80 static u8 node_id [ETH_ALEN];
81
82 static const char driver_name [] = "usbnet";
83
84 /* use ethtool to change the level for any given device */
85 static int msg_level = -1;
86 module_param (msg_level, int, 0);
87 MODULE_PARM_DESC (msg_level, "Override default message level");
88
89 /*-------------------------------------------------------------------------*/
90
91 /* handles CDC Ethernet and many other network "bulk data" interfaces */
92 int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
93 {
94 int tmp;
95 struct usb_host_interface *alt = NULL;
96 struct usb_host_endpoint *in = NULL, *out = NULL;
97 struct usb_host_endpoint *status = NULL;
98
99 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
100 unsigned ep;
101
102 in = out = status = NULL;
103 alt = intf->altsetting + tmp;
104
105 /* take the first altsetting with in-bulk + out-bulk;
106 * remember any status endpoint, just in case;
107 * ignore other endpoints and altsettings.
108 */
109 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
110 struct usb_host_endpoint *e;
111 int intr = 0;
112
113 e = alt->endpoint + ep;
114 switch (e->desc.bmAttributes) {
115 case USB_ENDPOINT_XFER_INT:
116 if (!(e->desc.bEndpointAddress & USB_DIR_IN))
117 continue;
118 intr = 1;
119 /* FALLTHROUGH */
120 case USB_ENDPOINT_XFER_BULK:
121 break;
122 default:
123 continue;
124 }
125 if (e->desc.bEndpointAddress & USB_DIR_IN) {
126 if (!intr && !in)
127 in = e;
128 else if (intr && !status)
129 status = e;
130 } else {
131 if (!out)
132 out = e;
133 }
134 }
135 if (in && out)
136 break;
137 }
138 if (!alt || !in || !out)
139 return -EINVAL;
140
141 if (alt->desc.bAlternateSetting != 0
142 || !(dev->driver_info->flags & FLAG_NO_SETINT)) {
143 tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
144 alt->desc.bAlternateSetting);
145 if (tmp < 0)
146 return tmp;
147 }
148
149 dev->in = usb_rcvbulkpipe (dev->udev,
150 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
151 dev->out = usb_sndbulkpipe (dev->udev,
152 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
153 dev->status = status;
154 return 0;
155 }
156 EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
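/* A minimal sketch (not built) of how a minidriver's bind() callback
 * might use usbnet_get_endpoints(); "example_bind" and the body shown
 * here are hypothetical.
 */
#if 0
static int example_bind (struct usbnet *dev, struct usb_interface *intf)
{
	int status;

	/* discover bulk-in/bulk-out (and any status) endpoints */
	status = usbnet_get_endpoints (dev, intf);
	if (status < 0)
		return status;

	/* device specific setup (e.g. reading the MAC address) goes here */
	return 0;
}
#endif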
157
158 static void intr_complete (struct urb *urb, struct pt_regs *regs);
159
160 static int init_status (struct usbnet *dev, struct usb_interface *intf)
161 {
162 char *buf = NULL;
163 unsigned pipe = 0;
164 unsigned maxp;
165 unsigned period;
166
167 if (!dev->driver_info->status)
168 return 0;
169
170 pipe = usb_rcvintpipe (dev->udev,
171 dev->status->desc.bEndpointAddress
172 & USB_ENDPOINT_NUMBER_MASK);
173 maxp = usb_maxpacket (dev->udev, pipe, 0);
174
175 /* avoid 1 msec chatter: min 8 msec poll rate */
176 period = max ((int) dev->status->desc.bInterval,
177 (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
178
179 buf = kmalloc (maxp, SLAB_KERNEL);
180 if (buf) {
181 dev->interrupt = usb_alloc_urb (0, SLAB_KERNEL);
182 if (!dev->interrupt) {
183 kfree (buf);
184 return -ENOMEM;
185 } else {
186 usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
187 buf, maxp, intr_complete, dev, period);
188 dev_dbg(&intf->dev,
189 "status ep%din, %d bytes period %d\n",
190 usb_pipeendpoint(pipe), maxp, period);
191 }
192 }
193 return 0;
194 }
195
196 /* Passes this packet up the stack, updating its accounting.
197 * Some link protocols batch packets, so their rx_fixup paths
198 * can return clones as well as just modify the original skb.
199 */
200 void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
201 {
202 int status;
203
204 skb->dev = dev->net;
205 skb->protocol = eth_type_trans (skb, dev->net);
206 dev->stats.rx_packets++;
207 dev->stats.rx_bytes += skb->len;
208
209 if (netif_msg_rx_status (dev))
210 devdbg (dev, "< rx, len %zu, type 0x%x",
211 skb->len + sizeof (struct ethhdr), skb->protocol);
212 memset (skb->cb, 0, sizeof (struct skb_data));
213 status = netif_rx (skb);
214 if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
215 devdbg (dev, "netif_rx status %d", status);
216 }
217 EXPORT_SYMBOL_GPL(usbnet_skb_return);
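/* Sketch (not built) of an rx_fixup() hook for a hypothetical framing
 * that prefixes each frame with a 2-byte little-endian length; the name
 * and framing are illustrative only.  Batching protocols would instead
 * clone one skb per frame and hand each clone to usbnet_skb_return().
 * Returning nonzero means success; zero makes rx_process() count an
 * rx error and discard the skb.
 */
#if 0
static int example_rx_fixup (struct usbnet *dev, struct sk_buff *skb)
{
	unsigned length;

	if (skb->len < 2)
		return 0;
	length = skb->data [0] | (skb->data [1] << 8);
	skb_pull (skb, 2);
	if (length > skb->len)
		return 0;
	skb_trim (skb, length);
	return 1;
}
#endif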
218
219 \f
220 /*-------------------------------------------------------------------------
221 *
222 * Network Device Driver (peer link to "Host Device", from USB host)
223 *
224 *-------------------------------------------------------------------------*/
225
226 static int usbnet_change_mtu (struct net_device *net, int new_mtu)
227 {
228 struct usbnet *dev = netdev_priv(net);
229 int ll_mtu = new_mtu + net->hard_header_len;
230
231 if (new_mtu <= 0 || ll_mtu > dev->hard_mtu)
232 return -EINVAL;
233 // no second zero-length packet read wanted after mtu-sized packets
234 if ((ll_mtu % dev->maxpacket) == 0)
235 return -EDOM;
236 net->mtu = new_mtu;
237 return 0;
238 }
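/* Example of the -EDOM rule above, assuming 64 byte full speed bulk
 * packets and a 14 byte Ethernet header: new_mtu = 1522 would give
 * ll_mtu = 1536 = 24 * 64, so every mtu-sized frame would end exactly
 * on a packet boundary and need a trailing zero-length packet to
 * terminate the transfer; such MTUs are therefore refused.
 */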
239
240 /*-------------------------------------------------------------------------*/
241
242 static struct net_device_stats *usbnet_get_stats (struct net_device *net)
243 {
244 struct usbnet *dev = netdev_priv(net);
245 return &dev->stats;
246 }
247
248 /*-------------------------------------------------------------------------*/
249
250 /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
251 * completion callbacks. 2.5 should have fixed those bugs...
252 */
253
254 static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
255 {
256 unsigned long flags;
257
258 spin_lock_irqsave(&list->lock, flags);
259 __skb_unlink(skb, list);
260 spin_unlock(&list->lock);
261 spin_lock(&dev->done.lock);
262 __skb_queue_tail(&dev->done, skb);
263 if (dev->done.qlen == 1)
264 tasklet_schedule(&dev->bh);
265 spin_unlock_irqrestore(&dev->done.lock, flags);
266 }
267
268 /* some work can't be done in tasklets, so we use keventd
269 *
270 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
271 * but tasklet_schedule() doesn't. hope the failure is rare.
272 */
273 void usbnet_defer_kevent (struct usbnet *dev, int work)
274 {
275 set_bit (work, &dev->flags);
276 if (!schedule_work (&dev->kevent))
277 deverr (dev, "kevent %d may have been dropped", work);
278 else
279 devdbg (dev, "kevent %d scheduled", work);
280 }
281 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
282
283 /*-------------------------------------------------------------------------*/
284
285 static void rx_complete (struct urb *urb, struct pt_regs *regs);
286
287 static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
288 {
289 struct sk_buff *skb;
290 struct skb_data *entry;
291 int retval = 0;
292 unsigned long lockflags;
293 size_t size = dev->rx_urb_size;
294
295 if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
296 if (netif_msg_rx_err (dev))
297 devdbg (dev, "no rx skb");
298 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
299 usb_free_urb (urb);
300 return;
301 }
302 skb_reserve (skb, NET_IP_ALIGN);
303
304 entry = (struct skb_data *) skb->cb;
305 entry->urb = urb;
306 entry->dev = dev;
307 entry->state = rx_start;
308 entry->length = 0;
309
310 usb_fill_bulk_urb (urb, dev->udev, dev->in,
311 skb->data, size, rx_complete, skb);
312
313 spin_lock_irqsave (&dev->rxq.lock, lockflags);
314
315 if (netif_running (dev->net)
316 && netif_device_present (dev->net)
317 && !test_bit (EVENT_RX_HALT, &dev->flags)) {
318 switch (retval = usb_submit_urb (urb, GFP_ATOMIC)){
319 case -EPIPE:
320 usbnet_defer_kevent (dev, EVENT_RX_HALT);
321 break;
322 case -ENOMEM:
323 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
324 break;
325 case -ENODEV:
326 if (netif_msg_ifdown (dev))
327 devdbg (dev, "device gone");
328 netif_device_detach (dev->net);
329 break;
330 default:
331 if (netif_msg_rx_err (dev))
332 devdbg (dev, "rx submit, %d", retval);
333 tasklet_schedule (&dev->bh);
334 break;
335 case 0:
336 __skb_queue_tail (&dev->rxq, skb);
337 }
338 } else {
339 if (netif_msg_ifdown (dev))
340 devdbg (dev, "rx: stopped");
341 retval = -ENOLINK;
342 }
343 spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
344 if (retval) {
345 dev_kfree_skb_any (skb);
346 usb_free_urb (urb);
347 }
348 }
349
350
351 /*-------------------------------------------------------------------------*/
352
353 static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
354 {
355 if (dev->driver_info->rx_fixup
356 && !dev->driver_info->rx_fixup (dev, skb))
357 goto error;
358 // else network stack removes extra byte if we forced a short packet
359
360 if (skb->len)
361 usbnet_skb_return (dev, skb);
362 else {
363 if (netif_msg_rx_err (dev))
364 devdbg (dev, "drop");
365 error:
366 dev->stats.rx_errors++;
367 skb_queue_tail (&dev->done, skb);
368 }
369 }
370
371 /*-------------------------------------------------------------------------*/
372
373 static void rx_complete (struct urb *urb, struct pt_regs *regs)
374 {
375 struct sk_buff *skb = (struct sk_buff *) urb->context;
376 struct skb_data *entry = (struct skb_data *) skb->cb;
377 struct usbnet *dev = entry->dev;
378 int urb_status = urb->status;
379
380 skb_put (skb, urb->actual_length);
381 entry->state = rx_done;
382 entry->urb = NULL;
383
384 switch (urb_status) {
385 // success
386 case 0:
387 if (skb->len < dev->net->hard_header_len) {
388 entry->state = rx_cleanup;
389 dev->stats.rx_errors++;
390 dev->stats.rx_length_errors++;
391 if (netif_msg_rx_err (dev))
392 devdbg (dev, "rx length %d", skb->len);
393 }
394 break;
395
396 // stalls need manual reset. this is rare ... except that
397 // when going through USB 2.0 TTs, unplug appears this way.
398 // we avoid the highspeed version of the ETIMEDOUT/EILSEQ
399 // storm, recovering as needed.
400 case -EPIPE:
401 dev->stats.rx_errors++;
402 usbnet_defer_kevent (dev, EVENT_RX_HALT);
403 // FALLTHROUGH
404
405 // software-driven interface shutdown
406 case -ECONNRESET: // async unlink
407 case -ESHUTDOWN: // hardware gone
408 if (netif_msg_ifdown (dev))
409 devdbg (dev, "rx shutdown, code %d", urb_status);
410 goto block;
411
412 // we get controller i/o faults during khubd disconnect() delays.
413 // throttle down resubmits, to avoid log floods; just temporarily,
414 // so we still recover when the fault isn't a khubd delay.
415 case -EPROTO: // ehci
416 case -ETIMEDOUT: // ohci
417 case -EILSEQ: // uhci
418 dev->stats.rx_errors++;
419 if (!timer_pending (&dev->delay)) {
420 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
421 if (netif_msg_link (dev))
422 devdbg (dev, "rx throttle %d", urb_status);
423 }
424 block:
425 entry->state = rx_cleanup;
426 entry->urb = urb;
427 urb = NULL;
428 break;
429
430 // data overrun ... flush fifo?
431 case -EOVERFLOW:
432 dev->stats.rx_over_errors++;
433 // FALLTHROUGH
434
435 default:
436 entry->state = rx_cleanup;
437 dev->stats.rx_errors++;
438 if (netif_msg_rx_err (dev))
439 devdbg (dev, "rx status %d", urb_status);
440 break;
441 }
442
443 defer_bh(dev, skb, &dev->rxq);
444
445 if (urb) {
446 if (netif_running (dev->net)
447 && !test_bit (EVENT_RX_HALT, &dev->flags)) {
448 rx_submit (dev, urb, GFP_ATOMIC);
449 return;
450 }
451 usb_free_urb (urb);
452 }
453 if (netif_msg_rx_err (dev))
454 devdbg (dev, "no read resubmitted");
455 }
456
457 static void intr_complete (struct urb *urb, struct pt_regs *regs)
458 {
459 struct usbnet *dev = urb->context;
460 int status = urb->status;
461
462 switch (status) {
463 /* success */
464 case 0:
465 dev->driver_info->status(dev, urb);
466 break;
467
468 /* software-driven interface shutdown */
469 case -ENOENT: // urb killed
470 case -ESHUTDOWN: // hardware gone
471 if (netif_msg_ifdown (dev))
472 devdbg (dev, "intr shutdown, code %d", status);
473 return;
474
475 /* NOTE: not throttling like RX/TX, since this endpoint
476 * already polls infrequently
477 */
478 default:
479 devdbg (dev, "intr status %d", status);
480 break;
481 }
482
483 if (!netif_running (dev->net))
484 return;
485
486 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
487 status = usb_submit_urb (urb, GFP_ATOMIC);
488 if (status != 0 && netif_msg_timer (dev))
489 deverr(dev, "intr resubmit --> %d", status);
490 }
491
492 /*-------------------------------------------------------------------------*/
493
494 // unlink pending rx/tx; completion handlers do all other cleanup
495
496 static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
497 {
498 unsigned long flags;
499 struct sk_buff *skb, *skbnext;
500 int count = 0;
501
502 spin_lock_irqsave (&q->lock, flags);
503 for (skb = q->next; skb != (struct sk_buff *) q; skb = skbnext) {
504 struct skb_data *entry;
505 struct urb *urb;
506 int retval;
507
508 entry = (struct skb_data *) skb->cb;
509 urb = entry->urb;
510 skbnext = skb->next;
511
512 // during some PM-driven resume scenarios,
513 // these (async) unlinks complete immediately
514 retval = usb_unlink_urb (urb);
515 if (retval != -EINPROGRESS && retval != 0)
516 devdbg (dev, "unlink urb err, %d", retval);
517 else
518 count++;
519 }
520 spin_unlock_irqrestore (&q->lock, flags);
521 return count;
522 }
523
524
525 /*-------------------------------------------------------------------------*/
526
527 // precondition: never called in_interrupt
528
529 static int usbnet_stop (struct net_device *net)
530 {
531 struct usbnet *dev = netdev_priv(net);
532 int temp;
533 DECLARE_WAIT_QUEUE_HEAD (unlink_wakeup);
534 DECLARE_WAITQUEUE (wait, current);
535
536 netif_stop_queue (net);
537
538 if (netif_msg_ifdown (dev))
539 devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
540 dev->stats.rx_packets, dev->stats.tx_packets,
541 dev->stats.rx_errors, dev->stats.tx_errors
542 );
543
544 // ensure there are no more active urbs
545 add_wait_queue (&unlink_wakeup, &wait);
546 dev->wait = &unlink_wakeup;
547 temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);
548
549 // maybe wait for deletions to finish.
550 while (!skb_queue_empty(&dev->rxq) ||
551 !skb_queue_empty(&dev->txq) ||
552 !skb_queue_empty(&dev->done)) {
553 msleep(UNLINK_TIMEOUT_MS);
554 if (netif_msg_ifdown (dev))
555 devdbg (dev, "waited for %d urb completions", temp);
556 }
557 dev->wait = NULL;
558 remove_wait_queue (&unlink_wakeup, &wait);
559
560 usb_kill_urb(dev->interrupt);
561
562 /* deferred work (task, timer, softirq) must also stop.
563 * can't flush_scheduled_work() until we drop rtnl (later),
564 * else workers could deadlock; so make workers a NOP.
565 */
566 dev->flags = 0;
567 del_timer_sync (&dev->delay);
568 tasklet_kill (&dev->bh);
569
570 return 0;
571 }
572
573 /*-------------------------------------------------------------------------*/
574
575 // posts reads, and enables write queuing
576
577 // precondition: never called in_interrupt
578
579 static int usbnet_open (struct net_device *net)
580 {
581 struct usbnet *dev = netdev_priv(net);
582 int retval = 0;
583 struct driver_info *info = dev->driver_info;
584
585 // put into "known safe" state
586 if (info->reset && (retval = info->reset (dev)) < 0) {
587 if (netif_msg_ifup (dev))
588 devinfo (dev,
589 "open reset fail (%d) usbnet usb-%s-%s, %s",
590 retval,
591 dev->udev->bus->bus_name, dev->udev->devpath,
592 info->description);
593 goto done;
594 }
595
596 // insist peer be connected
597 if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
598 if (netif_msg_ifup (dev))
599 devdbg (dev, "can't open; %d", retval);
600 goto done;
601 }
602
603 /* start any status interrupt transfer */
604 if (dev->interrupt) {
605 retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
606 if (retval < 0) {
607 if (netif_msg_ifup (dev))
608 deverr (dev, "intr submit %d", retval);
609 goto done;
610 }
611 }
612
613 netif_start_queue (net);
614 if (netif_msg_ifup (dev)) {
615 char *framing;
616
617 if (dev->driver_info->flags & FLAG_FRAMING_NC)
618 framing = "NetChip";
619 else if (dev->driver_info->flags & FLAG_FRAMING_GL)
620 framing = "GeneSys";
621 else if (dev->driver_info->flags & FLAG_FRAMING_Z)
622 framing = "Zaurus";
623 else if (dev->driver_info->flags & FLAG_FRAMING_RN)
624 framing = "RNDIS";
625 else if (dev->driver_info->flags & FLAG_FRAMING_AX)
626 framing = "ASIX";
627 else
628 framing = "simple";
629
630 devinfo (dev, "open: enable queueing "
631 "(rx %d, tx %d) mtu %d %s framing",
632 RX_QLEN (dev), TX_QLEN (dev), dev->net->mtu,
633 framing);
634 }
635
636 // delay posting reads until we're fully open
637 tasklet_schedule (&dev->bh);
638 done:
639 return retval;
640 }
641
642 /*-------------------------------------------------------------------------*/
643
644 /* ethtool methods; minidrivers may need to add some more, but
645 * they'll probably want to use this base set.
646 */
647
648 void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
649 {
650 struct usbnet *dev = netdev_priv(net);
651
652 /* REVISIT don't always return "usbnet" */
653 strncpy (info->driver, driver_name, sizeof info->driver);
654 strncpy (info->version, DRIVER_VERSION, sizeof info->version);
655 strncpy (info->fw_version, dev->driver_info->description,
656 sizeof info->fw_version);
657 usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
658 }
659 EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
660
661 static u32 usbnet_get_link (struct net_device *net)
662 {
663 struct usbnet *dev = netdev_priv(net);
664
665 /* If a check_connect is defined, return its result */
666 if (dev->driver_info->check_connect)
667 return dev->driver_info->check_connect (dev) == 0;
668
669 /* Otherwise, say we're up (to avoid breaking scripts) */
670 return 1;
671 }
672
673 u32 usbnet_get_msglevel (struct net_device *net)
674 {
675 struct usbnet *dev = netdev_priv(net);
676
677 return dev->msg_enable;
678 }
679 EXPORT_SYMBOL_GPL(usbnet_get_msglevel);
680
681 void usbnet_set_msglevel (struct net_device *net, u32 level)
682 {
683 struct usbnet *dev = netdev_priv(net);
684
685 dev->msg_enable = level;
686 }
687 EXPORT_SYMBOL_GPL(usbnet_set_msglevel);
688
689 /* drivers may override default ethtool_ops in their bind() routine */
690 static struct ethtool_ops usbnet_ethtool_ops = {
691 .get_drvinfo = usbnet_get_drvinfo,
692 .get_link = usbnet_get_link,
693 .get_msglevel = usbnet_get_msglevel,
694 .set_msglevel = usbnet_set_msglevel,
695 };
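/* Sketch (not built): a minidriver that can report real PHY state would
 * typically install its own ethtool_ops from bind(), reusing whichever
 * defaults it doesn't change.  "example_get_link" and the ops name are
 * hypothetical.
 */
#if 0
static u32 example_get_link (struct net_device *net);	/* PHY-based */

static struct ethtool_ops example_ethtool_ops = {
	.get_drvinfo	= usbnet_get_drvinfo,
	.get_link	= example_get_link,
	.get_msglevel	= usbnet_get_msglevel,
	.set_msglevel	= usbnet_set_msglevel,
};

/* ... and in that minidriver's bind():
 *	dev->net->ethtool_ops = &example_ethtool_ops;
 */
#endif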
696
697 /*-------------------------------------------------------------------------*/
698
699 /* work that cannot be done in interrupt context uses keventd.
700 *
701 * NOTE: with 2.5 we could do more of this using completion callbacks,
702 * especially now that control transfers can be queued.
703 */
704 static void
705 kevent (void *data)
706 {
707 struct usbnet *dev = data;
708 int status;
709
710 /* usb_clear_halt() needs a thread context */
711 if (test_bit (EVENT_TX_HALT, &dev->flags)) {
712 unlink_urbs (dev, &dev->txq);
713 status = usb_clear_halt (dev->udev, dev->out);
714 if (status < 0
715 && status != -EPIPE
716 && status != -ESHUTDOWN) {
717 if (netif_msg_tx_err (dev))
718 deverr (dev, "can't clear tx halt, status %d",
719 status);
720 } else {
721 clear_bit (EVENT_TX_HALT, &dev->flags);
722 if (status != -ESHUTDOWN)
723 netif_wake_queue (dev->net);
724 }
725 }
726 if (test_bit (EVENT_RX_HALT, &dev->flags)) {
727 unlink_urbs (dev, &dev->rxq);
728 status = usb_clear_halt (dev->udev, dev->in);
729 if (status < 0
730 && status != -EPIPE
731 && status != -ESHUTDOWN) {
732 if (netif_msg_rx_err (dev))
733 deverr (dev, "can't clear rx halt, status %d",
734 status);
735 } else {
736 clear_bit (EVENT_RX_HALT, &dev->flags);
737 tasklet_schedule (&dev->bh);
738 }
739 }
740
741 /* tasklet could resubmit itself forever if memory is tight */
742 if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
743 struct urb *urb = NULL;
744
745 if (netif_running (dev->net))
746 urb = usb_alloc_urb (0, GFP_KERNEL);
747 else
748 clear_bit (EVENT_RX_MEMORY, &dev->flags);
749 if (urb != NULL) {
750 clear_bit (EVENT_RX_MEMORY, &dev->flags);
751 rx_submit (dev, urb, GFP_KERNEL);
752 tasklet_schedule (&dev->bh);
753 }
754 }
755
756 if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
757 struct driver_info *info = dev->driver_info;
758 int retval = 0;
759
760 clear_bit (EVENT_LINK_RESET, &dev->flags);
761 if (info->link_reset && (retval = info->link_reset (dev)) < 0) {
762 devinfo (dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
763 retval,
764 dev->udev->bus->bus_name, dev->udev->devpath,
765 info->description);
766 }
767 }
768
769 if (dev->flags)
770 devdbg (dev, "kevent done, flags = 0x%lx",
771 dev->flags);
772 }
773
774 /*-------------------------------------------------------------------------*/
775
776 static void tx_complete (struct urb *urb, struct pt_regs *regs)
777 {
778 struct sk_buff *skb = (struct sk_buff *) urb->context;
779 struct skb_data *entry = (struct skb_data *) skb->cb;
780 struct usbnet *dev = entry->dev;
781
782 if (urb->status == 0) {
783 dev->stats.tx_packets++;
784 dev->stats.tx_bytes += entry->length;
785 } else {
786 dev->stats.tx_errors++;
787
788 switch (urb->status) {
789 case -EPIPE:
790 usbnet_defer_kevent (dev, EVENT_TX_HALT);
791 break;
792
793 /* software-driven interface shutdown */
794 case -ECONNRESET: // async unlink
795 case -ESHUTDOWN: // hardware gone
796 break;
797
798 // like rx, tx gets controller i/o faults during khubd delays
799 // and so it uses the same throttling mechanism.
800 case -EPROTO: // ehci
801 case -ETIMEDOUT: // ohci
802 case -EILSEQ: // uhci
803 if (!timer_pending (&dev->delay)) {
804 mod_timer (&dev->delay,
805 jiffies + THROTTLE_JIFFIES);
806 if (netif_msg_link (dev))
807 devdbg (dev, "tx throttle %d",
808 urb->status);
809 }
810 netif_stop_queue (dev->net);
811 break;
812 default:
813 if (netif_msg_tx_err (dev))
814 devdbg (dev, "tx err %d", entry->urb->status);
815 break;
816 }
817 }
818
819 urb->dev = NULL;
820 entry->state = tx_done;
821 defer_bh(dev, skb, &dev->txq);
822 }
823
824 /*-------------------------------------------------------------------------*/
825
826 static void usbnet_tx_timeout (struct net_device *net)
827 {
828 struct usbnet *dev = netdev_priv(net);
829
830 unlink_urbs (dev, &dev->txq);
831 tasklet_schedule (&dev->bh);
832
833 // FIXME: device recovery -- reset?
834 }
835
836 /*-------------------------------------------------------------------------*/
837
838 static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
839 {
840 struct usbnet *dev = netdev_priv(net);
841 int length;
842 int retval = NET_XMIT_SUCCESS;
843 struct urb *urb = NULL;
844 struct skb_data *entry;
845 struct driver_info *info = dev->driver_info;
846 unsigned long flags;
847
848 // some devices want funky USB-level framing, for
849 // win32 driver (usually) and/or hardware quirks
850 if (info->tx_fixup) {
851 skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
852 if (!skb) {
853 if (netif_msg_tx_err (dev))
854 devdbg (dev, "can't tx_fixup skb");
855 goto drop;
856 }
857 }
858 length = skb->len;
859
860 if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
861 if (netif_msg_tx_err (dev))
862 devdbg (dev, "no urb");
863 goto drop;
864 }
865
866 entry = (struct skb_data *) skb->cb;
867 entry->urb = urb;
868 entry->dev = dev;
869 entry->state = tx_start;
870 entry->length = length;
871
872 usb_fill_bulk_urb (urb, dev->udev, dev->out,
873 skb->data, skb->len, tx_complete, skb);
874
875 /* don't assume the hardware handles USB_ZERO_PACKET
876 * NOTE: strictly conforming cdc-ether devices should expect
877 * the ZLP here, but ignore the one-byte packet.
878 *
879 * FIXME zero that byte, if it doesn't require a new skb.
880 */
881 if ((length % dev->maxpacket) == 0)
882 urb->transfer_buffer_length++;
883
884 spin_lock_irqsave (&dev->txq.lock, flags);
885
886 switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
887 case -EPIPE:
888 netif_stop_queue (net);
889 usbnet_defer_kevent (dev, EVENT_TX_HALT);
890 break;
891 default:
892 if (netif_msg_tx_err (dev))
893 devdbg (dev, "tx: submit urb err %d", retval);
894 break;
895 case 0:
896 net->trans_start = jiffies;
897 __skb_queue_tail (&dev->txq, skb);
898 if (dev->txq.qlen >= TX_QLEN (dev))
899 netif_stop_queue (net);
900 }
901 spin_unlock_irqrestore (&dev->txq.lock, flags);
902
903 if (retval) {
904 if (netif_msg_tx_err (dev))
905 devdbg (dev, "drop, code %d", retval);
906 drop:
907 retval = NET_XMIT_SUCCESS;
908 dev->stats.tx_dropped++;
909 if (skb)
910 dev_kfree_skb_any (skb);
911 usb_free_urb (urb);
912 } else if (netif_msg_tx_queued (dev)) {
913 devdbg (dev, "> tx, len %d, type 0x%x",
914 length, skb->protocol);
915 }
916 return retval;
917 }
918
919
920 /*-------------------------------------------------------------------------*/
921
922 // tasklet (work deferred from completions, in_irq) or timer
923
924 static void usbnet_bh (unsigned long param)
925 {
926 struct usbnet *dev = (struct usbnet *) param;
927 struct sk_buff *skb;
928 struct skb_data *entry;
929
930 while ((skb = skb_dequeue (&dev->done))) {
931 entry = (struct skb_data *) skb->cb;
932 switch (entry->state) {
933 case rx_done:
934 entry->state = rx_cleanup;
935 rx_process (dev, skb);
936 continue;
937 case tx_done:
938 case rx_cleanup:
939 usb_free_urb (entry->urb);
940 dev_kfree_skb (skb);
941 continue;
942 default:
943 devdbg (dev, "bogus skb state %d", entry->state);
944 }
945 }
946
947 // waiting for all pending urbs to complete?
948 if (dev->wait) {
949 if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
950 wake_up (dev->wait);
951 }
952
953 // or are we maybe short a few urbs?
954 } else if (netif_running (dev->net)
955 && netif_device_present (dev->net)
956 && !timer_pending (&dev->delay)
957 && !test_bit (EVENT_RX_HALT, &dev->flags)) {
958 int temp = dev->rxq.qlen;
959 int qlen = RX_QLEN (dev);
960
961 if (temp < qlen) {
962 struct urb *urb;
963 int i;
964
965 // don't refill the queue all at once
966 for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
967 urb = usb_alloc_urb (0, GFP_ATOMIC);
968 if (urb != NULL)
969 rx_submit (dev, urb, GFP_ATOMIC);
970 }
971 if (temp != dev->rxq.qlen && netif_msg_link (dev))
972 devdbg (dev, "rxqlen %d --> %d",
973 temp, dev->rxq.qlen);
974 if (dev->rxq.qlen < qlen)
975 tasklet_schedule (&dev->bh);
976 }
977 if (dev->txq.qlen < TX_QLEN (dev))
978 netif_wake_queue (dev->net);
979 }
980 }
981
982
983 \f
984 /*-------------------------------------------------------------------------
985 *
986 * USB Device Driver support
987 *
988 *-------------------------------------------------------------------------*/
989
990 // precondition: never called in_interrupt
991
992 void usbnet_disconnect (struct usb_interface *intf)
993 {
994 struct usbnet *dev;
995 struct usb_device *xdev;
996 struct net_device *net;
997
998 dev = usb_get_intfdata(intf);
999 usb_set_intfdata(intf, NULL);
1000 if (!dev)
1001 return;
1002
1003 xdev = interface_to_usbdev (intf);
1004
1005 if (netif_msg_probe (dev))
1006 devinfo (dev, "unregister '%s' usb-%s-%s, %s",
1007 intf->dev.driver->name,
1008 xdev->bus->bus_name, xdev->devpath,
1009 dev->driver_info->description);
1010
1011 net = dev->net;
1012 unregister_netdev (net);
1013
1014 /* we don't hold rtnl here ... */
1015 flush_scheduled_work ();
1016
1017 if (dev->driver_info->unbind)
1018 dev->driver_info->unbind (dev, intf);
1019
1020 free_netdev(net);
1021 usb_put_dev (xdev);
1022 }
1023 EXPORT_SYMBOL_GPL(usbnet_disconnect);
1024
1025
1026 /*-------------------------------------------------------------------------*/
1027
1028 // precondition: never called in_interrupt
1029
1030 int
1031 usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1032 {
1033 struct usbnet *dev;
1034 struct net_device *net;
1035 struct usb_host_interface *interface;
1036 struct driver_info *info;
1037 struct usb_device *xdev;
1038 int status;
1039
1040 info = (struct driver_info *) prod->driver_info;
1041 if (!info) {
1042 dev_dbg (&udev->dev, "blacklisted by %s\n", driver_name);
1043 return -ENODEV;
1044 }
1045 xdev = interface_to_usbdev (udev);
1046 interface = udev->cur_altsetting;
1047
1048 usb_get_dev (xdev);
1049
1050 status = -ENOMEM;
1051
1052 // set up our own records
1053 net = alloc_etherdev(sizeof(*dev));
1054 if (!net) {
1055 dbg ("can't kmalloc dev");
1056 goto out;
1057 }
1058
1059 dev = netdev_priv(net);
1060 dev->udev = xdev;
1061 dev->driver_info = info;
1062 dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
1063 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
1064 skb_queue_head_init (&dev->rxq);
1065 skb_queue_head_init (&dev->txq);
1066 skb_queue_head_init (&dev->done);
1067 dev->bh.func = usbnet_bh;
1068 dev->bh.data = (unsigned long) dev;
1069 INIT_WORK (&dev->kevent, kevent, dev);
1070 dev->delay.function = usbnet_bh;
1071 dev->delay.data = (unsigned long) dev;
1072 init_timer (&dev->delay);
1073
1074 SET_MODULE_OWNER (net);
1075 dev->net = net;
1076 strcpy (net->name, "usb%d");
1077 memcpy (net->dev_addr, node_id, sizeof node_id);
1078
1079 /* rx and tx sides can use different message sizes;
1080 * bind() should set rx_urb_size in that case.
1081 */
1082 dev->hard_mtu = net->mtu + net->hard_header_len;
1083 #if 0
1084 // dma_supported() is deeply broken on almost all architectures
1085 // possible with some EHCI controllers
1086 if (dma_supported (&udev->dev, DMA_64BIT_MASK))
1087 net->features |= NETIF_F_HIGHDMA;
1088 #endif
1089
1090 net->change_mtu = usbnet_change_mtu;
1091 net->get_stats = usbnet_get_stats;
1092 net->hard_start_xmit = usbnet_start_xmit;
1093 net->open = usbnet_open;
1094 net->stop = usbnet_stop;
1095 net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
1096 net->tx_timeout = usbnet_tx_timeout;
1097 net->ethtool_ops = &usbnet_ethtool_ops;
1098
1099 // allow device-specific bind/init procedures
1100 // NOTE net->name still not usable ...
1101 if (info->bind) {
1102 status = info->bind (dev, udev);
1103 // heuristic: "usb%d" for links we know are two-host,
1104 // else "eth%d" when there's reasonable doubt. userspace
1105 // can rename the link if it knows better.
1106 if ((dev->driver_info->flags & FLAG_ETHER) != 0
1107 && (net->dev_addr [0] & 0x02) == 0)
1108 strcpy (net->name, "eth%d");
1109
1110 /* maybe the remote can't receive an Ethernet MTU */
1111 if (net->mtu > (dev->hard_mtu - net->hard_header_len))
1112 net->mtu = dev->hard_mtu - net->hard_header_len;
1113 } else if (!info->in || !info->out)
1114 status = usbnet_get_endpoints (dev, udev);
1115 else {
1116 dev->in = usb_rcvbulkpipe (xdev, info->in);
1117 dev->out = usb_sndbulkpipe (xdev, info->out);
1118 if (!(info->flags & FLAG_NO_SETINT))
1119 status = usb_set_interface (xdev,
1120 interface->desc.bInterfaceNumber,
1121 interface->desc.bAlternateSetting);
1122 else
1123 status = 0;
1124
1125 }
1126 if (status == 0 && dev->status)
1127 status = init_status (dev, udev);
1128 if (status < 0)
1129 goto out1;
1130
1131 if (!dev->rx_urb_size)
1132 dev->rx_urb_size = dev->hard_mtu;
1133 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
1134
1135 SET_NETDEV_DEV(net, &udev->dev);
1136 status = register_netdev (net);
1137 if (status)
1138 goto out3;
1139 if (netif_msg_probe (dev))
1140 devinfo (dev, "register '%s' at usb-%s-%s, %s, "
1141 "%02x:%02x:%02x:%02x:%02x:%02x",
1142 udev->dev.driver->name,
1143 xdev->bus->bus_name, xdev->devpath,
1144 dev->driver_info->description,
1145 net->dev_addr [0], net->dev_addr [1],
1146 net->dev_addr [2], net->dev_addr [3],
1147 net->dev_addr [4], net->dev_addr [5]);
1148
1149 // ok, it's ready to go.
1150 usb_set_intfdata (udev, dev);
1151
1152 // start as if the link is up
1153 netif_device_attach (net);
1154
1155 return 0;
1156
1157 out3:
1158 if (info->unbind)
1159 info->unbind (dev, udev);
1160 out1:
1161 free_netdev(net);
1162 out:
1163 usb_put_dev(xdev);
1164 return status;
1165 }
1166 EXPORT_SYMBOL_GPL(usbnet_probe);
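/* Sketch (not built) of how a minidriver glues into this framework:
 * a driver_info referenced from its usb_device_id table, plus a
 * usb_driver whose methods are the usbnet_* entry points exported
 * here.  All "example_*" names and the vendor/product IDs are
 * hypothetical.
 */
#if 0
static struct driver_info example_info = {
	.description	= "Example USB Ethernet adapter",
	.flags		= FLAG_ETHER,
	.bind		= example_bind,
	.rx_fixup	= example_rx_fixup,
};

static struct usb_device_id example_products [] = {
	{
		USB_DEVICE (0x1234, 0x5678),	/* hypothetical IDs */
		.driver_info = (unsigned long) &example_info,
	},
	{ },		/* END */
};
MODULE_DEVICE_TABLE (usb, example_products);

static struct usb_driver example_driver = {
	.name		= "example",
	.id_table	= example_products,
	.probe		= usbnet_probe,
	.disconnect	= usbnet_disconnect,
	.suspend	= usbnet_suspend,
	.resume		= usbnet_resume,
};

/* registered from the minidriver's module_init() via usb_register() */
#endif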
1167
1168 /*-------------------------------------------------------------------------*/
1169
1170 /* FIXME these suspend/resume methods assume non-CDC style
1171 * devices, with only one interface.
1172 */
1173
1174 int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
1175 {
1176 struct usbnet *dev = usb_get_intfdata(intf);
1177
1178 /* accelerate emptying of the rx and tx queues, to avoid
1179 * having everything error out.
1180 */
1181 netif_device_detach (dev->net);
1182 (void) unlink_urbs (dev, &dev->rxq);
1183 (void) unlink_urbs (dev, &dev->txq);
1184 return 0;
1185 }
1186 EXPORT_SYMBOL_GPL(usbnet_suspend);
1187
1188 int usbnet_resume (struct usb_interface *intf)
1189 {
1190 struct usbnet *dev = usb_get_intfdata(intf);
1191
1192 netif_device_attach (dev->net);
1193 tasklet_schedule (&dev->bh);
1194 return 0;
1195 }
1196 EXPORT_SYMBOL_GPL(usbnet_resume);
1197
1198
1199 /*-------------------------------------------------------------------------*/
1200
1201 static int __init usbnet_init(void)
1202 {
1203 /* compiler should optimize this out */
1204 BUG_ON (sizeof (((struct sk_buff *)0)->cb)
1205 < sizeof (struct skb_data));
1206
1207 random_ether_addr(node_id);
1208 return 0;
1209 }
1210 module_init(usbnet_init);
1211
1212 static void __exit usbnet_exit(void)
1213 {
1214 }
1215 module_exit(usbnet_exit);
1216
1217 MODULE_AUTHOR("David Brownell");
1218 MODULE_DESCRIPTION("USB network driver framework");
1219 MODULE_LICENSE("GPL");