drivers/net/usb/usbnet.c
1 /*
2 * USB Network driver infrastructure
3 * Copyright (C) 2000-2005 by David Brownell
4 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21 /*
22 * This is a generic "USB networking" framework that works with several
23 * kinds of full and high speed networking devices: host-to-host cables,
24 * smart usb peripherals, and actual Ethernet adapters.
25 *
26 * These devices usually differ in terms of control protocols (if they
27 * even have one!) and sometimes they define new framing to wrap or batch
28 * Ethernet packets. Otherwise, they talk to USB pretty much the same,
29 * so interface (un)binding, endpoint I/O queues, fault handling, and other
30 * issues can usefully be addressed by this framework.
31 */
32
33 // #define DEBUG // error path messages, extra info
34 // #define VERBOSE // more; success messages
35
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/ctype.h>
41 #include <linux/ethtool.h>
42 #include <linux/workqueue.h>
43 #include <linux/mii.h>
44 #include <linux/usb.h>
45 #include <linux/usb/usbnet.h>
46 #include <linux/slab.h>
47 #include <linux/kernel.h>
48 #include <linux/pm_runtime.h>
49
50 #define DRIVER_VERSION "22-Aug-2005"
51
52
53 /*-------------------------------------------------------------------------*/
54
55 /*
56 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
57 * Several dozen bytes of IPv4 data can fit in two such transactions.
58 * One maximum size Ethernet packet takes twenty four of them.
59 * For high speed, each frame comfortably fits almost 36 max size
60 * Ethernet packets (so queues should be bigger).
61 *
62 * REVISIT qlens should be members of 'struct usbnet'; the goal is to
63 * let the USB host controller be busy for 5msec or more before an irq
64 * is required, under load. Jumbograms change the equation.
65 */
66 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
67 #define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
68 (RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
69 #define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
70 (RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
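/* Illustrative sizing (assuming the default Ethernet framing, where hard_mtu
 * and rx_urb_size are 1500 + 14 = 1514 bytes): at high speed RX_QLEN works
 * out to (60 * 1518) / 1514 ~= 60 queued URBs, while full speed devices get
 * a fixed queue of 4.
 */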
71
72 // reawaken network queue this soon after stopping; else watchdog barks
73 #define TX_TIMEOUT_JIFFIES (5*HZ)
74
75 // throttle rx/tx briefly after some faults, so khubd might disconnect()
76 // us (it polls at HZ/4 usually) before we report too many false errors.
77 #define THROTTLE_JIFFIES (HZ/8)
78
79 // between wakeups
80 #define UNLINK_TIMEOUT_MS 3
81
82 /*-------------------------------------------------------------------------*/
83
84 // randomly generated ethernet address
85 static u8 node_id [ETH_ALEN];
86
87 static const char driver_name [] = "usbnet";
88
89 /* use ethtool to change the level for any given device */
90 static int msg_level = -1;
91 module_param (msg_level, int, 0);
92 MODULE_PARM_DESC (msg_level, "Override default message level");
93
94 /*-------------------------------------------------------------------------*/
95
96 /* handles CDC Ethernet and many other network "bulk data" interfaces */
97 int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
98 {
99 int tmp;
100 struct usb_host_interface *alt = NULL;
101 struct usb_host_endpoint *in = NULL, *out = NULL;
102 struct usb_host_endpoint *status = NULL;
103
104 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
105 unsigned ep;
106
107 in = out = status = NULL;
108 alt = intf->altsetting + tmp;
109
110 /* take the first altsetting with in-bulk + out-bulk;
111 * remember any status endpoint, just in case;
112 * ignore other endpoints and altsettings.
113 */
114 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
115 struct usb_host_endpoint *e;
116 int intr = 0;
117
118 e = alt->endpoint + ep;
119 switch (e->desc.bmAttributes) {
120 case USB_ENDPOINT_XFER_INT:
121 if (!usb_endpoint_dir_in(&e->desc))
122 continue;
123 intr = 1;
124 /* FALLTHROUGH */
125 case USB_ENDPOINT_XFER_BULK:
126 break;
127 default:
128 continue;
129 }
130 if (usb_endpoint_dir_in(&e->desc)) {
131 if (!intr && !in)
132 in = e;
133 else if (intr && !status)
134 status = e;
135 } else {
136 if (!out)
137 out = e;
138 }
139 }
140 if (in && out)
141 break;
142 }
143 if (!alt || !in || !out)
144 return -EINVAL;
145
146 if (alt->desc.bAlternateSetting != 0 ||
147 !(dev->driver_info->flags & FLAG_NO_SETINT)) {
148 tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
149 alt->desc.bAlternateSetting);
150 if (tmp < 0)
151 return tmp;
152 }
153
154 dev->in = usb_rcvbulkpipe (dev->udev,
155 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
156 dev->out = usb_sndbulkpipe (dev->udev,
157 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
158 dev->status = status;
159 return 0;
160 }
161 EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
162
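/* Read the MAC address from the device's iMACAddress string descriptor:
 * exactly 12 hex digits, decoded pairwise into net->dev_addr.
 */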
163 int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
164 {
165 int tmp, i;
166 unsigned char buf [13];
167
168 tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
169 if (tmp != 12) {
170 dev_dbg(&dev->udev->dev,
171 "bad MAC string %d fetch, %d\n", iMACAddress, tmp);
172 if (tmp >= 0)
173 tmp = -EINVAL;
174 return tmp;
175 }
176 for (i = tmp = 0; i < 6; i++, tmp += 2)
177 dev->net->dev_addr [i] =
178 (hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
179 return 0;
180 }
181 EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
182
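/* Completion handler for the status/interrupt URB: successful transfers are
 * handed to the minidriver's status() callback; shutdown errors stop the URB;
 * anything else is logged and the URB is resubmitted while the interface runs.
 */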
183 static void intr_complete (struct urb *urb)
184 {
185 struct usbnet *dev = urb->context;
186 int status = urb->status;
187
188 switch (status) {
189 /* success */
190 case 0:
191 dev->driver_info->status(dev, urb);
192 break;
193
194 /* software-driven interface shutdown */
195 case -ENOENT: /* urb killed */
196 case -ESHUTDOWN: /* hardware gone */
197 netif_dbg(dev, ifdown, dev->net,
198 "intr shutdown, code %d\n", status);
199 return;
200
201 /* NOTE: not throttling like RX/TX, since this endpoint
202 * already polls infrequently
203 */
204 default:
205 netdev_dbg(dev->net, "intr status %d\n", status);
206 break;
207 }
208
209 if (!netif_running (dev->net))
210 return;
211
212 status = usb_submit_urb (urb, GFP_ATOMIC);
213 if (status != 0)
214 netif_err(dev, timer, dev->net,
215 "intr resubmit --> %d\n", status);
216 }
217
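/* Allocate the buffer and interrupt URB for the optional status endpoint,
 * used only when the minidriver supplies a status() callback; the polling
 * interval is clamped to avoid 1 msec chatter.
 */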
218 static int init_status (struct usbnet *dev, struct usb_interface *intf)
219 {
220 char *buf = NULL;
221 unsigned pipe = 0;
222 unsigned maxp;
223 unsigned period;
224
225 if (!dev->driver_info->status)
226 return 0;
227
228 pipe = usb_rcvintpipe (dev->udev,
229 dev->status->desc.bEndpointAddress
230 & USB_ENDPOINT_NUMBER_MASK);
231 maxp = usb_maxpacket (dev->udev, pipe, 0);
232
233 /* avoid 1 msec chatter: min 8 msec poll rate */
234 period = max ((int) dev->status->desc.bInterval,
235 (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
236
237 buf = kmalloc (maxp, GFP_KERNEL);
238 if (buf) {
239 dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
240 if (!dev->interrupt) {
241 kfree (buf);
242 return -ENOMEM;
243 } else {
244 usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
245 buf, maxp, intr_complete, dev, period);
246 dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
247 dev_dbg(&intf->dev,
248 "status ep%din, %d bytes period %d\n",
249 usb_pipeendpoint(pipe), maxp, period);
250 }
251 }
252 return 0;
253 }
254
255 /* Passes this packet up the stack, updating its accounting.
256 * Some link protocols batch packets, so their rx_fixup paths
257 * can return clones as well as just modify the original skb.
258 */
259 void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
260 {
261 int status;
262
263 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
264 skb_queue_tail(&dev->rxq_pause, skb);
265 return;
266 }
267
268 skb->protocol = eth_type_trans (skb, dev->net);
269 dev->net->stats.rx_packets++;
270 dev->net->stats.rx_bytes += skb->len;
271
272 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
273 skb->len + sizeof (struct ethhdr), skb->protocol);
274 memset (skb->cb, 0, sizeof (struct skb_data));
275
276 if (skb_defer_rx_timestamp(skb))
277 return;
278
279 status = netif_rx (skb);
280 if (status != NET_RX_SUCCESS)
281 netif_dbg(dev, rx_err, dev->net,
282 "netif_rx status %d\n", status);
283 }
284 EXPORT_SYMBOL_GPL(usbnet_skb_return);
285
286
287 /*-------------------------------------------------------------------------
288 *
289 * Network Device Driver (peer link to "Host Device", from USB host)
290 *
291 *-------------------------------------------------------------------------*/
292
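/* Change the MTU, rejecting sizes whose USB frame length would be an exact
 * multiple of the endpoint maxpacket (that would force a trailing zero-length
 * packet); when rx_urb_size was still tracking the old hard_mtu, grow it with
 * the new one and relink the rx URBs.
 */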
293 int usbnet_change_mtu (struct net_device *net, int new_mtu)
294 {
295 struct usbnet *dev = netdev_priv(net);
296 int ll_mtu = new_mtu + net->hard_header_len;
297 int old_hard_mtu = dev->hard_mtu;
298 int old_rx_urb_size = dev->rx_urb_size;
299
300 if (new_mtu <= 0)
301 return -EINVAL;
302 // no second zero-length packet read wanted after mtu-sized packets
303 if ((ll_mtu % dev->maxpacket) == 0)
304 return -EDOM;
305 net->mtu = new_mtu;
306
307 dev->hard_mtu = net->mtu + net->hard_header_len;
308 if (dev->rx_urb_size == old_hard_mtu) {
309 dev->rx_urb_size = dev->hard_mtu;
310 if (dev->rx_urb_size > old_rx_urb_size)
311 usbnet_unlink_rx_urbs(dev);
312 }
313
314 return 0;
315 }
316 EXPORT_SYMBOL_GPL(usbnet_change_mtu);
317
318 /* The caller must hold list->lock */
319 static void __usbnet_queue_skb(struct sk_buff_head *list,
320 struct sk_buff *newsk, enum skb_state state)
321 {
322 struct skb_data *entry = (struct skb_data *) newsk->cb;
323
324 __skb_queue_tail(list, newsk);
325 entry->state = state;
326 }
327
328 /*-------------------------------------------------------------------------*/
329
330 /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
331 * completion callbacks. 2.5 should have fixed those bugs...
332 */
333
334 static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
335 struct sk_buff_head *list, enum skb_state state)
336 {
337 unsigned long flags;
338 enum skb_state old_state;
339 struct skb_data *entry = (struct skb_data *) skb->cb;
340
341 spin_lock_irqsave(&list->lock, flags);
342 old_state = entry->state;
343 entry->state = state;
344 __skb_unlink(skb, list);
345 spin_unlock(&list->lock);
346 spin_lock(&dev->done.lock);
347 __skb_queue_tail(&dev->done, skb);
348 if (dev->done.qlen == 1)
349 tasklet_schedule(&dev->bh);
350 spin_unlock_irqrestore(&dev->done.lock, flags);
351 return old_state;
352 }
353
354 /* some work can't be done in tasklets, so we use keventd
355 *
356 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
357 * but tasklet_schedule() doesn't. hope the failure is rare.
358 */
359 void usbnet_defer_kevent (struct usbnet *dev, int work)
360 {
361 set_bit (work, &dev->flags);
362 if (!schedule_work (&dev->kevent)) {
363 if (net_ratelimit())
364 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
365 } else {
366 netdev_dbg(dev->net, "kevent %d scheduled\n", work);
367 }
368 }
369 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
370
371 /*-------------------------------------------------------------------------*/
372
373 static void rx_complete (struct urb *urb);
374
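/* Allocate an rx skb sized to rx_urb_size, attach it to the bulk-in URB and
 * submit it; stalls and memory shortage are deferred to keventd, and on any
 * failure the URB (and skb, if allocated) is released before the error is
 * returned.
 */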
375 static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
376 {
377 struct sk_buff *skb;
378 struct skb_data *entry;
379 int retval = 0;
380 unsigned long lockflags;
381 size_t size = dev->rx_urb_size;
382
383 /* prevent rx skb allocation when error ratio is high */
384 if (test_bit(EVENT_RX_KILL, &dev->flags)) {
385 usb_free_urb(urb);
386 return -ENOLINK;
387 }
388
389 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
390 if (!skb) {
391 netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
392 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
393 usb_free_urb (urb);
394 return -ENOMEM;
395 }
396
397 entry = (struct skb_data *) skb->cb;
398 entry->urb = urb;
399 entry->dev = dev;
400 entry->length = 0;
401
402 usb_fill_bulk_urb (urb, dev->udev, dev->in,
403 skb->data, size, rx_complete, skb);
404
405 spin_lock_irqsave (&dev->rxq.lock, lockflags);
406
407 if (netif_running (dev->net) &&
408 netif_device_present (dev->net) &&
409 !test_bit (EVENT_RX_HALT, &dev->flags) &&
410 !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
411 switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
412 case -EPIPE:
413 usbnet_defer_kevent (dev, EVENT_RX_HALT);
414 break;
415 case -ENOMEM:
416 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
417 break;
418 case -ENODEV:
419 netif_dbg(dev, ifdown, dev->net, "device gone\n");
420 netif_device_detach (dev->net);
421 break;
422 case -EHOSTUNREACH:
423 retval = -ENOLINK;
424 break;
425 default:
426 netif_dbg(dev, rx_err, dev->net,
427 "rx submit, %d\n", retval);
428 tasklet_schedule (&dev->bh);
429 break;
430 case 0:
431 __usbnet_queue_skb(&dev->rxq, skb, rx_start);
432 }
433 } else {
434 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
435 retval = -ENOLINK;
436 }
437 spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
438 if (retval) {
439 dev_kfree_skb_any (skb);
440 usb_free_urb (urb);
441 }
442 return retval;
443 }
444
445
446 /*-------------------------------------------------------------------------*/
447
448 static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
449 {
450 if (dev->driver_info->rx_fixup &&
451 !dev->driver_info->rx_fixup (dev, skb)) {
452 /* With RX_ASSEMBLE, rx_fixup() must update counters */
453 if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
454 dev->net->stats.rx_errors++;
455 goto done;
456 }
457 // else network stack removes extra byte if we forced a short packet
458
459 if (skb->len) {
460 /* all data was already cloned from skb inside the driver */
461 if (dev->driver_info->flags & FLAG_MULTI_PACKET)
462 dev_kfree_skb_any(skb);
463 else
464 usbnet_skb_return(dev, skb);
465 return;
466 }
467
468 netif_dbg(dev, rx_err, dev->net, "drop\n");
469 dev->net->stats.rx_errors++;
470 done:
471 skb_queue_tail(&dev->done, skb);
472 }
473
474 /*-------------------------------------------------------------------------*/
475
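/* Bulk-in completion: validate the received length, map stalls and transient
 * controller faults to deferred recovery or brief throttling, queue the skb
 * for the tasklet via defer_bh(), and resubmit the URB immediately when
 * possible.
 */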
476 static void rx_complete (struct urb *urb)
477 {
478 struct sk_buff *skb = (struct sk_buff *) urb->context;
479 struct skb_data *entry = (struct skb_data *) skb->cb;
480 struct usbnet *dev = entry->dev;
481 int urb_status = urb->status;
482 enum skb_state state;
483
484 skb_put (skb, urb->actual_length);
485 state = rx_done;
486 entry->urb = NULL;
487
488 switch (urb_status) {
489 /* success */
490 case 0:
491 if (skb->len < dev->net->hard_header_len) {
492 state = rx_cleanup;
493 dev->net->stats.rx_errors++;
494 dev->net->stats.rx_length_errors++;
495 netif_dbg(dev, rx_err, dev->net,
496 "rx length %d\n", skb->len);
497 }
498 break;
499
500 /* stalls need manual reset. this is rare ... except that
501 * when going through USB 2.0 TTs, unplug appears this way.
502 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
503 * storm, recovering as needed.
504 */
505 case -EPIPE:
506 dev->net->stats.rx_errors++;
507 usbnet_defer_kevent (dev, EVENT_RX_HALT);
508 // FALLTHROUGH
509
510 /* software-driven interface shutdown */
511 case -ECONNRESET: /* async unlink */
512 case -ESHUTDOWN: /* hardware gone */
513 netif_dbg(dev, ifdown, dev->net,
514 "rx shutdown, code %d\n", urb_status);
515 goto block;
516
517 /* we get controller i/o faults during khubd disconnect() delays.
518 * throttle down resubmits, to avoid log floods; just temporarily,
519 * so we still recover when the fault isn't a khubd delay.
520 */
521 case -EPROTO:
522 case -ETIME:
523 case -EILSEQ:
524 dev->net->stats.rx_errors++;
525 if (!timer_pending (&dev->delay)) {
526 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
527 netif_dbg(dev, link, dev->net,
528 "rx throttle %d\n", urb_status);
529 }
530 block:
531 state = rx_cleanup;
532 entry->urb = urb;
533 urb = NULL;
534 break;
535
536 /* data overrun ... flush fifo? */
537 case -EOVERFLOW:
538 dev->net->stats.rx_over_errors++;
539 // FALLTHROUGH
540
541 default:
542 state = rx_cleanup;
543 dev->net->stats.rx_errors++;
544 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
545 break;
546 }
547
548 /* stop rx if packet error rate is high */
549 if (++dev->pkt_cnt > 30) {
550 dev->pkt_cnt = 0;
551 dev->pkt_err = 0;
552 } else {
553 if (state == rx_cleanup)
554 dev->pkt_err++;
555 if (dev->pkt_err > 20)
556 set_bit(EVENT_RX_KILL, &dev->flags);
557 }
558
559 state = defer_bh(dev, skb, &dev->rxq, state);
560
561 if (urb) {
562 if (netif_running (dev->net) &&
563 !test_bit (EVENT_RX_HALT, &dev->flags) &&
564 state != unlink_start) {
565 rx_submit (dev, urb, GFP_ATOMIC);
566 usb_mark_last_busy(dev->udev);
567 return;
568 }
569 usb_free_urb (urb);
570 }
571 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
572 }
573
574 /*-------------------------------------------------------------------------*/
575 void usbnet_pause_rx(struct usbnet *dev)
576 {
577 set_bit(EVENT_RX_PAUSED, &dev->flags);
578
579 netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
580 }
581 EXPORT_SYMBOL_GPL(usbnet_pause_rx);
582
583 void usbnet_resume_rx(struct usbnet *dev)
584 {
585 struct sk_buff *skb;
586 int num = 0;
587
588 clear_bit(EVENT_RX_PAUSED, &dev->flags);
589
590 while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
591 usbnet_skb_return(dev, skb);
592 num++;
593 }
594
595 tasklet_schedule(&dev->bh);
596
597 netif_dbg(dev, rx_status, dev->net,
598 "paused rx queue disabled, %d skbs requeued\n", num);
599 }
600 EXPORT_SYMBOL_GPL(usbnet_resume_rx);
601
602 void usbnet_purge_paused_rxq(struct usbnet *dev)
603 {
604 skb_queue_purge(&dev->rxq_pause);
605 }
606 EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
607
608 /*-------------------------------------------------------------------------*/
609
610 // unlink pending rx/tx; completion handlers do all other cleanup
611
612 static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
613 {
614 unsigned long flags;
615 struct sk_buff *skb;
616 int count = 0;
617
618 spin_lock_irqsave (&q->lock, flags);
619 while (!skb_queue_empty(q)) {
620 struct skb_data *entry;
621 struct urb *urb;
622 int retval;
623
624 skb_queue_walk(q, skb) {
625 entry = (struct skb_data *) skb->cb;
626 if (entry->state != unlink_start)
627 goto found;
628 }
629 break;
630 found:
631 entry->state = unlink_start;
632 urb = entry->urb;
633
634 /*
635 * Get reference count of the URB to avoid it to be
636 * freed during usb_unlink_urb, which may trigger
637 * use-after-free problem inside usb_unlink_urb since
638 * usb_unlink_urb is always racing with .complete
639 * handler(include defer_bh).
640 */
641 usb_get_urb(urb);
642 spin_unlock_irqrestore(&q->lock, flags);
643 // during some PM-driven resume scenarios,
644 // these (async) unlinks complete immediately
645 retval = usb_unlink_urb (urb);
646 if (retval != -EINPROGRESS && retval != 0)
647 netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
648 else
649 count++;
650 usb_put_urb(urb);
651 spin_lock_irqsave(&q->lock, flags);
652 }
653 spin_unlock_irqrestore (&q->lock, flags);
654 return count;
655 }
656
657 // Flush all pending rx urbs
658 // minidrivers may need to do this when the MTU changes
659
660 void usbnet_unlink_rx_urbs(struct usbnet *dev)
661 {
662 if (netif_running(dev->net)) {
663 (void) unlink_urbs (dev, &dev->rxq);
664 tasklet_schedule(&dev->bh);
665 }
666 }
667 EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
668
669 /*-------------------------------------------------------------------------*/
670
671 // precondition: never called in_interrupt
672 static void usbnet_terminate_urbs(struct usbnet *dev)
673 {
674 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
675 DECLARE_WAITQUEUE(wait, current);
676 int temp;
677
678 /* ensure there are no more active urbs */
679 add_wait_queue(&unlink_wakeup, &wait);
680 set_current_state(TASK_UNINTERRUPTIBLE);
681 dev->wait = &unlink_wakeup;
682 temp = unlink_urbs(dev, &dev->txq) +
683 unlink_urbs(dev, &dev->rxq);
684
685 /* maybe wait for deletions to finish. */
686 while (!skb_queue_empty(&dev->rxq)
687 && !skb_queue_empty(&dev->txq)
688 && !skb_queue_empty(&dev->done)) {
689 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
690 set_current_state(TASK_UNINTERRUPTIBLE);
691 netif_dbg(dev, ifdown, dev->net,
692 "waited for %d urb completions\n", temp);
693 }
694 set_current_state(TASK_RUNNING);
695 dev->wait = NULL;
696 remove_wait_queue(&unlink_wakeup, &wait);
697 }
698
699 int usbnet_stop (struct net_device *net)
700 {
701 struct usbnet *dev = netdev_priv(net);
702 struct driver_info *info = dev->driver_info;
703 int retval;
704
705 clear_bit(EVENT_DEV_OPEN, &dev->flags);
706 netif_stop_queue (net);
707
708 netif_info(dev, ifdown, dev->net,
709 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
710 net->stats.rx_packets, net->stats.tx_packets,
711 net->stats.rx_errors, net->stats.tx_errors);
712
713 /* allow minidriver to stop correctly (wireless devices to turn off
714 * radio etc) */
715 if (info->stop) {
716 retval = info->stop(dev);
717 if (retval < 0)
718 netif_info(dev, ifdown, dev->net,
719 "stop fail (%d) usbnet usb-%s-%s, %s\n",
720 retval,
721 dev->udev->bus->bus_name, dev->udev->devpath,
722 info->description);
723 }
724
725 if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
726 usbnet_terminate_urbs(dev);
727
728 usb_kill_urb(dev->interrupt);
729
730 usbnet_purge_paused_rxq(dev);
731
732 /* deferred work (task, timer, softirq) must also stop.
733 * can't flush_scheduled_work() until we drop rtnl (later),
734 * else workers could deadlock; so make workers a NOP.
735 */
736 dev->flags = 0;
737 del_timer_sync (&dev->delay);
738 tasklet_kill (&dev->bh);
739 if (info->manage_power &&
740 !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
741 info->manage_power(dev, 0);
742 else
743 usb_autopm_put_interface(dev->intf);
744
745 return 0;
746 }
747 EXPORT_SYMBOL_GPL(usbnet_stop);
748
749 /*-------------------------------------------------------------------------*/
750
751 // posts reads, and enables write queuing
752
753 // precondition: never called in_interrupt
754
755 int usbnet_open (struct net_device *net)
756 {
757 struct usbnet *dev = netdev_priv(net);
758 int retval;
759 struct driver_info *info = dev->driver_info;
760
761 if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
762 netif_info(dev, ifup, dev->net,
763 "resumption fail (%d) usbnet usb-%s-%s, %s\n",
764 retval,
765 dev->udev->bus->bus_name,
766 dev->udev->devpath,
767 info->description);
768 goto done_nopm;
769 }
770
771 // put into "known safe" state
772 if (info->reset && (retval = info->reset (dev)) < 0) {
773 netif_info(dev, ifup, dev->net,
774 "open reset fail (%d) usbnet usb-%s-%s, %s\n",
775 retval,
776 dev->udev->bus->bus_name,
777 dev->udev->devpath,
778 info->description);
779 goto done;
780 }
781
782 // insist peer be connected
783 if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
784 netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
785 goto done;
786 }
787
788 /* start any status interrupt transfer */
789 if (dev->interrupt) {
790 retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
791 if (retval < 0) {
792 netif_err(dev, ifup, dev->net,
793 "intr submit %d\n", retval);
794 goto done;
795 }
796 }
797
798 set_bit(EVENT_DEV_OPEN, &dev->flags);
799 netif_start_queue (net);
800 netif_info(dev, ifup, dev->net,
801 "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
802 (int)RX_QLEN(dev), (int)TX_QLEN(dev),
803 dev->net->mtu,
804 (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
805 (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
806 (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
807 (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
808 (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
809 "simple");
810
811 /* reset rx error state */
812 dev->pkt_cnt = 0;
813 dev->pkt_err = 0;
814 clear_bit(EVENT_RX_KILL, &dev->flags);
815
816 // delay posting reads until we're fully open
817 tasklet_schedule (&dev->bh);
818 if (info->manage_power) {
819 retval = info->manage_power(dev, 1);
820 if (retval < 0) {
821 retval = 0;
822 set_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
823 } else {
824 usb_autopm_put_interface(dev->intf);
825 }
826 }
827 return retval;
828 done:
829 usb_autopm_put_interface(dev->intf);
830 done_nopm:
831 return retval;
832 }
833 EXPORT_SYMBOL_GPL(usbnet_open);
834
835 /*-------------------------------------------------------------------------*/
836
837 /* ethtool methods; minidrivers may need to add some more, but
838 * they'll probably want to use this base set.
839 */
840
841 int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
842 {
843 struct usbnet *dev = netdev_priv(net);
844
845 if (!dev->mii.mdio_read)
846 return -EOPNOTSUPP;
847
848 return mii_ethtool_gset(&dev->mii, cmd);
849 }
850 EXPORT_SYMBOL_GPL(usbnet_get_settings);
851
852 int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
853 {
854 struct usbnet *dev = netdev_priv(net);
855 int retval;
856
857 if (!dev->mii.mdio_write)
858 return -EOPNOTSUPP;
859
860 retval = mii_ethtool_sset(&dev->mii, cmd);
861
862 /* link speed/duplex might have changed */
863 if (dev->driver_info->link_reset)
864 dev->driver_info->link_reset(dev);
865
866 return retval;
867
868 }
869 EXPORT_SYMBOL_GPL(usbnet_set_settings);
870
871 u32 usbnet_get_link (struct net_device *net)
872 {
873 struct usbnet *dev = netdev_priv(net);
874
875 /* If a check_connect is defined, return its result */
876 if (dev->driver_info->check_connect)
877 return dev->driver_info->check_connect (dev) == 0;
878
879 /* if the device has mii operations, use those */
880 if (dev->mii.mdio_read)
881 return mii_link_ok(&dev->mii);
882
883 /* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
884 return ethtool_op_get_link(net);
885 }
886 EXPORT_SYMBOL_GPL(usbnet_get_link);
887
888 int usbnet_nway_reset(struct net_device *net)
889 {
890 struct usbnet *dev = netdev_priv(net);
891
892 if (!dev->mii.mdio_write)
893 return -EOPNOTSUPP;
894
895 return mii_nway_restart(&dev->mii);
896 }
897 EXPORT_SYMBOL_GPL(usbnet_nway_reset);
898
899 void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
900 {
901 struct usbnet *dev = netdev_priv(net);
902
903 strlcpy (info->driver, dev->driver_name, sizeof info->driver);
904 strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
905 strlcpy (info->fw_version, dev->driver_info->description,
906 sizeof info->fw_version);
907 usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
908 }
909 EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
910
911 u32 usbnet_get_msglevel (struct net_device *net)
912 {
913 struct usbnet *dev = netdev_priv(net);
914
915 return dev->msg_enable;
916 }
917 EXPORT_SYMBOL_GPL(usbnet_get_msglevel);
918
919 void usbnet_set_msglevel (struct net_device *net, u32 level)
920 {
921 struct usbnet *dev = netdev_priv(net);
922
923 dev->msg_enable = level;
924 }
925 EXPORT_SYMBOL_GPL(usbnet_set_msglevel);
926
927 /* drivers may override default ethtool_ops in their bind() routine */
928 static const struct ethtool_ops usbnet_ethtool_ops = {
929 .get_settings = usbnet_get_settings,
930 .set_settings = usbnet_set_settings,
931 .get_link = usbnet_get_link,
932 .nway_reset = usbnet_nway_reset,
933 .get_drvinfo = usbnet_get_drvinfo,
934 .get_msglevel = usbnet_get_msglevel,
935 .set_msglevel = usbnet_set_msglevel,
936 .get_ts_info = ethtool_op_get_ts_info,
937 };
938
939 /*-------------------------------------------------------------------------*/
940
941 /* work that cannot be done in interrupt context uses keventd.
942 *
943 * NOTE: with 2.5 we could do more of this using completion callbacks,
944 * especially now that control transfers can be queued.
945 */
946 static void
947 kevent (struct work_struct *work)
948 {
949 struct usbnet *dev =
950 container_of(work, struct usbnet, kevent);
951 int status;
952
953 /* usb_clear_halt() needs a thread context */
954 if (test_bit (EVENT_TX_HALT, &dev->flags)) {
955 unlink_urbs (dev, &dev->txq);
956 status = usb_autopm_get_interface(dev->intf);
957 if (status < 0)
958 goto fail_pipe;
959 status = usb_clear_halt (dev->udev, dev->out);
960 usb_autopm_put_interface(dev->intf);
961 if (status < 0 &&
962 status != -EPIPE &&
963 status != -ESHUTDOWN) {
964 if (netif_msg_tx_err (dev))
965 fail_pipe:
966 netdev_err(dev->net, "can't clear tx halt, status %d\n",
967 status);
968 } else {
969 clear_bit (EVENT_TX_HALT, &dev->flags);
970 if (status != -ESHUTDOWN)
971 netif_wake_queue (dev->net);
972 }
973 }
974 if (test_bit (EVENT_RX_HALT, &dev->flags)) {
975 unlink_urbs (dev, &dev->rxq);
976 status = usb_autopm_get_interface(dev->intf);
977 if (status < 0)
978 goto fail_halt;
979 status = usb_clear_halt (dev->udev, dev->in);
980 usb_autopm_put_interface(dev->intf);
981 if (status < 0 &&
982 status != -EPIPE &&
983 status != -ESHUTDOWN) {
984 if (netif_msg_rx_err (dev))
985 fail_halt:
986 netdev_err(dev->net, "can't clear rx halt, status %d\n",
987 status);
988 } else {
989 clear_bit (EVENT_RX_HALT, &dev->flags);
990 tasklet_schedule (&dev->bh);
991 }
992 }
993
994 /* tasklet could resubmit itself forever if memory is tight */
995 if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
996 struct urb *urb = NULL;
997 int resched = 1;
998
999 if (netif_running (dev->net))
1000 urb = usb_alloc_urb (0, GFP_KERNEL);
1001 else
1002 clear_bit (EVENT_RX_MEMORY, &dev->flags);
1003 if (urb != NULL) {
1004 clear_bit (EVENT_RX_MEMORY, &dev->flags);
1005 status = usb_autopm_get_interface(dev->intf);
1006 if (status < 0) {
1007 usb_free_urb(urb);
1008 goto fail_lowmem;
1009 }
1010 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
1011 resched = 0;
1012 usb_autopm_put_interface(dev->intf);
1013 fail_lowmem:
1014 if (resched)
1015 tasklet_schedule (&dev->bh);
1016 }
1017 }
1018
1019 if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
1020 struct driver_info *info = dev->driver_info;
1021 int retval = 0;
1022
1023 clear_bit (EVENT_LINK_RESET, &dev->flags);
1024 status = usb_autopm_get_interface(dev->intf);
1025 if (status < 0)
1026 goto skip_reset;
1027 if (info->link_reset && (retval = info->link_reset(dev)) < 0) {
1028 usb_autopm_put_interface(dev->intf);
1029 skip_reset:
1030 netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
1031 retval,
1032 dev->udev->bus->bus_name,
1033 dev->udev->devpath,
1034 info->description);
1035 } else {
1036 usb_autopm_put_interface(dev->intf);
1037 }
1038 }
1039
1040 if (dev->flags)
1041 netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
1042 }
1043
1044 /*-------------------------------------------------------------------------*/
1045
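/* Bulk-out completion: update tx statistics on success; on error defer halt
 * recovery or throttle briefly, then drop the runtime-PM reference and hand
 * the skb to the tasklet via defer_bh().
 */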
1046 static void tx_complete (struct urb *urb)
1047 {
1048 struct sk_buff *skb = (struct sk_buff *) urb->context;
1049 struct skb_data *entry = (struct skb_data *) skb->cb;
1050 struct usbnet *dev = entry->dev;
1051
1052 if (urb->status == 0) {
1053 if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
1054 dev->net->stats.tx_packets++;
1055 dev->net->stats.tx_bytes += entry->length;
1056 } else {
1057 dev->net->stats.tx_errors++;
1058
1059 switch (urb->status) {
1060 case -EPIPE:
1061 usbnet_defer_kevent (dev, EVENT_TX_HALT);
1062 break;
1063
1064 /* software-driven interface shutdown */
1065 case -ECONNRESET: // async unlink
1066 case -ESHUTDOWN: // hardware gone
1067 break;
1068
1069 // like rx, tx gets controller i/o faults during khubd delays
1070 // and so it uses the same throttling mechanism.
1071 case -EPROTO:
1072 case -ETIME:
1073 case -EILSEQ:
1074 usb_mark_last_busy(dev->udev);
1075 if (!timer_pending (&dev->delay)) {
1076 mod_timer (&dev->delay,
1077 jiffies + THROTTLE_JIFFIES);
1078 netif_dbg(dev, link, dev->net,
1079 "tx throttle %d\n", urb->status);
1080 }
1081 netif_stop_queue (dev->net);
1082 break;
1083 default:
1084 netif_dbg(dev, tx_err, dev->net,
1085 "tx err %d\n", entry->urb->status);
1086 break;
1087 }
1088 }
1089
1090 usb_autopm_put_interface_async(dev->intf);
1091 (void) defer_bh(dev, skb, &dev->txq, tx_done);
1092 }
1093
1094 /*-------------------------------------------------------------------------*/
1095
1096 void usbnet_tx_timeout (struct net_device *net)
1097 {
1098 struct usbnet *dev = netdev_priv(net);
1099
1100 unlink_urbs (dev, &dev->txq);
1101 tasklet_schedule (&dev->bh);
1102
1103 // FIXME: device recovery -- reset?
1104 }
1105 EXPORT_SYMBOL_GPL(usbnet_tx_timeout);
1106
1107 /*-------------------------------------------------------------------------*/
1108
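/* Queue one packet for transmission: run the minidriver's tx_fixup() framing,
 * pad by a byte or request a ZLP when the length is a maxpacket multiple, and
 * submit the bulk-out URB; while the device is asleep the URB is parked on
 * dev->deferred and submitted from usbnet_resume().
 */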
1109 netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1110 struct net_device *net)
1111 {
1112 struct usbnet *dev = netdev_priv(net);
1113 int length;
1114 struct urb *urb = NULL;
1115 struct skb_data *entry;
1116 struct driver_info *info = dev->driver_info;
1117 unsigned long flags;
1118 int retval;
1119
1120 if (skb)
1121 skb_tx_timestamp(skb);
1122
1123 // some devices want funky USB-level framing, for
1124 // win32 driver (usually) and/or hardware quirks
1125 if (info->tx_fixup) {
1126 skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
1127 if (!skb) {
1128 /* packet collected; minidriver waiting for more */
1129 if (info->flags & FLAG_MULTI_PACKET)
1130 goto not_drop;
1131 netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
1132 goto drop;
1133 }
1134 }
1135 length = skb->len;
1136
1137 if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
1138 netif_dbg(dev, tx_err, dev->net, "no urb\n");
1139 goto drop;
1140 }
1141
1142 entry = (struct skb_data *) skb->cb;
1143 entry->urb = urb;
1144 entry->dev = dev;
1145 entry->length = length;
1146
1147 usb_fill_bulk_urb (urb, dev->udev, dev->out,
1148 skb->data, skb->len, tx_complete, skb);
1149
1150 /* don't assume the hardware handles USB_ZERO_PACKET
1151 * NOTE: strictly conforming cdc-ether devices should expect
1152 * the ZLP here, but ignore the one-byte packet.
1153 * NOTE2: CDC NCM specification is different from CDC ECM when
1154 * handling ZLP/short packets, so cdc_ncm driver will make short
1155 * packet itself if needed.
1156 */
1157 if (length % dev->maxpacket == 0) {
1158 if (!(info->flags & FLAG_SEND_ZLP)) {
1159 if (!(info->flags & FLAG_MULTI_PACKET)) {
1160 urb->transfer_buffer_length++;
1161 if (skb_tailroom(skb)) {
1162 skb->data[skb->len] = 0;
1163 __skb_put(skb, 1);
1164 }
1165 }
1166 } else
1167 urb->transfer_flags |= URB_ZERO_PACKET;
1168 }
1169
1170 spin_lock_irqsave(&dev->txq.lock, flags);
1171 retval = usb_autopm_get_interface_async(dev->intf);
1172 if (retval < 0) {
1173 spin_unlock_irqrestore(&dev->txq.lock, flags);
1174 goto drop;
1175 }
1176
1177 #ifdef CONFIG_PM
1178 /* if this triggers the device is still asleep */
1179 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
1180 /* transmission will be done in resume */
1181 usb_anchor_urb(urb, &dev->deferred);
1182 /* no use to process more packets */
1183 netif_stop_queue(net);
1184 usb_put_urb(urb);
1185 spin_unlock_irqrestore(&dev->txq.lock, flags);
1186 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
1187 goto deferred;
1188 }
1189 #endif
1190
1191 switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
1192 case -EPIPE:
1193 netif_stop_queue (net);
1194 usbnet_defer_kevent (dev, EVENT_TX_HALT);
1195 usb_autopm_put_interface_async(dev->intf);
1196 break;
1197 default:
1198 usb_autopm_put_interface_async(dev->intf);
1199 netif_dbg(dev, tx_err, dev->net,
1200 "tx: submit urb err %d\n", retval);
1201 break;
1202 case 0:
1203 net->trans_start = jiffies;
1204 __usbnet_queue_skb(&dev->txq, skb, tx_start);
1205 if (dev->txq.qlen >= TX_QLEN (dev))
1206 netif_stop_queue (net);
1207 }
1208 spin_unlock_irqrestore (&dev->txq.lock, flags);
1209
1210 if (retval) {
1211 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
1212 drop:
1213 dev->net->stats.tx_dropped++;
1214 not_drop:
1215 if (skb)
1216 dev_kfree_skb_any (skb);
1217 usb_free_urb (urb);
1218 } else
1219 netif_dbg(dev, tx_queued, dev->net,
1220 "> tx, len %d, type 0x%x\n", length, skb->protocol);
1221 #ifdef CONFIG_PM
1222 deferred:
1223 #endif
1224 return NETDEV_TX_OK;
1225 }
1226 EXPORT_SYMBOL_GPL(usbnet_start_xmit);
1227
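/* Refill the rx queue in bursts of at most 10 URBs per call, so the tasklet
 * never spends too long rebuilding the whole queue at once.
 */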
1228 static int rx_alloc_submit(struct usbnet *dev, gfp_t flags)
1229 {
1230 struct urb *urb;
1231 int i;
1232 int ret = 0;
1233
1234 /* don't refill the queue all at once */
1235 for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
1236 urb = usb_alloc_urb(0, flags);
1237 if (urb != NULL) {
1238 ret = rx_submit(dev, urb, flags);
1239 if (ret)
1240 goto err;
1241 } else {
1242 ret = -ENOMEM;
1243 goto err;
1244 }
1245 }
1246 err:
1247 return ret;
1248 }
1249
1250 /*-------------------------------------------------------------------------*/
1251
1252 // tasklet (work deferred from completions, in_irq) or timer
1253
1254 static void usbnet_bh (unsigned long param)
1255 {
1256 struct usbnet *dev = (struct usbnet *) param;
1257 struct sk_buff *skb;
1258 struct skb_data *entry;
1259
1260 while ((skb = skb_dequeue (&dev->done))) {
1261 entry = (struct skb_data *) skb->cb;
1262 switch (entry->state) {
1263 case rx_done:
1264 entry->state = rx_cleanup;
1265 rx_process (dev, skb);
1266 continue;
1267 case tx_done:
1268 case rx_cleanup:
1269 usb_free_urb (entry->urb);
1270 dev_kfree_skb (skb);
1271 continue;
1272 default:
1273 netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
1274 }
1275 }
1276
1277 /* restart RX again after disabling due to high error rate */
1278 clear_bit(EVENT_RX_KILL, &dev->flags);
1279
1280 // waiting for all pending urbs to complete?
1281 if (dev->wait) {
1282 if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
1283 wake_up (dev->wait);
1284 }
1285
1286 // or are we maybe short a few urbs?
1287 } else if (netif_running (dev->net) &&
1288 netif_device_present (dev->net) &&
1289 !timer_pending (&dev->delay) &&
1290 !test_bit (EVENT_RX_HALT, &dev->flags)) {
1291 int temp = dev->rxq.qlen;
1292
1293 if (temp < RX_QLEN(dev)) {
1294 if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
1295 return;
1296 if (temp != dev->rxq.qlen)
1297 netif_dbg(dev, link, dev->net,
1298 "rxqlen %d --> %d\n",
1299 temp, dev->rxq.qlen);
1300 if (dev->rxq.qlen < RX_QLEN(dev))
1301 tasklet_schedule (&dev->bh);
1302 }
1303 if (dev->txq.qlen < TX_QLEN (dev))
1304 netif_wake_queue (dev->net);
1305 }
1306 }
1307
1308
1309 /*-------------------------------------------------------------------------
1310 *
1311 * USB Device Driver support
1312 *
1313 *-------------------------------------------------------------------------*/
1314
1315 // precondition: never called in_interrupt
1316
1317 void usbnet_disconnect (struct usb_interface *intf)
1318 {
1319 struct usbnet *dev;
1320 struct usb_device *xdev;
1321 struct net_device *net;
1322
1323 dev = usb_get_intfdata(intf);
1324 usb_set_intfdata(intf, NULL);
1325 if (!dev)
1326 return;
1327
1328 xdev = interface_to_usbdev (intf);
1329
1330 netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
1331 intf->dev.driver->name,
1332 xdev->bus->bus_name, xdev->devpath,
1333 dev->driver_info->description);
1334
1335 net = dev->net;
1336 unregister_netdev (net);
1337
1338 cancel_work_sync(&dev->kevent);
1339
1340 usb_scuttle_anchored_urbs(&dev->deferred);
1341
1342 if (dev->driver_info->unbind)
1343 dev->driver_info->unbind (dev, intf);
1344
1345 usb_kill_urb(dev->interrupt);
1346 usb_free_urb(dev->interrupt);
1347
1348 free_netdev(net);
1349 }
1350 EXPORT_SYMBOL_GPL(usbnet_disconnect);
1351
1352 static const struct net_device_ops usbnet_netdev_ops = {
1353 .ndo_open = usbnet_open,
1354 .ndo_stop = usbnet_stop,
1355 .ndo_start_xmit = usbnet_start_xmit,
1356 .ndo_tx_timeout = usbnet_tx_timeout,
1357 .ndo_change_mtu = usbnet_change_mtu,
1358 .ndo_set_mac_address = eth_mac_addr,
1359 .ndo_validate_addr = eth_validate_addr,
1360 };
1361
1362 /*-------------------------------------------------------------------------*/
1363
1364 // precondition: never called in_interrupt
1365
1366 static struct device_type wlan_type = {
1367 .name = "wlan",
1368 };
1369
1370 static struct device_type wwan_type = {
1371 .name = "wwan",
1372 };
1373
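/* Bind a newly probed interface: allocate the netdev and usbnet state, let
 * the minidriver's bind() choose endpoints, framing and the interface name
 * (usb%d, eth%d, wlan%d or wwan%d), set up the optional status URB, and
 * register the network device.
 */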
1374 int
1375 usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1376 {
1377 struct usbnet *dev;
1378 struct net_device *net;
1379 struct usb_host_interface *interface;
1380 struct driver_info *info;
1381 struct usb_device *xdev;
1382 int status;
1383 const char *name;
1384 struct usb_driver *driver = to_usb_driver(udev->dev.driver);
1385
1386 /* usbnet already took a usb runtime pm reference, so autosuspend must be
1387 * enabled on the interface; otherwise usb_autopm_get_interface may
1388 * fail if USB_SUSPEND (runtime PM) is enabled.
1389 */
1390 if (!driver->supports_autosuspend) {
1391 driver->supports_autosuspend = 1;
1392 pm_runtime_enable(&udev->dev);
1393 }
1394
1395 name = udev->dev.driver->name;
1396 info = (struct driver_info *) prod->driver_info;
1397 if (!info) {
1398 dev_dbg (&udev->dev, "blacklisted by %s\n", name);
1399 return -ENODEV;
1400 }
1401 xdev = interface_to_usbdev (udev);
1402 interface = udev->cur_altsetting;
1403
1404 status = -ENOMEM;
1405
1406 // set up our own records
1407 net = alloc_etherdev(sizeof(*dev));
1408 if (!net)
1409 goto out;
1410
1411 /* netdev_printk() needs this so do it as early as possible */
1412 SET_NETDEV_DEV(net, &udev->dev);
1413
1414 dev = netdev_priv(net);
1415 dev->udev = xdev;
1416 dev->intf = udev;
1417 dev->driver_info = info;
1418 dev->driver_name = name;
1419 dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
1420 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
1421 skb_queue_head_init (&dev->rxq);
1422 skb_queue_head_init (&dev->txq);
1423 skb_queue_head_init (&dev->done);
1424 skb_queue_head_init(&dev->rxq_pause);
1425 dev->bh.func = usbnet_bh;
1426 dev->bh.data = (unsigned long) dev;
1427 INIT_WORK (&dev->kevent, kevent);
1428 init_usb_anchor(&dev->deferred);
1429 dev->delay.function = usbnet_bh;
1430 dev->delay.data = (unsigned long) dev;
1431 init_timer (&dev->delay);
1432 mutex_init (&dev->phy_mutex);
1433
1434 dev->net = net;
1435 strcpy (net->name, "usb%d");
1436 memcpy (net->dev_addr, node_id, sizeof node_id);
1437
1438 /* rx and tx sides can use different message sizes;
1439 * bind() should set rx_urb_size in that case.
1440 */
1441 dev->hard_mtu = net->mtu + net->hard_header_len;
1442 #if 0
1443 // dma_supported() is deeply broken on almost all architectures
1444 // possible with some EHCI controllers
1445 if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
1446 net->features |= NETIF_F_HIGHDMA;
1447 #endif
1448
1449 net->netdev_ops = &usbnet_netdev_ops;
1450 net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
1451 net->ethtool_ops = &usbnet_ethtool_ops;
1452
1453 // allow device-specific bind/init procedures
1454 // NOTE net->name still not usable ...
1455 if (info->bind) {
1456 status = info->bind (dev, udev);
1457 if (status < 0)
1458 goto out1;
1459
1460 // heuristic: "usb%d" for links we know are two-host,
1461 // else "eth%d" when there's reasonable doubt. userspace
1462 // can rename the link if it knows better.
1463 if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
1464 ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
1465 (net->dev_addr [0] & 0x02) == 0))
1466 strcpy (net->name, "eth%d");
1467 /* WLAN devices should always be named "wlan%d" */
1468 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1469 strcpy(net->name, "wlan%d");
1470 /* WWAN devices should always be named "wwan%d" */
1471 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1472 strcpy(net->name, "wwan%d");
1473
1474 /* devices that cannot do ARP */
1475 if ((dev->driver_info->flags & FLAG_NOARP) != 0)
1476 net->flags |= IFF_NOARP;
1477
1478 /* maybe the remote can't receive an Ethernet MTU */
1479 if (net->mtu > (dev->hard_mtu - net->hard_header_len))
1480 net->mtu = dev->hard_mtu - net->hard_header_len;
1481 } else if (!info->in || !info->out)
1482 status = usbnet_get_endpoints (dev, udev);
1483 else {
1484 dev->in = usb_rcvbulkpipe (xdev, info->in);
1485 dev->out = usb_sndbulkpipe (xdev, info->out);
1486 if (!(info->flags & FLAG_NO_SETINT))
1487 status = usb_set_interface (xdev,
1488 interface->desc.bInterfaceNumber,
1489 interface->desc.bAlternateSetting);
1490 else
1491 status = 0;
1492
1493 }
1494 if (status >= 0 && dev->status)
1495 status = init_status (dev, udev);
1496 if (status < 0)
1497 goto out3;
1498
1499 if (!dev->rx_urb_size)
1500 dev->rx_urb_size = dev->hard_mtu;
1501 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
1502
1503 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1504 SET_NETDEV_DEVTYPE(net, &wlan_type);
1505 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1506 SET_NETDEV_DEVTYPE(net, &wwan_type);
1507
1508 status = register_netdev (net);
1509 if (status)
1510 goto out4;
1511 netif_info(dev, probe, dev->net,
1512 "register '%s' at usb-%s-%s, %s, %pM\n",
1513 udev->dev.driver->name,
1514 xdev->bus->bus_name, xdev->devpath,
1515 dev->driver_info->description,
1516 net->dev_addr);
1517
1518 // ok, it's ready to go.
1519 usb_set_intfdata (udev, dev);
1520
1521 netif_device_attach (net);
1522
1523 if (dev->driver_info->flags & FLAG_LINK_INTR)
1524 netif_carrier_off(net);
1525
1526 return 0;
1527
1528 out4:
1529 usb_free_urb(dev->interrupt);
1530 out3:
1531 if (info->unbind)
1532 info->unbind (dev, udev);
1533 out1:
1534 free_netdev(net);
1535 out:
1536 return status;
1537 }
1538 EXPORT_SYMBOL_GPL(usbnet_probe);
1539
1540 /*-------------------------------------------------------------------------*/
1541
1542 /*
1543 * suspend the whole driver as soon as the first interface is suspended
1544 * resume only when the last interface is resumed
1545 */
1546
1547 int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
1548 {
1549 struct usbnet *dev = usb_get_intfdata(intf);
1550
1551 if (!dev->suspend_count++) {
1552 spin_lock_irq(&dev->txq.lock);
1553 /* don't autosuspend while transmitting */
1554 if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
1555 dev->suspend_count--;
1556 spin_unlock_irq(&dev->txq.lock);
1557 return -EBUSY;
1558 } else {
1559 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
1560 spin_unlock_irq(&dev->txq.lock);
1561 }
1562 /*
1563 * accelerate emptying of the rx and tx queues, to avoid
1564 * having everything error out.
1565 */
1566 netif_device_detach (dev->net);
1567 usbnet_terminate_urbs(dev);
1568 usb_kill_urb(dev->interrupt);
1569
1570 /*
1571 * reattach so runtime management can use and
1572 * wake the device
1573 */
1574 netif_device_attach (dev->net);
1575 }
1576 return 0;
1577 }
1578 EXPORT_SYMBOL_GPL(usbnet_suspend);
1579
1580 int usbnet_resume (struct usb_interface *intf)
1581 {
1582 struct usbnet *dev = usb_get_intfdata(intf);
1583 struct sk_buff *skb;
1584 struct urb *res;
1585 int retval;
1586
1587 if (!--dev->suspend_count) {
1588 /* resume interrupt URBs */
1589 if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
1590 usb_submit_urb(dev->interrupt, GFP_NOIO);
1591
1592 spin_lock_irq(&dev->txq.lock);
1593 while ((res = usb_get_from_anchor(&dev->deferred))) {
1594
1595 skb = (struct sk_buff *)res->context;
1596 retval = usb_submit_urb(res, GFP_ATOMIC);
1597 if (retval < 0) {
1598 dev_kfree_skb_any(skb);
1599 usb_free_urb(res);
1600 usb_autopm_put_interface_async(dev->intf);
1601 } else {
1602 dev->net->trans_start = jiffies;
1603 __skb_queue_tail(&dev->txq, skb);
1604 }
1605 }
1606
1607 smp_mb();
1608 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
1609 spin_unlock_irq(&dev->txq.lock);
1610
1611 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
1612 /* handle remote wakeup ASAP */
1613 if (!dev->wait &&
1614 netif_device_present(dev->net) &&
1615 !timer_pending(&dev->delay) &&
1616 !test_bit(EVENT_RX_HALT, &dev->flags))
1617 rx_alloc_submit(dev, GFP_NOIO);
1618
1619 if (!(dev->txq.qlen >= TX_QLEN(dev)))
1620 netif_tx_wake_all_queues(dev->net);
1621 tasklet_schedule (&dev->bh);
1622 }
1623 }
1624
1625 if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
1626 usb_autopm_get_interface_no_resume(intf);
1627
1628 return 0;
1629 }
1630 EXPORT_SYMBOL_GPL(usbnet_resume);
1631
1632 /*
1633 * Either a subdriver implements manage_power, in which case it is assumed
1634 * to always be ready to be suspended, or it reports its readiness to be
1635 * suspended explicitly.
1636 */
1637 void usbnet_device_suggests_idle(struct usbnet *dev)
1638 {
1639 if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
1640 dev->intf->needs_remote_wakeup = 1;
1641 usb_autopm_put_interface_async(dev->intf);
1642 }
1643 }
1644 EXPORT_SYMBOL(usbnet_device_suggests_idle);
1645
1646 /*
1647 * For devices that can do without special commands
1648 */
1649 int usbnet_manage_power(struct usbnet *dev, int on)
1650 {
1651 dev->intf->needs_remote_wakeup = on;
1652 return 0;
1653 }
1654 EXPORT_SYMBOL(usbnet_manage_power);
1655
1656 /*-------------------------------------------------------------------------*/
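/* Synchronous control-IN helper: the caller's buffer is bounced through a
 * kmalloc'd copy, since usb_control_msg() needs a DMA-able buffer, and up to
 * 'size' bytes are copied back on success.
 */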
1657 static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1658 u16 value, u16 index, void *data, u16 size)
1659 {
1660 void *buf = NULL;
1661 int err = -ENOMEM;
1662
1663 netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x"
1664 " value=0x%04x index=0x%04x size=%d\n",
1665 cmd, reqtype, value, index, size);
1666
1667 if (data) {
1668 buf = kmalloc(size, GFP_KERNEL);
1669 if (!buf)
1670 goto out;
1671 }
1672
1673 err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
1674 cmd, reqtype, value, index, buf, size,
1675 USB_CTRL_GET_TIMEOUT);
1676 if (err > 0 && err <= size)
1677 memcpy(data, buf, err);
1678 kfree(buf);
1679 out:
1680 return err;
1681 }
1682
1683 static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1684 u16 value, u16 index, const void *data,
1685 u16 size)
1686 {
1687 void *buf = NULL;
1688 int err = -ENOMEM;
1689
1690 netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
1691 " value=0x%04x index=0x%04x size=%d\n",
1692 cmd, reqtype, value, index, size);
1693
1694 if (data) {
1695 buf = kmemdup(data, size, GFP_KERNEL);
1696 if (!buf)
1697 goto out;
1698 }
1699
1700 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
1701 cmd, reqtype, value, index, buf, size,
1702 USB_CTRL_SET_TIMEOUT);
1703 kfree(buf);
1704
1705 out:
1706 return err;
1707 }
1708
1709 /*
1710 * This function must not be called from the suspend/resume callbacks,
1711 * otherwise it will deadlock.
1712 */
1713 int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1714 u16 value, u16 index, void *data, u16 size)
1715 {
1716 int ret;
1717
1718 if (usb_autopm_get_interface(dev->intf) < 0)
1719 return -ENODEV;
1720 ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
1721 data, size);
1722 usb_autopm_put_interface(dev->intf);
1723 return ret;
1724 }
1725 EXPORT_SYMBOL_GPL(usbnet_read_cmd);
1726
1727 /*
1728 * This function must not be called from the suspend/resume callbacks,
1729 * otherwise it will deadlock.
1730 */
1731 int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1732 u16 value, u16 index, const void *data, u16 size)
1733 {
1734 int ret;
1735
1736 if (usb_autopm_get_interface(dev->intf) < 0)
1737 return -ENODEV;
1738 ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
1739 data, size);
1740 usb_autopm_put_interface(dev->intf);
1741 return ret;
1742 }
1743 EXPORT_SYMBOL_GPL(usbnet_write_cmd);
1744
1745 /*
1746 * This function can safely be called from the suspend/resume callbacks,
1747 * and generally should only be called from them.
1748 */
1749 int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
1750 u16 value, u16 index, void *data, u16 size)
1751 {
1752 return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
1753 data, size);
1754 }
1755 EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);
1756
1757 /*
1758 * This function can safely be called from the suspend/resume callbacks,
1759 * and generally should only be called from them.
1760 */
1761 int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
1762 u16 value, u16 index, const void *data,
1763 u16 size)
1764 {
1765 return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
1766 data, size);
1767 }
1768 EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);
1769
1770 static void usbnet_async_cmd_cb(struct urb *urb)
1771 {
1772 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
1773 int status = urb->status;
1774
1775 if (status < 0)
1776 dev_dbg(&urb->dev->dev, "%s failed with %d",
1777 __func__, status);
1778
1779 kfree(req);
1780 usb_free_urb(urb);
1781 }
1782
1783 /*
1784 * The caller must make sure that the device cannot be put into a suspended
1785 * state until the control URB completes.
1786 */
1787 int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
1788 u16 value, u16 index, const void *data, u16 size)
1789 {
1790 struct usb_ctrlrequest *req = NULL;
1791 struct urb *urb;
1792 int err = -ENOMEM;
1793 void *buf = NULL;
1794
1795 netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
1796 " value=0x%04x index=0x%04x size=%d\n",
1797 cmd, reqtype, value, index, size);
1798
1799 urb = usb_alloc_urb(0, GFP_ATOMIC);
1800 if (!urb) {
1801 netdev_err(dev->net, "Error allocating URB in"
1802 " %s!\n", __func__);
1803 goto fail;
1804 }
1805
1806 if (data) {
1807 buf = kmemdup(data, size, GFP_ATOMIC);
1808 if (!buf) {
1809 netdev_err(dev->net, "Error allocating buffer"
1810 " in %s!\n", __func__);
1811 goto fail_free;
1812 }
1813 }
1814
1815 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
1816 if (!req)
1817 goto fail_free_buf;
1818
1819 req->bRequestType = reqtype;
1820 req->bRequest = cmd;
1821 req->wValue = cpu_to_le16(value);
1822 req->wIndex = cpu_to_le16(index);
1823 req->wLength = cpu_to_le16(size);
1824
1825 usb_fill_control_urb(urb, dev->udev,
1826 usb_sndctrlpipe(dev->udev, 0),
1827 (void *)req, buf, size,
1828 usbnet_async_cmd_cb, req);
1829 urb->transfer_flags |= URB_FREE_BUFFER;
1830
1831 err = usb_submit_urb(urb, GFP_ATOMIC);
1832 if (err < 0) {
1833 netdev_err(dev->net, "Error submitting the control"
1834 " message: status=%d\n", err);
1835 goto fail_free;
1836 }
1837 return 0;
1838
1839 fail_free_buf:
1840 kfree(buf);
1841 fail_free:
1842 kfree(req);
1843 usb_free_urb(urb);
1844 fail:
1845 return err;
1846
1847 }
1848 EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
1849 /*-------------------------------------------------------------------------*/
1850
1851 static int __init usbnet_init(void)
1852 {
1853 /* Compiler should optimize this out. */
1854 BUILD_BUG_ON(
1855 FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
1856
1857 eth_random_addr(node_id);
1858 return 0;
1859 }
1860 module_init(usbnet_init);
1861
1862 static void __exit usbnet_exit(void)
1863 {
1864 }
1865 module_exit(usbnet_exit);
1866
1867 MODULE_AUTHOR("David Brownell");
1868 MODULE_DESCRIPTION("USB network driver framework");
1869 MODULE_LICENSE("GPL");