/* drivers/usb/misc/usbtest.c */
1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/init.h>
4 #include <linux/slab.h>
5 #include <linux/mm.h>
6 #include <linux/module.h>
7 #include <linux/moduleparam.h>
8 #include <linux/scatterlist.h>
9 #include <linux/mutex.h>
10
11 #include <linux/usb.h>
12
13
14 /*-------------------------------------------------------------------------*/
15
16 static int override_alt = -1;
17 module_param_named(alt, override_alt, int, 0644);
18 MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");
19
20 /*-------------------------------------------------------------------------*/
21
22 /* FIXME make these public somewhere; usbdevfs.h? */
23 struct usbtest_param {
24 /* inputs */
25 unsigned test_num; /* 0..(TEST_CASES-1) */
26 unsigned iterations;
27 unsigned length;
28 unsigned vary;
29 unsigned sglen;
30
31 /* outputs */
32 struct timeval duration;
33 };
34 #define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param)
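/* Illustrative sketch only (not part of this driver): user space reaches
 * USBTEST_REQUEST through usbfs via USBDEVFS_IOCTL, roughly the way the
 * testusb tool does.  The device path and interface number below are
 * hypothetical.
 *
 *	struct usbtest_param param = {
 *		.test_num = 1, .iterations = 1000, .length = 512,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno = 0,
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data = &param,
 *	};
 *	int fd = open("/dev/bus/usb/001/002", O_RDWR);
 *	ioctl(fd, USBDEVFS_IOCTL, &wrapper);
 */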
35
36 /*-------------------------------------------------------------------------*/
37
38 #define GENERIC /* let probe() bind using module params */
39
40 /* Some devices that can be used for testing will have "real" drivers.
41 * Entries for those need to be enabled here by hand, after disabling
42 * that "real" driver.
43 */
44 //#define IBOT2 /* grab iBOT2 webcams */
45 //#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */
46
47 /*-------------------------------------------------------------------------*/
48
49 struct usbtest_info {
50 const char *name;
51 u8 ep_in; /* bulk/intr source */
52 u8 ep_out; /* bulk/intr sink */
53 unsigned autoconf:1;
54 unsigned ctrl_out:1;
55 unsigned iso:1; /* try iso in/out */
56 int alt;
57 };
58
59 /* this is accessed only through usbfs ioctl calls.
60 * one ioctl to issue a test ... one lock per device.
61 * tests create other threads if they need them.
62 * urbs and buffers are allocated dynamically,
63 * and data generated deterministically.
64 */
65 struct usbtest_dev {
66 struct usb_interface *intf;
67 struct usbtest_info *info;
68 int in_pipe;
69 int out_pipe;
70 int in_iso_pipe;
71 int out_iso_pipe;
72 struct usb_endpoint_descriptor *iso_in, *iso_out;
73 struct mutex lock;
74
75 #define TBUF_SIZE 256
76 u8 *buf;
77 };
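/* dev->buf is a TBUF_SIZE scratch area for control transfers; see
 * is_good_config(), which treats a TBUF_SIZE-byte read as the largest
 * acceptable partial config-descriptor read.
 */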
78
79 static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
80 {
81 return interface_to_usbdev(test->intf);
82 }
83
84 /* set up all urbs so they can be used with either bulk or interrupt */
85 #define INTERRUPT_RATE 1 /* msec/transfer */
86
87 #define ERROR(tdev, fmt, args...) \
88 dev_err(&(tdev)->intf->dev , fmt , ## args)
89 #define WARNING(tdev, fmt, args...) \
90 dev_warn(&(tdev)->intf->dev , fmt , ## args)
91
92 #define GUARD_BYTE 0xA5
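/* 0xA5 (binary 10100101) fills the padding in front of unaligned buffers
 * and, for IN transfers, the buffer itself: check_guard_bytes() catches
 * overwrites of the padding, and simple_check_buf() catches data the
 * device or HC never actually copied in.
 */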
93
94 /*-------------------------------------------------------------------------*/
95
96 static int
97 get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
98 {
99 int tmp;
100 struct usb_host_interface *alt;
101 struct usb_host_endpoint *in, *out;
102 struct usb_host_endpoint *iso_in, *iso_out;
103 struct usb_device *udev;
104
105 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
106 unsigned ep;
107
108 in = out = NULL;
109 iso_in = iso_out = NULL;
110 alt = intf->altsetting + tmp;
111
112 if (override_alt >= 0 &&
113 override_alt != alt->desc.bAlternateSetting)
114 continue;
115
116 /* take the first altsetting with in-bulk + out-bulk;
117 * ignore other endpoints and altsettings.
118 */
119 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
120 struct usb_host_endpoint *e;
121
122 e = alt->endpoint + ep;
123 switch (e->desc.bmAttributes) {
124 case USB_ENDPOINT_XFER_BULK:
125 break;
126 case USB_ENDPOINT_XFER_ISOC:
127 if (dev->info->iso)
128 goto try_iso;
129 /* FALLTHROUGH */
130 default:
131 continue;
132 }
133 if (usb_endpoint_dir_in(&e->desc)) {
134 if (!in)
135 in = e;
136 } else {
137 if (!out)
138 out = e;
139 }
140 continue;
141 try_iso:
142 if (usb_endpoint_dir_in(&e->desc)) {
143 if (!iso_in)
144 iso_in = e;
145 } else {
146 if (!iso_out)
147 iso_out = e;
148 }
149 }
150 if ((in && out) || iso_in || iso_out)
151 goto found;
152 }
153 return -EINVAL;
154
155 found:
156 udev = testdev_to_usbdev(dev);
157 dev->info->alt = alt->desc.bAlternateSetting;
158 if (alt->desc.bAlternateSetting != 0) {
159 tmp = usb_set_interface(udev,
160 alt->desc.bInterfaceNumber,
161 alt->desc.bAlternateSetting);
162 if (tmp < 0)
163 return tmp;
164 }
165
166 if (in) {
167 dev->in_pipe = usb_rcvbulkpipe(udev,
168 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
169 dev->out_pipe = usb_sndbulkpipe(udev,
170 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
171 }
172 if (iso_in) {
173 dev->iso_in = &iso_in->desc;
174 dev->in_iso_pipe = usb_rcvisocpipe(udev,
175 iso_in->desc.bEndpointAddress
176 & USB_ENDPOINT_NUMBER_MASK);
177 }
178
179 if (iso_out) {
180 dev->iso_out = &iso_out->desc;
181 dev->out_iso_pipe = usb_sndisocpipe(udev,
182 iso_out->desc.bEndpointAddress
183 & USB_ENDPOINT_NUMBER_MASK);
184 }
185 return 0;
186 }
187
188 /*-------------------------------------------------------------------------*/
189
190 /* Support for testing basic non-queued I/O streams.
191 *
192 * These just package urbs as requests that can be easily canceled.
193 * Each urb's data buffer is dynamically allocated; callers can fill
194 * them with non-zero test data (or test for it) when appropriate.
195 */
196
197 static void simple_callback(struct urb *urb)
198 {
199 complete(urb->context);
200 }
201
202 static struct urb *usbtest_alloc_urb(
203 struct usb_device *udev,
204 int pipe,
205 unsigned long bytes,
206 unsigned transfer_flags,
207 unsigned offset)
208 {
209 struct urb *urb;
210
211 urb = usb_alloc_urb(0, GFP_KERNEL);
212 if (!urb)
213 return urb;
214 usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
215 urb->interval = (udev->speed == USB_SPEED_HIGH)
216 ? (INTERRUPT_RATE << 3)
217 : INTERRUPT_RATE;
218 urb->transfer_flags = transfer_flags;
219 if (usb_pipein(pipe))
220 urb->transfer_flags |= URB_SHORT_NOT_OK;
221
222 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
223 urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
224 GFP_KERNEL, &urb->transfer_dma);
225 else
226 urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
227
228 if (!urb->transfer_buffer) {
229 usb_free_urb(urb);
230 return NULL;
231 }
232
233 /* To test unaligned transfers add an offset and fill the
234 unused memory with a guard value */
235 if (offset) {
236 memset(urb->transfer_buffer, GUARD_BYTE, offset);
237 urb->transfer_buffer += offset;
238 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
239 urb->transfer_dma += offset;
240 }
241
242 /* For inbound transfers use the guard byte so that the test fails
243 if the data was not correctly copied */
244 memset(urb->transfer_buffer,
245 usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
246 bytes);
247 return urb;
248 }
249
250 static struct urb *simple_alloc_urb(
251 struct usb_device *udev,
252 int pipe,
253 unsigned long bytes)
254 {
255 return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
256 }
257
258 static unsigned pattern;
259 static unsigned mod_pattern;
260 module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
261 MODULE_PARM_DESC(pattern, "i/o pattern (0 == zeroes)");
262
263 static inline void simple_fill_buf(struct urb *urb)
264 {
265 unsigned i;
266 u8 *buf = urb->transfer_buffer;
267 unsigned len = urb->transfer_buffer_length;
268
269 switch (pattern) {
270 default:
271 /* FALLTHROUGH */
272 case 0:
273 memset(buf, 0, len);
274 break;
275 case 1: /* mod63 */
276 for (i = 0; i < len; i++)
277 *buf++ = (u8) (i % 63);
278 break;
279 }
280 }
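/* Note on the mod63 pattern: 63 does not divide any of the usual
 * power-of-two maxpacket sizes (8/16/32/64/512/1024), so the sequence
 * stays out of phase with packet boundaries and lost or duplicated
 * packets show up as mismatches in simple_check_buf().
 */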
281
282 static inline unsigned long buffer_offset(void *buf)
283 {
284 return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
285 }
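/* Worked example (assuming ARCH_KMALLOC_MINALIGN == 8): an urb built by
 * usbtest_alloc_urb(..., offset = 1) has transfer_buffer one byte past
 * an aligned boundary, so buffer_offset() returns 1, letting the guard
 * byte in front of it be verified and the allocation freed correctly.
 */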
286
287 static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
288 {
289 u8 *buf = urb->transfer_buffer;
290 u8 *guard = buf - buffer_offset(buf);
291 unsigned i;
292
293 for (i = 0; guard < buf; i++, guard++) {
294 if (*guard != GUARD_BYTE) {
295 ERROR(tdev, "guard byte[%d] %d (not %d)\n",
296 i, *guard, GUARD_BYTE);
297 return -EINVAL;
298 }
299 }
300 return 0;
301 }
302
303 static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
304 {
305 unsigned i;
306 u8 expected;
307 u8 *buf = urb->transfer_buffer;
308 unsigned len = urb->actual_length;
309
310 int ret = check_guard_bytes(tdev, urb);
311 if (ret)
312 return ret;
313
314 for (i = 0; i < len; i++, buf++) {
315 switch (pattern) {
316 /* all-zeroes has no synchronization issues */
317 case 0:
318 expected = 0;
319 break;
320 /* mod63 stays in sync with short-terminated transfers,
321 * or otherwise when host and gadget agree on how large
322 * each usb transfer request should be. resync is done
323 * with set_interface or set_config.
324 */
325 case 1: /* mod63 */
326 expected = i % 63;
327 break;
328 /* always fail unsupported patterns */
329 default:
330 expected = !*buf;
331 break;
332 }
333 if (*buf == expected)
334 continue;
335 ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
336 return -EINVAL;
337 }
338 return 0;
339 }
340
341 static void simple_free_urb(struct urb *urb)
342 {
343 unsigned long offset = buffer_offset(urb->transfer_buffer);
344
345 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
346 usb_free_coherent(
347 urb->dev,
348 urb->transfer_buffer_length + offset,
349 urb->transfer_buffer - offset,
350 urb->transfer_dma - offset);
351 else
352 kfree(urb->transfer_buffer - offset);
353 usb_free_urb(urb);
354 }
355
356 static int simple_io(
357 struct usbtest_dev *tdev,
358 struct urb *urb,
359 int iterations,
360 int vary,
361 int expected,
362 const char *label
363 )
364 {
365 struct usb_device *udev = urb->dev;
366 int max = urb->transfer_buffer_length;
367 struct completion completion;
368 int retval = 0;
369
370 urb->context = &completion;
371 while (retval == 0 && iterations-- > 0) {
372 init_completion(&completion);
373 if (usb_pipeout(urb->pipe)) {
374 simple_fill_buf(urb);
375 urb->transfer_flags |= URB_ZERO_PACKET;
376 }
377 retval = usb_submit_urb(urb, GFP_KERNEL);
378 if (retval != 0)
379 break;
380
381 /* NOTE: no timeouts; can't be broken out of by interrupt */
382 wait_for_completion(&completion);
383 retval = urb->status;
384 urb->dev = udev;
385 if (retval == 0 && usb_pipein(urb->pipe))
386 retval = simple_check_buf(tdev, urb);
387
388 if (vary) {
389 int len = urb->transfer_buffer_length;
390
391 len += vary;
392 len %= max;
393 if (len == 0)
394 len = (vary < max) ? vary : max;
395 urb->transfer_buffer_length = len;
396 }
397
398 /* FIXME if endpoint halted, clear halt (and log) */
399 }
400 urb->transfer_buffer_length = max;
401
402 if (expected != retval)
403 dev_err(&udev->dev,
404 "%s failed, iterations left %d, status %d (not %d)\n",
405 label, iterations, retval, expected);
406 return retval;
407 }
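/* Example of the "vary" length walk above, with illustrative numbers:
 * for max == 512 and vary == 100 the successive transfer lengths are
 * 512, 100, 200, 300, 400, 500, 88, 188, ...  The length advances by
 * vary modulo max, and a result of 0 is remapped so no transfer is
 * ever zero-length.
 */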
408
409
410 /*-------------------------------------------------------------------------*/
411
412 /* We use scatterlist primitives to test queued I/O.
413 * Yes, this also tests the scatterlist primitives.
414 */
415
416 static void free_sglist(struct scatterlist *sg, int nents)
417 {
418 unsigned i;
419
420 if (!sg)
421 return;
422 for (i = 0; i < nents; i++) {
423 if (!sg_page(&sg[i]))
424 continue;
425 kfree(sg_virt(&sg[i]));
426 }
427 kfree(sg);
428 }
429
430 static struct scatterlist *
431 alloc_sglist(int nents, int max, int vary)
432 {
433 struct scatterlist *sg;
434 unsigned i;
435 unsigned size = max;
436
437 if (max == 0)
438 return NULL;
439
440 sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL);
441 if (!sg)
442 return NULL;
443 sg_init_table(sg, nents);
444
445 for (i = 0; i < nents; i++) {
446 char *buf;
447 unsigned j;
448
449 buf = kzalloc(size, GFP_KERNEL);
450 if (!buf) {
451 free_sglist(sg, i);
452 return NULL;
453 }
454
455 /* kmalloc pages are always physically contiguous! */
456 sg_set_buf(&sg[i], buf, size);
457
458 switch (pattern) {
459 case 0:
460 /* already zeroed */
461 break;
462 case 1:
463 for (j = 0; j < size; j++)
464 *buf++ = (u8) (j % 63);
465 break;
466 }
467
468 if (vary) {
469 size += vary;
470 size %= max;
471 if (size == 0)
472 size = (vary < max) ? vary : max;
473 }
474 }
475
476 return sg;
477 }
478
479 static int perform_sglist(
480 struct usbtest_dev *tdev,
481 unsigned iterations,
482 int pipe,
483 struct usb_sg_request *req,
484 struct scatterlist *sg,
485 int nents
486 )
487 {
488 struct usb_device *udev = testdev_to_usbdev(tdev);
489 int retval = 0;
490
491 while (retval == 0 && iterations-- > 0) {
492 retval = usb_sg_init(req, udev, pipe,
493 (udev->speed == USB_SPEED_HIGH)
494 ? (INTERRUPT_RATE << 3)
495 : INTERRUPT_RATE,
496 sg, nents, 0, GFP_KERNEL);
497
498 if (retval)
499 break;
500 usb_sg_wait(req);
501 retval = req->status;
502
503 /* FIXME check resulting data pattern */
504
505 /* FIXME if endpoint halted, clear halt (and log) */
506 }
507
508 /* FIXME for unlink or fault handling tests, don't report
509 * failure if retval is as we expected ...
510 */
511 if (retval)
512 ERROR(tdev, "perform_sglist failed, "
513 "iterations left %d, status %d\n",
514 iterations, retval);
515 return retval;
516 }
517
518
519 /*-------------------------------------------------------------------------*/
520
521 /* unqueued control message testing
522 *
523 * there's a nice set of device functional requirements in chapter 9 of the
524 * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
525 * special test firmware.
526 *
527 * we know the device is configured (or suspended) by the time it's visible
528 * through usbfs. we can't change that, so we won't test enumeration (which
529 * worked 'well enough' to get here, this time), power management (ditto),
530 * or remote wakeup (which needs human interaction).
531 */
532
533 static unsigned realworld = 1;
534 module_param(realworld, uint, 0);
535 MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
536
537 static int get_altsetting(struct usbtest_dev *dev)
538 {
539 struct usb_interface *iface = dev->intf;
540 struct usb_device *udev = interface_to_usbdev(iface);
541 int retval;
542
543 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
544 USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
545 0, iface->altsetting[0].desc.bInterfaceNumber,
546 dev->buf, 1, USB_CTRL_GET_TIMEOUT);
547 switch (retval) {
548 case 1:
549 return dev->buf[0];
550 case 0:
551 retval = -ERANGE;
552 /* FALLTHROUGH */
553 default:
554 return retval;
555 }
556 }
557
558 static int set_altsetting(struct usbtest_dev *dev, int alternate)
559 {
560 struct usb_interface *iface = dev->intf;
561 struct usb_device *udev;
562
563 if (alternate < 0 || alternate >= 256)
564 return -EINVAL;
565
566 udev = interface_to_usbdev(iface);
567 return usb_set_interface(udev,
568 iface->altsetting[0].desc.bInterfaceNumber,
569 alternate);
570 }
571
572 static int is_good_config(struct usbtest_dev *tdev, int len)
573 {
574 struct usb_config_descriptor *config;
575
576 if (len < sizeof *config)
577 return 0;
578 config = (struct usb_config_descriptor *) tdev->buf;
579
580 switch (config->bDescriptorType) {
581 case USB_DT_CONFIG:
582 case USB_DT_OTHER_SPEED_CONFIG:
583 if (config->bLength != 9) {
584 ERROR(tdev, "bogus config descriptor length\n");
585 return 0;
586 }
587 /* this bit 'must be 1' but often isn't */
588 if (!realworld && !(config->bmAttributes & 0x80)) {
589 ERROR(tdev, "high bit of config attributes not set\n");
590 return 0;
591 }
592 if (config->bmAttributes & 0x1f) { /* reserved == 0 */
593 ERROR(tdev, "reserved config bits set\n");
594 return 0;
595 }
596 break;
597 default:
598 return 0;
599 }
600
601 if (le16_to_cpu(config->wTotalLength) == len) /* read it all */
602 return 1;
603 if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */
604 return 1;
605 ERROR(tdev, "bogus config descriptor read size\n");
606 return 0;
607 }
608
609 /* sanity test for standard requests working with usb_control_msg() and some
610 * of the utility functions which use it.
611 *
612 * this doesn't test how endpoint halts behave or data toggles get set, since
613 * we won't do I/O to bulk/interrupt endpoints here (which is how to change
614 * halt or toggle). toggle testing is impractical without support from hcds.
615 *
616 * this avoids failing devices linux would normally work with, by not testing
617 * config/altsetting operations for devices that only support their defaults.
618 * such devices rarely support those needless operations.
619 *
620 * NOTE that since this is a sanity test, it's not examining boundary cases
621 * to see if usbcore, hcd, and device all behave right. such testing would
622 * involve varied read sizes and other operation sequences.
623 */
624 static int ch9_postconfig(struct usbtest_dev *dev)
625 {
626 struct usb_interface *iface = dev->intf;
627 struct usb_device *udev = interface_to_usbdev(iface);
628 int i, alt, retval;
629
630 /* [9.2.3] if there's more than one altsetting, we need to be able to
631 * set and get each one. mostly trusts the descriptors from usbcore.
632 */
633 for (i = 0; i < iface->num_altsetting; i++) {
634
635 /* 9.2.3 constrains the range here */
636 alt = iface->altsetting[i].desc.bAlternateSetting;
637 if (alt < 0 || alt >= iface->num_altsetting) {
638 dev_err(&iface->dev,
639 "invalid alt [%d].bAltSetting = %d\n",
640 i, alt);
641 }
642
643 /* [real world] get/set unimplemented if there's only one */
644 if (realworld && iface->num_altsetting == 1)
645 continue;
646
647 /* [9.4.10] set_interface */
648 retval = set_altsetting(dev, alt);
649 if (retval) {
650 dev_err(&iface->dev, "can't set_interface = %d, %d\n",
651 alt, retval);
652 return retval;
653 }
654
655 /* [9.4.4] get_interface always works */
656 retval = get_altsetting(dev);
657 if (retval != alt) {
658 dev_err(&iface->dev, "get alt should be %d, was %d\n",
659 alt, retval);
660 return (retval < 0) ? retval : -EDOM;
661 }
662
663 }
664
665 /* [real world] get_config unimplemented if there's only one */
666 if (!realworld || udev->descriptor.bNumConfigurations != 1) {
667 int expected = udev->actconfig->desc.bConfigurationValue;
668
669 /* [9.4.2] get_configuration always works
670 * ... although some cheap devices (like one TI Hub I've got)
671 * won't return config descriptors except before set_config.
672 */
673 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
674 USB_REQ_GET_CONFIGURATION,
675 USB_DIR_IN | USB_RECIP_DEVICE,
676 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
677 if (retval != 1 || dev->buf[0] != expected) {
678 dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
679 retval, dev->buf[0], expected);
680 return (retval < 0) ? retval : -EDOM;
681 }
682 }
683
684 /* there's always [9.4.3] a device descriptor [9.6.1] */
685 retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
686 dev->buf, sizeof udev->descriptor);
687 if (retval != sizeof udev->descriptor) {
688 dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
689 return (retval < 0) ? retval : -EDOM;
690 }
691
692 /* there's always [9.4.3] at least one config descriptor [9.6.3] */
693 for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
694 retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
695 dev->buf, TBUF_SIZE);
696 if (!is_good_config(dev, retval)) {
697 dev_err(&iface->dev,
698 "config [%d] descriptor --> %d\n",
699 i, retval);
700 return (retval < 0) ? retval : -EDOM;
701 }
702
703 /* FIXME cross-checking udev->config[i] to make sure usbcore
704 * parsed it right (etc) would be good testing paranoia
705 */
706 }
707
708 /* and sometimes [9.2.6.6] speed dependent descriptors */
709 if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
710 struct usb_qualifier_descriptor *d = NULL;
711
712 /* device qualifier [9.6.2] */
713 retval = usb_get_descriptor(udev,
714 USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
715 sizeof(struct usb_qualifier_descriptor));
716 if (retval == -EPIPE) {
717 if (udev->speed == USB_SPEED_HIGH) {
718 dev_err(&iface->dev,
719 "hs dev qualifier --> %d\n",
720 retval);
721 return (retval < 0) ? retval : -EDOM;
722 }
723 /* usb2.0 but not high-speed capable; fine */
724 } else if (retval != sizeof(struct usb_qualifier_descriptor)) {
725 dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
726 return (retval < 0) ? retval : -EDOM;
727 } else
728 d = (struct usb_qualifier_descriptor *) dev->buf;
729
730 /* might not have [9.6.2] any other-speed configs [9.6.4] */
731 if (d) {
732 unsigned max = d->bNumConfigurations;
733 for (i = 0; i < max; i++) {
734 retval = usb_get_descriptor(udev,
735 USB_DT_OTHER_SPEED_CONFIG, i,
736 dev->buf, TBUF_SIZE);
737 if (!is_good_config(dev, retval)) {
738 dev_err(&iface->dev,
739 "other speed config --> %d\n",
740 retval);
741 return (retval < 0) ? retval : -EDOM;
742 }
743 }
744 }
745 }
746 /* FIXME fetch strings from at least the device descriptor */
747
748 /* [9.4.5] get_status always works */
749 retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
750 if (retval) {
751 dev_err(&iface->dev, "get dev status --> %d\n", retval);
752 return retval;
753 }
754
755 /* FIXME configuration.bmAttributes says if we could try to set/clear
756 * the device's remote wakeup feature ... if we can, test that here
757 */
758
759 retval = usb_get_status(udev, USB_RECIP_INTERFACE,
760 iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
761 if (retval) {
762 dev_err(&iface->dev, "get interface status --> %d\n", retval);
763 return retval;
764 }
765 /* FIXME get status for each endpoint in the interface */
766
767 return 0;
768 }
769
770 /*-------------------------------------------------------------------------*/
771
772 /* use ch9 requests to test whether:
773 * (a) queues work for control, keeping N subtests queued and
774 * active (auto-resubmit) for M loops through the queue.
775 * (b) protocol stalls (control-only) will autorecover.
776 * it's not like bulk/intr; no halt clearing.
777 * (c) short control reads are reported and handled.
778 * (d) queues are always processed in-order
779 */
780
781 struct ctrl_ctx {
782 spinlock_t lock;
783 struct usbtest_dev *dev;
784 struct completion complete;
785 unsigned count;
786 unsigned pending;
787 int status;
788 struct urb **urb;
789 struct usbtest_param *param;
790 int last;
791 };
792
793 #define NUM_SUBCASES 15 /* how many test subcases here? */
794
795 struct subcase {
796 struct usb_ctrlrequest setup;
797 int number;
798 int expected;
799 };
800
801 static void ctrl_complete(struct urb *urb)
802 {
803 struct ctrl_ctx *ctx = urb->context;
804 struct usb_ctrlrequest *reqp;
805 struct subcase *subcase;
806 int status = urb->status;
807
808 reqp = (struct usb_ctrlrequest *)urb->setup_packet;
809 subcase = container_of(reqp, struct subcase, setup);
810
811 spin_lock(&ctx->lock);
812 ctx->count--;
813 ctx->pending--;
814
815 /* queue must transfer and complete in fifo order, unless
816 * usb_unlink_urb() is used to unlink something not at the
817 * physical queue head (not tested).
818 */
819 if (subcase->number > 0) {
820 if ((subcase->number - ctx->last) != 1) {
821 ERROR(ctx->dev,
822 "subcase %d completed out of order, last %d\n",
823 subcase->number, ctx->last);
824 status = -EDOM;
825 ctx->last = subcase->number;
826 goto error;
827 }
828 }
829 ctx->last = subcase->number;
830
831 /* succeed or fault in only one way? */
832 if (status == subcase->expected)
833 status = 0;
834
835 /* async unlink for cleanup? */
836 else if (status != -ECONNRESET) {
837
838 /* some faults are allowed, not required */
839 if (subcase->expected > 0 && (
840 ((status == -subcase->expected /* happened */
841 || status == 0)))) /* didn't */
842 status = 0;
843 /* sometimes more than one fault is allowed */
844 else if (subcase->number == 12 && status == -EPIPE)
845 status = 0;
846 else
847 ERROR(ctx->dev, "subtest %d error, status %d\n",
848 subcase->number, status);
849 }
850
851 /* unexpected status codes mean errors; ideally, in hardware */
852 if (status) {
853 error:
854 if (ctx->status == 0) {
855 int i;
856
857 ctx->status = status;
858 ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
859 "%d left, subcase %d, len %d/%d\n",
860 reqp->bRequestType, reqp->bRequest,
861 status, ctx->count, subcase->number,
862 urb->actual_length,
863 urb->transfer_buffer_length);
864
865 /* FIXME this "unlink everything" exit route should
866 * be a separate test case.
867 */
868
869 /* unlink whatever's still pending */
870 for (i = 1; i < ctx->param->sglen; i++) {
871 struct urb *u = ctx->urb[
872 (i + subcase->number)
873 % ctx->param->sglen];
874
875 if (u == urb || !u->dev)
876 continue;
877 spin_unlock(&ctx->lock);
878 status = usb_unlink_urb(u);
879 spin_lock(&ctx->lock);
880 switch (status) {
881 case -EINPROGRESS:
882 case -EBUSY:
883 case -EIDRM:
884 continue;
885 default:
886 ERROR(ctx->dev, "urb unlink --> %d\n",
887 status);
888 }
889 }
890 status = ctx->status;
891 }
892 }
893
894 /* resubmit if we need to, else mark this as done */
895 if ((status == 0) && (ctx->pending < ctx->count)) {
896 status = usb_submit_urb(urb, GFP_ATOMIC);
897 if (status != 0) {
898 ERROR(ctx->dev,
899 "can't resubmit ctrl %02x.%02x, err %d\n",
900 reqp->bRequestType, reqp->bRequest, status);
901 urb->dev = NULL;
902 } else
903 ctx->pending++;
904 } else
905 urb->dev = NULL;
906
907 /* signal completion when nothing's queued */
908 if (ctx->pending == 0)
909 complete(&ctx->complete);
910 spin_unlock(&ctx->lock);
911 }
912
913 static int
914 test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
915 {
916 struct usb_device *udev = testdev_to_usbdev(dev);
917 struct urb **urb;
918 struct ctrl_ctx context;
919 int i;
920
921 if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
922 return -EOPNOTSUPP;
923
924 spin_lock_init(&context.lock);
925 context.dev = dev;
926 init_completion(&context.complete);
927 context.count = param->sglen * param->iterations;
928 context.pending = 0;
929 context.status = -ENOMEM;
930 context.param = param;
931 context.last = -1;
932
933 /* allocate and init the urbs we'll queue.
934 * as with bulk/intr sglists, sglen is the queue depth; it also
935 * controls which subtests run (more tests than sglen) or rerun.
936 */
937 urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
938 if (!urb)
939 return -ENOMEM;
940 for (i = 0; i < param->sglen; i++) {
941 int pipe = usb_rcvctrlpipe(udev, 0);
942 unsigned len;
943 struct urb *u;
944 struct usb_ctrlrequest req;
945 struct subcase *reqp;
946
947 /* sign of this variable means:
948 * -: tested code must return this (negative) error code
949 * +: tested code may return this (negative too) error code
950 */
951 int expected = 0;
952
953 /* requests here are mostly expected to succeed on any
954 * device, but some are chosen to trigger protocol stalls
955 * or short reads.
956 */
957 memset(&req, 0, sizeof req);
958 req.bRequest = USB_REQ_GET_DESCRIPTOR;
959 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
960
961 switch (i % NUM_SUBCASES) {
962 case 0: /* get device descriptor */
963 req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
964 len = sizeof(struct usb_device_descriptor);
965 break;
966 case 1: /* get first config descriptor (only) */
967 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
968 len = sizeof(struct usb_config_descriptor);
969 break;
970 case 2: /* get altsetting (OFTEN STALLS) */
971 req.bRequest = USB_REQ_GET_INTERFACE;
972 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
973 /* index = 0 means first interface */
974 len = 1;
975 expected = EPIPE;
976 break;
977 case 3: /* get interface status */
978 req.bRequest = USB_REQ_GET_STATUS;
979 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
980 /* interface 0 */
981 len = 2;
982 break;
983 case 4: /* get device status */
984 req.bRequest = USB_REQ_GET_STATUS;
985 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
986 len = 2;
987 break;
988 case 5: /* get device qualifier (MAY STALL) */
989 req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
990 len = sizeof(struct usb_qualifier_descriptor);
991 if (udev->speed != USB_SPEED_HIGH)
992 expected = EPIPE;
993 break;
994 case 6: /* get first config descriptor, plus interface */
995 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
996 len = sizeof(struct usb_config_descriptor);
997 len += sizeof(struct usb_interface_descriptor);
998 break;
999 case 7: /* get interface descriptor (ALWAYS STALLS) */
1000 req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
1001 /* interface == 0 */
1002 len = sizeof(struct usb_interface_descriptor);
1003 expected = -EPIPE;
1004 break;
1005 /* NOTE: two consecutive stalls in the queue here.
1006 * that tests fault recovery a bit more aggressively. */
1007 case 8: /* clear endpoint halt (MAY STALL) */
1008 req.bRequest = USB_REQ_CLEAR_FEATURE;
1009 req.bRequestType = USB_RECIP_ENDPOINT;
1010 /* wValue 0 == ep halt */
1011 /* wIndex 0 == ep0 (shouldn't halt!) */
1012 len = 0;
1013 pipe = usb_sndctrlpipe(udev, 0);
1014 expected = EPIPE;
1015 break;
1016 case 9: /* get endpoint status */
1017 req.bRequest = USB_REQ_GET_STATUS;
1018 req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
1019 /* endpoint 0 */
1020 len = 2;
1021 break;
1022 case 10: /* trigger short read (EREMOTEIO) */
1023 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1024 len = 1024;
1025 expected = -EREMOTEIO;
1026 break;
1027 /* NOTE: two consecutive _different_ faults in the queue. */
1028 case 11: /* get endpoint descriptor (ALWAYS STALLS) */
1029 req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
1030 /* endpoint == 0 */
1031 len = sizeof(struct usb_interface_descriptor);
1032 expected = EPIPE;
1033 break;
1034 /* NOTE: sometimes even a third fault in the queue! */
1035 case 12: /* get string 0 descriptor (MAY STALL) */
1036 req.wValue = cpu_to_le16(USB_DT_STRING << 8);
1037 /* string == 0, for language IDs */
1038 len = sizeof(struct usb_interface_descriptor);
1039 /* may succeed when > 4 languages */
1040 expected = EREMOTEIO; /* or EPIPE, if no strings */
1041 break;
1042 case 13: /* short read, resembling case 10 */
1043 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1044 /* last data packet "should" be DATA1, not DATA0 */
1045 if (udev->speed == USB_SPEED_SUPER)
1046 len = 1024 - 512;
1047 else
1048 len = 1024 - udev->descriptor.bMaxPacketSize0;
1049 expected = -EREMOTEIO;
1050 break;
1051 case 14: /* short read; try to fill the last packet */
1052 req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
1053 /* device descriptor size == 18 bytes */
1054 len = udev->descriptor.bMaxPacketSize0;
1055 if (udev->speed == USB_SPEED_SUPER)
1056 len = 512;
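/* With ep0 maxpacket 8 or 16, reading only one maxpacket would be
 * satisfied by full packets alone; bump the length past the 18-byte
 * device descriptor so the transfer must end with a short packet
 * (hence -EREMOTEIO, since IN urbs set URB_SHORT_NOT_OK). */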
1057 switch (len) {
1058 case 8:
1059 len = 24;
1060 break;
1061 case 16:
1062 len = 32;
1063 break;
1064 }
1065 expected = -EREMOTEIO;
1066 break;
1067 default:
1068 ERROR(dev, "bogus number of ctrl queue testcases!\n");
1069 context.status = -EINVAL;
1070 goto cleanup;
1071 }
1072 req.wLength = cpu_to_le16(len);
1073 urb[i] = u = simple_alloc_urb(udev, pipe, len);
1074 if (!u)
1075 goto cleanup;
1076
1077 reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
1078 if (!reqp)
1079 goto cleanup;
1080 reqp->setup = req;
1081 reqp->number = i % NUM_SUBCASES;
1082 reqp->expected = expected;
1083 u->setup_packet = (char *) &reqp->setup;
1084
1085 u->context = &context;
1086 u->complete = ctrl_complete;
1087 }
1088
1089 /* queue the urbs */
1090 context.urb = urb;
1091 spin_lock_irq(&context.lock);
1092 for (i = 0; i < param->sglen; i++) {
1093 context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
1094 if (context.status != 0) {
1095 ERROR(dev, "can't submit urb[%d], status %d\n",
1096 i, context.status);
1097 context.count = context.pending;
1098 break;
1099 }
1100 context.pending++;
1101 }
1102 spin_unlock_irq(&context.lock);
1103
1104 /* FIXME set timer and time out; provide a disconnect hook */
1105
1106 /* wait for the last one to complete */
1107 if (context.pending > 0)
1108 wait_for_completion(&context.complete);
1109
1110 cleanup:
1111 for (i = 0; i < param->sglen; i++) {
1112 if (!urb[i])
1113 continue;
1114 urb[i]->dev = udev;
1115 kfree(urb[i]->setup_packet);
1116 simple_free_urb(urb[i]);
1117 }
1118 kfree(urb);
1119 return context.status;
1120 }
1121 #undef NUM_SUBCASES
1122
1123
1124 /*-------------------------------------------------------------------------*/
1125
1126 static void unlink1_callback(struct urb *urb)
1127 {
1128 int status = urb->status;
1129
1130 /* we "know" -EPIPE (stall) never happens */
1131 if (!status)
1132 status = usb_submit_urb(urb, GFP_ATOMIC);
1133 if (status) {
1134 urb->status = status;
1135 complete(urb->context);
1136 }
1137 }
1138
1139 static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1140 {
1141 struct urb *urb;
1142 struct completion completion;
1143 int retval = 0;
1144
1145 init_completion(&completion);
1146 urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
1147 if (!urb)
1148 return -ENOMEM;
1149 urb->context = &completion;
1150 urb->complete = unlink1_callback;
1151
1152 /* keep the endpoint busy. there are lots of hc/hcd-internal
1153 * states, and testing should get to all of them over time.
1154 *
1155 * FIXME want additional tests for when endpoint is STALLing
1156 * due to errors, or is just NAKing requests.
1157 */
1158 retval = usb_submit_urb(urb, GFP_KERNEL);
1159 if (retval != 0) {
1160 dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1161 return retval;
1162 }
1163
1164 /* unlinking that should always work. variable delay tests more
1165 * hcd states and code paths, even with little other system load.
1166 */
1167 msleep(jiffies % (2 * INTERRUPT_RATE));
1168 if (async) {
1169 while (!completion_done(&completion)) {
1170 retval = usb_unlink_urb(urb);
1171
1172 switch (retval) {
1173 case -EBUSY:
1174 case -EIDRM:
1175 /* we can't unlink urbs while they're completing
1176 * or if they've completed, and we haven't
1177 * resubmitted. "normal" drivers would prevent
1178 * resubmission, but since we're testing unlink
1179 * paths, we can't.
1180 */
1181 ERROR(dev, "unlink retry\n");
1182 continue;
1183 case 0:
1184 case -EINPROGRESS:
1185 break;
1186
1187 default:
1188 dev_err(&dev->intf->dev,
1189 "unlink fail %d\n", retval);
1190 return retval;
1191 }
1192
1193 break;
1194 }
1195 } else
1196 usb_kill_urb(urb);
1197
1198 wait_for_completion(&completion);
1199 retval = urb->status;
1200 simple_free_urb(urb);
1201
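/* Map the final urb status to a test result: async unlink should end in
 * -ECONNRESET, sync kill in -ENOENT or -EPERM.  Anything else is
 * returned offset by -1000 (async) or -2000 (sync) so the failing path
 * stays recognizable in the ioctl status. */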
1202 if (async)
1203 return (retval == -ECONNRESET) ? 0 : retval - 1000;
1204 else
1205 return (retval == -ENOENT || retval == -EPERM) ?
1206 0 : retval - 2000;
1207 }
1208
1209 static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
1210 {
1211 int retval = 0;
1212
1213 /* test sync and async paths */
1214 retval = unlink1(dev, pipe, len, 1);
1215 if (!retval)
1216 retval = unlink1(dev, pipe, len, 0);
1217 return retval;
1218 }
1219
1220 /*-------------------------------------------------------------------------*/
1221
1222 struct queued_ctx {
1223 struct completion complete;
1224 atomic_t pending;
1225 unsigned num;
1226 int status;
1227 struct urb **urbs;
1228 };
1229
1230 static void unlink_queued_callback(struct urb *urb)
1231 {
1232 int status = urb->status;
1233 struct queued_ctx *ctx = urb->context;
1234
1235 if (ctx->status)
1236 goto done;
1237 if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
1238 if (status == -ECONNRESET)
1239 goto done;
1240 /* What error should we report if the URB completed normally? */
1241 }
1242 if (status != 0)
1243 ctx->status = status;
1244
1245 done:
1246 if (atomic_dec_and_test(&ctx->pending))
1247 complete(&ctx->complete);
1248 }
1249
1250 static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1251 unsigned size)
1252 {
1253 struct queued_ctx ctx;
1254 struct usb_device *udev = testdev_to_usbdev(dev);
1255 void *buf;
1256 dma_addr_t buf_dma;
1257 int i;
1258 int retval = -ENOMEM;
1259
1260 init_completion(&ctx.complete);
1261 atomic_set(&ctx.pending, 1); /* One more than the actual value */
1262 ctx.num = num;
1263 ctx.status = 0;
1264
1265 buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
1266 if (!buf)
1267 return retval;
1268 memset(buf, 0, size);
1269
1270 /* Allocate and init the urbs we'll queue */
1271 ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
1272 if (!ctx.urbs)
1273 goto free_buf;
1274 for (i = 0; i < num; i++) {
1275 ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
1276 if (!ctx.urbs[i])
1277 goto free_urbs;
1278 usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
1279 unlink_queued_callback, &ctx);
1280 ctx.urbs[i]->transfer_dma = buf_dma;
1281 ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1282 }
1283
1284 /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
1285 for (i = 0; i < num; i++) {
1286 atomic_inc(&ctx.pending);
1287 retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
1288 if (retval != 0) {
1289 dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
1290 i, retval);
1291 atomic_dec(&ctx.pending);
1292 ctx.status = retval;
1293 break;
1294 }
1295 }
1296 if (i == num) {
1297 usb_unlink_urb(ctx.urbs[num - 4]);
1298 usb_unlink_urb(ctx.urbs[num - 2]);
1299 } else {
1300 while (--i >= 0)
1301 usb_unlink_urb(ctx.urbs[i]);
1302 }
1303
1304 if (atomic_dec_and_test(&ctx.pending)) /* The extra count */
1305 complete(&ctx.complete);
1306 wait_for_completion(&ctx.complete);
1307 retval = ctx.status;
1308
1309 free_urbs:
1310 for (i = 0; i < num; i++)
1311 usb_free_urb(ctx.urbs[i]);
1312 kfree(ctx.urbs);
1313 free_buf:
1314 usb_free_coherent(udev, size, buf, buf_dma);
1315 return retval;
1316 }
1317
1318 /*-------------------------------------------------------------------------*/
1319
1320 static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1321 {
1322 int retval;
1323 u16 status;
1324
1325 /* shouldn't look or act halted */
1326 retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1327 if (retval < 0) {
1328 ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1329 ep, retval);
1330 return retval;
1331 }
1332 if (status != 0) {
1333 ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
1334 return -EINVAL;
1335 }
1336 retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1337 if (retval != 0)
1338 return -EINVAL;
1339 return 0;
1340 }
1341
1342 static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1343 {
1344 int retval;
1345 u16 status;
1346
1347 /* should look and act halted */
1348 retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1349 if (retval < 0) {
1350 ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1351 ep, retval);
1352 return retval;
1353 }
1354 if (status != 1) {
1355 ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
1356 return -EINVAL;
1357 }
1358 retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
1359 if (retval != -EPIPE)
1360 return -EINVAL;
1361 retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
1362 if (retval != -EPIPE)
1363 return -EINVAL;
1364 return 0;
1365 }
1366
1367 static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1368 {
1369 int retval;
1370
1371 /* shouldn't look or act halted now */
1372 retval = verify_not_halted(tdev, ep, urb);
1373 if (retval < 0)
1374 return retval;
1375
1376 /* set halt (protocol test only), verify it worked */
1377 retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
1378 USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
1379 USB_ENDPOINT_HALT, ep,
1380 NULL, 0, USB_CTRL_SET_TIMEOUT);
1381 if (retval < 0) {
1382 ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
1383 return retval;
1384 }
1385 retval = verify_halted(tdev, ep, urb);
1386 if (retval < 0)
1387 return retval;
1388
1389 /* clear halt (tests API + protocol), verify it worked */
1390 retval = usb_clear_halt(urb->dev, urb->pipe);
1391 if (retval < 0) {
1392 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1393 return retval;
1394 }
1395 retval = verify_not_halted(tdev, ep, urb);
1396 if (retval < 0)
1397 return retval;
1398
1399 /* NOTE: could also verify SET_INTERFACE clear halts ... */
1400
1401 return 0;
1402 }
1403
1404 static int halt_simple(struct usbtest_dev *dev)
1405 {
1406 int ep;
1407 int retval = 0;
1408 struct urb *urb;
1409 struct usb_device *udev = testdev_to_usbdev(dev);
1410
1411 if (udev->speed == USB_SPEED_SUPER)
1412 urb = simple_alloc_urb(udev, 0, 1024);
1413 else
1414 urb = simple_alloc_urb(udev, 0, 512);
1415 if (urb == NULL)
1416 return -ENOMEM;
1417
1418 if (dev->in_pipe) {
1419 ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
1420 urb->pipe = dev->in_pipe;
1421 retval = test_halt(dev, ep, urb);
1422 if (retval < 0)
1423 goto done;
1424 }
1425
1426 if (dev->out_pipe) {
1427 ep = usb_pipeendpoint(dev->out_pipe);
1428 urb->pipe = dev->out_pipe;
1429 retval = test_halt(dev, ep, urb);
1430 }
1431 done:
1432 simple_free_urb(urb);
1433 return retval;
1434 }
1435
1436 /*-------------------------------------------------------------------------*/
1437
1438 /* Control OUT tests use the vendor control requests from Intel's
1439 * USB 2.0 compliance test device: write a buffer, read it back.
1440 *
1441 * Intel's spec only _requires_ that it work for one packet, which
1442 * is pretty weak. Some HCDs place limits here; most devices will
1443 * need to be able to handle more than one OUT data packet. We'll
1444 * try whatever we're told to try.
1445 */
1446 static int ctrl_out(struct usbtest_dev *dev,
1447 unsigned count, unsigned length, unsigned vary, unsigned offset)
1448 {
1449 unsigned i, j, len;
1450 int retval;
1451 u8 *buf;
1452 char *what = "?";
1453 struct usb_device *udev;
1454
1455 if (length < 1 || length > 0xffff || vary >= length)
1456 return -EINVAL;
1457
1458 buf = kmalloc(length + offset, GFP_KERNEL);
1459 if (!buf)
1460 return -ENOMEM;
1461
1462 buf += offset;
1463 udev = testdev_to_usbdev(dev);
1464 len = length;
1465 retval = 0;
1466
1467 /* NOTE: hardware might well act differently if we pushed it
1468 * with lots of back-to-back queued requests.
1469 */
1470 for (i = 0; i < count; i++) {
1471 /* write patterned data */
1472 for (j = 0; j < len; j++)
1473 buf[j] = i + j;
1474 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1475 0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
1476 0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
1477 if (retval != len) {
1478 what = "write";
1479 if (retval >= 0) {
1480 ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
1481 retval, len);
1482 retval = -EBADMSG;
1483 }
1484 break;
1485 }
1486
1487 /* read it back -- assuming nothing intervened!! */
1488 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1489 0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
1490 0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
1491 if (retval != len) {
1492 what = "read";
1493 if (retval >= 0) {
1494 ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
1495 retval, len);
1496 retval = -EBADMSG;
1497 }
1498 break;
1499 }
1500
1501 /* fail if we can't verify */
1502 for (j = 0; j < len; j++) {
1503 if (buf[j] != (u8) (i + j)) {
1504 ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1505 j, buf[j], (u8) i + j);
1506 retval = -EBADMSG;
1507 break;
1508 }
1509 }
1510 if (retval < 0) {
1511 what = "verify";
1512 break;
1513 }
1514
1515 len += vary;
1516
1517 /* [real world] the "zero bytes IN" case isn't really used.
1518 * hardware can easily trip up in this weird case, since its
1519 * status stage is IN, not OUT like other ep0in transfers.
1520 */
1521 if (len > length)
1522 len = realworld ? 1 : 0;
1523 }
1524
1525 if (retval < 0)
1526 ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
1527 what, retval, i);
1528
1529 kfree(buf - offset);
1530 return retval;
1531 }
1532
1533 /*-------------------------------------------------------------------------*/
1534
1535 /* ISO tests ... mimics common usage
1536 * - buffer length is split into N packets (mostly maxpacket sized)
1537 * - multi-buffers according to sglen
1538 */
1539
1540 struct iso_context {
1541 unsigned count;
1542 unsigned pending;
1543 spinlock_t lock;
1544 struct completion done;
1545 int submit_error;
1546 unsigned long errors;
1547 unsigned long packet_count;
1548 struct usbtest_dev *dev;
1549 };
1550
1551 static void iso_callback(struct urb *urb)
1552 {
1553 struct iso_context *ctx = urb->context;
1554
1555 spin_lock(&ctx->lock);
1556 ctx->count--;
1557
1558 ctx->packet_count += urb->number_of_packets;
1559 if (urb->error_count > 0)
1560 ctx->errors += urb->error_count;
1561 else if (urb->status != 0)
1562 ctx->errors += urb->number_of_packets;
1563 else if (urb->actual_length != urb->transfer_buffer_length)
1564 ctx->errors++;
1565 else if (check_guard_bytes(ctx->dev, urb) != 0)
1566 ctx->errors++;
1567
1568 if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1569 && !ctx->submit_error) {
1570 int status = usb_submit_urb(urb, GFP_ATOMIC);
1571 switch (status) {
1572 case 0:
1573 goto done;
1574 default:
1575 dev_err(&ctx->dev->intf->dev,
1576 "iso resubmit err %d\n",
1577 status);
1578 /* FALLTHROUGH */
1579 case -ENODEV: /* disconnected */
1580 case -ESHUTDOWN: /* endpoint disabled */
1581 ctx->submit_error = 1;
1582 break;
1583 }
1584 }
1585
1586 ctx->pending--;
1587 if (ctx->pending == 0) {
1588 if (ctx->errors)
1589 dev_err(&ctx->dev->intf->dev,
1590 "iso test, %lu errors out of %lu\n",
1591 ctx->errors, ctx->packet_count);
1592 complete(&ctx->done);
1593 }
1594 done:
1595 spin_unlock(&ctx->lock);
1596 }
1597
1598 static struct urb *iso_alloc_urb(
1599 struct usb_device *udev,
1600 int pipe,
1601 struct usb_endpoint_descriptor *desc,
1602 long bytes,
1603 unsigned offset
1604 )
1605 {
1606 struct urb *urb;
1607 unsigned i, maxp, packets;
1608
1609 if (bytes < 0 || !desc)
1610 return NULL;
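/* wMaxPacketSize bits 10:0 give the base packet size; for high-speed
 * high-bandwidth isochronous endpoints bits 12:11 encode up to two
 * extra transactions per microframe, hence the multiply below. */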
1611 maxp = 0x7ff & usb_endpoint_maxp(desc);
1612 maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
1613 packets = DIV_ROUND_UP(bytes, maxp);
1614
1615 urb = usb_alloc_urb(packets, GFP_KERNEL);
1616 if (!urb)
1617 return urb;
1618 urb->dev = udev;
1619 urb->pipe = pipe;
1620
1621 urb->number_of_packets = packets;
1622 urb->transfer_buffer_length = bytes;
1623 urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
1624 GFP_KERNEL,
1625 &urb->transfer_dma);
1626 if (!urb->transfer_buffer) {
1627 usb_free_urb(urb);
1628 return NULL;
1629 }
1630 if (offset) {
1631 memset(urb->transfer_buffer, GUARD_BYTE, offset);
1632 urb->transfer_buffer += offset;
1633 urb->transfer_dma += offset;
1634 }
1635 /* For inbound transfers use the guard byte so that the test fails
1636 if the data was not correctly copied */
1637 memset(urb->transfer_buffer,
1638 usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
1639 bytes);
1640
1641 for (i = 0; i < packets; i++) {
1642 /* here, only the last packet will be short */
1643 urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
1644 bytes -= urb->iso_frame_desc[i].length;
1645
1646 urb->iso_frame_desc[i].offset = maxp * i;
1647 }
1648
1649 urb->complete = iso_callback;
1650 /* urb->context = SET BY CALLER */
1651 urb->interval = 1 << (desc->bInterval - 1);
1652 urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
1653 return urb;
1654 }
1655
1656 static int
1657 test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
1658 int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
1659 {
1660 struct iso_context context;
1661 struct usb_device *udev;
1662 unsigned i;
1663 unsigned long packets = 0;
1664 int status = 0;
1665 struct urb *urbs[10]; /* FIXME no limit */
1666
1667 if (param->sglen > 10)
1668 return -EDOM;
1669
1670 memset(&context, 0, sizeof context);
1671 context.count = param->iterations * param->sglen;
1672 context.dev = dev;
1673 init_completion(&context.done);
1674 spin_lock_init(&context.lock);
1675
1676 memset(urbs, 0, sizeof urbs);
1677 udev = testdev_to_usbdev(dev);
1678 dev_info(&dev->intf->dev,
1679 "... iso period %d %sframes, wMaxPacket %04x\n",
1680 1 << (desc->bInterval - 1),
1681 (udev->speed == USB_SPEED_HIGH) ? "micro" : "",
1682 usb_endpoint_maxp(desc));
1683
1684 for (i = 0; i < param->sglen; i++) {
1685 urbs[i] = iso_alloc_urb(udev, pipe, desc,
1686 param->length, offset);
1687 if (!urbs[i]) {
1688 status = -ENOMEM;
1689 goto fail;
1690 }
1691 packets += urbs[i]->number_of_packets;
1692 urbs[i]->context = &context;
1693 }
1694 packets *= param->iterations;
1695 dev_info(&dev->intf->dev,
1696 "... total %lu msec (%lu packets)\n",
1697 (packets * (1 << (desc->bInterval - 1)))
1698 / ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
1699 packets);
1700
1701 spin_lock_irq(&context.lock);
1702 for (i = 0; i < param->sglen; i++) {
1703 ++context.pending;
1704 status = usb_submit_urb(urbs[i], GFP_ATOMIC);
1705 if (status < 0) {
1706 ERROR(dev, "submit iso[%d], error %d\n", i, status);
1707 if (i == 0) {
1708 spin_unlock_irq(&context.lock);
1709 goto fail;
1710 }
1711
1712 simple_free_urb(urbs[i]);
1713 urbs[i] = NULL;
1714 context.pending--;
1715 context.submit_error = 1;
1716 break;
1717 }
1718 }
1719 spin_unlock_irq(&context.lock);
1720
1721 wait_for_completion(&context.done);
1722
1723 for (i = 0; i < param->sglen; i++) {
1724 if (urbs[i])
1725 simple_free_urb(urbs[i]);
1726 }
1727 /*
1728 * Isochronous transfers are expected to fail sometimes. As an
1729 * arbitrary limit, we will report an error if any submissions
1730 * fail or if the transfer failure rate is > 10%.
1731 */
1732 if (status != 0)
1733 ;
1734 else if (context.submit_error)
1735 status = -EACCES;
1736 else if (context.errors > context.packet_count / 10)
1737 status = -EIO;
1738 return status;
1739
1740 fail:
1741 for (i = 0; i < param->sglen; i++) {
1742 if (urbs[i])
1743 simple_free_urb(urbs[i]);
1744 }
1745 return status;
1746 }
1747
1748 static int test_unaligned_bulk(
1749 struct usbtest_dev *tdev,
1750 int pipe,
1751 unsigned length,
1752 int iterations,
1753 unsigned transfer_flags,
1754 const char *label)
1755 {
1756 int retval;
1757 struct urb *urb = usbtest_alloc_urb(
1758 testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);
1759
1760 if (!urb)
1761 return -ENOMEM;
1762
1763 retval = simple_io(tdev, urb, iterations, 0, 0, label);
1764 simple_free_urb(urb);
1765 return retval;
1766 }
1767
1768 /*-------------------------------------------------------------------------*/
1769
1770 /* We only have this one interface to user space, through usbfs.
1771 * User mode code can scan usbfs to find N different devices (maybe on
1772 * different busses) to use when testing, and allocate one thread per
1773 * test. So discovery is simplified, and we have no device naming issues.
1774 *
1775 * Don't use these only as stress/load tests. Use them along with
1776 * other USB bus activity: plugging, unplugging, mousing, mp3 playback,
1777 * video capture, and so on. Run different tests at different times, in
1778 * different sequences. Nothing here should interact with other devices,
1779 * except indirectly by consuming USB bandwidth and CPU resources for test
1780 * threads and request completion. But the only way to know that for sure
1781 * is to test when HC queues are in use by many devices.
1782 *
1783 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
1784 * it locks out usbcore in certain code paths. Notably, if you disconnect
1785 * the device-under-test, khubd will block forever waiting for the
1786 * ioctl to complete ... so that usb_disconnect() can abort the pending
1787 * urbs and then call usbtest_disconnect(). To abort a test, you're best
1788 * off just killing the userspace task and waiting for it to exit.
1789 */
1790
1791 static int
1792 usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
1793 {
1794 struct usbtest_dev *dev = usb_get_intfdata(intf);
1795 struct usb_device *udev = testdev_to_usbdev(dev);
1796 struct usbtest_param *param = buf;
1797 int retval = -EOPNOTSUPP;
1798 struct urb *urb;
1799 struct scatterlist *sg;
1800 struct usb_sg_request req;
1801 struct timeval start;
1802 unsigned i;
1803
1804 /* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
1805
1806 pattern = mod_pattern;
1807
1808 if (code != USBTEST_REQUEST)
1809 return -EOPNOTSUPP;
1810
1811 if (param->iterations <= 0)
1812 return -EINVAL;
1813
1814 if (mutex_lock_interruptible(&dev->lock))
1815 return -ERESTARTSYS;
1816
1817 /* FIXME: What if a system sleep starts while a test is running? */
1818
1819 /* some devices, like ez-usb default devices, need a non-default
1820 * altsetting to have any active endpoints. some tests change
1821 * altsettings; force a default so most tests don't need to check.
1822 */
1823 if (dev->info->alt >= 0) {
1824 int res;
1825
1826 if (intf->altsetting->desc.bInterfaceNumber) {
1827 mutex_unlock(&dev->lock);
1828 return -ENODEV;
1829 }
1830 res = set_altsetting(dev, dev->info->alt);
1831 if (res) {
1832 dev_err(&intf->dev,
1833 "set altsetting to %d failed, %d\n",
1834 dev->info->alt, res);
1835 mutex_unlock(&dev->lock);
1836 return res;
1837 }
1838 }
1839
1840 /*
1841 * Just a bunch of test cases that every HCD is expected to handle.
1842 *
1843 * Some may need specific firmware, though it'd be good to have
1844 * one firmware image to handle all the test cases.
1845 *
1846 * FIXME add more tests! cancel requests, verify the data, control
1847 * queueing, concurrent read+write threads, and so on.
1848 */
1849 do_gettimeofday(&start);
1850 switch (param->test_num) {
1851
1852 case 0:
1853 dev_info(&intf->dev, "TEST 0: NOP\n");
1854 retval = 0;
1855 break;
1856
1857 /* Simple non-queued bulk I/O tests */
1858 case 1:
1859 if (dev->out_pipe == 0)
1860 break;
1861 dev_info(&intf->dev,
1862 "TEST 1: write %d bytes %u times\n",
1863 param->length, param->iterations);
1864 urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1865 if (!urb) {
1866 retval = -ENOMEM;
1867 break;
1868 }
1869 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1870 retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
1871 simple_free_urb(urb);
1872 break;
1873 case 2:
1874 if (dev->in_pipe == 0)
1875 break;
1876 dev_info(&intf->dev,
1877 "TEST 2: read %d bytes %u times\n",
1878 param->length, param->iterations);
1879 urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1880 if (!urb) {
1881 retval = -ENOMEM;
1882 break;
1883 }
1884 /* FIRMWARE: bulk source (maybe generates short writes) */
1885 retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
1886 simple_free_urb(urb);
1887 break;
1888 case 3:
1889 if (dev->out_pipe == 0 || param->vary == 0)
1890 break;
1891 dev_info(&intf->dev,
1892 "TEST 3: write/%d 0..%d bytes %u times\n",
1893 param->vary, param->length, param->iterations);
1894 urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1895 if (!urb) {
1896 retval = -ENOMEM;
1897 break;
1898 }
1899 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1900 retval = simple_io(dev, urb, param->iterations, param->vary,
1901 0, "test3");
1902 simple_free_urb(urb);
1903 break;
1904 case 4:
1905 if (dev->in_pipe == 0 || param->vary == 0)
1906 break;
1907 dev_info(&intf->dev,
1908 "TEST 4: read/%d 0..%d bytes %u times\n",
1909 param->vary, param->length, param->iterations);
1910 urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1911 if (!urb) {
1912 retval = -ENOMEM;
1913 break;
1914 }
1915 /* FIRMWARE: bulk source (maybe generates short writes) */
1916 retval = simple_io(dev, urb, param->iterations, param->vary,
1917 0, "test4");
1918 simple_free_urb(urb);
1919 break;
1920
1921 /* Queued bulk I/O tests */
1922 case 5:
1923 if (dev->out_pipe == 0 || param->sglen == 0)
1924 break;
1925 dev_info(&intf->dev,
1926 "TEST 5: write %d sglists %d entries of %d bytes\n",
1927 param->iterations,
1928 param->sglen, param->length);
1929 sg = alloc_sglist(param->sglen, param->length, 0);
1930 if (!sg) {
1931 retval = -ENOMEM;
1932 break;
1933 }
1934 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1935 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1936 &req, sg, param->sglen);
1937 free_sglist(sg, param->sglen);
1938 break;
1939
1940 case 6:
1941 if (dev->in_pipe == 0 || param->sglen == 0)
1942 break;
1943 dev_info(&intf->dev,
1944 "TEST 6: read %d sglists %d entries of %d bytes\n",
1945 param->iterations,
1946 param->sglen, param->length);
1947 sg = alloc_sglist(param->sglen, param->length, 0);
1948 if (!sg) {
1949 retval = -ENOMEM;
1950 break;
1951 }
1952 /* FIRMWARE: bulk source (maybe generates short writes) */
1953 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1954 &req, sg, param->sglen);
1955 free_sglist(sg, param->sglen);
1956 break;
1957 case 7:
1958 if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
1959 break;
1960 dev_info(&intf->dev,
1961 "TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
1962 param->vary, param->iterations,
1963 param->sglen, param->length);
1964 sg = alloc_sglist(param->sglen, param->length, param->vary);
1965 if (!sg) {
1966 retval = -ENOMEM;
1967 break;
1968 }
1969 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1970 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1971 &req, sg, param->sglen);
1972 free_sglist(sg, param->sglen);
1973 break;
1974 case 8:
1975 if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
1976 break;
1977 dev_info(&intf->dev,
1978 "TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
1979 param->vary, param->iterations,
1980 param->sglen, param->length);
1981 sg = alloc_sglist(param->sglen, param->length, param->vary);
1982 if (!sg) {
1983 retval = -ENOMEM;
1984 break;
1985 }
1986 /* FIRMWARE: bulk source (maybe generates short writes) */
1987 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1988 &req, sg, param->sglen);
1989 free_sglist(sg, param->sglen);
1990 break;
1991
1992 /* non-queued sanity tests for control (chapter 9 subset) */
1993 case 9:
1994 retval = 0;
1995 dev_info(&intf->dev,
1996 "TEST 9: ch9 (subset) control tests, %d times\n",
1997 param->iterations);
1998 for (i = param->iterations; retval == 0 && i--; /* NOP */)
1999 retval = ch9_postconfig(dev);
2000 if (retval)
2001 dev_err(&intf->dev, "ch9 subset failed, "
2002 "iterations left %d\n", i);
2003 break;
2004
2005 /* queued control messaging */
2006 case 10:
2007 retval = 0;
2008 dev_info(&intf->dev,
2009 "TEST 10: queue %d control calls, %d times\n",
2010 param->sglen,
2011 param->iterations);
2012 retval = test_ctrl_queue(dev, param);
2013 break;
2014
2015 /* simple non-queued unlinks (ring with one urb) */
2016 case 11:
2017 if (dev->in_pipe == 0 || !param->length)
2018 break;
2019 retval = 0;
2020 dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
2021 param->iterations, param->length);
2022 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2023 retval = unlink_simple(dev, dev->in_pipe,
2024 param->length);
2025 if (retval)
2026 dev_err(&intf->dev, "unlink reads failed %d, "
2027 "iterations left %d\n", retval, i);
2028 break;
2029 case 12:
2030 if (dev->out_pipe == 0 || !param->length)
2031 break;
2032 retval = 0;
2033 dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
2034 param->iterations, param->length);
2035 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2036 retval = unlink_simple(dev, dev->out_pipe,
2037 param->length);
2038 if (retval)
2039 dev_err(&intf->dev, "unlink writes failed %d, "
2040 "iterations left %d\n", retval, i);
2041 break;
2042
2043 /* ep halt tests */
2044 case 13:
2045 if (dev->out_pipe == 0 && dev->in_pipe == 0)
2046 break;
2047 retval = 0;
2048 dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
2049 param->iterations);
2050 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2051 retval = halt_simple(dev);
2052
2053 if (retval)
2054 ERROR(dev, "halts failed, iterations left %d\n", i);
2055 break;
2056
2057 /* control write tests */
2058 case 14:
2059 if (!dev->info->ctrl_out)
2060 break;
2061 dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
2062 param->iterations,
2063 realworld ? 1 : 0, param->length,
2064 param->vary);
2065 retval = ctrl_out(dev, param->iterations,
2066 param->length, param->vary, 0);
2067 break;
2068
2069 /* iso write tests */
2070 case 15:
2071 if (dev->out_iso_pipe == 0 || param->sglen == 0)
2072 break;
2073 dev_info(&intf->dev,
2074 "TEST 15: write %d iso, %d entries of %d bytes\n",
2075 param->iterations,
2076 param->sglen, param->length);
2077 /* FIRMWARE: iso sink */
2078 retval = test_iso_queue(dev, param,
2079 dev->out_iso_pipe, dev->iso_out, 0);
2080 break;
2081
2082 /* iso read tests */
2083 case 16:
2084 if (dev->in_iso_pipe == 0 || param->sglen == 0)
2085 break;
2086 dev_info(&intf->dev,
2087 "TEST 16: read %d iso, %d entries of %d bytes\n",
2088 param->iterations,
2089 param->sglen, param->length);
2090 /* FIRMWARE: iso source */
2091 retval = test_iso_queue(dev, param,
2092 dev->in_iso_pipe, dev->iso_in, 0);
2093 break;
2094
2095 /* FIXME scatterlist cancel (needs helper thread) */
2096
2097 /* Tests for bulk I/O using DMA mapping by core and odd address */
2098 case 17:
2099 if (dev->out_pipe == 0)
2100 break;
2101 dev_info(&intf->dev,
2102 "TEST 17: write odd addr %d bytes %u times core map\n",
2103 param->length, param->iterations);
2104
2105 retval = test_unaligned_bulk(
2106 dev, dev->out_pipe,
2107 param->length, param->iterations,
2108 0, "test17");
2109 break;
2110
2111 case 18:
2112 if (dev->in_pipe == 0)
2113 break;
2114 dev_info(&intf->dev,
2115 "TEST 18: read odd addr %d bytes %u times core map\n",
2116 param->length, param->iterations);
2117
2118 retval = test_unaligned_bulk(
2119 dev, dev->in_pipe,
2120 param->length, param->iterations,
2121 0, "test18");
2122 break;
2123
2124 /* Tests for bulk I/O using premapped coherent buffer and odd address */
2125 case 19:
2126 if (dev->out_pipe == 0)
2127 break;
2128 dev_info(&intf->dev,
2129 "TEST 19: write odd addr %d bytes %u times premapped\n",
2130 param->length, param->iterations);
2131
2132 retval = test_unaligned_bulk(
2133 dev, dev->out_pipe,
2134 param->length, param->iterations,
2135 URB_NO_TRANSFER_DMA_MAP, "test19");
2136 break;
2137
2138 case 20:
2139 if (dev->in_pipe == 0)
2140 break;
2141 dev_info(&intf->dev,
2142 "TEST 20: read odd addr %d bytes %u times premapped\n",
2143 param->length, param->iterations);
2144
2145 retval = test_unaligned_bulk(
2146 dev, dev->in_pipe,
2147 param->length, param->iterations,
2148 URB_NO_TRANSFER_DMA_MAP, "test20");
2149 break;
2150
2151 /* control write tests with unaligned buffer */
2152 case 21:
2153 if (!dev->info->ctrl_out)
2154 break;
2155 dev_info(&intf->dev,
2156 "TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
2157 param->iterations,
2158 realworld ? 1 : 0, param->length,
2159 param->vary);
2160 retval = ctrl_out(dev, param->iterations,
2161 param->length, param->vary, 1);
2162 break;
2163
2164 /* unaligned iso tests */
2165 case 22:
2166 if (dev->out_iso_pipe == 0 || param->sglen == 0)
2167 break;
2168 dev_info(&intf->dev,
2169 "TEST 22: write %d iso odd, %d entries of %d bytes\n",
2170 param->iterations,
2171 param->sglen, param->length);
2172 retval = test_iso_queue(dev, param,
2173 dev->out_iso_pipe, dev->iso_out, 1);
2174 break;
2175
2176 case 23:
2177 if (dev->in_iso_pipe == 0 || param->sglen == 0)
2178 break;
2179 dev_info(&intf->dev,
2180 "TEST 23: read %d iso odd, %d entries of %d bytes\n",
2181 param->iterations,
2182 param->sglen, param->length);
2183 retval = test_iso_queue(dev, param,
2184 dev->in_iso_pipe, dev->iso_in, 1);
2185 break;
2186
2187 /* unlink URBs from a bulk-OUT queue */
2188 case 24:
2189 if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
2190 break;
2191 retval = 0;
2192 dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
2193 "%d %d-byte writes\n",
2194 param->iterations, param->sglen, param->length);
2195 for (i = param->iterations; retval == 0 && i > 0; --i) {
2196 retval = unlink_queued(dev, dev->out_pipe,
2197 param->sglen, param->length);
2198 if (retval) {
2199 dev_err(&intf->dev,
2200 "unlink queued writes failed %d, "
2201 "iterations left %d\n", retval, i);
2202 break;
2203 }
2204 }
2205 break;
2206
2207 }
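	/* report elapsed wall-clock time back to user space in param->duration */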
2208 do_gettimeofday(&param->duration);
2209 param->duration.tv_sec -= start.tv_sec;
2210 param->duration.tv_usec -= start.tv_usec;
2211 if (param->duration.tv_usec < 0) {
2212 param->duration.tv_usec += 1000 * 1000;
2213 param->duration.tv_sec -= 1;
2214 }
2215 mutex_unlock(&dev->lock);
2216 return retval;
2217 }
2218
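/* Illustrative only, not part of this driver: a minimal user-space sketch of
 * how the ioctl above is normally reached.  The device node path and the
 * interface number are assumptions; the in-tree testusb tool is the usual
 * caller.  User space has to mirror the USBTEST_REQUEST and struct
 * usbtest_param definitions from this file.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

static int run_usbtest(struct usbtest_param *param, int ifnum)
{
	struct usbdevfs_ioctl wrapper;
	int fd, status;

	/* hypothetical device node; find the real one via lsusb or sysfs */
	fd = open("/dev/bus/usb/001/004", O_RDWR);
	if (fd < 0)
		return -1;

	wrapper.ifno = ifnum;
	wrapper.ioctl_code = USBTEST_REQUEST;
	wrapper.data = param;

	/* usbfs hands this to usbtest_ioctl() for the bound interface */
	status = ioctl(fd, USBDEVFS_IOCTL, &wrapper);

	close(fd);
	return status;
}
#endif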
2219 /*-------------------------------------------------------------------------*/
2220
2221 static unsigned force_interrupt;
2222 module_param(force_interrupt, uint, 0);
2223 MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
2224
2225 #ifdef GENERIC
2226 static unsigned short vendor;
2227 module_param(vendor, ushort, 0);
2228 MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
2229
2230 static unsigned short product;
2231 module_param(product, ushort, 0);
2232 MODULE_PARM_DESC(product, "product code (from vendor)");
2233 #endif
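
/* Illustrative usage (the IDs below are placeholders): the generic binding
 * is selected at module load time, e.g.
 * "modprobe usbtest vendor=0x1234 product=0x5678".  force_interrupt and alt
 * can be passed the same way; alt (perm 0644) can also be changed later
 * through /sys/module/usbtest/parameters/alt.
 */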
2234
2235 static int
2236 usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2237 {
2238 struct usb_device *udev;
2239 struct usbtest_dev *dev;
2240 struct usbtest_info *info;
2241 char *rtest, *wtest;
2242 char *irtest, *iwtest;
2243
2244 udev = interface_to_usbdev(intf);
2245
2246 #ifdef GENERIC
2247 /* specify devices by module parameters? */
2248 if (id->match_flags == 0) {
2249 /* vendor match required, product match optional */
2250 if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
2251 return -ENODEV;
2252 if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
2253 return -ENODEV;
2254 dev_info(&intf->dev, "matched module params, "
2255 "vend=0x%04x prod=0x%04x\n",
2256 le16_to_cpu(udev->descriptor.idVendor),
2257 le16_to_cpu(udev->descriptor.idProduct));
2258 }
2259 #endif
2260
2261 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2262 if (!dev)
2263 return -ENOMEM;
2264 info = (struct usbtest_info *) id->driver_info;
2265 dev->info = info;
2266 mutex_init(&dev->lock);
2267
2268 dev->intf = intf;
2269
2270 /* cacheline-aligned scratch for i/o */
2271 dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
2272 if (dev->buf == NULL) {
2273 kfree(dev);
2274 return -ENOMEM;
2275 }
2276
2277 /* NOTE this doesn't yet test the handful of differences that are
2278 * visible with high speed interrupts: bigger maxpacket (1K) and
2279 * "high bandwidth" modes (up to 3 packets/uframe).
2280 */
2281 rtest = wtest = "";
2282 irtest = iwtest = "";
2283 if (force_interrupt || udev->speed == USB_SPEED_LOW) {
2284 if (info->ep_in) {
2285 dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
2286 rtest = " intr-in";
2287 }
2288 if (info->ep_out) {
2289 dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
2290 wtest = " intr-out";
2291 }
2292 } else {
2293 if (override_alt >= 0 || info->autoconf) {
2294 int status;
2295
2296 status = get_endpoints(dev, intf);
2297 if (status < 0) {
2298 WARNING(dev, "couldn't get endpoints, %d\n",
2299 status);
2300 kfree(dev->buf);
2301 kfree(dev);
2302 return status;
2303 }
2304 /* may find bulk or ISO pipes */
2305 } else {
2306 if (info->ep_in)
2307 dev->in_pipe = usb_rcvbulkpipe(udev,
2308 info->ep_in);
2309 if (info->ep_out)
2310 dev->out_pipe = usb_sndbulkpipe(udev,
2311 info->ep_out);
2312 }
2313 if (dev->in_pipe)
2314 rtest = " bulk-in";
2315 if (dev->out_pipe)
2316 wtest = " bulk-out";
2317 if (dev->in_iso_pipe)
2318 irtest = " iso-in";
2319 if (dev->out_iso_pipe)
2320 iwtest = " iso-out";
2321 }
2322
2323 usb_set_intfdata(intf, dev);
2324 dev_info(&intf->dev, "%s\n", info->name);
2325 dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
2326 usb_speed_string(udev->speed),
2327 info->ctrl_out ? " in/out" : "",
2328 rtest, wtest,
2329 irtest, iwtest,
2330 info->alt >= 0 ? " (+alt)" : "");
2331 return 0;
2332 }
2333
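/* No device state needs saving or restoring here; these empty handlers are
 * presumably provided so usbcore can suspend and resume the device without
 * unbinding the test driver.
 */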
2334 static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2335 {
2336 return 0;
2337 }
2338
2339 static int usbtest_resume(struct usb_interface *intf)
2340 {
2341 return 0;
2342 }
2343
2344
2345 static void usbtest_disconnect(struct usb_interface *intf)
2346 {
2347 struct usbtest_dev *dev = usb_get_intfdata(intf);
2348
2349 usb_set_intfdata(intf, NULL);
2350 dev_dbg(&intf->dev, "disconnect\n");
2351 kfree(dev);
2352 }
2353
2354 /* Basic testing only needs a device that can source or sink bulk traffic.
2355 * Any device can test control transfers (default with GENERIC binding).
2356 *
2357 * Several entries work with the default EP0 implementation that's built
2358 * into EZ-USB chips. There's a default vendor ID which can be overridden
2359 * by (very) small config EEPROMs, but otherwise all these devices act
2360 * identically until firmware is loaded: only EP0 works. It turns out
2361 * to be easy to make other endpoints work, without modifying that EP0
2362 * behavior. For now, we expect that kind of firmware.
2363 */
2364
2365 /* an21xx or fx versions of ez-usb */
2366 static struct usbtest_info ez1_info = {
2367 .name = "EZ-USB device",
2368 .ep_in = 2,
2369 .ep_out = 2,
2370 .alt = 1,
2371 };
2372
2373 /* fx2 version of ez-usb */
2374 static struct usbtest_info ez2_info = {
2375 .name = "FX2 device",
2376 .ep_in = 6,
2377 .ep_out = 2,
2378 .alt = 1,
2379 };
2380
2381 /* ezusb family device with dedicated usb test firmware */
2382
2383 static struct usbtest_info fw_info = {
2384 .name = "usb test device",
2385 .ep_in = 2,
2386 .ep_out = 2,
2387 .alt = 1,
2388 .autoconf = 1, /* iso and ctrl_out need autoconf */
2389 .ctrl_out = 1,
2390 .iso = 1, /* iso EPs are #8 in/out */
2391 };
2392
2393 /* peripheral running Linux and 'zero.c' test firmware, or
2394 * its user-mode cousin. Different versions of this use
2395 * different hardware with the same vendor/product codes.
2396 * The host side MUST rely on the endpoint descriptors.
2397 */
2398 static struct usbtest_info gz_info = {
2399 .name = "Linux gadget zero",
2400 .autoconf = 1,
2401 .ctrl_out = 1,
2402 .iso = 1,
2403 .alt = 0,
2404 };
2405
2406 static struct usbtest_info um_info = {
2407 .name = "Linux user mode test driver",
2408 .autoconf = 1,
2409 .alt = -1,
2410 };
2411
2412 static struct usbtest_info um2_info = {
2413 .name = "Linux user mode ISO test driver",
2414 .autoconf = 1,
2415 .iso = 1,
2416 .alt = -1,
2417 };
2418
2419 #ifdef IBOT2
2420 /* this is a nice source of high speed bulk data;
2421 * uses an FX2, with firmware provided in the device
2422 */
2423 static struct usbtest_info ibot2_info = {
2424 .name = "iBOT2 webcam",
2425 .ep_in = 2,
2426 .alt = -1,
2427 };
2428 #endif
2429
2430 #ifdef GENERIC
2431 /* we can use any device to test control traffic */
2432 static struct usbtest_info generic_info = {
2433 .name = "Generic USB device",
2434 .alt = -1,
2435 };
2436 #endif
2437
2438
2439 static const struct usb_device_id id_table[] = {
2440
2441 /*-------------------------------------------------------------*/
2442
2443 /* EZ-USB devices which download firmware to replace (or in our
2444 * case augment) the default device implementation.
2445 */
2446
2447 /* generic EZ-USB FX controller */
2448 { USB_DEVICE(0x0547, 0x2235),
2449 .driver_info = (unsigned long) &ez1_info,
2450 },
2451
2452 /* CY3671 development board with EZ-USB FX */
2453 { USB_DEVICE(0x0547, 0x0080),
2454 .driver_info = (unsigned long) &ez1_info,
2455 },
2456
2457 /* generic EZ-USB FX2 controller (or development board) */
2458 { USB_DEVICE(0x04b4, 0x8613),
2459 .driver_info = (unsigned long) &ez2_info,
2460 },
2461
2462 /* re-enumerated usb test device firmware */
2463 { USB_DEVICE(0xfff0, 0xfff0),
2464 .driver_info = (unsigned long) &fw_info,
2465 },
2466
2467 /* "Gadget Zero" firmware runs under Linux */
2468 { USB_DEVICE(0x0525, 0xa4a0),
2469 .driver_info = (unsigned long) &gz_info,
2470 },
2471
2472 /* so does a user-mode variant */
2473 { USB_DEVICE(0x0525, 0xa4a4),
2474 .driver_info = (unsigned long) &um_info,
2475 },
2476
2477 /* ... and a user-mode variant that talks iso */
2478 { USB_DEVICE(0x0525, 0xa4a3),
2479 .driver_info = (unsigned long) &um2_info,
2480 },
2481
2482 #ifdef KEYSPAN_19Qi
2483 /* Keyspan 19qi uses an21xx (original EZ-USB) */
2484 /* this does not coexist with the real Keyspan 19qi driver! */
2485 { USB_DEVICE(0x06cd, 0x010b),
2486 .driver_info = (unsigned long) &ez1_info,
2487 },
2488 #endif
2489
2490 /*-------------------------------------------------------------*/
2491
2492 #ifdef IBOT2
2493 /* iBOT2 makes a nice source of high speed bulk-in data */
2494 /* this does not coexist with a real iBOT2 driver! */
2495 { USB_DEVICE(0x0b62, 0x0059),
2496 .driver_info = (unsigned long) &ibot2_info,
2497 },
2498 #endif
2499
2500 /*-------------------------------------------------------------*/
2501
2502 #ifdef GENERIC
2503 /* module params can specify devices to use for control tests */
2504 { .driver_info = (unsigned long) &generic_info, },
2505 #endif
2506
2507 /*-------------------------------------------------------------*/
2508
2509 { }
2510 };
2511 MODULE_DEVICE_TABLE(usb, id_table);
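
/* Purely illustrative sketch of how one more test device would be added to
 * id_table above (the vendor/product values are placeholders):
 *
 *	{ USB_DEVICE(0x1234, 0x5678),
 *		.driver_info = (unsigned long) &gz_info,
 *	},
 */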
2512
2513 static struct usb_driver usbtest_driver = {
2514 .name = "usbtest",
2515 .id_table = id_table,
2516 .probe = usbtest_probe,
2517 .unlocked_ioctl = usbtest_ioctl,
2518 .disconnect = usbtest_disconnect,
2519 .suspend = usbtest_suspend,
2520 .resume = usbtest_resume,
2521 };
2522
2523 /*-------------------------------------------------------------------------*/
2524
2525 static int __init usbtest_init(void)
2526 {
2527 #ifdef GENERIC
2528 if (vendor)
2529 pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
2530 #endif
2531 return usb_register(&usbtest_driver);
2532 }
2533 module_init(usbtest_init);
2534
2535 static void __exit usbtest_exit(void)
2536 {
2537 usb_deregister(&usbtest_driver);
2538 }
2539 module_exit(usbtest_exit);
2540
2541 MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
2542 MODULE_LICENSE("GPL");
2543