/* drivers/usb/core/urb.c */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
        struct urb *urb = to_urb(kref);

        if (urb->transfer_flags & URB_FREE_BUFFER)
                kfree(urb->transfer_buffer);

        kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function. Only use this if you allocate the
 * space for a struct urb on your own. If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
        if (urb) {
                memset(urb, 0, sizeof(*urb));
                kref_init(&urb->kref);
                INIT_LIST_HEAD(&urb->anchor_list);
        }
}
EXPORT_SYMBOL_GPL(usb_init_urb);

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *      valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 *
 * Return: A pointer to the new urb, or %NULL if no memory is available.
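 *
 * Illustrative sketch (not part of this file; "udev", "buf", "len",
 * "my_complete" and "my_ctx" are hypothetical driver-side names), showing
 * the usual allocate/fill/submit pairing:
 *
 *        struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
 *
 *        if (!urb)
 *                return -ENOMEM;
 *        usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1),
 *                          buf, len, my_complete, my_ctx);
 *        if (usb_submit_urb(urb, GFP_KERNEL))
 *                usb_free_urb(urb);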
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
        struct urb *urb;

        urb = kmalloc(sizeof(struct urb) +
                iso_packets * sizeof(struct usb_iso_packet_descriptor),
                mem_flags);
        if (!urb) {
                printk(KERN_ERR "alloc_urb: kmalloc failed\n");
                return NULL;
        }
        usb_init_urb(urb);
        return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it. When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
        if (urb)
                kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver. This allows proper reference counting to happen
 * for urbs.
 *
 * Return: A pointer to the urb with the incremented reference counter.
 */
struct urb *usb_get_urb(struct urb *urb)
{
        if (urb)
                kref_get(&urb->kref);
        return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them
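 *
 * Illustrative sketch (hedged; "my->rx_anchor" and the surrounding driver
 * code are hypothetical): anchor an URB just before submitting it, so the
 * whole set can later be cancelled with usb_kill_anchored_urbs():
 *
 *        usb_anchor_urb(urb, &my->rx_anchor);
 *        retval = usb_submit_urb(urb, GFP_ATOMIC);
 *        if (retval)
 *                usb_unanchor_urb(urb);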
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
        unsigned long flags;

        spin_lock_irqsave(&anchor->lock, flags);
        usb_get_urb(urb);
        list_add_tail(&urb->anchor_list, &anchor->urb_list);
        urb->anchor = anchor;

        if (unlikely(anchor->poisoned))
                atomic_inc(&urb->reject);

        spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);

static int usb_anchor_check_wakeup(struct usb_anchor *anchor)
{
        return atomic_read(&anchor->suspend_wakeups) == 0 &&
                list_empty(&anchor->urb_list);
}

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
        urb->anchor = NULL;
        list_del(&urb->anchor_list);
        usb_put_urb(urb);
        if (usb_anchor_check_wakeup(anchor))
                wake_up(&anchor->wait);
}

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB
 */
void usb_unanchor_urb(struct urb *urb)
{
        unsigned long flags;
        struct usb_anchor *anchor;

        if (!urb)
                return;

        anchor = urb->anchor;
        if (!anchor)
                return;

        spin_lock_irqsave(&anchor->lock, flags);
        /*
         * At this point, we could be competing with another thread which
         * has the same intention. To protect the urb from being unanchored
         * twice, only the winner of the race gets the job.
         */
        if (likely(anchor == urb->anchor))
                __usb_unanchor_urb(urb, anchor);
        spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);

/*-------------------------------------------------------------------*/

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *      of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem. Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * If the submission is successful, the complete() callback from the URB
 * will be called exactly once, when the USB core and Host Controller Driver
 * (HCD) are finished with the URB. When the completion function is called,
 * control of the URB is returned to the device driver which issued the
 * request. The completion handler may then immediately free or reuse that
 * URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling. For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units). And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.
 *
 * Not all isochronous transfer scheduling policies will work, but most
 * host controller drivers should easily handle ISO queues going from now
 * until 10-200 msec into the future. Drivers should try to keep at
 * least one or two msec of data in the queue; many controllers require
 * that new transfers start at least 1 msec in the future when they are
 * added. If the driver is unable to keep up and the queue empties out,
 * the behavior for new submissions is governed by the URB_ISO_ASAP flag.
 * If the flag is set, or if the queue is idle, then the URB is always
 * assigned to the first available (and not yet expired) slot in the
 * endpoint's schedule. If the flag is not set and the queue is active
 * then the URB is always assigned to the next slot in the schedule
 * following the end of the endpoint's previous URB, even if that slot is
 * in the past. When a packet is assigned in this way to a slot that has
 * already expired, the packet is not transmitted and the corresponding
 * usb_iso_packet_descriptor's status field will return -EXDEV. If this
 * would happen to all the packets in the URB, submission fails with a
 * -EXDEV error code.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification. For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
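 * As a hedged illustration (not from this file), a simple synchronous
 * control request; "udev" and "buf" are hypothetical, and the request
 * shown is the standard GET_DESCRIPTOR(DEVICE):
 *
 *        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
 *                              USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
 *                              USB_DT_DEVICE << 8, 0, buf, 18,
 *                              USB_CTRL_GET_TIMEOUT);
 *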
 * Return:
 * 0 on successful submissions. A negative error number otherwise.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput. With that queuing policy, an endpoint's queue would never
 * be empty. This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one. This was previously a HCD-specific behavior, except for ISO
 * transfers. Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb. Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected. If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail. Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks). When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled. This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc. There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply
 *
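 * A hedged sketch of rule (a): an interrupt URB resubmitted from its own
 * completion handler must use GFP_ATOMIC; this is also how a driver keeps
 * a periodic endpoint's bandwidth reservation (see Reserved Bandwidth
 * Transfers above). "my_int_complete" and "my_process_data" are
 * hypothetical driver-side names:
 *
 *        static void my_int_complete(struct urb *urb)
 *        {
 *                if (urb->status)
 *                        return;
 *                my_process_data(urb->transfer_buffer, urb->actual_length);
 *                if (usb_submit_urb(urb, GFP_ATOMIC))
 *                        dev_err(&urb->dev->dev, "resubmit failed\n");
 *        }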
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
        static int pipetypes[4] = {
                PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
        };
        int xfertype, max;
        struct usb_device *dev;
        struct usb_host_endpoint *ep;
        int is_out;
        unsigned int allowed;

        if (!urb || !urb->complete)
                return -EINVAL;
        if (urb->hcpriv) {
                WARN_ONCE(1, "URB %p submitted while active\n", urb);
                return -EBUSY;
        }

        dev = urb->dev;
        if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
                return -ENODEV;

        /* For now, get the endpoint from the pipe. Eventually drivers
         * will be required to set urb->ep directly and we will eliminate
         * urb->pipe.
         */
        ep = usb_pipe_endpoint(dev, urb->pipe);
        if (!ep)
                return -ENOENT;

        urb->ep = ep;
        urb->status = -EINPROGRESS;
        urb->actual_length = 0;

        /* Lots of sanity checks, so HCDs can rely on clean data
         * and don't need to duplicate tests
         */
        xfertype = usb_endpoint_type(&ep->desc);
        if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
                struct usb_ctrlrequest *setup =
                                (struct usb_ctrlrequest *) urb->setup_packet;

                if (!setup)
                        return -ENOEXEC;
                is_out = !(setup->bRequestType & USB_DIR_IN) ||
                                !setup->wLength;
        } else {
                is_out = usb_endpoint_dir_out(&ep->desc);
        }

        /* Clear the internal flags and cache the direction for later use */
        urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
                        URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
                        URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
                        URB_DMA_SG_COMBINED);
        urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

        if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
            dev->state < USB_STATE_CONFIGURED)
                return -ENODEV;

        max = usb_endpoint_maxp(&ep->desc);
        if (max <= 0) {
                dev_dbg(&dev->dev,
                        "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
                        usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
                        __func__, max);
                return -EMSGSIZE;
        }

        /* periodic transfers limit size per frame/uframe,
         * but drivers only control those sizes for ISO.
         * while we're checking, initialize return status.
         */
        if (xfertype == USB_ENDPOINT_XFER_ISOC) {
                int n, len;

                /* SuperSpeed isoc endpoints have up to 16 bursts of up to
                 * 3 packets each
                 */
                if (dev->speed == USB_SPEED_SUPER) {
                        int burst = 1 + ep->ss_ep_comp.bMaxBurst;
                        int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
                        max *= burst;
                        max *= mult;
                }

                /* "high bandwidth" mode, 1-3 packets/uframe? */
                if (dev->speed == USB_SPEED_HIGH) {
                        int mult = 1 + ((max >> 11) & 0x03);
                        max &= 0x07ff;
                        max *= mult;
                }

                if (urb->number_of_packets <= 0)
                        return -EINVAL;
                for (n = 0; n < urb->number_of_packets; n++) {
                        len = urb->iso_frame_desc[n].length;
                        if (len < 0 || len > max)
                                return -EMSGSIZE;
                        urb->iso_frame_desc[n].status = -EXDEV;
                        urb->iso_frame_desc[n].actual_length = 0;
                }
        } else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint &&
                        dev->speed != USB_SPEED_WIRELESS) {
                struct scatterlist *sg;
                int i;

                for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
                        if (sg->length % max)
                                return -EINVAL;
        }

        /* the I/O buffer must be mapped/unmapped, except when length=0 */
        if (urb->transfer_buffer_length > INT_MAX)
                return -EMSGSIZE;

        /*
         * stuff that drivers shouldn't do, but which shouldn't
         * cause problems in HCDs if they get it wrong.
         */

        /* Check that the pipe's type matches the endpoint's type */
        if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
                dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
                        usb_pipetype(urb->pipe), pipetypes[xfertype]);

        /* Check against a simple/standard policy */
        allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
                        URB_FREE_BUFFER);
        switch (xfertype) {
        case USB_ENDPOINT_XFER_BULK:
        case USB_ENDPOINT_XFER_INT:
                if (is_out)
                        allowed |= URB_ZERO_PACKET;
                /* FALLTHROUGH */
        case USB_ENDPOINT_XFER_CONTROL:
                allowed |= URB_NO_FSBR;         /* only affects UHCI */
                /* FALLTHROUGH */
        default:                                /* all non-iso endpoints */
                if (!is_out)
                        allowed |= URB_SHORT_NOT_OK;
                break;
        case USB_ENDPOINT_XFER_ISOC:
                allowed |= URB_ISO_ASAP;
                break;
        }
        allowed &= urb->transfer_flags;

        /* warn if submitter gave bogus flags */
        if (allowed != urb->transfer_flags)
                dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
                        urb->transfer_flags, allowed);

        /*
         * Force periodic transfer intervals to be legal values that are
         * a power of two (so HCDs don't need to).
         *
         * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC
         * supports different values... this uses EHCI/UHCI defaults (and
         * EHCI can use smaller non-default values).
         */
        switch (xfertype) {
        case USB_ENDPOINT_XFER_ISOC:
        case USB_ENDPOINT_XFER_INT:
                /* too small? */
                switch (dev->speed) {
                case USB_SPEED_WIRELESS:
                        if ((urb->interval < 6)
                                && (xfertype == USB_ENDPOINT_XFER_INT))
                                return -EINVAL;
                default:
                        if (urb->interval <= 0)
                                return -EINVAL;
                        break;
                }
                /* too big? */
                switch (dev->speed) {
                case USB_SPEED_SUPER:   /* units are 125us */
                        /* Handle up to 2^(16-1) microframes */
                        if (urb->interval > (1 << 15))
                                return -EINVAL;
                        max = 1 << 15;
                        break;
                case USB_SPEED_WIRELESS:
                        if (urb->interval > 16)
                                return -EINVAL;
                        break;
                case USB_SPEED_HIGH:    /* units are microframes */
                        /* NOTE usb handles 2^15 */
                        if (urb->interval > (1024 * 8))
                                urb->interval = 1024 * 8;
                        max = 1024 * 8;
                        break;
                case USB_SPEED_FULL:    /* units are frames/msec */
                case USB_SPEED_LOW:
                        if (xfertype == USB_ENDPOINT_XFER_INT) {
                                if (urb->interval > 255)
                                        return -EINVAL;
                                /* NOTE ohci only handles up to 32 */
                                max = 128;
                        } else {
                                if (urb->interval > 1024)
                                        urb->interval = 1024;
                                /* NOTE usb and ohci handle up to 2^15 */
                                max = 1024;
                        }
                        break;
                default:
                        return -EINVAL;
                }
                if (dev->speed != USB_SPEED_WIRELESS) {
                        /* Round down to a power of 2, no more than max */
                        urb->interval = min(max, 1 << ilog2(urb->interval));
                }
        }

        return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *      may be NULL
 *
 * This routine cancels an in-progress request. URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned. The disconnect function should synchronize with
 * a driver's I/O routines to insure that all URB-related activity has
 * completed before it returns.
 *
 * This request is asynchronous, however the HCD might call the ->complete()
 * callback during unlink. Therefore when drivers call usb_unlink_urb(), they
 * must not hold any locks that may be taken by the completion function.
 * Success is indicated by returning -EINPROGRESS, at which time the URB will
 * probably not yet have been given back to the device driver. When it is
 * eventually called, the completion function will see @urb->status ==
 * -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must insure that the
 * completion handler cannot deallocate the URB.
 *
 * Return: -EINPROGRESS on success. See description for other values on
 * failure.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue. Normally the queue advances as the controller
 * hardware processes each request. But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns. It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns. The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO. Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors. Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates. Such queues do not stop when an URB
 * encounters an error or is unlinked. An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set. By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
        if (!urb)
                return -EINVAL;
        if (!urb->dev)
                return -ENODEV;
        if (!urb->ep)
                return -EIDRM;
        return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *      may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse. These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function. If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must insure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
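 *
 * A hedged sketch of the usual disconnect-time usage ("my_dev" and its
 * fields are hypothetical; the core already guarantees the URB is idle
 * when usb_kill_urb() returns):
 *
 *        static void my_disconnect(struct usb_interface *intf)
 *        {
 *                struct my_dev *dev = usb_get_intfdata(intf);
 *
 *                usb_kill_urb(dev->int_urb);
 *                usb_free_urb(dev->int_urb);
 *        }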
 */
void usb_kill_urb(struct urb *urb)
{
        might_sleep();
        if (!(urb && urb->dev && urb->ep))
                return;
        atomic_inc(&urb->reject);

        usb_hcd_unlink_urb(urb, -ENOENT);
        wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

        atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *      may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused. These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must insure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
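 *
 * A hedged sketch of one common pairing ("my_dev" is hypothetical): poison
 * the URB across a device reset so no completion handler can resubmit it,
 * then unpoison and resubmit once the device is usable again:
 *
 *        static int my_pre_reset(struct usb_interface *intf)
 *        {
 *                struct my_dev *dev = usb_get_intfdata(intf);
 *
 *                usb_poison_urb(dev->int_urb);
 *                return 0;
 *        }
 *
 *        static int my_post_reset(struct usb_interface *intf)
 *        {
 *                struct my_dev *dev = usb_get_intfdata(intf);
 *
 *                usb_unpoison_urb(dev->int_urb);
 *                return usb_submit_urb(dev->int_urb, GFP_NOIO);
 *        }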
 */
void usb_poison_urb(struct urb *urb)
{
        might_sleep();
        if (!urb)
                return;
        atomic_inc(&urb->reject);

        if (!urb->dev || !urb->ep)
                return;

        usb_hcd_unlink_urb(urb, -ENOENT);
        wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
        if (!urb)
                return;

        atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);

/**
 * usb_block_urb - reliably prevent further use of an URB
 * @urb: pointer to URB to be blocked, may be NULL
 *
 * After the routine has run, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must insure that the
 * completion handler cannot deallocate the URB.
 */
void usb_block_urb(struct urb *urb)
{
        if (!urb)
                return;

        atomic_inc(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_block_urb);

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be killed starting
 * from the back of the queue
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;

        spin_lock_irq(&anchor->lock);
        while (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.prev, struct urb,
                                    anchor_list);
                /* we must make sure the URB isn't freed before we kill it */
                usb_get_urb(victim);
                spin_unlock_irq(&anchor->lock);
                /* this will unanchor the URB */
                usb_kill_urb(victim);
                usb_put_urb(victim);
                spin_lock_irq(&anchor->lock);
        }
        spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);


/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be poisoned starting
 * from the back of the queue. Newly added URBs will also be
 * poisoned
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;

        spin_lock_irq(&anchor->lock);
        anchor->poisoned = 1;
        while (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.prev, struct urb,
                                    anchor_list);
                /* we must make sure the URB isn't freed before we kill it */
                usb_get_urb(victim);
                spin_unlock_irq(&anchor->lock);
                /* this will unanchor the URB */
                usb_poison_urb(victim);
                usb_put_urb(victim);
                spin_lock_irq(&anchor->lock);
        }
        spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs(); the anchor can be
 * used normally after it returns.
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
        unsigned long flags;
        struct urb *lazarus;

        spin_lock_irqsave(&anchor->lock, flags);
        list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
                usb_unpoison_urb(lazarus);
        }
        anchor->poisoned = 0;
        spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be unlinked starting
 * from the back of the queue. This function is asynchronous.
 * The unlinking is just triggered. It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;

        while ((victim = usb_get_from_anchor(anchor)) != NULL) {
                usb_unlink_urb(victim);
                usb_put_urb(victim);
        }
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_anchor_suspend_wakeups
 * @anchor: the anchor you want to suspend wakeups on
 *
 * Call this to stop the last urb being unanchored from waking up any
 * usb_wait_anchor_empty_timeout waiters. This is used in the hcd urb give-
 * back path to delay waking up until after the completion handler has run.
 */
void usb_anchor_suspend_wakeups(struct usb_anchor *anchor)
{
        if (anchor)
                atomic_inc(&anchor->suspend_wakeups);
}
EXPORT_SYMBOL_GPL(usb_anchor_suspend_wakeups);

/**
 * usb_anchor_resume_wakeups
 * @anchor: the anchor you want to resume wakeups on
 *
 * Allow usb_wait_anchor_empty_timeout waiters to be woken up again, and
 * wake up any current waiters if the anchor is empty.
 */
void usb_anchor_resume_wakeups(struct usb_anchor *anchor)
{
        if (!anchor)
                return;

        atomic_dec(&anchor->suspend_wakeups);
        if (usb_anchor_check_wakeup(anchor))
                wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_anchor_resume_wakeups);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all of an anchor's
 * URBs have finished
 *
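 * A hedged sketch of a typical shutdown sequence ("dev->tx_anchor" and the
 * 1000 ms budget are hypothetical choices): wait for outstanding URBs to
 * drain, and fall back to killing them on timeout:
 *
 *        if (!usb_wait_anchor_empty_timeout(&dev->tx_anchor, 1000))
 *                usb_kill_anchored_urbs(&dev->tx_anchor);
 *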
 * Return: Non-zero if the anchor became unused. Zero on timeout.
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
                                  unsigned int timeout)
{
        return wait_event_timeout(anchor->wait,
                                  usb_anchor_check_wakeup(anchor),
                                  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * This will take the oldest urb from an anchor,
 * unanchor and return it
 *
 * Return: The oldest urb from @anchor, or %NULL if @anchor has no
 * urbs associated with it.
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
        struct urb *victim;
        unsigned long flags;

        spin_lock_irqsave(&anchor->lock, flags);
        if (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.next, struct urb,
                                    anchor_list);
                usb_get_urb(victim);
                __usb_unanchor_urb(victim, anchor);
        } else {
                victim = NULL;
        }
        spin_unlock_irqrestore(&anchor->lock, flags);

        return victim;
}

EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * use this to get rid of all an anchor's urbs
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;
        unsigned long flags;

        spin_lock_irqsave(&anchor->lock, flags);
        while (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.prev, struct urb,
                                    anchor_list);
                __usb_unanchor_urb(victim, anchor);
        }
        spin_unlock_irqrestore(&anchor->lock, flags);
}

EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * Return: 1 if the anchor has no urbs associated with it.
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
        return list_empty(&anchor->urb_list);
}

EXPORT_SYMBOL_GPL(usb_anchor_empty);