/* drivers/usb/core/urb.c */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function. Only use this if you allocate the
 * space for a struct urb on your own. If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);
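
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * that embeds a struct urb in its own device state, instead of calling
 * usb_alloc_urb(), must initialize it with usb_init_urb() before first
 * use. The structure and function names below are hypothetical.
 *
 *	struct my_device {
 *		struct urb int_urb;	// embedded, not usb_alloc_urb()ed
 *	};
 *
 *	static void my_device_init(struct my_device *md)
 *	{
 *		usb_init_urb(&md->int_urb);
 *		// the memory must stay allocated until the USB core is
 *		// guaranteed to be finished with the urb
 *	}
 */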

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 *
 * Return: A pointer to the new urb, or %NULL if no memory is available.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(sizeof(struct urb) +
		iso_packets * sizeof(struct usb_iso_packet_descriptor),
		mem_flags);
	if (!urb) {
		printk(KERN_ERR "alloc_urb: kmalloc failed\n");
		return NULL;
	}
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);
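
/*
 * Example (illustrative sketch): allocating an isochronous urb with room
 * for 8 packet descriptors, and a bulk urb with none. All names except
 * the usb_* calls are hypothetical.
 *
 *	struct urb *iso_urb, *bulk_urb;
 *
 *	iso_urb = usb_alloc_urb(8, GFP_KERNEL);  // 8 iso_frame_desc entries
 *	bulk_urb = usb_alloc_urb(0, GFP_KERNEL); // no iso packets needed
 *	if (!iso_urb || !bulk_urb)
 *		goto error;
 *	...
 *	usb_free_urb(iso_urb);	// drop our reference when finished
 *	usb_free_urb(bulk_urb);
 */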

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it. When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver. This allows proper reference counting to happen
 * for urbs.
 *
 * Return: A pointer to the urb with the incremented reference counter.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them individually.
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;

	if (unlikely(anchor->poisoned))
		atomic_inc(&urb->reject);

	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
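
/*
 * Example (illustrative sketch): the usual pattern is to anchor an urb
 * just before submission and to unanchor it by hand only if submission
 * fails; a successfully given-back urb is unanchored by the core.
 * "priv" and its "submitted" anchor are hypothetical.
 *
 *	usb_anchor_urb(urb, &priv->submitted);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	if (retval) {
 *		usb_unanchor_urb(urb);
 *		usb_free_urb(urb);
 *		return retval;
 *	}
 */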

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	usb_put_urb(urb);
	if (list_empty(&anchor->urb_list))
		wake_up(&anchor->wait);
}

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	/*
	 * At this point, we could be competing with another thread which
	 * has the same intention. To protect the urb from being unanchored
	 * twice, only the winner of the race gets the job.
	 */
	if (likely(anchor == urb->anchor))
		__usb_unanchor_urb(urb, anchor);
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);

/*-------------------------------------------------------------------*/

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem. Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * If the submission is successful, the complete() callback from the URB
 * will be called exactly once, when the USB core and Host Controller Driver
 * (HCD) are finished with the URB. When the completion function is called,
 * control of the URB is returned to the device driver which issued the
 * request. The completion handler may then immediately free or reuse that
 * URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling. For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units). And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.
 *
 * Not all isochronous transfer scheduling policies will work, but most
 * host controller drivers should easily handle ISO queues going from now
 * until 10-200 msec into the future. Drivers should try to keep at
 * least one or two msec of data in the queue; many controllers require
 * that new transfers start at least 1 msec in the future when they are
 * added. If the driver is unable to keep up and the queue empties out,
 * the behavior for new submissions is governed by the URB_ISO_ASAP flag.
 * If the flag is set, or if the queue is idle, then the URB is always
 * assigned to the first available (and not yet expired) slot in the
 * endpoint's schedule. If the flag is not set and the queue is active
 * then the URB is always assigned to the next slot in the schedule
 * following the end of the endpoint's previous URB, even if that slot is
 * in the past. When a packet is assigned in this way to a slot that has
 * already expired, the packet is not transmitted and the corresponding
 * usb_iso_packet_descriptor's status field will return -EXDEV. If this
 * would happen to all the packets in the URB, submission fails with a
 * -EXDEV error code.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification. For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Return:
 * 0 on successful submission. A negative error number otherwise.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput. With that queuing policy, an endpoint's queue would never
 * be empty. This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one. This was previously a HCD-specific behavior, except for ISO
 * transfers. Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb. Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected. If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail. Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks). When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled. This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc. There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	int xfertype, max;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;

	if (!urb || !urb->complete)
		return -EINVAL;
	if (urb->hcpriv) {
		WARN_ONCE(1, "URB %p submitted while active\n", urb);
		return -EBUSY;
	}

	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return -ENODEV;

	/* For now, get the endpoint from the pipe. Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = usb_pipe_endpoint(dev, urb->pipe);
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
				(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
				!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Clear the internal flags and cache the direction for later use */
	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
			URB_DMA_SG_COMBINED);
	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
			dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = usb_endpoint_maxp(&ep->desc);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int n, len;

		/* SuperSpeed isoc endpoints have up to 16 bursts of up to
		 * 3 packets each
		 */
		if (dev->speed == USB_SPEED_SUPER) {
			int burst = 1 + ep->ss_ep_comp.bMaxBurst;
			int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
			max *= burst;
			max *= mult;
		}

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int mult = 1 + ((max >> 11) & 0x03);
			max &= 0x07ff;
			max *= mult;
		}

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	} else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint &&
			dev->speed != USB_SPEED_WIRELESS) {
		struct scatterlist *sg;
		int i;

		for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
			if (sg->length % max)
				return -EINVAL;
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length > INT_MAX)
		return -EMSGSIZE;

#ifdef DEBUG
	/* stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */
	{
		unsigned int allowed;
		static int pipetypes[4] = {
			PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
		};

		/* Check that the pipe's type matches the endpoint's type */
		if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
			dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
				usb_pipetype(urb->pipe), pipetypes[xfertype]);

		/* Check against a simple/standard policy */
		allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
				URB_FREE_BUFFER);
		switch (xfertype) {
		case USB_ENDPOINT_XFER_BULK:
			if (is_out)
				allowed |= URB_ZERO_PACKET;
			/* FALLTHROUGH */
		case USB_ENDPOINT_XFER_CONTROL:
			allowed |= URB_NO_FSBR;	/* only affects UHCI */
			/* FALLTHROUGH */
		default:			/* all non-iso endpoints */
			if (!is_out)
				allowed |= URB_SHORT_NOT_OK;
			break;
		case USB_ENDPOINT_XFER_ISOC:
			allowed |= URB_ISO_ASAP;
			break;
		}
		allowed &= urb->transfer_flags;

		/* warn if submitter gave bogus flags */
		if (allowed != urb->transfer_flags)
			dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
				urb->transfer_flags, allowed);
	}
#endif
	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		switch (dev->speed) {
		case USB_SPEED_WIRELESS:
			if (urb->interval < 6)
				return -EINVAL;
			break;
		default:
			if (urb->interval <= 0)
				return -EINVAL;
			break;
		}
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_SUPER:	/* units are 125us */
			/* Handle up to 2^(16-1) microframes */
			if (urb->interval > (1 << 15))
				return -EINVAL;
			max = 1 << 15;
			break;
		case USB_SPEED_WIRELESS:
			if (urb->interval > 16)
				return -EINVAL;
			break;
		case USB_SPEED_HIGH:	/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		if (dev->speed != USB_SPEED_WIRELESS) {
			/* Round down to a power of 2, no more than max */
			urb->interval = min(max, 1 << ilog2(urb->interval));
		}
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
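
/*
 * Example (illustrative sketch): a completion handler that resubmits its
 * urb must use GFP_ATOMIC, per the rules above, while submission from
 * process context (probe, open, ioctl) can normally use GFP_KERNEL.
 * The handler below is hypothetical.
 *
 *	static void my_int_complete(struct urb *urb)
 *	{
 *		if (urb->status == 0) {
 *			// process urb->transfer_buffer ...
 *			if (usb_submit_urb(urb, GFP_ATOMIC))
 *				dev_err(&urb->dev->dev, "resubmit failed\n");
 *		}
 *		// -ECONNRESET, -ENOENT and -ESHUTDOWN mean the urb was
 *		// unlinked or killed; do not resubmit in those cases.
 *	}
 */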

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned. The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is asynchronous, however the HCD might call the ->complete()
 * callback during unlink. Therefore when drivers call usb_unlink_urb(), they
 * must not hold any locks that may be taken by the completion function.
 * Success is indicated by returning -EINPROGRESS, at which time the URB will
 * probably not yet have been given back to the device driver. When it is
 * eventually called, the completion function will see @urb->status ==
 * -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * Return: -EINPROGRESS on success. See description for other values on
 * failure.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue. Normally the queue advances as the controller
 * hardware processes each request. But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns. It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns. The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO. Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors. Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates. Such queues do not stop when an URB
 * encounters an error or is unlinked. An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set. By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);
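
/*
 * Example (illustrative sketch): unlike usb_kill_urb(), this call never
 * sleeps, so it is the way to cancel a transfer from atomic context.
 * "priv" and its urb are hypothetical.
 *
 *	// in some atomic context (timer, completion handler, irq):
 *	// usb_kill_urb() would be illegal here because it sleeps, so
 *	// just trigger the unlink; the completion handler will run
 *	// later with urb->status == -ECONNRESET
 *	usb_unlink_urb(priv->bulk_urb);
 */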

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse. These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function. If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
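
/*
 * Example (illustrative sketch): stopping a single outstanding urb from
 * a disconnect() method; after usb_kill_urb() returns, the completion
 * handler is guaranteed to have finished and the urb may be freed.
 * "priv" and the function name are hypothetical.
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_priv *priv = usb_get_intfdata(intf);
 *
 *		usb_kill_urb(priv->int_urb);	// sleeps until idle
 *		usb_free_urb(priv->int_urb);
 *		kfree(priv);
 *	}
 */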

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused. These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!urb)
		return;
	atomic_inc(&urb->reject);

	if (!urb->dev || !urb->ep)
		return;

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);
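
/*
 * Example (illustrative sketch): poisoning is useful around operations
 * that must keep an urb idle even though its completion handler would
 * normally resubmit it, e.g. across a device reset. The pre_reset()/
 * post_reset() pair and "priv" below are hypothetical.
 *
 *	static int my_pre_reset(struct usb_interface *intf)
 *	{
 *		struct my_priv *priv = usb_get_intfdata(intf);
 *
 *		usb_poison_urb(priv->int_urb);	// idle and unusable
 *		return 0;
 *	}
 *
 *	static int my_post_reset(struct usb_interface *intf)
 *	{
 *		struct my_priv *priv = usb_get_intfdata(intf);
 *
 *		usb_unpoison_urb(priv->int_urb);
 *		return usb_submit_urb(priv->int_urb, GFP_NOIO);
 *	}
 */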

/**
 * usb_block_urb - reliably prevent further use of an URB
 * @urb: pointer to URB to be blocked, may be NULL
 *
 * After the routine has run, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 */
void usb_block_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_inc(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_block_urb);

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be killed starting
 * from the back of the queue.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_kill_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
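
/*
 * Example (illustrative sketch): with all in-flight urbs on an anchor, a
 * disconnect() method can stop them in one call instead of tracking each
 * urb by hand. "priv" and its "submitted" anchor are hypothetical.
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_priv *priv = usb_get_intfdata(intf);
 *
 *		// kills from the back of the queue, waiting for each
 *		// completion handler to finish
 *		usb_kill_anchored_urbs(&priv->submitted);
 *	}
 */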


/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be poisoned starting
 * from the back of the queue. Newly added URBs will also be
 * poisoned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	anchor->poisoned = 1;
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs().
 * The anchor can be used normally after it returns.
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);

/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be unlinked starting
 * from the back of the queue. This function is asynchronous.
 * The unlinking is just triggered. It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(victim);
		usb_put_urb(victim);
	}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure that all of an anchor's
 * URBs have finished.
 *
 * Return: Non-zero if the anchor became unused. Zero on timeout.
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
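
/*
 * Example (illustrative sketch): one plausible suspend() pattern is to
 * wait a bounded time for anchored urbs to drain and then kill whatever
 * is left. The 1000 ms budget and "priv" are hypothetical.
 *
 *	static int my_suspend(struct usb_interface *intf, pm_message_t msg)
 *	{
 *		struct my_priv *priv = usb_get_intfdata(intf);
 *
 *		if (!usb_wait_anchor_empty_timeout(&priv->submitted, 1000))
 *			usb_kill_anchored_urbs(&priv->submitted); // timed out
 *		return 0;
 *	}
 */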

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * This will take the oldest urb from an anchor,
 * unanchor and return it
 *
 * Return: The oldest urb from @anchor, or %NULL if @anchor has no
 * urbs associated with it.
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		__usb_unanchor_urb(victim, anchor);
	} else {
		victim = NULL;
	}
	spin_unlock_irqrestore(&anchor->lock, flags);

	return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * use this to get rid of all an anchor's urbs
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		__usb_unanchor_urb(victim, anchor);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * Return: 1 if the anchor has no urbs associated with it.
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}
EXPORT_SYMBOL_GPL(usb_anchor_empty);