/* drivers/usb/core/urb.c */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include "hcd.h"

#define to_urb(d) container_of(d, struct urb, kref)

static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function. Only use this if you allocate the
 * space for a struct urb on your own. If you call this function, make
 * sure the urb is no longer in use by the USB core before you free its
 * memory.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);
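
/*
 * Illustrative sketch (not part of this file): initializing a statically
 * allocated urb in a hypothetical driver; "my_urb" is a made-up name.
 *
 *	static struct urb my_urb;
 *
 *	usb_init_urb(&my_urb);
 *	(use my_urb normally; never kfree() it, and make sure the USB
 *	core is finished with it before it goes out of scope)
 */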

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If no memory is available, NULL is returned.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(sizeof(struct urb) +
		iso_packets * sizeof(struct usb_iso_packet_descriptor),
		mem_flags);
	if (!urb) {
		printk(KERN_ERR "alloc_urb: kmalloc failed\n");
		return NULL;
	}
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);
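
/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * allocating an isochronous urb with 8 packet descriptors and dropping
 * its reference when done; only the name "urb" is assumed here.
 *
 *	struct urb *urb = usb_alloc_urb(8, GFP_KERNEL);
 *
 *	if (!urb)
 *		return -ENOMEM;
 *	(... fill and submit ...)
 *	usb_free_urb(urb);
 */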

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it. When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver. This allows proper reference counting to happen
 * for urbs.
 *
 * A pointer to the urb with the incremented reference counter is returned.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;

	if (unlikely(anchor->poisoned)) {
		atomic_inc(&urb->reject);
	}

	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
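
/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * anchoring each urb it submits so that all of them can be cancelled
 * in one call later; "my_dev" and its "anchor" member are made-up names.
 *
 *	init_usb_anchor(&my_dev->anchor);
 *	...
 *	usb_anchor_urb(urb, &my_dev->anchor);
 *	if (usb_submit_urb(urb, GFP_KERNEL))
 *		usb_unanchor_urb(urb);
 *	...
 *	(later, e.g. on teardown:)
 *	usb_kill_anchored_urbs(&my_dev->anchor);
 */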

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	if (unlikely(anchor != urb->anchor)) {
		/* we've lost the race to another thread */
		spin_unlock_irqrestore(&anchor->lock, flags);
		return;
	}
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	spin_unlock_irqrestore(&anchor->lock, flags);
	usb_put_urb(urb);
	if (list_empty(&anchor->urb_list))
		wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);

/*-------------------------------------------------------------------*/

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem. Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number. If the submission is successful, the complete()
 * callback from the URB will be called exactly once, when the USB core and
 * Host Controller Driver (HCD) are finished with the URB. When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request. The completion handler may then
 * immediately free or reuse that URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling. For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units). And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start. Not all isochronous transfer scheduling policies
 * will work, but most host controller drivers should easily handle ISO
 * queues going from now until 10-200 msec into the future.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification. For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput. With that queuing policy, an endpoint's queue would never
 * be empty. This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one. This was previously a HCD-specific behavior, except for ISO
 * transfers. Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb. Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks). When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled. This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc. There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 * (a) you are inside a completion handler, an interrupt, bottom half,
 * tasklet or timer, or
 * (b) you are holding a spinlock or rwlock (does not apply to
 * semaphores), or
 * (c) current->state != TASK_RUNNING, this is the case only after
 * you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 * (1) start_xmit, timeout, and receive methods of network drivers must
 * use GFP_ATOMIC (they are called with a spinlock held);
 * (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 * called with a spinlock held);
 * (3) If you use a kernel thread with a network driver you must use
 * GFP_NOIO, unless (b) or (c) apply;
 * (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 * apply or you are in a storage driver's block io path;
 * (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 * (6) changing firmware on a running storage or net device uses
 * GFP_NOIO, unless (b) or (c) apply
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	int xfertype, max;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;

	if (!urb || urb->hcpriv || !urb->complete)
		return -EINVAL;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_DEFAULT))
		return -ENODEV;

	/* For now, get the endpoint from the pipe. Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
			[usb_pipeendpoint(urb->pipe)];
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
				(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
				!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Cache the direction for later use */
	urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) |
			(is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
			dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = le16_to_cpu(ep->desc.wMaxPacketSize);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int n, len;

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int mult = 1 + ((max >> 11) & 0x03);
			max &= 0x07ff;
			max *= mult;
		}

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length < 0)
		return -EMSGSIZE;

#ifdef DEBUG
	/* stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */
	{
	unsigned int orig_flags = urb->transfer_flags;
	unsigned int allowed;

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
			URB_NO_INTERRUPT | URB_DIR_MASK | URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_CONTROL:
		allowed |= URB_NO_FSBR;	/* only affects UHCI */
		/* FALLTHROUGH */
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	urb->transfer_flags &= allowed;

	/* fail if submitter gave bogus flags */
	if (urb->transfer_flags != orig_flags) {
		dev_err(&dev->dev, "BOGUS urb flags, %x --> %x\n",
			orig_flags, urb->transfer_flags);
		return -EINVAL;
	}
	}
#endif
	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		if (urb->interval <= 0)
			return -EINVAL;
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_HIGH:	/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		/* Round down to a power of 2, no more than max */
		urb->interval = min(max, 1 << ilog2(urb->interval));
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
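
/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * submitting a bulk OUT urb; my_complete(), udev, buf, len and the
 * endpoint number 2 are all made-up for the example.
 *
 *	static void my_complete(struct urb *urb)
 *	{
 *		if (urb->status)
 *			dev_dbg(&urb->dev->dev, "urb failed: %d\n",
 *				urb->status);
 *	}
 *
 *	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
 *			  buf, len, my_complete, NULL);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	if (retval)
 *		usb_free_urb(urb);	(submission failed; drop our reference)
 */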

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned. The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is always asynchronous. Success is indicated by
 * returning -EINPROGRESS, at which time the URB will probably not yet
 * have been given back to the device driver. When it is eventually
 * called, the completion function will see @urb->status == -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue. Normally the queue advances as the controller
 * hardware processes each request. But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns. It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns. The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO. Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors. Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates. Such queues do not stop when an URB
 * encounters an error or is unlinked. An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set. By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);
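
/*
 * Illustrative sketch (not part of this file): asynchronously cancelling
 * a pending urb from a context that must not sleep, e.g. a hypothetical
 * timer handler; "my_urb" is a made-up name.
 *
 *	usb_unlink_urb(my_urb);
 *	(the completion handler will later run with status -ECONNRESET)
 */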

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse. These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function. If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
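
/*
 * Illustrative sketch (not part of this file): stopping I/O in a
 * hypothetical driver's disconnect() method; "struct my_device" and
 * its "urb" member are made-up names.
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_device *my_dev = usb_get_intfdata(intf);
 *
 *		usb_kill_urb(my_dev->urb);	(may sleep; urb is idle after)
 *		...
 *	}
 */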

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused. These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);
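
/*
 * Illustrative sketch (not part of this file): poisoning an urb around
 * a hypothetical device reset so no resubmission can sneak in, then
 * re-enabling it; "my_dev" and its "urb" member are made-up names.
 *
 *	usb_poison_urb(my_dev->urb);	(e.g. in pre_reset(); may sleep)
 *	...
 *	usb_unpoison_urb(my_dev->urb);	(e.g. in post_reset(); urb usable again)
 */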

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be killed starting
 * from the back of the queue
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_kill_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);

/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be poisoned starting
 * from the back of the queue. Newly added URBs will also be
 * poisoned
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	anchor->poisoned = 1;
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be unlinked starting
 * from the back of the queue. This function is asynchronous.
 * The unlinking is just triggered. It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		spin_unlock_irqrestore(&anchor->lock, flags);
		/* this will unanchor the URB */
		usb_unlink_urb(victim);
		usb_put_urb(victim);
		spin_lock_irqsave(&anchor->lock, flags);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all an anchor's
 * URBs have finished
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
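
/*
 * Illustrative sketch (not part of this file): waiting up to one second
 * for a hypothetical device's anchored urbs to drain, then forcing the
 * stragglers; "my_dev" and its "anchor" member are made-up names.
 *
 *	if (!usb_wait_anchor_empty_timeout(&my_dev->anchor, 1000))
 *		usb_kill_anchored_urbs(&my_dev->anchor);
 */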

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * this will take the oldest urb from an anchor,
 * unanchor and return it
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		spin_unlock_irqrestore(&anchor->lock, flags);
		usb_unanchor_urb(victim);
	} else {
		spin_unlock_irqrestore(&anchor->lock, flags);
		victim = NULL;
	}

	return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);
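
/*
 * Illustrative sketch (not part of this file): draining an anchor one
 * urb at a time, oldest first; "anchor" is assumed to point to an
 * initialized struct usb_anchor.
 *
 *	struct urb *urb;
 *
 *	while ((urb = usb_get_from_anchor(anchor)) != NULL) {
 *		usb_kill_urb(urb);
 *		usb_put_urb(urb);
 *	}
 */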

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * use this to get rid of all an anchor's urbs
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		spin_unlock_irqrestore(&anchor->lock, flags);
		/* this may free the URB */
		usb_unanchor_urb(victim);
		usb_put_urb(victim);
		spin_lock_irqsave(&anchor->lock, flags);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * returns 1 if the anchor has no urbs associated with it
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}
EXPORT_SYMBOL_GPL(usb_anchor_empty);