/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * How transfers work: get a buffer, break it up into segments (segment
 * size is a multiple of the maxpacket size). For each segment issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple, it is a pain to implement.
 *
 *
 * ENTRY POINTS
 *
 *   FIXME
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 *   FIXME
 *
 * THIS CODE IS DISGUSTING
 *
 *   Warned you are; it's my second try and still not happy with it.
 *
 * NOTES:
 *
 *   - No iso
 *
 *   - Supports DMA xfers, control, bulk and maybe interrupt
 *
 *   - Does not recycle unused rpipes
 *
 *     An rpipe is assigned to an endpoint the first time it is used,
 *     and then it's there, assigned, until the endpoint is disabled
 *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *     (should be a mutex).
 *
 *     Two ways it could be done:
 *
 *     (a) set up a timer every time an rpipe's use count drops to 1
 *         (which means unused) or when a transfer ends. Reset the
 *         timer when a xfer is queued. If the timer expires, release
 *         the rpipe [see rpipe_ep_disable()].
 *
 *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 *         when none are found go over the list, check their endpoint
 *         and their activity record (if no last-xfer-done-ts in the
 *         last x seconds) take it
 *
 *     However, due to the fact that we have a set of limited
 *     resources (max-segments-at-the-same-time per xfer,
 *     xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
 *     we are going to have to rebuild all this based on a scheduler,
 *     where we have a list of transactions to do and based on the
 *     availability of the different required components (blocks,
 *     rpipes, segment slots, etc), we go scheduling them. Painful.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum {
	WA_SEGS_MAX = 255,
};

enum wa_seg_status {
	WA_SEG_NOTREADY,
	WA_SEG_READY,
	WA_SEG_DELAYED,
	WA_SEG_SUBMITTED,
	WA_SEG_PENDING,
	WA_SEG_DTI_PENDING,
	WA_SEG_DONE,
	WA_SEG_ERROR,
	WA_SEG_ABORTED,
};
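
/*
 * Rough segment life cycle as implemented below (a best-effort sketch
 * read from the code, not a normative diagram; ERROR/ABORTED can be
 * entered from most states):
 *
 *	NOTREADY -> READY -> [DELAYED ->] SUBMITTED -> PENDING
 *		 -> [DTI_PENDING ->] DONE
 */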
108 | ||
109 | static void wa_xfer_delayed_run(struct wa_rpipe *); | |
110 | ||
111 | /* | |
112 | * Life cycle governed by 'struct urb' (the refcount of the struct is | |
113 | * that of the 'struct urb' and usb_free_urb() would free the whole | |
114 | * struct). | |
115 | */ | |
116 | struct wa_seg { | |
117 | struct urb urb; | |
118 | struct urb *dto_urb; /* for data output? */ | |
119 | struct list_head list_node; /* for rpipe->req_list */ | |
120 | struct wa_xfer *xfer; /* out xfer */ | |
121 | u8 index; /* which segment we are */ | |
122 | enum wa_seg_status status; | |
123 | ssize_t result; /* bytes xfered or error */ | |
124 | struct wa_xfer_hdr xfer_hdr; | |
125 | u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */ | |
126 | }; | |
127 | ||
66591015 | 128 | static inline void wa_seg_init(struct wa_seg *seg) |
df365423 | 129 | { |
66591015 TP |
130 | usb_init_urb(&seg->urb); |
131 | ||
132 | /* set the remaining memory to 0. */ | |
133 | memset(((void *)seg) + sizeof(seg->urb), 0, | |
134 | sizeof(*seg) - sizeof(seg->urb)); | |
df365423 IPG |
135 | } |
136 | ||
137 | /* | |
138 | * Protected by xfer->lock | |
139 | * | |
140 | */ | |
141 | struct wa_xfer { | |
142 | struct kref refcnt; | |
143 | struct list_head list_node; | |
144 | spinlock_t lock; | |
145 | u32 id; | |
146 | ||
147 | struct wahc *wa; /* Wire adapter we are plugged to */ | |
148 | struct usb_host_endpoint *ep; | |
25985edc | 149 | struct urb *urb; /* URB we are transferring for */ |
df365423 IPG |
150 | struct wa_seg **seg; /* transfer segments */ |
151 | u8 segs, segs_submitted, segs_done; | |
152 | unsigned is_inbound:1; | |
153 | unsigned is_dma:1; | |
154 | size_t seg_size; | |
155 | int result; | |
156 | ||
157 | gfp_t gfp; /* allocation mask */ | |
158 | ||
159 | struct wusb_dev *wusb_dev; /* for activity timestamps */ | |
160 | }; | |
161 | ||
162 | static inline void wa_xfer_init(struct wa_xfer *xfer) | |
163 | { | |
164 | kref_init(&xfer->refcnt); | |
165 | INIT_LIST_HEAD(&xfer->list_node); | |
166 | spin_lock_init(&xfer->lock); | |
167 | } | |
168 | ||
169 | /* | |
25985edc | 170 | * Destroy a transfer structure |
df365423 | 171 | * |
79731cbd TP |
172 | * Note that freeing xfer->seg[cnt]->urb will free the containing |
173 | * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs. | |
df365423 IPG |
174 | */ |
175 | static void wa_xfer_destroy(struct kref *_xfer) | |
176 | { | |
177 | struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt); | |
178 | if (xfer->seg) { | |
179 | unsigned cnt; | |
180 | for (cnt = 0; cnt < xfer->segs; cnt++) { | |
79731cbd TP |
181 | usb_free_urb(xfer->seg[cnt]->dto_urb); |
182 | usb_free_urb(&xfer->seg[cnt]->urb); | |
df365423 IPG |
183 | } |
184 | } | |
185 | kfree(xfer); | |
df365423 IPG |
186 | } |
187 | ||
188 | static void wa_xfer_get(struct wa_xfer *xfer) | |
189 | { | |
190 | kref_get(&xfer->refcnt); | |
191 | } | |
192 | ||
193 | static void wa_xfer_put(struct wa_xfer *xfer) | |
194 | { | |
df365423 | 195 | kref_put(&xfer->refcnt, wa_xfer_destroy); |
df365423 IPG |
196 | } |
197 | ||
198 | /* | |
199 | * xfer is referenced | |
200 | * | |
201 | * xfer->lock has to be unlocked | |
202 | * | |
203 | * We take xfer->lock for setting the result; this is a barrier | |
204 | * against drivers/usb/core/hcd.c:unlink1() being called after we call | |
205 | * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a | |
206 | * reference to the transfer. | |
207 | */ | |
208 | static void wa_xfer_giveback(struct wa_xfer *xfer) | |
209 | { | |
210 | unsigned long flags; | |
bce83697 | 211 | |
df365423 IPG |
212 | spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags); |
213 | list_del_init(&xfer->list_node); | |
214 | spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags); | |
215 | /* FIXME: segmentation broken -- kills DWA */ | |
216 | wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); | |
217 | wa_put(xfer->wa); | |
218 | wa_xfer_put(xfer); | |
df365423 IPG |
219 | } |
220 | ||
221 | /* | |
222 | * xfer is referenced | |
223 | * | |
224 | * xfer->lock has to be unlocked | |
225 | */ | |
226 | static void wa_xfer_completion(struct wa_xfer *xfer) | |
227 | { | |
df365423 IPG |
228 | if (xfer->wusb_dev) |
229 | wusb_dev_put(xfer->wusb_dev); | |
230 | rpipe_put(xfer->ep->hcpriv); | |
231 | wa_xfer_giveback(xfer); | |
df365423 IPG |
232 | } |
233 | ||
234 | /* | |
235 | * If transfer is done, wrap it up and return true | |
236 | * | |
237 | * xfer->lock has to be locked | |
238 | */ | |
239 | static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) | |
240 | { | |
bce83697 | 241 | struct device *dev = &xfer->wa->usb_iface->dev; |
df365423 IPG |
242 | unsigned result, cnt; |
243 | struct wa_seg *seg; | |
244 | struct urb *urb = xfer->urb; | |
245 | unsigned found_short = 0; | |
246 | ||
df365423 IPG |
247 | result = xfer->segs_done == xfer->segs_submitted; |
248 | if (result == 0) | |
249 | goto out; | |
250 | urb->actual_length = 0; | |
251 | for (cnt = 0; cnt < xfer->segs; cnt++) { | |
252 | seg = xfer->seg[cnt]; | |
253 | switch (seg->status) { | |
254 | case WA_SEG_DONE: | |
255 | if (found_short && seg->result > 0) { | |
bce83697 DV |
256 | dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n", |
257 | xfer, cnt, seg->result); | |
df365423 IPG |
258 | urb->status = -EINVAL; |
259 | goto out; | |
260 | } | |
261 | urb->actual_length += seg->result; | |
262 | if (seg->result < xfer->seg_size | |
263 | && cnt != xfer->segs-1) | |
264 | found_short = 1; | |
bce83697 DV |
265 | dev_dbg(dev, "xfer %p#%u: DONE short %d " |
266 | "result %zu urb->actual_length %d\n", | |
267 | xfer, seg->index, found_short, seg->result, | |
268 | urb->actual_length); | |
df365423 IPG |
269 | break; |
270 | case WA_SEG_ERROR: | |
271 | xfer->result = seg->result; | |
bce83697 DV |
272 | dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n", |
273 | xfer, seg->index, seg->result); | |
df365423 IPG |
274 | goto out; |
275 | case WA_SEG_ABORTED: | |
bce83697 DV |
276 | dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n", |
277 | xfer, seg->index, urb->status); | |
df365423 IPG |
278 | xfer->result = urb->status; |
279 | goto out; | |
280 | default: | |
bce83697 DV |
281 | dev_warn(dev, "xfer %p#%u: is_done bad state %d\n", |
282 | xfer, cnt, seg->status); | |
df365423 | 283 | xfer->result = -EINVAL; |
df365423 IPG |
284 | goto out; |
285 | } | |
286 | } | |
287 | xfer->result = 0; | |
288 | out: | |
df365423 IPG |
289 | return result; |
290 | } | |
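
/*
 * Example of the short-read rule above, with hypothetical sizes: given
 * three 8192 byte segments, results of 8192/4096/0 are accepted (only
 * trailing data is missing), while 8192/4096/1024 marks the middle
 * segment short and then, on seeing more data after it, sets
 * urb->status to -EINVAL, as that would leave a hole in the URB buffer.
 */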
291 | ||
292 | /* | |
293 | * Initialize a transfer's ID | |
294 | * | |
295 | * We need to use a sequential number; if we use the pointer or the | |
296 | * hash of the pointer, it can repeat over sequential transfers and | |
297 | * then it will confuse the HWA....wonder why in hell they put a 32 | |
298 | * bit handle in there then. | |
299 | */ | |
300 | static void wa_xfer_id_init(struct wa_xfer *xfer) | |
301 | { | |
302 | xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); | |
303 | } | |
304 | ||
305 | /* | |
306 | * Return the xfer's ID associated with xfer | |
307 | * | |
308 | * Need to generate a | |
309 | */ | |
310 | static u32 wa_xfer_id(struct wa_xfer *xfer) | |
311 | { | |
312 | return xfer->id; | |
313 | } | |
314 | ||
315 | /* | |
316 | * Search for a transfer list ID on the HCD's URB list | |
317 | * | |
318 | * For 32 bit architectures, we use the pointer itself; for 64 bits, a | |
319 | * 32-bit hash of the pointer. | |
320 | * | |
321 | * @returns NULL if not found. | |
322 | */ | |
323 | static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id) | |
324 | { | |
325 | unsigned long flags; | |
326 | struct wa_xfer *xfer_itr; | |
327 | spin_lock_irqsave(&wa->xfer_list_lock, flags); | |
328 | list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) { | |
329 | if (id == xfer_itr->id) { | |
330 | wa_xfer_get(xfer_itr); | |
331 | goto out; | |
332 | } | |
333 | } | |
334 | xfer_itr = NULL; | |
335 | out: | |
336 | spin_unlock_irqrestore(&wa->xfer_list_lock, flags); | |
337 | return xfer_itr; | |
338 | } | |
339 | ||
340 | struct wa_xfer_abort_buffer { | |
341 | struct urb urb; | |
342 | struct wa_xfer_abort cmd; | |
343 | }; | |
344 | ||
345 | static void __wa_xfer_abort_cb(struct urb *urb) | |
346 | { | |
347 | struct wa_xfer_abort_buffer *b = urb->context; | |
348 | usb_put_urb(&b->urb); | |
349 | } | |
350 | ||
351 | /* | |
352 | * Aborts an ongoing transaction | |
353 | * | |
354 | * Assumes the transfer is referenced and locked and in a submitted | |
355 | * state (mainly that there is an endpoint/rpipe assigned). | |
356 | * | |
357 | * The callback (see above) does nothing but freeing up the data by | |
358 | * putting the URB. Because the URB is allocated at the head of the | |
359 | * struct, the whole space we allocated is kfreed. | |
360 | * | |
361 | * We'll get an 'aborted transaction' xfer result on DTI, that'll | |
362 | * politely ignore because at this point the transaction has been | |
363 | * marked as aborted already. | |
364 | */ | |
365 | static void __wa_xfer_abort(struct wa_xfer *xfer) | |
366 | { | |
367 | int result; | |
368 | struct device *dev = &xfer->wa->usb_iface->dev; | |
369 | struct wa_xfer_abort_buffer *b; | |
370 | struct wa_rpipe *rpipe = xfer->ep->hcpriv; | |
371 | ||
372 | b = kmalloc(sizeof(*b), GFP_ATOMIC); | |
373 | if (b == NULL) | |
374 | goto error_kmalloc; | |
375 | b->cmd.bLength = sizeof(b->cmd); | |
376 | b->cmd.bRequestType = WA_XFER_ABORT; | |
377 | b->cmd.wRPipe = rpipe->descr.wRPipeIndex; | |
378 | b->cmd.dwTransferID = wa_xfer_id(xfer); | |
379 | ||
380 | usb_init_urb(&b->urb); | |
381 | usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev, | |
382 | usb_sndbulkpipe(xfer->wa->usb_dev, | |
383 | xfer->wa->dto_epd->bEndpointAddress), | |
384 | &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b); | |
385 | result = usb_submit_urb(&b->urb, GFP_ATOMIC); | |
386 | if (result < 0) | |
387 | goto error_submit; | |
388 | return; /* callback frees! */ | |
389 | ||
390 | ||
391 | error_submit: | |
392 | if (printk_ratelimit()) | |
393 | dev_err(dev, "xfer %p: Can't submit abort request: %d\n", | |
394 | xfer, result); | |
395 | kfree(b); | |
396 | error_kmalloc: | |
397 | return; | |
398 | ||
399 | } | |
400 | ||
401 | /* | |
402 | * | |
403 | * @returns < 0 on error, transfer segment request size if ok | |
404 | */ | |
405 | static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer, | |
406 | enum wa_xfer_type *pxfer_type) | |
407 | { | |
408 | ssize_t result; | |
409 | struct device *dev = &xfer->wa->usb_iface->dev; | |
410 | size_t maxpktsize; | |
411 | struct urb *urb = xfer->urb; | |
412 | struct wa_rpipe *rpipe = xfer->ep->hcpriv; | |
413 | ||
df365423 IPG |
414 | switch (rpipe->descr.bmAttribute & 0x3) { |
415 | case USB_ENDPOINT_XFER_CONTROL: | |
416 | *pxfer_type = WA_XFER_TYPE_CTL; | |
417 | result = sizeof(struct wa_xfer_ctl); | |
418 | break; | |
419 | case USB_ENDPOINT_XFER_INT: | |
420 | case USB_ENDPOINT_XFER_BULK: | |
421 | *pxfer_type = WA_XFER_TYPE_BI; | |
422 | result = sizeof(struct wa_xfer_bi); | |
423 | break; | |
424 | case USB_ENDPOINT_XFER_ISOC: | |
425 | dev_err(dev, "FIXME: ISOC not implemented\n"); | |
426 | result = -ENOSYS; | |
427 | goto error; | |
428 | default: | |
429 | /* never happens */ | |
430 | BUG(); | |
431 | result = -EINVAL; /* shut gcc up */ | |
432 | }; | |
433 | xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0; | |
434 | xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0; | |
435 | xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks) | |
436 | * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1); | |
437 | /* Compute the segment size and make sure it is a multiple of | |
438 | * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of | |
439 | * a check (FIXME) */ | |
440 | maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize); | |
441 | if (xfer->seg_size < maxpktsize) { | |
442 | dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize " | |
443 | "%zu\n", xfer->seg_size, maxpktsize); | |
444 | result = -EINVAL; | |
445 | goto error; | |
446 | } | |
447 | xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize; | |
2b81c083 | 448 | xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size); |
df365423 IPG |
449 | if (xfer->segs >= WA_SEGS_MAX) { |
450 | dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n", | |
451 | (int)(urb->transfer_buffer_length / xfer->seg_size), | |
452 | WA_SEGS_MAX); | |
453 | result = -EINVAL; | |
454 | goto error; | |
455 | } | |
456 | if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) | |
457 | xfer->segs = 1; | |
458 | error: | |
df365423 IPG |
459 | return result; |
460 | } | |
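
/*
 * Sizing example with made-up descriptor values: wBlocks = 64 and
 * bRPipeBlockSize = 8 give seg_size = 64 << 7 = 8192 bytes; with
 * wMaxPacketSize = 512 that is already a multiple, so the rounding
 * above leaves it untouched, and a 20000 byte URB then needs
 * DIV_ROUND_UP(20000, 8192) = 3 segments.
 */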
461 | ||
bce83697 | 462 | /* Fill in the common request header and xfer-type specific data. */ |
df365423 IPG |
463 | static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer, |
464 | struct wa_xfer_hdr *xfer_hdr0, | |
465 | enum wa_xfer_type xfer_type, | |
466 | size_t xfer_hdr_size) | |
467 | { | |
468 | struct wa_rpipe *rpipe = xfer->ep->hcpriv; | |
469 | ||
470 | xfer_hdr0 = &xfer->seg[0]->xfer_hdr; | |
471 | xfer_hdr0->bLength = xfer_hdr_size; | |
472 | xfer_hdr0->bRequestType = xfer_type; | |
473 | xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex; | |
474 | xfer_hdr0->dwTransferID = wa_xfer_id(xfer); | |
475 | xfer_hdr0->bTransferSegment = 0; | |
476 | switch (xfer_type) { | |
477 | case WA_XFER_TYPE_CTL: { | |
478 | struct wa_xfer_ctl *xfer_ctl = | |
479 | container_of(xfer_hdr0, struct wa_xfer_ctl, hdr); | |
480 | xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0; | |
df365423 IPG |
481 | memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet, |
482 | sizeof(xfer_ctl->baSetupData)); | |
483 | break; | |
484 | } | |
485 | case WA_XFER_TYPE_BI: | |
486 | break; | |
487 | case WA_XFER_TYPE_ISO: | |
488 | printk(KERN_ERR "FIXME: ISOC not implemented\n"); | |
489 | default: | |
490 | BUG(); | |
491 | }; | |
492 | } | |
493 | ||
494 | /* | |
495 | * Callback for the OUT data phase of the segment request | |
496 | * | |
497 | * Check wa_seg_cb(); most comments also apply here because this | |
498 | * function does almost the same thing and they work closely | |
499 | * together. | |
500 | * | |
25985edc | 501 | * If the seg request has failed but this DTO phase has succeeded, |
df365423 IPG |
502 | * wa_seg_cb() has already failed the segment and moved the |
503 | * status to WA_SEG_ERROR, so this will go through 'case 0' and | |
504 | * effectively do nothing. | |
505 | */ | |
506 | static void wa_seg_dto_cb(struct urb *urb) | |
507 | { | |
508 | struct wa_seg *seg = urb->context; | |
509 | struct wa_xfer *xfer = seg->xfer; | |
510 | struct wahc *wa; | |
511 | struct device *dev; | |
512 | struct wa_rpipe *rpipe; | |
513 | unsigned long flags; | |
514 | unsigned rpipe_ready = 0; | |
515 | u8 done = 0; | |
516 | ||
df365423 IPG |
517 | switch (urb->status) { |
518 | case 0: | |
519 | spin_lock_irqsave(&xfer->lock, flags); | |
520 | wa = xfer->wa; | |
521 | dev = &wa->usb_iface->dev; | |
bce83697 DV |
522 | dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n", |
523 | xfer, seg->index, urb->actual_length); | |
df365423 IPG |
524 | if (seg->status < WA_SEG_PENDING) |
525 | seg->status = WA_SEG_PENDING; | |
526 | seg->result = urb->actual_length; | |
527 | spin_unlock_irqrestore(&xfer->lock, flags); | |
528 | break; | |
529 | case -ECONNRESET: /* URB unlinked; no need to do anything */ | |
530 | case -ENOENT: /* as it was done by the who unlinked us */ | |
531 | break; | |
532 | default: /* Other errors ... */ | |
533 | spin_lock_irqsave(&xfer->lock, flags); | |
534 | wa = xfer->wa; | |
535 | dev = &wa->usb_iface->dev; | |
536 | rpipe = xfer->ep->hcpriv; | |
bce83697 DV |
537 | dev_dbg(dev, "xfer %p#%u: data out error %d\n", |
538 | xfer, seg->index, urb->status); | |
df365423 IPG |
539 | if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, |
540 | EDC_ERROR_TIMEFRAME)){ | |
541 | dev_err(dev, "DTO: URB max acceptable errors " | |
542 | "exceeded, resetting device\n"); | |
543 | wa_reset_all(wa); | |
544 | } | |
545 | if (seg->status != WA_SEG_ERROR) { | |
546 | seg->status = WA_SEG_ERROR; | |
547 | seg->result = urb->status; | |
548 | xfer->segs_done++; | |
549 | __wa_xfer_abort(xfer); | |
550 | rpipe_ready = rpipe_avail_inc(rpipe); | |
551 | done = __wa_xfer_is_done(xfer); | |
552 | } | |
553 | spin_unlock_irqrestore(&xfer->lock, flags); | |
554 | if (done) | |
555 | wa_xfer_completion(xfer); | |
556 | if (rpipe_ready) | |
557 | wa_xfer_delayed_run(rpipe); | |
558 | } | |
df365423 IPG |
559 | } |
560 | ||
561 | /* | |
562 | * Callback for the segment request | |
563 | * | |
af901ca1 | 564 | * If successful transition state (unless already transitioned or |
df365423 IPG |
565 | * outbound transfer); otherwise, take a note of the error, mark this |
566 | * segment done and try completion. | |
567 | * | |
568 | * Note we don't access until we are sure that the transfer hasn't | |
569 | * been cancelled (ECONNRESET, ENOENT), which could mean that | |
570 | * seg->xfer could be already gone. | |
571 | * | |
572 | * We have to check before setting the status to WA_SEG_PENDING | |
573 | * because sometimes the xfer result callback arrives before this | |
574 | * callback (geeeeeeze), so it might happen that we are already in | |
575 | * another state. As well, we don't set it if the transfer is inbound, | |
576 | * as in that case, wa_seg_dto_cb will do it when the OUT data phase | |
577 | * finishes. | |
578 | */ | |
579 | static void wa_seg_cb(struct urb *urb) | |
580 | { | |
581 | struct wa_seg *seg = urb->context; | |
582 | struct wa_xfer *xfer = seg->xfer; | |
583 | struct wahc *wa; | |
584 | struct device *dev; | |
585 | struct wa_rpipe *rpipe; | |
586 | unsigned long flags; | |
587 | unsigned rpipe_ready; | |
588 | u8 done = 0; | |
589 | ||
df365423 IPG |
590 | switch (urb->status) { |
591 | case 0: | |
592 | spin_lock_irqsave(&xfer->lock, flags); | |
593 | wa = xfer->wa; | |
594 | dev = &wa->usb_iface->dev; | |
bce83697 | 595 | dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index); |
df365423 IPG |
596 | if (xfer->is_inbound && seg->status < WA_SEG_PENDING) |
597 | seg->status = WA_SEG_PENDING; | |
598 | spin_unlock_irqrestore(&xfer->lock, flags); | |
599 | break; | |
600 | case -ECONNRESET: /* URB unlinked; no need to do anything */ | |
601 | case -ENOENT: /* as it was done by the who unlinked us */ | |
602 | break; | |
603 | default: /* Other errors ... */ | |
604 | spin_lock_irqsave(&xfer->lock, flags); | |
605 | wa = xfer->wa; | |
606 | dev = &wa->usb_iface->dev; | |
607 | rpipe = xfer->ep->hcpriv; | |
608 | if (printk_ratelimit()) | |
609 | dev_err(dev, "xfer %p#%u: request error %d\n", | |
610 | xfer, seg->index, urb->status); | |
611 | if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, | |
612 | EDC_ERROR_TIMEFRAME)){ | |
613 | dev_err(dev, "DTO: URB max acceptable errors " | |
614 | "exceeded, resetting device\n"); | |
615 | wa_reset_all(wa); | |
616 | } | |
617 | usb_unlink_urb(seg->dto_urb); | |
618 | seg->status = WA_SEG_ERROR; | |
619 | seg->result = urb->status; | |
620 | xfer->segs_done++; | |
621 | __wa_xfer_abort(xfer); | |
622 | rpipe_ready = rpipe_avail_inc(rpipe); | |
623 | done = __wa_xfer_is_done(xfer); | |
624 | spin_unlock_irqrestore(&xfer->lock, flags); | |
625 | if (done) | |
626 | wa_xfer_completion(xfer); | |
627 | if (rpipe_ready) | |
628 | wa_xfer_delayed_run(rpipe); | |
629 | } | |
df365423 IPG |
630 | } |
631 | ||
2b81c083 TP |
632 | /* allocate an SG list to store bytes_to_transfer bytes and copy the |
633 | * subset of the in_sg that matches the buffer subset | |
634 | * we are about to transfer. */ | |
635 | static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg, | |
636 | const unsigned int bytes_transferred, | |
637 | const unsigned int bytes_to_transfer, unsigned int *out_num_sgs) | |
638 | { | |
639 | struct scatterlist *out_sg; | |
640 | unsigned int bytes_processed = 0, offset_into_current_page_data = 0, | |
641 | nents; | |
642 | struct scatterlist *current_xfer_sg = in_sg; | |
643 | struct scatterlist *current_seg_sg, *last_seg_sg; | |
644 | ||
645 | /* skip previously transferred pages. */ | |
646 | while ((current_xfer_sg) && | |
647 | (bytes_processed < bytes_transferred)) { | |
648 | bytes_processed += current_xfer_sg->length; | |
649 | ||
650 | /* advance the sg if current segment starts on or past the | |
651 | next page. */ | |
652 | if (bytes_processed <= bytes_transferred) | |
653 | current_xfer_sg = sg_next(current_xfer_sg); | |
654 | } | |
655 | ||
656 | /* the data for the current segment starts in current_xfer_sg. | |
657 | calculate the offset. */ | |
658 | if (bytes_processed > bytes_transferred) { | |
659 | offset_into_current_page_data = current_xfer_sg->length - | |
660 | (bytes_processed - bytes_transferred); | |
661 | } | |
662 | ||
663 | /* calculate the number of pages needed by this segment. */ | |
664 | nents = DIV_ROUND_UP((bytes_to_transfer + | |
665 | offset_into_current_page_data + | |
666 | current_xfer_sg->offset), | |
667 | PAGE_SIZE); | |
668 | ||
669 | out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC); | |
670 | if (out_sg) { | |
671 | sg_init_table(out_sg, nents); | |
672 | ||
673 | /* copy the portion of the incoming SG that correlates to the | |
674 | * data to be transferred by this segment to the segment SG. */ | |
675 | last_seg_sg = current_seg_sg = out_sg; | |
676 | bytes_processed = 0; | |
677 | ||
678 | /* reset nents and calculate the actual number of sg entries | |
679 | needed. */ | |
680 | nents = 0; | |
681 | while ((bytes_processed < bytes_to_transfer) && | |
682 | current_seg_sg && current_xfer_sg) { | |
683 | unsigned int page_len = min((current_xfer_sg->length - | |
684 | offset_into_current_page_data), | |
685 | (bytes_to_transfer - bytes_processed)); | |
686 | ||
687 | sg_set_page(current_seg_sg, sg_page(current_xfer_sg), | |
688 | page_len, | |
689 | current_xfer_sg->offset + | |
690 | offset_into_current_page_data); | |
691 | ||
692 | bytes_processed += page_len; | |
693 | ||
694 | last_seg_sg = current_seg_sg; | |
695 | current_seg_sg = sg_next(current_seg_sg); | |
696 | current_xfer_sg = sg_next(current_xfer_sg); | |
697 | ||
698 | /* only the first page may require additional offset. */ | |
699 | offset_into_current_page_data = 0; | |
700 | nents++; | |
701 | } | |
702 | ||
703 | /* update num_sgs and terminate the list since we may have | |
704 | * concatenated pages. */ | |
705 | sg_mark_end(last_seg_sg); | |
706 | *out_num_sgs = nents; | |
707 | } | |
708 | ||
709 | return out_sg; | |
710 | } | |
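
/*
 * Usage sketch (hypothetical numbers): with a 4096 byte seg_size, the
 * SG list for a transfer's second segment would be built with
 *
 *	seg_sg = wa_xfer_create_subset_sg(urb->sg, 4096, 4096, &nents);
 *
 * mapping the pages of urb->sg that cover bytes [4096, 8192) and
 * returning the entry count in nents.
 */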
711 | ||
df365423 IPG |
712 | /* |
713 | * Allocate the segs array and initialize each of them | |
714 | * | |
715 | * The segments are freed by wa_xfer_destroy() when the xfer use count | |
716 | * drops to zero; however, because each segment is given the same life | |
717 | * cycle as the USB URB it contains, it is actually freed by | |
718 | * usb_put_urb() on the contained USB URB (twisted, eh?). | |
719 | */ | |
720 | static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size) | |
721 | { | |
722 | int result, cnt; | |
723 | size_t alloc_size = sizeof(*xfer->seg[0]) | |
724 | - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size; | |
725 | struct usb_device *usb_dev = xfer->wa->usb_dev; | |
726 | const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd; | |
727 | struct wa_seg *seg; | |
728 | size_t buf_itr, buf_size, buf_itr_size; | |
729 | ||
730 | result = -ENOMEM; | |
92c4d9bd | 731 | xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC); |
df365423 IPG |
732 | if (xfer->seg == NULL) |
733 | goto error_segs_kzalloc; | |
734 | buf_itr = 0; | |
735 | buf_size = xfer->urb->transfer_buffer_length; | |
736 | for (cnt = 0; cnt < xfer->segs; cnt++) { | |
66591015 | 737 | seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC); |
df365423 | 738 | if (seg == NULL) |
66591015 | 739 | goto error_seg_kmalloc; |
df365423 IPG |
740 | wa_seg_init(seg); |
741 | seg->xfer = xfer; | |
742 | seg->index = cnt; | |
743 | usb_fill_bulk_urb(&seg->urb, usb_dev, | |
744 | usb_sndbulkpipe(usb_dev, | |
745 | dto_epd->bEndpointAddress), | |
746 | &seg->xfer_hdr, xfer_hdr_size, | |
747 | wa_seg_cb, seg); | |
2b81c083 | 748 | buf_itr_size = min(buf_size, xfer->seg_size); |
df365423 | 749 | if (xfer->is_inbound == 0 && buf_size > 0) { |
2b81c083 | 750 | /* outbound data. */ |
df365423 IPG |
751 | seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC); |
752 | if (seg->dto_urb == NULL) | |
753 | goto error_dto_alloc; | |
754 | usb_fill_bulk_urb( | |
755 | seg->dto_urb, usb_dev, | |
756 | usb_sndbulkpipe(usb_dev, | |
757 | dto_epd->bEndpointAddress), | |
758 | NULL, 0, wa_seg_dto_cb, seg); | |
759 | if (xfer->is_dma) { | |
760 | seg->dto_urb->transfer_dma = | |
761 | xfer->urb->transfer_dma + buf_itr; | |
762 | seg->dto_urb->transfer_flags |= | |
763 | URB_NO_TRANSFER_DMA_MAP; | |
2b81c083 TP |
764 | seg->dto_urb->transfer_buffer = NULL; |
765 | seg->dto_urb->sg = NULL; | |
766 | seg->dto_urb->num_sgs = 0; | |
767 | } else { | |
768 | /* do buffer or SG processing. */ | |
769 | seg->dto_urb->transfer_flags &= | |
770 | ~URB_NO_TRANSFER_DMA_MAP; | |
771 | /* this should always be 0 before a resubmit. */ | |
772 | seg->dto_urb->num_mapped_sgs = 0; | |
773 | ||
774 | if (xfer->urb->transfer_buffer) { | |
775 | seg->dto_urb->transfer_buffer = | |
776 | xfer->urb->transfer_buffer + | |
777 | buf_itr; | |
778 | seg->dto_urb->sg = NULL; | |
779 | seg->dto_urb->num_sgs = 0; | |
780 | } else { | |
781 | /* allocate an SG list to store seg_size | |
782 | bytes and copy the subset of the | |
783 | xfer->urb->sg that matches the | |
784 | buffer subset we are about to read. | |
785 | */ | |
786 | seg->dto_urb->sg = | |
787 | wa_xfer_create_subset_sg( | |
788 | xfer->urb->sg, | |
789 | buf_itr, buf_itr_size, | |
790 | &(seg->dto_urb->num_sgs)); | |
791 | ||
792 | if (!(seg->dto_urb->sg)) { | |
793 | seg->dto_urb->num_sgs = 0; | |
794 | goto error_sg_alloc; | |
795 | } | |
796 | ||
797 | seg->dto_urb->transfer_buffer = NULL; | |
798 | } | |
799 | } | |
df365423 IPG |
800 | seg->dto_urb->transfer_buffer_length = buf_itr_size; |
801 | } | |
802 | seg->status = WA_SEG_READY; | |
803 | buf_itr += buf_itr_size; | |
804 | buf_size -= buf_itr_size; | |
805 | } | |
806 | return 0; | |
807 | ||
2b81c083 | 808 | error_sg_alloc: |
11b1bf81 | 809 | usb_free_urb(xfer->seg[cnt]->dto_urb); |
df365423 IPG |
810 | error_dto_alloc: |
811 | kfree(xfer->seg[cnt]); | |
812 | cnt--; | |
66591015 | 813 | error_seg_kmalloc: |
df365423 | 814 | /* use the fact that cnt is left at were it failed */ |
f07af4b6 | 815 | for (; cnt >= 0; cnt--) { |
11b1bf81 | 816 | if (xfer->seg[cnt] && xfer->is_inbound == 0) { |
f07af4b6 | 817 | usb_free_urb(xfer->seg[cnt]->dto_urb); |
11b1bf81 TP |
818 | kfree(xfer->seg[cnt]->dto_urb->sg); |
819 | } | |
df365423 IPG |
820 | kfree(xfer->seg[cnt]); |
821 | } | |
822 | error_segs_kzalloc: | |
823 | return result; | |
824 | } | |
825 | ||
826 | /* | |
827 | * Allocates all the stuff needed to submit a transfer | |
828 | * | |
829 | * Breaks the whole data buffer in a list of segments, each one has a | |
830 | * structure allocated to it and linked in xfer->seg[index] | |
831 | * | |
832 | * FIXME: merge setup_segs() and the last part of this function, no | |
833 | * need to do two for loops when we could run everything in a | |
834 | * single one | |
835 | */ | |
836 | static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb) | |
837 | { | |
838 | int result; | |
839 | struct device *dev = &xfer->wa->usb_iface->dev; | |
840 | enum wa_xfer_type xfer_type = 0; /* shut up GCC */ | |
841 | size_t xfer_hdr_size, cnt, transfer_size; | |
842 | struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr; | |
843 | ||
df365423 IPG |
844 | result = __wa_xfer_setup_sizes(xfer, &xfer_type); |
845 | if (result < 0) | |
846 | goto error_setup_sizes; | |
847 | xfer_hdr_size = result; | |
848 | result = __wa_xfer_setup_segs(xfer, xfer_hdr_size); | |
849 | if (result < 0) { | |
850 | dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n", | |
851 | xfer, xfer->segs, result); | |
852 | goto error_setup_segs; | |
853 | } | |
854 | /* Fill the first header */ | |
855 | xfer_hdr0 = &xfer->seg[0]->xfer_hdr; | |
856 | wa_xfer_id_init(xfer); | |
857 | __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size); | |
858 | ||
859 | /* Fill remainig headers */ | |
860 | xfer_hdr = xfer_hdr0; | |
861 | transfer_size = urb->transfer_buffer_length; | |
862 | xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ? | |
863 | xfer->seg_size : transfer_size; | |
864 | transfer_size -= xfer->seg_size; | |
865 | for (cnt = 1; cnt < xfer->segs; cnt++) { | |
866 | xfer_hdr = &xfer->seg[cnt]->xfer_hdr; | |
867 | memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); | |
868 | xfer_hdr->bTransferSegment = cnt; | |
869 | xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ? | |
870 | cpu_to_le32(xfer->seg_size) | |
871 | : cpu_to_le32(transfer_size); | |
872 | xfer->seg[cnt]->status = WA_SEG_READY; | |
873 | transfer_size -= xfer->seg_size; | |
874 | } | |
875 | xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */ | |
876 | result = 0; | |
877 | error_setup_segs: | |
878 | error_setup_sizes: | |
df365423 IPG |
879 | return result; |
880 | } | |
881 | ||
882 | /* | |
883 | * | |
884 | * | |
885 | * rpipe->seg_lock is held! | |
886 | */ | |
887 | static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer, | |
888 | struct wa_seg *seg) | |
889 | { | |
890 | int result; | |
891 | result = usb_submit_urb(&seg->urb, GFP_ATOMIC); | |
892 | if (result < 0) { | |
893 | printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n", | |
894 | xfer, seg->index, result); | |
895 | goto error_seg_submit; | |
896 | } | |
897 | if (seg->dto_urb) { | |
898 | result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); | |
899 | if (result < 0) { | |
900 | printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n", | |
901 | xfer, seg->index, result); | |
902 | goto error_dto_submit; | |
903 | } | |
904 | } | |
905 | seg->status = WA_SEG_SUBMITTED; | |
906 | rpipe_avail_dec(rpipe); | |
907 | return 0; | |
908 | ||
909 | error_dto_submit: | |
910 | usb_unlink_urb(&seg->urb); | |
911 | error_seg_submit: | |
912 | seg->status = WA_SEG_ERROR; | |
913 | seg->result = result; | |
914 | return result; | |
915 | } | |
916 | ||
917 | /* | |
918 | * Execute more queued request segments until the maximum concurrent allowed | |
919 | * | |
920 | * The ugly unlock/lock sequence on the error path is needed as the | |
921 | * xfer->lock normally nests the seg_lock and not viceversa. | |
922 | * | |
923 | */ | |
924 | static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) | |
925 | { | |
926 | int result; | |
927 | struct device *dev = &rpipe->wa->usb_iface->dev; | |
928 | struct wa_seg *seg; | |
929 | struct wa_xfer *xfer; | |
930 | unsigned long flags; | |
931 | ||
df365423 IPG |
932 | spin_lock_irqsave(&rpipe->seg_lock, flags); |
933 | while (atomic_read(&rpipe->segs_available) > 0 | |
934 | && !list_empty(&rpipe->seg_list)) { | |
e9a088fa | 935 | seg = list_first_entry(&(rpipe->seg_list), struct wa_seg, |
df365423 IPG |
936 | list_node); |
937 | list_del(&seg->list_node); | |
938 | xfer = seg->xfer; | |
939 | result = __wa_seg_submit(rpipe, xfer, seg); | |
bce83697 DV |
940 | dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n", |
941 | xfer, seg->index, atomic_read(&rpipe->segs_available), result); | |
df365423 IPG |
942 | if (unlikely(result < 0)) { |
943 | spin_unlock_irqrestore(&rpipe->seg_lock, flags); | |
944 | spin_lock_irqsave(&xfer->lock, flags); | |
945 | __wa_xfer_abort(xfer); | |
946 | xfer->segs_done++; | |
947 | spin_unlock_irqrestore(&xfer->lock, flags); | |
948 | spin_lock_irqsave(&rpipe->seg_lock, flags); | |
949 | } | |
950 | } | |
951 | spin_unlock_irqrestore(&rpipe->seg_lock, flags); | |
df365423 IPG |
952 | } |
953 | ||
954 | /* | |
955 | * | |
956 | * xfer->lock is taken | |
957 | * | |
958 | * On failure submitting we just stop submitting and return error; | |
959 | * wa_urb_enqueue_b() will execute the completion path | |
960 | */ | |
961 | static int __wa_xfer_submit(struct wa_xfer *xfer) | |
962 | { | |
963 | int result; | |
964 | struct wahc *wa = xfer->wa; | |
965 | struct device *dev = &wa->usb_iface->dev; | |
966 | unsigned cnt; | |
967 | struct wa_seg *seg; | |
968 | unsigned long flags; | |
969 | struct wa_rpipe *rpipe = xfer->ep->hcpriv; | |
970 | size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests); | |
971 | u8 available; | |
972 | u8 empty; | |
973 | ||
df365423 IPG |
974 | spin_lock_irqsave(&wa->xfer_list_lock, flags); |
975 | list_add_tail(&xfer->list_node, &wa->xfer_list); | |
976 | spin_unlock_irqrestore(&wa->xfer_list_lock, flags); | |
977 | ||
978 | BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests); | |
979 | result = 0; | |
980 | spin_lock_irqsave(&rpipe->seg_lock, flags); | |
981 | for (cnt = 0; cnt < xfer->segs; cnt++) { | |
982 | available = atomic_read(&rpipe->segs_available); | |
983 | empty = list_empty(&rpipe->seg_list); | |
984 | seg = xfer->seg[cnt]; | |
bce83697 DV |
985 | dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n", |
986 | xfer, cnt, available, empty, | |
987 | available == 0 || !empty ? "delayed" : "submitted"); | |
df365423 | 988 | if (available == 0 || !empty) { |
bce83697 | 989 | dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt); |
df365423 IPG |
990 | seg->status = WA_SEG_DELAYED; |
991 | list_add_tail(&seg->list_node, &rpipe->seg_list); | |
992 | } else { | |
993 | result = __wa_seg_submit(rpipe, xfer, seg); | |
bce83697 DV |
994 | if (result < 0) { |
995 | __wa_xfer_abort(xfer); | |
df365423 | 996 | goto error_seg_submit; |
bce83697 | 997 | } |
df365423 IPG |
998 | } |
999 | xfer->segs_submitted++; | |
1000 | } | |
df365423 | 1001 | error_seg_submit: |
df365423 | 1002 | spin_unlock_irqrestore(&rpipe->seg_lock, flags); |
df365423 IPG |
1003 | return result; |
1004 | } | |
1005 | ||
1006 | /* | |
1007 | * Second part of a URB/transfer enqueuement | |
1008 | * | |
1009 | * Assumes this comes from wa_urb_enqueue() [maybe through | |
1010 | * wa_urb_enqueue_run()]. At this point: | |
1011 | * | |
1012 | * xfer->wa filled and refcounted | |
1013 | * xfer->ep filled with rpipe refcounted if | |
1014 | * delayed == 0 | |
1015 | * xfer->urb filled and refcounted (this is the case when called | |
1016 | * from wa_urb_enqueue() as we come from usb_submit_urb() | |
1017 | * and when called by wa_urb_enqueue_run(), as we took an | |
1018 | * extra ref dropped by _run() after we return). | |
1019 | * xfer->gfp filled | |
1020 | * | |
1021 | * If we fail at __wa_xfer_submit(), then we just check if we are done | |
1022 | * and if so, we run the completion procedure. However, if we are not | |
1023 | * yet done, we do nothing and wait for the completion handlers from | |
1024 | * the submitted URBs or from the xfer-result path to kick in. If xfer | |
1025 | * result never kicks in, the xfer will timeout from the USB code and | |
1026 | * dequeue() will be called. | |
1027 | */ | |
1028 | static void wa_urb_enqueue_b(struct wa_xfer *xfer) | |
1029 | { | |
1030 | int result; | |
1031 | unsigned long flags; | |
1032 | struct urb *urb = xfer->urb; | |
1033 | struct wahc *wa = xfer->wa; | |
1034 | struct wusbhc *wusbhc = wa->wusb; | |
df365423 IPG |
1035 | struct wusb_dev *wusb_dev; |
1036 | unsigned done; | |
1037 | ||
df365423 IPG |
1038 | result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); |
1039 | if (result < 0) | |
1040 | goto error_rpipe_get; | |
1041 | result = -ENODEV; | |
1042 | /* FIXME: segmentation broken -- kills DWA */ | |
1043 | mutex_lock(&wusbhc->mutex); /* get a WUSB dev */ | |
49fa0921 JS |
1044 | if (urb->dev == NULL) { |
1045 | mutex_unlock(&wusbhc->mutex); | |
df365423 | 1046 | goto error_dev_gone; |
49fa0921 | 1047 | } |
df365423 IPG |
1048 | wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); |
1049 | if (wusb_dev == NULL) { | |
1050 | mutex_unlock(&wusbhc->mutex); | |
1051 | goto error_dev_gone; | |
1052 | } | |
1053 | mutex_unlock(&wusbhc->mutex); | |
1054 | ||
1055 | spin_lock_irqsave(&xfer->lock, flags); | |
1056 | xfer->wusb_dev = wusb_dev; | |
1057 | result = urb->status; | |
1058 | if (urb->status != -EINPROGRESS) | |
1059 | goto error_dequeued; | |
1060 | ||
1061 | result = __wa_xfer_setup(xfer, urb); | |
1062 | if (result < 0) | |
1063 | goto error_xfer_setup; | |
1064 | result = __wa_xfer_submit(xfer); | |
1065 | if (result < 0) | |
1066 | goto error_xfer_submit; | |
1067 | spin_unlock_irqrestore(&xfer->lock, flags); | |
df365423 IPG |
1068 | return; |
1069 | ||
1070 | /* this is basically wa_xfer_completion() broken up wa_xfer_giveback() | |
1071 | * does a wa_xfer_put() that will call wa_xfer_destroy() and clean | |
1072 | * upundo setup(). | |
1073 | */ | |
1074 | error_xfer_setup: | |
1075 | error_dequeued: | |
1076 | spin_unlock_irqrestore(&xfer->lock, flags); | |
1077 | /* FIXME: segmentation broken, kills DWA */ | |
1078 | if (wusb_dev) | |
1079 | wusb_dev_put(wusb_dev); | |
1080 | error_dev_gone: | |
1081 | rpipe_put(xfer->ep->hcpriv); | |
1082 | error_rpipe_get: | |
1083 | xfer->result = result; | |
1084 | wa_xfer_giveback(xfer); | |
df365423 IPG |
1085 | return; |
1086 | ||
1087 | error_xfer_submit: | |
1088 | done = __wa_xfer_is_done(xfer); | |
1089 | xfer->result = result; | |
1090 | spin_unlock_irqrestore(&xfer->lock, flags); | |
1091 | if (done) | |
1092 | wa_xfer_completion(xfer); | |
df365423 IPG |
1093 | } |
1094 | ||
1095 | /* | |
1096 | * Execute the delayed transfers in the Wire Adapter @wa | |
1097 | * | |
1098 | * We need to be careful here, as dequeue() could be called in the | |
1099 | * middle. That's why we do the whole thing under the | |
e9a088fa | 1100 | * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock |
df365423 | 1101 | * and then checks the list -- so as we would be acquiring in inverse |
e9a088fa TP |
1102 | * order, we move the delayed list to a separate list while locked and then |
1103 | * submit them without the list lock held. | |
df365423 IPG |
1104 | */ |
1105 | void wa_urb_enqueue_run(struct work_struct *ws) | |
1106 | { | |
6d33f7bb | 1107 | struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work); |
df365423 IPG |
1108 | struct wa_xfer *xfer, *next; |
1109 | struct urb *urb; | |
e9a088fa | 1110 | LIST_HEAD(tmp_list); |
df365423 | 1111 | |
e9a088fa | 1112 | /* Create a copy of the wa->xfer_delayed_list while holding the lock */ |
df365423 | 1113 | spin_lock_irq(&wa->xfer_list_lock); |
e9a088fa TP |
1114 | list_cut_position(&tmp_list, &wa->xfer_delayed_list, |
1115 | wa->xfer_delayed_list.prev); | |
1116 | spin_unlock_irq(&wa->xfer_list_lock); | |
1117 | ||
1118 | /* | |
1119 | * enqueue from temp list without list lock held since wa_urb_enqueue_b | |
1120 | * can take xfer->lock as well as lock mutexes. | |
1121 | */ | |
1122 | list_for_each_entry_safe(xfer, next, &tmp_list, list_node) { | |
df365423 | 1123 | list_del_init(&xfer->list_node); |
df365423 IPG |
1124 | |
1125 | urb = xfer->urb; | |
1126 | wa_urb_enqueue_b(xfer); | |
1127 | usb_put_urb(urb); /* taken when queuing */ | |
df365423 | 1128 | } |
df365423 IPG |
1129 | } |
1130 | EXPORT_SYMBOL_GPL(wa_urb_enqueue_run); | |
1131 | ||
/*
 * Process the errored transfers on the Wire Adapter outside of interrupt.
 */
void wa_process_errored_transfers_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
	struct wa_xfer *xfer, *next;
	LIST_HEAD(tmp_list);

	pr_info("%s: Run delayed STALL processing.\n", __func__);

	/* Create a copy of the wa->xfer_errored_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_errored_list,
			wa->xfer_errored_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * run rpipe_clear_feature_stalled from temp list without list lock
	 * held.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		struct usb_host_endpoint *ep;
		unsigned long flags;
		struct wa_rpipe *rpipe;

		spin_lock_irqsave(&xfer->lock, flags);
		ep = xfer->ep;
		rpipe = ep->hcpriv;
		spin_unlock_irqrestore(&xfer->lock, flags);

		/* clear RPIPE feature stalled without holding a lock. */
		rpipe_clear_feature_stalled(wa, ep);

		/* complete the xfer. This removes it from the tmp list. */
		wa_xfer_completion(xfer);

		/* check for work. */
		wa_xfer_delayed_run(rpipe);
	}
}
EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);

/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps() [see
 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 * in an atomic section, we defer the enqueue_b() call--else we call
 * it directly.
 *
 * @urb: We own a reference to it done by the HCI Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
		   struct urb *urb, gfp_t gfp)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer *xfer;
	unsigned long my_flags;
	unsigned cant_sleep = irqs_disabled() | in_atomic();

	if ((urb->transfer_buffer == NULL)
	    && (urb->sg == NULL)
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    && urb->transfer_buffer_length != 0) {
		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
		dump_stack();
	}

	result = -ENOMEM;
	xfer = kzalloc(sizeof(*xfer), gfp);
	if (xfer == NULL)
		goto error_kmalloc;

	result = -ENOENT;
	if (urb->status != -EINPROGRESS)	/* cancelled */
		goto error_dequeued;		/* before starting? */
	wa_xfer_init(xfer);
	xfer->wa = wa_get(wa);
	xfer->urb = urb;
	xfer->gfp = gfp;
	xfer->ep = ep;
	urb->hcpriv = xfer;

	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
		xfer, urb, urb->pipe, urb->transfer_buffer_length,
		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
		cant_sleep ? "deferred" : "inline");

	if (cant_sleep) {
		usb_get_urb(urb);
		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
		queue_work(wusbd, &wa->xfer_enqueue_work);
	} else {
		wa_urb_enqueue_b(xfer);
	}
	return 0;

error_dequeued:
	kfree(xfer);
error_kmalloc:
	return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1242 | ||
1243 | /* | |
1244 | * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion | |
1245 | * handler] is called. | |
1246 | * | |
1247 | * Until a transfer goes successfully through wa_urb_enqueue() it | |
1248 | * needs to be dequeued with completion calling; when stuck in delayed | |
1249 | * or before wa_xfer_setup() is called, we need to do completion. | |
1250 | * | |
1251 | * not setup If there is no hcpriv yet, that means that that enqueue | |
1252 | * still had no time to set the xfer up. Because | |
1253 | * urb->status should be other than -EINPROGRESS, | |
1254 | * enqueue() will catch that and bail out. | |
1255 | * | |
1256 | * If the transfer has gone through setup, we just need to clean it | |
1257 | * up. If it has gone through submit(), we have to abort it [with an | |
1258 | * asynch request] and then make sure we cancel each segment. | |
1259 | * | |
1260 | */ | |
1261 | int wa_urb_dequeue(struct wahc *wa, struct urb *urb) | |
1262 | { | |
df365423 IPG |
1263 | unsigned long flags, flags2; |
1264 | struct wa_xfer *xfer; | |
1265 | struct wa_seg *seg; | |
1266 | struct wa_rpipe *rpipe; | |
1267 | unsigned cnt; | |
1268 | unsigned rpipe_ready = 0; | |
1269 | ||
df365423 IPG |
1270 | xfer = urb->hcpriv; |
1271 | if (xfer == NULL) { | |
6d33f7bb TP |
1272 | /* |
1273 | * Nothing setup yet enqueue will see urb->status != | |
df365423 IPG |
1274 | * -EINPROGRESS (by hcd layer) and bail out with |
1275 | * error, no need to do completion | |
1276 | */ | |
1277 | BUG_ON(urb->status == -EINPROGRESS); | |
1278 | goto out; | |
1279 | } | |
1280 | spin_lock_irqsave(&xfer->lock, flags); | |
1281 | rpipe = xfer->ep->hcpriv; | |
ec58fad1 TP |
1282 | if (rpipe == NULL) { |
1283 | pr_debug("%s: xfer id 0x%08X has no RPIPE. %s", | |
1284 | __func__, wa_xfer_id(xfer), | |
1285 | "Probably already aborted.\n" ); | |
1286 | goto out_unlock; | |
1287 | } | |
df365423 IPG |
1288 | /* Check the delayed list -> if there, release and complete */ |
1289 | spin_lock_irqsave(&wa->xfer_list_lock, flags2); | |
1290 | if (!list_empty(&xfer->list_node) && xfer->seg == NULL) | |
1291 | goto dequeue_delayed; | |
1292 | spin_unlock_irqrestore(&wa->xfer_list_lock, flags2); | |
1293 | if (xfer->seg == NULL) /* still hasn't reached */ | |
1294 | goto out_unlock; /* setup(), enqueue_b() completes */ | |
1295 | /* Ok, the xfer is in flight already, it's been setup and submitted.*/ | |
1296 | __wa_xfer_abort(xfer); | |
1297 | for (cnt = 0; cnt < xfer->segs; cnt++) { | |
1298 | seg = xfer->seg[cnt]; | |
1299 | switch (seg->status) { | |
1300 | case WA_SEG_NOTREADY: | |
1301 | case WA_SEG_READY: | |
1302 | printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n", | |
1303 | xfer, cnt, seg->status); | |
1304 | WARN_ON(1); | |
1305 | break; | |
1306 | case WA_SEG_DELAYED: | |
1307 | seg->status = WA_SEG_ABORTED; | |
1308 | spin_lock_irqsave(&rpipe->seg_lock, flags2); | |
1309 | list_del(&seg->list_node); | |
1310 | xfer->segs_done++; | |
1311 | rpipe_ready = rpipe_avail_inc(rpipe); | |
1312 | spin_unlock_irqrestore(&rpipe->seg_lock, flags2); | |
1313 | break; | |
1314 | case WA_SEG_SUBMITTED: | |
1315 | seg->status = WA_SEG_ABORTED; | |
1316 | usb_unlink_urb(&seg->urb); | |
1317 | if (xfer->is_inbound == 0) | |
1318 | usb_unlink_urb(seg->dto_urb); | |
1319 | xfer->segs_done++; | |
1320 | rpipe_ready = rpipe_avail_inc(rpipe); | |
1321 | break; | |
1322 | case WA_SEG_PENDING: | |
1323 | seg->status = WA_SEG_ABORTED; | |
1324 | xfer->segs_done++; | |
1325 | rpipe_ready = rpipe_avail_inc(rpipe); | |
1326 | break; | |
1327 | case WA_SEG_DTI_PENDING: | |
1328 | usb_unlink_urb(wa->dti_urb); | |
1329 | seg->status = WA_SEG_ABORTED; | |
1330 | xfer->segs_done++; | |
1331 | rpipe_ready = rpipe_avail_inc(rpipe); | |
1332 | break; | |
1333 | case WA_SEG_DONE: | |
1334 | case WA_SEG_ERROR: | |
1335 | case WA_SEG_ABORTED: | |
1336 | break; | |
1337 | } | |
1338 | } | |
1339 | xfer->result = urb->status; /* -ENOENT or -ECONNRESET */ | |
1340 | __wa_xfer_is_done(xfer); | |
1341 | spin_unlock_irqrestore(&xfer->lock, flags); | |
1342 | wa_xfer_completion(xfer); | |
1343 | if (rpipe_ready) | |
1344 | wa_xfer_delayed_run(rpipe); | |
df365423 IPG |
1345 | return 0; |
1346 | ||
1347 | out_unlock: | |
1348 | spin_unlock_irqrestore(&xfer->lock, flags); | |
1349 | out: | |
df365423 IPG |
1350 | return 0; |
1351 | ||
1352 | dequeue_delayed: | |
1353 | list_del_init(&xfer->list_node); | |
1354 | spin_unlock_irqrestore(&wa->xfer_list_lock, flags2); | |
1355 | xfer->result = urb->status; | |
1356 | spin_unlock_irqrestore(&xfer->lock, flags); | |
1357 | wa_xfer_giveback(xfer); | |
1358 | usb_put_urb(urb); /* we got a ref in enqueue() */ | |
df365423 IPG |
1359 | return 0; |
1360 | } | |
1361 | EXPORT_SYMBOL_GPL(wa_urb_dequeue); | |
1362 | ||
1363 | /* | |
1364 | * Translation from WA status codes (WUSB1.0 Table 8.15) to errno | |
1365 | * codes | |
1366 | * | |
1367 | * Positive errno values are internal inconsistencies and should be | |
1368 | * flagged louder. Negative are to be passed up to the user in the | |
1369 | * normal way. | |
1370 | * | |
1371 | * @status: USB WA status code -- high two bits are stripped. | |
1372 | */ | |
1373 | static int wa_xfer_status_to_errno(u8 status) | |
1374 | { | |
1375 | int errno; | |
1376 | u8 real_status = status; | |
1377 | static int xlat[] = { | |
1378 | [WA_XFER_STATUS_SUCCESS] = 0, | |
1379 | [WA_XFER_STATUS_HALTED] = -EPIPE, | |
1380 | [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS, | |
1381 | [WA_XFER_STATUS_BABBLE] = -EOVERFLOW, | |
1382 | [WA_XFER_RESERVED] = EINVAL, | |
1383 | [WA_XFER_STATUS_NOT_FOUND] = 0, | |
1384 | [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM, | |
1385 | [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ, | |
1386 | [WA_XFER_STATUS_ABORTED] = -EINTR, | |
1387 | [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL, | |
1388 | [WA_XFER_INVALID_FORMAT] = EINVAL, | |
1389 | [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL, | |
1390 | [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL, | |
1391 | }; | |
1392 | status &= 0x3f; | |
1393 | ||
1394 | if (status == 0) | |
1395 | return 0; | |
1396 | if (status >= ARRAY_SIZE(xlat)) { | |
9708cd2f | 1397 | printk_ratelimited(KERN_ERR "%s(): BUG? " |
df365423 IPG |
1398 | "Unknown WA transfer status 0x%02x\n", |
1399 | __func__, real_status); | |
1400 | return -EINVAL; | |
1401 | } | |
1402 | errno = xlat[status]; | |
1403 | if (unlikely(errno > 0)) { | |
9708cd2f | 1404 | printk_ratelimited(KERN_ERR "%s(): BUG? " |
df365423 IPG |
1405 | "Inconsistent WA status: 0x%02x\n", |
1406 | __func__, real_status); | |
1407 | errno = -errno; | |
1408 | } | |
1409 | return errno; | |
1410 | } | |
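
/*
 * For example, a (hypothetical) bTransferStatus of 0x81 -- HALTED with
 * the 0x80 error bit set -- is masked down to 0x01 ==
 * WA_XFER_STATUS_HALTED and maps to -EPIPE, which is what the caller
 * below stores in seg->result.
 */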
1411 | ||
1412 | /* | |
1413 | * Process a xfer result completion message | |
1414 | * | |
1415 | * inbound transfers: need to schedule a DTI read | |
1416 | * | |
6d33f7bb | 1417 | * FIXME: this function needs to be broken up in parts |
df365423 IPG |
1418 | */ |
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	u8 seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	struct wa_xfer_result *xfer_result = wa->xfer_result;
	u8 done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;

	spin_lock_irqsave(&xfer->lock, flags);
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
		xfer, seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
			xfer, xfer->id, seg->index, usb_status);
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	if (xfer->is_inbound) {		/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
		/* this should always be 0 before a resubmit. */
		wa->buf_in_urb->num_mapped_sgs = 0;

		if (xfer->is_dma) {
			wa->buf_in_urb->transfer_dma =
				xfer->urb->transfer_dma
				+ (seg_idx * xfer->seg_size);
			wa->buf_in_urb->transfer_flags
				|= URB_NO_TRANSFER_DMA_MAP;
			wa->buf_in_urb->transfer_buffer = NULL;
			wa->buf_in_urb->sg = NULL;
			wa->buf_in_urb->num_sgs = 0;
		} else {
			/* do buffer or SG processing. */
			wa->buf_in_urb->transfer_flags
				&= ~URB_NO_TRANSFER_DMA_MAP;

			if (xfer->urb->transfer_buffer) {
				wa->buf_in_urb->transfer_buffer =
					xfer->urb->transfer_buffer
					+ (seg_idx * xfer->seg_size);
				wa->buf_in_urb->sg = NULL;
				wa->buf_in_urb->num_sgs = 0;
			} else {
				/*
				 * allocate an SG list to store seg_size
				 * bytes and copy the subset of the
				 * xfer->urb->sg that matches the buffer
				 * subset we are about to read.
				 */
				wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
					xfer->urb->sg,
					seg_idx * xfer->seg_size,
					le32_to_cpu(
						xfer_result->dwTransferLength),
					&(wa->buf_in_urb->num_sgs));

				if (!(wa->buf_in_urb->sg)) {
					wa->buf_in_urb->num_sgs = 0;
					goto error_sg_alloc;
				}
				wa->buf_in_urb->transfer_buffer = NULL;
			}
		}
		wa->buf_in_urb->transfer_buffer_length =
			le32_to_cpu(xfer_result->dwTransferLength);
		wa->buf_in_urb->context = seg;
		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
		if (result < 0)
			goto error_submit_buf_in;
	} else {
		/* OUT data phase, complete it -- */
		seg->status = WA_SEG_DONE;
		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
	kfree(wa->buf_in_urb->sg);
error_sg_alloc:
	__wa_xfer_abort(xfer);
error_complete:
	seg->status = WA_SEG_ERROR;
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	done = __wa_xfer_is_done(xfer);
	/*
	 * queue work item to clear STALL for control endpoints.
	 * Otherwise, let endpoint_reset take care of it.
	 */
	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
	    usb_endpoint_xfer_control(&xfer->ep->desc) &&
	    done) {

		dev_info(dev, "Control EP stall.  Queue delayed work.\n");
		spin_lock_irq(&wa->xfer_list_lock);
		/* move xfer from xfer_list to xfer_errored_list. */
		list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
		spin_unlock_irq(&wa->xfer_list_lock);
		spin_unlock_irqrestore(&xfer->lock, flags);
		queue_work(wusbd, &wa->xfer_error_work);
	} else {
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}

	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}
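
/*
 * Illustrative only, not driver code: the per-segment buffer math the
 * function above relies on.  Segment N of a transfer covers seg_size
 * bytes starting at offset N * seg_size of the original URB buffer,
 * whether addressed through transfer_dma, transfer_buffer or a subset
 * SG list.  The helper name is hypothetical and assumes a linear
 * transfer_buffer.
 */
static inline void *example_seg_buffer(struct wa_xfer *xfer, u8 seg_idx)
{
	return xfer->urb->transfer_buffer + seg_idx * xfer->seg_size;
}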

/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take note of the error,
 * mark this segment done and try completion.
 *
 * Note we only access the xfer once we are sure the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT); if it has, seg->xfer could
 * already be gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;
	unsigned long flags;
	u8 done = 0;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
			xfer, seg->index, (size_t)urb->actual_length);
		seg->status = WA_SEG_DONE;
		seg->result = urb->actual_length;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);
		/*
		 * The buf-in URB runs on the DTI endpoint, so count its
		 * errors against the DTI error counter (the original code
		 * said "DTO" and used nep_edc, apparently a copy-paste slip).
		 */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}
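
/*
 * Illustrative only, not driver code: the completion pattern shared by
 * wa_buf_in_cb() and wa_xfer_result_chew() -- account for the finished
 * segment under xfer->lock, then run the completion paths with the
 * lock dropped, since wa_xfer_completion() gives the URB back.  The
 * helper name is hypothetical.
 */
static void example_finish_segment(struct wa_xfer *xfer, struct wa_seg *seg,
				   struct wa_rpipe *rpipe, int result)
{
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done;

	spin_lock_irqsave(&xfer->lock, flags);
	seg->status = WA_SEG_DONE;	/* or WA_SEG_ERROR on failure */
	seg->result = result;
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
}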

/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not an xfer_result, we complain and
 * repost the DTI-URB.  If it is an xfer_result then do the xfer seg
 * request accounting.  If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer.  The BUF-IN-URB callback will
 * repost the DTI-URB and move back to the RXR state.  If there was no
 * IN segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.  (A compact sketch of these transitions follows
 * the function below.)
 */
static void wa_xfer_result_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_result *xfer_result;
	u32 xfer_id;
	struct wa_xfer *xfer;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		/* We have an xfer result buffer; check it */
		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
			urb->actual_length, urb->transfer_buffer);
		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad size "
				"xfer result (%d bytes vs %zu needed)\n",
				urb->actual_length, sizeof(*xfer_result));
			break;
		}
		xfer_result = wa->xfer_result;
		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header length %u\n",
				xfer_result->hdr.bLength);
			break;
		}
		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header type 0x%02x\n",
				xfer_result->hdr.bNotifyType);
			break;
		}
		usb_status = xfer_result->bTransferStatus & 0x3f;
		if (usb_status == WA_XFER_STATUS_NOT_FOUND)
			/* taken care of already */
			break;
		xfer_id = xfer_result->dwTransferID;
		xfer = wa_xfer_get_by_id(wa, xfer_id);
		if (xfer == NULL) {
			/* FIXME: transaction might have been cancelled */
			dev_err(dev, "DTI Error: xfer result--"
				"unknown xfer 0x%08x (status 0x%02x)\n",
				xfer_id, usb_status);
			break;
		}
		wa_xfer_result_chew(wa, xfer);
		wa_xfer_put(xfer);
		break;
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* Unknown error */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}
	/* Resubmit the DTI URB */
	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		wa_reset_all(wa);
	}
out:
	return;
}
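
/*
 * Illustrative only, not driver code: the DTI state machine described
 * above wa_xfer_result_cb(), written out explicitly.  The driver keeps
 * this state implicitly in which URB is in flight; the enum and helper
 * here are hypothetical.
 */
enum example_dti_state { DTI_OFF, DTI_RXR, DTI_RBI };

static enum example_dti_state example_dti_next(enum example_dti_state state,
					       bool in_segment)
{
	switch (state) {
	case DTI_OFF:	/* first xfer_result notification posts the DTI URB */
		return DTI_RXR;
	case DTI_RXR:	/* IN segment: post the BUF-IN URB; else repost DTI */
		return in_segment ? DTI_RBI : DTI_RXR;
	case DTI_RBI:	/* BUF-IN completion reposts the DTI URB */
		return DTI_RXR;
	}
	return DTI_OFF;	/* ENOENT/ESHUTDOWN or too many errors */
}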

/*
 * Transfer complete notification
 *
 * Called from the notif.c code.  We get a notification on EP2 saying
 * that some endpoint has some transfer result data available.  We are
 * about to read it.
 *
 * To speed things up, we always keep a URB posted reading the DTI
 * endpoint; we don't really set it up and start it until the first
 * xfer complete notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers.  Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}
	if (wa->dti_urb != NULL)	/* DTI URB already started */
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		wa->xfer_result, wa->xfer_result_size,
		wa_xfer_result_cb, wa);

	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->buf_in_urb == NULL) {
		dev_err(dev, "Can't allocate BUF-IN URB\n");
		goto error_buf_in_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->buf_in_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		NULL, 0, wa_buf_in_cb, wa);
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		goto error_dti_urb_submit;
	}
out:
	return;

error_dti_urb_submit:
	usb_put_urb(wa->buf_in_urb);
	wa->buf_in_urb = NULL;	/* don't leave a dangling pointer behind */
error_buf_in_urb_alloc:
	usb_put_urb(wa->dti_urb);
	wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
	wa_reset_all(wa);
}
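
/*
 * Illustrative only, not driver code: the notification above carries
 * the DTI endpoint number without a direction bit, so the descriptor
 * check and the bulk pipes OR in USB_DIR_IN (0x80) to form the IN
 * endpoint address.  The helper name is hypothetical.
 */
static inline u8 example_dti_ep_address(u8 bEndpoint)
{
	return USB_DIR_IN | bEndpoint;	/* same as 0x80 | bEndpoint above */
}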