/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 */

#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "usbip_common.h"
#include "stub.h"

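/*
 * Helpers that inspect the setup packet of a control URB to recognize
 * requests that need special handling on the stub side; see
 * tweak_special_requests() below.
 */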
static int is_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
	       (req->bRequestType == USB_RECIP_ENDPOINT) &&
	       (req->wValue == USB_ENDPOINT_HALT);
}

static int is_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_SET_INTERFACE) &&
	       (req->bRequestType == USB_RECIP_INTERFACE);
}

static int is_set_configuration_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
	       (req->bRequestType == USB_RECIP_DEVICE);
}

static int is_reset_device_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 value;
	__u16 index;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	value = le16_to_cpu(req->wValue);
	index = le16_to_cpu(req->wIndex);

	if ((req->bRequest == USB_REQ_SET_FEATURE) &&
	    (req->bRequestType == USB_RT_PORT) &&
	    (value == USB_PORT_FEAT_RESET)) {
		usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
		return 1;
	}

	return 0;
}

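/*
 * Carry out a CLEAR_FEATURE(ENDPOINT_HALT) request through usb_clear_halt()
 * so that the endpoint state kept by the local USB core (e.g. the data
 * toggle) is reset as well.
 */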
static int tweak_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	int target_endp;
	int target_dir;
	int target_pipe;
	int ret;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	/*
	 * The stalled endpoint is specified in wIndex of the setup packet.
	 * The urb itself is addressed to the control endpoint, which is only
	 * the carrier of this clear_halt request.
	 */
	target_endp = le16_to_cpu(req->wIndex) & 0x000f;

	/* Is the stalled endpoint IN or OUT? USB_DIR_IN is 0x80. */
	target_dir = le16_to_cpu(req->wIndex) & 0x0080;

	if (target_dir)
		target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
	else
		target_pipe = usb_sndctrlpipe(urb->dev, target_endp);

	ret = usb_clear_halt(urb->dev, target_pipe);
	if (ret < 0)
		dev_err(&urb->dev->dev,
			"usb_clear_halt error: devnum %d endp %d ret %d\n",
			urb->dev->devnum, target_endp, ret);
	else
		dev_info(&urb->dev->dev,
			 "usb_clear_halt done: devnum %d endp %d\n",
			 urb->dev->devnum, target_endp);

	return ret;
}

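/*
 * Carry out a SET_INTERFACE request through usb_set_interface() so that the
 * local USB core also updates its notion of the active altsetting.
 */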
static int tweak_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 alternate;
	__u16 interface;
	int ret;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	alternate = le16_to_cpu(req->wValue);
	interface = le16_to_cpu(req->wIndex);

	usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
			  interface, alternate);

	ret = usb_set_interface(urb->dev, interface, alternate);
	if (ret < 0)
		dev_err(&urb->dev->dev,
			"usb_set_interface error: inf %u alt %u ret %d\n",
			interface, alternate, ret);
	else
		dev_info(&urb->dev->dev,
			 "usb_set_interface done: inf %u alt %u\n",
			 interface, alternate);

	return ret;
}

static int tweak_set_configuration_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 config;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	config = le16_to_cpu(req->wValue);

	/*
	 * Multi-configuration devices are very rare. For most devices, this
	 * request is issued only once, during initialization, to select the
	 * default configuration.
	 *
	 * set_configuration may change the device configuration, in which
	 * case the device's drivers are unbound and then rebound for the new
	 * configuration. This usbip stub driver would be unbound as well and
	 * only be rebound later, as long as the driver matching conditions
	 * still hold.
	 *
	 * Unfortunately, the existing usbip connection would be dropped by
	 * that unbinding, so the request is skipped here. If a device needs a
	 * non-default configuration, the user has to select it before
	 * exporting the device.
	 */
	dev_info(&urb->dev->dev, "usb_set_configuration %d to %s... skip!\n",
		 config, dev_name(&urb->dev->dev));

	return 0;
}

static int tweak_reset_device_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;

	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

	/*
	 * With the implementation of pre_reset and post_reset the driver no
	 * longer unbinds. This allows the use of synchronous reset.
	 */

	if (usb_lock_device_for_reset(sdev->udev, sdev->interface) < 0) {
		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
		return 0;
	}
	usb_reset_device(sdev->udev);
	usb_unlock_device(sdev->udev);

	return 0;
}

/*
 * clear_halt, set_interface, set_configuration, and reset_device require
 * special handling.
 */
static void tweak_special_requests(struct urb *urb)
{
	if (!urb || !urb->setup_packet)
		return;

	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
		return;

	if (is_clear_halt_cmd(urb))
		/* tweak clear_halt */
		tweak_clear_halt_cmd(urb);

	else if (is_set_interface_cmd(urb))
		/* tweak set_interface */
		tweak_set_interface_cmd(urb);

	else if (is_set_configuration_cmd(urb))
		/* tweak set_configuration */
		tweak_set_configuration_cmd(urb);

	else if (is_reset_device_cmd(urb))
		tweak_reset_device_cmd(urb);
	else
		usbip_dbg_stub_rx("no need to tweak\n");
}

/*
 * stub_recv_cmd_unlink() unlinks the URB by a call to usb_unlink_urb().
 * By unlinking the urb asynchronously, stub_rx can continuously
 * process incoming urbs. Even if the urb is unlinked, its completion
 * handler will be called and stub_tx will send a return pdu.
 *
 * See also comments about unlinking strategy in vhci_hcd.c.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
				struct usbip_header *pdu)
{
	int ret;
	unsigned long flags;
	struct stub_priv *priv;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry(priv, &sdev->priv_init, list) {
		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
			continue;

		dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
			 priv->urb);

		/*
		 * This matched urb is not completed yet (i.e., it is still in
		 * flight in the usb hcd hardware/driver), so we are now
		 * cancelling it. The unlinking flag means that we will not
		 * return the normal result pdu of the submission request, but
		 * the result pdu of the unlink request instead.
		 */
		priv->unlinking = 1;

		/*
		 * While the unlinking flag is set, priv->seqnum holds the
		 * seqnum of the unlink request instead of the seqnum of the
		 * cancelled urb. It will be used to build the result pdu of
		 * the unlink request.
		 */
		priv->seqnum = pdu->base.seqnum;

		spin_unlock_irqrestore(&sdev->priv_lock, flags);

		/*
		 * usb_unlink_urb() is called outside the spinlock to avoid
		 * spinlock recursion, since stub_complete() may be called in
		 * this context rather than in interrupt context. If
		 * stub_complete() runs before we call usb_unlink_urb(),
		 * usb_unlink_urb() returns an error. In that case stub_tx
		 * still returns the result pdu of this unlink request, even
		 * though the submission completed and no actual unlinking was
		 * performed.
		 */
		/*
		 * In that case urb->status is not -ECONNRESET, so a driver on
		 * the client host should be able to tell that the unlink
		 * request failed.
		 */
		ret = usb_unlink_urb(priv->urb);
		if (ret != -EINPROGRESS)
			dev_err(&priv->urb->dev->dev,
				"failed to unlink a urb %p, ret %d\n",
				priv->urb, ret);

		return 0;
	}

	usbip_dbg_stub_rx("seqnum %d is not pending\n",
			  pdu->u.cmd_unlink.seqnum);

	/*
	 * The urb to be unlinked was not found in the priv_init queue. It has
	 * already completed and its result is (or was) being sent by a
	 * RET_SUBMIT pdu. In this case usb_unlink_urb() is not needed; we
	 * only report the completion of this unlink request to vhci_hcd.
	 */
	stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return 0;
}

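/*
 * A received pdu is only valid if it addresses the exported device (the
 * devid matches) and the device is currently in the SDEV_ST_USED state.
 */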
static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usbip_device *ud = &sdev->ud;
	int valid = 0;

	if (pdu->base.devid == sdev->devid) {
		spin_lock_irq(&ud->lock);
		if (ud->status == SDEV_ST_USED) {
			/* A request is valid. */
			valid = 1;
		}
		spin_unlock_irq(&ud->lock);
	}

	return valid;
}

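/*
 * Allocate a stub_priv for a received pdu and link it to sdev->priv_init so
 * that the error handler can free it later. On allocation failure an
 * SDEV_EVENT_ERROR_MALLOC event is raised and NULL is returned.
 */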
static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
					 struct usbip_header *pdu)
{
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	unsigned long flags;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
	if (!priv) {
		dev_err(&sdev->interface->dev, "alloc stub_priv\n");
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return NULL;
	}

	priv->seqnum = pdu->base.seqnum;
	priv->sdev = sdev;

	/*
	 * After a stub_priv is linked to a list_head,
	 * our error handler can free allocated data.
	 */
	list_add_tail(&priv->list, &sdev->priv_init);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return priv;
}

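/*
 * Translate the endpoint number and direction taken from the pdu into a
 * host-side pipe value, based on the transfer type recorded in the endpoint
 * descriptor.
 */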
static int get_pipe(struct stub_device *sdev, int epnum, int dir)
{
	struct usb_device *udev = sdev->udev;
	struct usb_host_endpoint *ep;
	struct usb_endpoint_descriptor *epd = NULL;

	if (dir == USBIP_DIR_IN)
		ep = udev->ep_in[epnum & 0x7f];
	else
		ep = udev->ep_out[epnum & 0x7f];
	if (!ep) {
		dev_err(&sdev->interface->dev, "no such endpoint?, %d\n",
			epnum);
		BUG();
	}

	epd = &ep->desc;
	if (usb_endpoint_xfer_control(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndctrlpipe(udev, epnum);
		else
			return usb_rcvctrlpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_bulk(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndbulkpipe(udev, epnum);
		else
			return usb_rcvbulkpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_int(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndintpipe(udev, epnum);
		else
			return usb_rcvintpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_isoc(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndisocpipe(udev, epnum);
		else
			return usb_rcvisocpipe(udev, epnum);
	}

	/* NOT REACHED */
	dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum);
	return 0;
}

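/*
 * Mask the transfer_flags received from the remote host down to the set that
 * is valid for this endpoint's type and direction, along the lines of the
 * flag checking done by usb_submit_urb() in the USB core.
 */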
static void masking_bogus_flags(struct urb *urb)
{
	int xfertype;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;
	unsigned int allowed;

	if (!urb || urb->hcpriv || !urb->complete)
		return;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return;

	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
		[usb_pipeendpoint(urb->pipe)];
	if (!ep)
		return;

	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
			(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
			 !setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
		   URB_DIR_MASK | URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_CONTROL:
		allowed |= URB_NO_FSBR;	/* only affects UHCI */
		/* FALLTHROUGH */
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	urb->transfer_flags &= allowed;
}

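/*
 * Handle a CMD_SUBMIT pdu: allocate a stub_priv and an urb, fill the urb
 * from the pdu (setup packet, transfer buffer, iso descriptors), and submit
 * it to the exported device. On failure an event is raised that will tear
 * down the connection.
 */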
static void stub_recv_cmd_submit(struct stub_device *sdev,
				 struct usbip_header *pdu)
{
	int ret;
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	struct usb_device *udev = sdev->udev;
	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);

	priv = stub_priv_alloc(sdev, pdu);
	if (!priv)
		return;

	/* setup a urb */
	if (usb_pipeisoc(pipe))
		priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
					  GFP_KERNEL);
	else
		priv->urb = usb_alloc_urb(0, GFP_KERNEL);

	if (!priv->urb) {
		dev_err(&sdev->interface->dev, "malloc urb\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	/* allocate urb transfer buffer, if needed */
	if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
		priv->urb->transfer_buffer =
			kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
				GFP_KERNEL);
		if (!priv->urb->transfer_buffer) {
			usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
			return;
		}
	}

	/* copy urb setup packet */
	priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
					  GFP_KERNEL);
	if (!priv->urb->setup_packet) {
		dev_err(&sdev->interface->dev, "allocate setup_packet\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	/* set other members from the base header of pdu */
	priv->urb->context = (void *) priv;
	priv->urb->dev = udev;
	priv->urb->pipe = pipe;
	priv->urb->complete = stub_complete;

	usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);

	if (usbip_recv_xbuff(ud, priv->urb) < 0)
		return;

	if (usbip_recv_iso(ud, priv->urb) < 0)
		return;

	/* no need to submit an intercepted request, but harmless? */
	tweak_special_requests(priv->urb);

	masking_bogus_flags(priv->urb);
	/* urb is now ready to submit */
	ret = usb_submit_urb(priv->urb, GFP_KERNEL);

	if (ret == 0) {
		usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
				  pdu->base.seqnum);
	} else {
		dev_err(&sdev->interface->dev, "submit_urb error, %d\n", ret);
		usbip_dump_header(pdu);
		usbip_dump_urb(priv->urb);

		/*
		 * Pessimistic.
		 * This connection will be discarded.
		 */
		usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
	}

	usbip_dbg_stub_rx("Leave\n");
}

/* Receive one pdu from the tcp socket and dispatch it. */
static void stub_rx_pdu(struct usbip_device *ud)
{
	int ret;
	struct usbip_header pdu;
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);
	struct device *dev = &sdev->interface->dev;

	usbip_dbg_stub_rx("Enter\n");

	memset(&pdu, 0, sizeof(pdu));

	/* receive a pdu header */
	ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
	if (ret != sizeof(pdu)) {
		dev_err(dev, "recv a header, %d\n", ret);
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	usbip_header_correct_endian(&pdu, 0);

	if (usbip_dbg_flag_stub_rx)
		usbip_dump_header(&pdu);

	if (!valid_request(sdev, &pdu)) {
		dev_err(dev, "recv invalid request\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	switch (pdu.base.command) {
	case USBIP_CMD_UNLINK:
		stub_recv_cmd_unlink(sdev, &pdu);
		break;

	case USBIP_CMD_SUBMIT:
		stub_recv_cmd_submit(sdev, &pdu);
		break;

	default:
		/* NOTREACHED */
		dev_err(dev, "unknown pdu\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		break;
	}
}

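/*
 * Receiver kthread: keep reading pdus from the tcp socket until the thread
 * is asked to stop or a usbip event (e.g. an error) has been raised.
 */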
int stub_rx_loop(void *data)
{
	struct usbip_device *ud = data;

	while (!kthread_should_stop()) {
		if (usbip_event_happened(ud))
			break;

		stub_rx_pdu(ud);
	}

	return 0;
}