1 /*
2 * inode.c -- user mode filesystem api for usb gadget controllers
3 *
4 * Copyright (C) 2003-2004 David Brownell
5 * Copyright (C) 2003 Agilent Technologies
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13
14 /* #define VERBOSE_DEBUG */
15
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/fs.h>
19 #include <linux/pagemap.h>
20 #include <linux/uts.h>
21 #include <linux/wait.h>
22 #include <linux/compiler.h>
23 #include <asm/uaccess.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/poll.h>
27
28 #include <linux/device.h>
29 #include <linux/moduleparam.h>
30
31 #include <linux/usb/gadgetfs.h>
32 #include <linux/usb/gadget.h>
33
34
35 /*
36 * The gadgetfs API maps each endpoint to a file descriptor so that you
37 * can use standard synchronous read/write calls for I/O. There's some
38 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support. Example usermode
39 * drivers show how this works in practice. You can also use AIO to
40 * eliminate I/O gaps between requests, to help when streaming data.
41 *
42 * Key parts that must be USB-specific are protocols defining how the
43 * read/write operations relate to the hardware state machines. There
44 * are two types of files. One type is for the device, implementing ep0.
45 * The other type is for each IN or OUT endpoint. In both cases, the
46 * user mode driver must configure the hardware before using it.
47 *
48 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
49 * (by writing configuration and device descriptors). Afterwards it
50 * may serve as a source of device events, used to handle all control
51 * requests other than basic enumeration.
52 *
53 * - Then, after a SET_CONFIGURATION control request, ep_config() is
54 * called when each /dev/gadget/ep* file is configured (by writing
55 * endpoint descriptors). Afterwards these files are used to write()
56 * IN data or to read() OUT data. To halt the endpoint, a "wrong
57 * direction" request is issued (like reading an IN endpoint).
58 *
59  * Unlike "usbfs", the only ioctl()s are for things that are rare, and maybe
60 * not possible on all hardware. For example, precise fault handling with
61 * respect to data left in endpoint fifos after aborted operations; or
62 * selective clearing of endpoint halts, to implement SET_INTERFACE.
63 */
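/* A rough user-space sketch of that flow.  The mount point, the controller
 * name "net2280", and the endpoint file name below are illustrative
 * assumptions, not something this driver mandates:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	int ep0 = open("/dev/gadget/net2280", O_RDWR);
 *	// write(ep0, ...) with descriptors configures the device (see
 *	// DEVICE INITIALIZATION below); read(ep0, ...) then returns
 *	// struct usb_gadgetfs_event records.  After SET_CONFIGURATION,
 *	// each endpoint gets opened and configured via its own file:
 *	int in = open("/dev/gadget/ep1in-bulk", O_RDWR);
 *	// write(in, buf, len) queues one IN transfer; read() on an OUT
 *	// endpoint file blocks until the host sends data.
 */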
64
65 #define DRIVER_DESC "USB Gadget filesystem"
66 #define DRIVER_VERSION "24 Aug 2004"
67
68 static const char driver_desc [] = DRIVER_DESC;
69 static const char shortname [] = "gadgetfs";
70
71 MODULE_DESCRIPTION (DRIVER_DESC);
72 MODULE_AUTHOR ("David Brownell");
73 MODULE_LICENSE ("GPL");
74
75
76 /*----------------------------------------------------------------------*/
77
78 #define GADGETFS_MAGIC 0xaee71ee7
79
80 /* /dev/gadget/$CHIP represents ep0 and the whole device */
81 enum ep0_state {
 82 	/* DISABLED is the initial state.
83 */
84 STATE_DEV_DISABLED = 0,
85
86 /* Only one open() of /dev/gadget/$CHIP; only one file tracks
87 * ep0/device i/o modes and binding to the controller. Driver
88 * must always write descriptors to initialize the device, then
89 * the device becomes UNCONNECTED until enumeration.
90 */
91 STATE_DEV_OPENED,
92
93 /* From then on, ep0 fd is in either of two basic modes:
94 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
95 * - SETUP: read/write will transfer control data and succeed;
96 * or if "wrong direction", performs protocol stall
97 */
98 STATE_DEV_UNCONNECTED,
99 STATE_DEV_CONNECTED,
100 STATE_DEV_SETUP,
101
102 /* UNBOUND means the driver closed ep0, so the device won't be
103 * accessible again (DEV_DISABLED) until all fds are closed.
104 */
105 STATE_DEV_UNBOUND,
106 };
107
108 /* enough for the whole queue: most events invalidate others */
109 #define N_EVENT 5
110
111 struct dev_data {
112 spinlock_t lock;
113 atomic_t count;
114 enum ep0_state state; /* P: lock */
115 struct usb_gadgetfs_event event [N_EVENT];
116 unsigned ev_next;
117 struct fasync_struct *fasync;
118 u8 current_config;
119
120 /* drivers reading ep0 MUST handle control requests (SETUP)
121 * reported that way; else the host will time out.
122 */
123 unsigned usermode_setup : 1,
124 setup_in : 1,
125 setup_can_stall : 1,
126 setup_out_ready : 1,
127 setup_out_error : 1,
128 setup_abort : 1;
129 unsigned setup_wLength;
130
131 /* the rest is basically write-once */
132 struct usb_config_descriptor *config, *hs_config;
133 struct usb_device_descriptor *dev;
134 struct usb_request *req;
135 struct usb_gadget *gadget;
136 struct list_head epfiles;
137 void *buf;
138 wait_queue_head_t wait;
139 struct super_block *sb;
140 struct dentry *dentry;
141
142 /* except this scratch i/o buffer for ep0 */
143 u8 rbuf [256];
144 };
145
146 static inline void get_dev (struct dev_data *data)
147 {
148 atomic_inc (&data->count);
149 }
150
151 static void put_dev (struct dev_data *data)
152 {
153 if (likely (!atomic_dec_and_test (&data->count)))
154 return;
155 /* needs no more cleanup */
156 BUG_ON (waitqueue_active (&data->wait));
157 kfree (data);
158 }
159
160 static struct dev_data *dev_new (void)
161 {
162 struct dev_data *dev;
163
164 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
165 if (!dev)
166 return NULL;
167 dev->state = STATE_DEV_DISABLED;
168 atomic_set (&dev->count, 1);
169 spin_lock_init (&dev->lock);
170 INIT_LIST_HEAD (&dev->epfiles);
171 init_waitqueue_head (&dev->wait);
172 return dev;
173 }
174
175 /*----------------------------------------------------------------------*/
176
177 /* other /dev/gadget/$ENDPOINT files represent endpoints */
178 enum ep_state {
179 STATE_EP_DISABLED = 0,
180 STATE_EP_READY,
181 STATE_EP_ENABLED,
182 STATE_EP_UNBOUND,
183 };
184
185 struct ep_data {
186 struct mutex lock;
187 enum ep_state state;
188 atomic_t count;
189 struct dev_data *dev;
190 /* must hold dev->lock before accessing ep or req */
191 struct usb_ep *ep;
192 struct usb_request *req;
193 ssize_t status;
194 char name [16];
195 struct usb_endpoint_descriptor desc, hs_desc;
196 struct list_head epfiles;
197 wait_queue_head_t wait;
198 struct dentry *dentry;
199 struct inode *inode;
200 };
201
202 static inline void get_ep (struct ep_data *data)
203 {
204 atomic_inc (&data->count);
205 }
206
207 static void put_ep (struct ep_data *data)
208 {
209 if (likely (!atomic_dec_and_test (&data->count)))
210 return;
211 put_dev (data->dev);
212 /* needs no more cleanup */
213 BUG_ON (!list_empty (&data->epfiles));
214 BUG_ON (waitqueue_active (&data->wait));
215 kfree (data);
216 }
217
218 /*----------------------------------------------------------------------*/
219
220 /* most "how to use the hardware" policy choices are in userspace:
221 * mapping endpoint roles (which the driver needs) to the capabilities
222 * which the usb controller has. most of those capabilities are exposed
223 * implicitly, starting with the driver name and then endpoint names.
224 */
225
226 static const char *CHIP;
227
228 /*----------------------------------------------------------------------*/
229
230 /* NOTE: don't use dev_printk calls before binding to the gadget
231 * at the end of ep0 configuration, or after unbind.
232 */
233
234 /* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
235 #define xprintk(d,level,fmt,args...) \
236 printk(level "%s: " fmt , shortname , ## args)
237
238 #ifdef DEBUG
239 #define DBG(dev,fmt,args...) \
240 xprintk(dev , KERN_DEBUG , fmt , ## args)
241 #else
242 #define DBG(dev,fmt,args...) \
243 do { } while (0)
244 #endif /* DEBUG */
245
246 #ifdef VERBOSE_DEBUG
247 #define VDEBUG DBG
248 #else
249 #define VDEBUG(dev,fmt,args...) \
250 do { } while (0)
251 #endif /* VERBOSE_DEBUG */
252
253 #define ERROR(dev,fmt,args...) \
254 xprintk(dev , KERN_ERR , fmt , ## args)
255 #define INFO(dev,fmt,args...) \
256 xprintk(dev , KERN_INFO , fmt , ## args)
257
258
259 /*----------------------------------------------------------------------*/
260
261 /* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
262 *
263 * After opening, configure non-control endpoints. Then use normal
264 * stream read() and write() requests; and maybe ioctl() to get more
265 * precise FIFO status when recovering from cancellation.
266 */
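/* For example, a minimal user-space sketch (the endpoint file name and
 * buffer size are assumptions; the ioctl codes are the ones handled in
 * ep_ioctl() below):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	char buf[4096];
 *	int fd = open("/dev/gadget/ep2out-bulk", O_RDWR);
 *	// ... first write the tag + endpoint descriptor(s), see
 *	// ENDPOINT INITIALIZATION below ...
 *	ssize_t n = read(fd, buf, sizeof buf);	// one OUT transfer
 *	if (n < 0) {
 *		// recovering after a failed or cancelled transfer:
 *		int queued = ioctl(fd, GADGETFS_FIFO_STATUS);
 *		ioctl(fd, GADGETFS_FIFO_FLUSH);
 *	}
 */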
267
268 static void epio_complete (struct usb_ep *ep, struct usb_request *req)
269 {
270 struct ep_data *epdata = ep->driver_data;
271
272 if (!req->context)
273 return;
274 if (req->status)
275 epdata->status = req->status;
276 else
277 epdata->status = req->actual;
278 complete ((struct completion *)req->context);
279 }
280
281 /* acquire the endpoint's mutex, succeeding only while it's enabled.
282 * still need dev->lock to use epdata->ep.
283 */
284 static int
285 get_ready_ep (unsigned f_flags, struct ep_data *epdata)
286 {
287 int val;
288
289 if (f_flags & O_NONBLOCK) {
290 if (!mutex_trylock(&epdata->lock))
291 goto nonblock;
292 if (epdata->state != STATE_EP_ENABLED) {
293 mutex_unlock(&epdata->lock);
294 nonblock:
295 val = -EAGAIN;
296 } else
297 val = 0;
298 return val;
299 }
300
301 val = mutex_lock_interruptible(&epdata->lock);
302 if (val < 0)
303 return val;
304
305 switch (epdata->state) {
306 case STATE_EP_ENABLED:
307 break;
308 // case STATE_EP_DISABLED: /* "can't happen" */
309 // case STATE_EP_READY: /* "can't happen" */
310 default: /* error! */
311 pr_debug ("%s: ep %p not available, state %d\n",
312 shortname, epdata, epdata->state);
313 // FALLTHROUGH
314 case STATE_EP_UNBOUND: /* clean disconnect */
315 val = -ENODEV;
316 mutex_unlock(&epdata->lock);
317 }
318 return val;
319 }
320
321 static ssize_t
322 ep_io (struct ep_data *epdata, void *buf, unsigned len)
323 {
324 DECLARE_COMPLETION_ONSTACK (done);
325 int value;
326
327 spin_lock_irq (&epdata->dev->lock);
328 if (likely (epdata->ep != NULL)) {
329 struct usb_request *req = epdata->req;
330
331 req->context = &done;
332 req->complete = epio_complete;
333 req->buf = buf;
334 req->length = len;
335 value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
336 } else
337 value = -ENODEV;
338 spin_unlock_irq (&epdata->dev->lock);
339
340 if (likely (value == 0)) {
341 value = wait_event_interruptible (done.wait, done.done);
342 if (value != 0) {
343 spin_lock_irq (&epdata->dev->lock);
344 if (likely (epdata->ep != NULL)) {
345 DBG (epdata->dev, "%s i/o interrupted\n",
346 epdata->name);
347 usb_ep_dequeue (epdata->ep, epdata->req);
348 spin_unlock_irq (&epdata->dev->lock);
349
350 wait_event (done.wait, done.done);
351 if (epdata->status == -ECONNRESET)
352 epdata->status = -EINTR;
353 } else {
354 spin_unlock_irq (&epdata->dev->lock);
355
356 DBG (epdata->dev, "endpoint gone\n");
357 epdata->status = -ENODEV;
358 }
359 }
360 return epdata->status;
361 }
362 return value;
363 }
364
365
366 /* handle a synchronous OUT bulk/intr/iso transfer */
367 static ssize_t
368 ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
369 {
370 struct ep_data *data = fd->private_data;
371 void *kbuf;
372 ssize_t value;
373
374 if ((value = get_ready_ep (fd->f_flags, data)) < 0)
375 return value;
376
377 /* halt any endpoint by doing a "wrong direction" i/o call */
378 if (usb_endpoint_dir_in(&data->desc)) {
379 if (usb_endpoint_xfer_isoc(&data->desc)) {
380 mutex_unlock(&data->lock);
381 return -EINVAL;
382 }
383 DBG (data->dev, "%s halt\n", data->name);
384 spin_lock_irq (&data->dev->lock);
385 if (likely (data->ep != NULL))
386 usb_ep_set_halt (data->ep);
387 spin_unlock_irq (&data->dev->lock);
388 mutex_unlock(&data->lock);
389 return -EBADMSG;
390 }
391
392 /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
393
394 value = -ENOMEM;
395 kbuf = kmalloc (len, GFP_KERNEL);
396 if (unlikely (!kbuf))
397 goto free1;
398
399 value = ep_io (data, kbuf, len);
400 VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
401 data->name, len, (int) value);
402 if (value >= 0 && copy_to_user (buf, kbuf, value))
403 value = -EFAULT;
404
405 free1:
406 mutex_unlock(&data->lock);
407 kfree (kbuf);
408 return value;
409 }
410
411 /* handle a synchronous IN bulk/intr/iso transfer */
412 static ssize_t
413 ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
414 {
415 struct ep_data *data = fd->private_data;
416 void *kbuf;
417 ssize_t value;
418
419 if ((value = get_ready_ep (fd->f_flags, data)) < 0)
420 return value;
421
422 /* halt any endpoint by doing a "wrong direction" i/o call */
423 if (!usb_endpoint_dir_in(&data->desc)) {
424 if (usb_endpoint_xfer_isoc(&data->desc)) {
425 mutex_unlock(&data->lock);
426 return -EINVAL;
427 }
428 DBG (data->dev, "%s halt\n", data->name);
429 spin_lock_irq (&data->dev->lock);
430 if (likely (data->ep != NULL))
431 usb_ep_set_halt (data->ep);
432 spin_unlock_irq (&data->dev->lock);
433 mutex_unlock(&data->lock);
434 return -EBADMSG;
435 }
436
437 /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
438
439 value = -ENOMEM;
440 kbuf = kmalloc (len, GFP_KERNEL);
441 if (!kbuf)
442 goto free1;
443 if (copy_from_user (kbuf, buf, len)) {
444 value = -EFAULT;
445 goto free1;
446 }
447
448 value = ep_io (data, kbuf, len);
449 VDEBUG (data->dev, "%s write %zu IN, status %d\n",
450 data->name, len, (int) value);
451 free1:
452 mutex_unlock(&data->lock);
453 kfree (kbuf);
454 return value;
455 }
456
457 static int
458 ep_release (struct inode *inode, struct file *fd)
459 {
460 struct ep_data *data = fd->private_data;
461 int value;
462
463 value = mutex_lock_interruptible(&data->lock);
464 if (value < 0)
465 return value;
466
467 /* clean up if this can be reopened */
468 if (data->state != STATE_EP_UNBOUND) {
469 data->state = STATE_EP_DISABLED;
470 data->desc.bDescriptorType = 0;
471 data->hs_desc.bDescriptorType = 0;
472 usb_ep_disable(data->ep);
473 }
474 mutex_unlock(&data->lock);
475 put_ep (data);
476 return 0;
477 }
478
479 static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
480 {
481 struct ep_data *data = fd->private_data;
482 int status;
483
484 if ((status = get_ready_ep (fd->f_flags, data)) < 0)
485 return status;
486
487 spin_lock_irq (&data->dev->lock);
488 if (likely (data->ep != NULL)) {
489 switch (code) {
490 case GADGETFS_FIFO_STATUS:
491 status = usb_ep_fifo_status (data->ep);
492 break;
493 case GADGETFS_FIFO_FLUSH:
494 usb_ep_fifo_flush (data->ep);
495 break;
496 case GADGETFS_CLEAR_HALT:
497 status = usb_ep_clear_halt (data->ep);
498 break;
499 default:
500 status = -ENOTTY;
501 }
502 } else
503 status = -ENODEV;
504 spin_unlock_irq (&data->dev->lock);
505 mutex_unlock(&data->lock);
506 return status;
507 }
508
509 /*----------------------------------------------------------------------*/
510
511 /* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
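/* User space typically drives these through the kernel AIO calls, so several
 * transfers can stay queued per endpoint.  A hedged sketch using libaio (the
 * queue depth and buffer size are arbitrary; "fd" is an already-configured
 * endpoint file):
 *
 *	#include <libaio.h>
 *
 *	io_context_t ctx = 0;
 *	struct iocb cb, *cbs[1] = { &cb };
 *	struct io_event ev;
 *	char buf[512];
 *
 *	io_setup(4, &ctx);
 *	io_prep_pread(&cb, fd, buf, sizeof buf, 0);	// OUT endpoint read,
 *	io_submit(ctx, 1, cbs);				// handled by ep_aio_read()
 *	io_getevents(ctx, 1, 1, &ev, NULL);		// ev.res: bytes or -errno
 *	io_destroy(ctx);
 */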
512
513 struct kiocb_priv {
514 struct usb_request *req;
515 struct ep_data *epdata;
516 void *buf;
517 const struct iovec *iv;
518 unsigned long nr_segs;
519 unsigned actual;
520 };
521
522 static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
523 {
524 struct kiocb_priv *priv = iocb->private;
525 struct ep_data *epdata;
526 int value;
527
528 local_irq_disable();
529 epdata = priv->epdata;
530 // spin_lock(&epdata->dev->lock);
531 kiocbSetCancelled(iocb);
532 if (likely(epdata && epdata->ep && priv->req))
533 value = usb_ep_dequeue (epdata->ep, priv->req);
534 else
535 value = -EINVAL;
536 // spin_unlock(&epdata->dev->lock);
537 local_irq_enable();
538
539 aio_put_req(iocb);
540 return value;
541 }
542
543 static ssize_t ep_aio_read_retry(struct kiocb *iocb)
544 {
545 struct kiocb_priv *priv = iocb->private;
546 ssize_t len, total;
547 void *to_copy;
548 int i;
549
550 /* we "retry" to get the right mm context for this: */
551
552 /* copy stuff into user buffers */
553 total = priv->actual;
554 len = 0;
555 to_copy = priv->buf;
556 for (i=0; i < priv->nr_segs; i++) {
557 ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
558
559 if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
560 if (len == 0)
561 len = -EFAULT;
562 break;
563 }
564
565 total -= this;
566 len += this;
567 to_copy += this;
568 if (total == 0)
569 break;
570 }
571 kfree(priv->buf);
572 kfree(priv);
573 return len;
574 }
575
576 static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
577 {
578 struct kiocb *iocb = req->context;
579 struct kiocb_priv *priv = iocb->private;
580 struct ep_data *epdata = priv->epdata;
581
582 /* lock against disconnect (and ideally, cancel) */
583 spin_lock(&epdata->dev->lock);
584 priv->req = NULL;
585 priv->epdata = NULL;
586
587 /* if this was a write or a read returning no data then we
588 * don't need to copy anything to userspace, so we can
589 * complete the aio request immediately.
590 */
591 if (priv->iv == NULL || unlikely(req->actual == 0)) {
592 kfree(req->buf);
593 kfree(priv);
594 iocb->private = NULL;
595 /* aio_complete() reports bytes-transferred _and_ faults */
596 aio_complete(iocb, req->actual ? req->actual : req->status,
597 req->status);
598 } else {
599 /* retry() won't report both; so we hide some faults */
600 if (unlikely(0 != req->status))
601 DBG(epdata->dev, "%s fault %d len %d\n",
602 ep->name, req->status, req->actual);
603
604 priv->buf = req->buf;
605 priv->actual = req->actual;
606 kick_iocb(iocb);
607 }
608 spin_unlock(&epdata->dev->lock);
609
610 usb_ep_free_request(ep, req);
611 put_ep(epdata);
612 }
613
614 static ssize_t
615 ep_aio_rwtail(
616 struct kiocb *iocb,
617 char *buf,
618 size_t len,
619 struct ep_data *epdata,
620 const struct iovec *iv,
621 unsigned long nr_segs
622 )
623 {
624 struct kiocb_priv *priv;
625 struct usb_request *req;
626 ssize_t value;
627
628 priv = kmalloc(sizeof *priv, GFP_KERNEL);
629 if (!priv) {
630 value = -ENOMEM;
631 fail:
632 kfree(buf);
633 return value;
634 }
635 iocb->private = priv;
636 priv->iv = iv;
637 priv->nr_segs = nr_segs;
638
639 value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
640 if (unlikely(value < 0)) {
641 kfree(priv);
642 goto fail;
643 }
644
645 iocb->ki_cancel = ep_aio_cancel;
646 get_ep(epdata);
647 priv->epdata = epdata;
648 priv->actual = 0;
649
650 /* each kiocb is coupled to one usb_request, but we can't
651 * allocate or submit those if the host disconnected.
652 */
653 spin_lock_irq(&epdata->dev->lock);
654 if (likely(epdata->ep)) {
655 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
656 if (likely(req)) {
657 priv->req = req;
658 req->buf = buf;
659 req->length = len;
660 req->complete = ep_aio_complete;
661 req->context = iocb;
662 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
663 if (unlikely(0 != value))
664 usb_ep_free_request(epdata->ep, req);
665 } else
666 value = -EAGAIN;
667 } else
668 value = -ENODEV;
669 spin_unlock_irq(&epdata->dev->lock);
670
671 mutex_unlock(&epdata->lock);
672
673 if (unlikely(value)) {
674 kfree(priv);
675 put_ep(epdata);
676 } else
677 value = (iv ? -EIOCBRETRY : -EIOCBQUEUED);
678 return value;
679 }
680
681 static ssize_t
682 ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
683 unsigned long nr_segs, loff_t o)
684 {
685 struct ep_data *epdata = iocb->ki_filp->private_data;
686 char *buf;
687
688 if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
689 return -EINVAL;
690
691 buf = kmalloc(iocb->ki_left, GFP_KERNEL);
692 if (unlikely(!buf))
693 return -ENOMEM;
694
695 iocb->ki_retry = ep_aio_read_retry;
696 return ep_aio_rwtail(iocb, buf, iocb->ki_left, epdata, iov, nr_segs);
697 }
698
699 static ssize_t
700 ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
701 unsigned long nr_segs, loff_t o)
702 {
703 struct ep_data *epdata = iocb->ki_filp->private_data;
704 char *buf;
705 size_t len = 0;
706 int i = 0;
707
708 if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
709 return -EINVAL;
710
711 buf = kmalloc(iocb->ki_left, GFP_KERNEL);
712 if (unlikely(!buf))
713 return -ENOMEM;
714
715 for (i=0; i < nr_segs; i++) {
716 if (unlikely(copy_from_user(&buf[len], iov[i].iov_base,
717 iov[i].iov_len) != 0)) {
718 kfree(buf);
719 return -EFAULT;
720 }
721 len += iov[i].iov_len;
722 }
723 return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0);
724 }
725
726 /*----------------------------------------------------------------------*/
727
728 /* used after endpoint configuration */
729 static const struct file_operations ep_io_operations = {
730 .owner = THIS_MODULE,
731 .llseek = no_llseek,
732
733 .read = ep_read,
734 .write = ep_write,
735 .unlocked_ioctl = ep_ioctl,
736 .release = ep_release,
737
738 .aio_read = ep_aio_read,
739 .aio_write = ep_aio_write,
740 };
741
742 /* ENDPOINT INITIALIZATION
743 *
744 * fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
745 * status = write (fd, descriptors, sizeof descriptors)
746 *
747 * That write establishes the endpoint configuration, configuring
748 * the controller to process bulk, interrupt, or isochronous transfers
749 * at the right maxpacket size, and so on.
750 *
751 * The descriptors are message type 1, identified by a host order u32
752 * at the beginning of what's written. Descriptor order is: full/low
753 * speed descriptor, then optional high speed descriptor.
754 */
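/* A hedged user-space sketch of that write (the descriptor values are
 * placeholders; a real driver fills them to match its function):
 *
 *	#include <endian.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	struct usb_endpoint_descriptor fs = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN | 1,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= htole16(64),
 *	};
 *	__u32 tag = 1;
 *	char cfg[4 + USB_DT_ENDPOINT_SIZE];
 *
 *	memcpy(cfg, &tag, 4);
 *	memcpy(cfg + 4, &fs, USB_DT_ENDPOINT_SIZE);
 *	write(fd, cfg, sizeof cfg);	// append a second (high speed)
 *					// descriptor before writing if the
 *					// controller is dual-speed
 */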
755 static ssize_t
756 ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
757 {
758 struct ep_data *data = fd->private_data;
759 struct usb_ep *ep;
760 u32 tag;
761 int value, length = len;
762
763 value = mutex_lock_interruptible(&data->lock);
764 if (value < 0)
765 return value;
766
767 if (data->state != STATE_EP_READY) {
768 value = -EL2HLT;
769 goto fail;
770 }
771
772 value = len;
773 if (len < USB_DT_ENDPOINT_SIZE + 4)
774 goto fail0;
775
776 /* we might need to change message format someday */
777 if (copy_from_user (&tag, buf, 4)) {
778 goto fail1;
779 }
780 if (tag != 1) {
781 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
782 goto fail0;
783 }
784 buf += 4;
785 len -= 4;
786
787 /* NOTE: audio endpoint extensions not accepted here;
788 * just don't include the extra bytes.
789 */
790
791 /* full/low speed descriptor, then high speed */
792 if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
793 goto fail1;
794 }
795 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
796 || data->desc.bDescriptorType != USB_DT_ENDPOINT)
797 goto fail0;
798 if (len != USB_DT_ENDPOINT_SIZE) {
799 if (len != 2 * USB_DT_ENDPOINT_SIZE)
800 goto fail0;
801 if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
802 USB_DT_ENDPOINT_SIZE)) {
803 goto fail1;
804 }
805 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
806 || data->hs_desc.bDescriptorType
807 != USB_DT_ENDPOINT) {
808 DBG(data->dev, "config %s, bad hs length or type\n",
809 data->name);
810 goto fail0;
811 }
812 }
813
814 spin_lock_irq (&data->dev->lock);
815 if (data->dev->state == STATE_DEV_UNBOUND) {
816 value = -ENOENT;
817 goto gone;
818 } else if ((ep = data->ep) == NULL) {
819 value = -ENODEV;
820 goto gone;
821 }
822 switch (data->dev->gadget->speed) {
823 case USB_SPEED_LOW:
824 case USB_SPEED_FULL:
825 ep->desc = &data->desc;
826 value = usb_ep_enable(ep);
827 if (value == 0)
828 data->state = STATE_EP_ENABLED;
829 break;
830 case USB_SPEED_HIGH:
831 /* fails if caller didn't provide that descriptor... */
832 ep->desc = &data->hs_desc;
833 value = usb_ep_enable(ep);
834 if (value == 0)
835 data->state = STATE_EP_ENABLED;
836 break;
837 default:
838 DBG(data->dev, "unconnected, %s init abandoned\n",
839 data->name);
840 value = -EINVAL;
841 }
842 if (value == 0) {
843 fd->f_op = &ep_io_operations;
844 value = length;
845 }
846 gone:
847 spin_unlock_irq (&data->dev->lock);
848 if (value < 0) {
849 fail:
850 data->desc.bDescriptorType = 0;
851 data->hs_desc.bDescriptorType = 0;
852 }
853 mutex_unlock(&data->lock);
854 return value;
855 fail0:
856 value = -EINVAL;
857 goto fail;
858 fail1:
859 value = -EFAULT;
860 goto fail;
861 }
862
863 static int
864 ep_open (struct inode *inode, struct file *fd)
865 {
866 struct ep_data *data = inode->i_private;
867 int value = -EBUSY;
868
869 if (mutex_lock_interruptible(&data->lock) != 0)
870 return -EINTR;
871 spin_lock_irq (&data->dev->lock);
872 if (data->dev->state == STATE_DEV_UNBOUND)
873 value = -ENOENT;
874 else if (data->state == STATE_EP_DISABLED) {
875 value = 0;
876 data->state = STATE_EP_READY;
877 get_ep (data);
878 fd->private_data = data;
879 VDEBUG (data->dev, "%s ready\n", data->name);
880 } else
881 DBG (data->dev, "%s state %d\n",
882 data->name, data->state);
883 spin_unlock_irq (&data->dev->lock);
884 mutex_unlock(&data->lock);
885 return value;
886 }
887
888 /* used before endpoint configuration */
889 static const struct file_operations ep_config_operations = {
890 .owner = THIS_MODULE,
891 .llseek = no_llseek,
892
893 .open = ep_open,
894 .write = ep_config,
895 .release = ep_release,
896 };
897
898 /*----------------------------------------------------------------------*/
899
900 /* EP0 IMPLEMENTATION can be partly in userspace.
901 *
902 * Drivers that use this facility receive various events, including
903 * control requests the kernel doesn't handle. Drivers that don't
904 * use this facility may be too simple-minded for real applications.
905 */
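/* A hedged sketch of the user-space event loop this implies (skeleton only,
 * error handling omitted; each event obviously needs real handling):
 *
 *	#include <unistd.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	struct usb_gadgetfs_event ev[5];
 *	int i, n;
 *
 *	n = read(ep0, ev, sizeof ev);
 *	n /= sizeof ev[0];
 *	for (i = 0; i < n; i++) {
 *		switch (ev[i].type) {
 *		case GADGETFS_CONNECT:		// ev[i].u.speed
 *		case GADGETFS_DISCONNECT:
 *		case GADGETFS_SUSPEND:
 *			break;
 *		case GADGETFS_SETUP:
 *			// ev[i].u.setup holds the usb_ctrlrequest; answer
 *			// it with read()/write() on ep0 (see below)
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */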
906
907 static inline void ep0_readable (struct dev_data *dev)
908 {
909 wake_up (&dev->wait);
910 kill_fasync (&dev->fasync, SIGIO, POLL_IN);
911 }
912
913 static void clean_req (struct usb_ep *ep, struct usb_request *req)
914 {
915 struct dev_data *dev = ep->driver_data;
916
917 if (req->buf != dev->rbuf) {
918 kfree(req->buf);
919 req->buf = dev->rbuf;
920 }
921 req->complete = epio_complete;
922 dev->setup_out_ready = 0;
923 }
924
925 static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
926 {
927 struct dev_data *dev = ep->driver_data;
928 unsigned long flags;
929 int free = 1;
930
931 /* for control OUT, data must still get to userspace */
932 spin_lock_irqsave(&dev->lock, flags);
933 if (!dev->setup_in) {
934 dev->setup_out_error = (req->status != 0);
935 if (!dev->setup_out_error)
936 free = 0;
937 dev->setup_out_ready = 1;
938 ep0_readable (dev);
939 }
940
941 /* clean up as appropriate */
942 if (free && req->buf != &dev->rbuf)
943 clean_req (ep, req);
944 req->complete = epio_complete;
945 spin_unlock_irqrestore(&dev->lock, flags);
946 }
947
948 static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
949 {
950 struct dev_data *dev = ep->driver_data;
951
952 if (dev->setup_out_ready) {
953 DBG (dev, "ep0 request busy!\n");
954 return -EBUSY;
955 }
956 if (len > sizeof (dev->rbuf))
957 req->buf = kmalloc(len, GFP_ATOMIC);
958 if (req->buf == NULL) {
959 req->buf = dev->rbuf;
960 return -ENOMEM;
961 }
962 req->complete = ep0_complete;
963 req->length = len;
964 req->zero = 0;
965 return 0;
966 }
967
968 static ssize_t
969 ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
970 {
971 struct dev_data *dev = fd->private_data;
972 ssize_t retval;
973 enum ep0_state state;
974
975 spin_lock_irq (&dev->lock);
976
977 /* report fd mode change before acting on it */
978 if (dev->setup_abort) {
979 dev->setup_abort = 0;
980 retval = -EIDRM;
981 goto done;
982 }
983
984 /* control DATA stage */
985 if ((state = dev->state) == STATE_DEV_SETUP) {
986
987 if (dev->setup_in) { /* stall IN */
988 VDEBUG(dev, "ep0in stall\n");
989 (void) usb_ep_set_halt (dev->gadget->ep0);
990 retval = -EL2HLT;
991 dev->state = STATE_DEV_CONNECTED;
992
993 } else if (len == 0) { /* ack SET_CONFIGURATION etc */
994 struct usb_ep *ep = dev->gadget->ep0;
995 struct usb_request *req = dev->req;
996
997 if ((retval = setup_req (ep, req, 0)) == 0)
998 retval = usb_ep_queue (ep, req, GFP_ATOMIC);
999 dev->state = STATE_DEV_CONNECTED;
1000
1001 /* assume that was SET_CONFIGURATION */
1002 if (dev->current_config) {
1003 unsigned power;
1004
1005 if (gadget_is_dualspeed(dev->gadget)
1006 && (dev->gadget->speed
1007 == USB_SPEED_HIGH))
1008 power = dev->hs_config->bMaxPower;
1009 else
1010 power = dev->config->bMaxPower;
1011 usb_gadget_vbus_draw(dev->gadget, 2 * power);
1012 }
1013
1014 } else { /* collect OUT data */
1015 if ((fd->f_flags & O_NONBLOCK) != 0
1016 && !dev->setup_out_ready) {
1017 retval = -EAGAIN;
1018 goto done;
1019 }
1020 spin_unlock_irq (&dev->lock);
1021 retval = wait_event_interruptible (dev->wait,
1022 dev->setup_out_ready != 0);
1023
1024 /* FIXME state could change from under us */
1025 spin_lock_irq (&dev->lock);
1026 if (retval)
1027 goto done;
1028
1029 if (dev->state != STATE_DEV_SETUP) {
1030 retval = -ECANCELED;
1031 goto done;
1032 }
1033 dev->state = STATE_DEV_CONNECTED;
1034
1035 if (dev->setup_out_error)
1036 retval = -EIO;
1037 else {
1038 len = min (len, (size_t)dev->req->actual);
1039 // FIXME don't call this with the spinlock held ...
1040 if (copy_to_user (buf, dev->req->buf, len))
1041 retval = -EFAULT;
1042 else
1043 retval = len;
1044 clean_req (dev->gadget->ep0, dev->req);
1045 /* NOTE userspace can't yet choose to stall */
1046 }
1047 }
1048 goto done;
1049 }
1050
1051 /* else normal: return event data */
1052 if (len < sizeof dev->event [0]) {
1053 retval = -EINVAL;
1054 goto done;
1055 }
1056 len -= len % sizeof (struct usb_gadgetfs_event);
1057 dev->usermode_setup = 1;
1058
1059 scan:
1060 /* return queued events right away */
1061 if (dev->ev_next != 0) {
1062 unsigned i, n;
1063
1064 n = len / sizeof (struct usb_gadgetfs_event);
1065 if (dev->ev_next < n)
1066 n = dev->ev_next;
1067
1068 /* ep0 i/o has special semantics during STATE_DEV_SETUP */
1069 for (i = 0; i < n; i++) {
1070 if (dev->event [i].type == GADGETFS_SETUP) {
1071 dev->state = STATE_DEV_SETUP;
1072 n = i + 1;
1073 break;
1074 }
1075 }
1076 spin_unlock_irq (&dev->lock);
1077 len = n * sizeof (struct usb_gadgetfs_event);
1078 if (copy_to_user (buf, &dev->event, len))
1079 retval = -EFAULT;
1080 else
1081 retval = len;
1082 if (len > 0) {
1083 /* NOTE this doesn't guard against broken drivers;
1084 * concurrent ep0 readers may lose events.
1085 */
1086 spin_lock_irq (&dev->lock);
1087 if (dev->ev_next > n) {
1088 memmove(&dev->event[0], &dev->event[n],
1089 sizeof (struct usb_gadgetfs_event)
1090 * (dev->ev_next - n));
1091 }
1092 dev->ev_next -= n;
1093 spin_unlock_irq (&dev->lock);
1094 }
1095 return retval;
1096 }
1097 if (fd->f_flags & O_NONBLOCK) {
1098 retval = -EAGAIN;
1099 goto done;
1100 }
1101
1102 switch (state) {
1103 default:
1104 DBG (dev, "fail %s, state %d\n", __func__, state);
1105 retval = -ESRCH;
1106 break;
1107 case STATE_DEV_UNCONNECTED:
1108 case STATE_DEV_CONNECTED:
1109 spin_unlock_irq (&dev->lock);
1110 DBG (dev, "%s wait\n", __func__);
1111
1112 /* wait for events */
1113 retval = wait_event_interruptible (dev->wait,
1114 dev->ev_next != 0);
1115 if (retval < 0)
1116 return retval;
1117 spin_lock_irq (&dev->lock);
1118 goto scan;
1119 }
1120
1121 done:
1122 spin_unlock_irq (&dev->lock);
1123 return retval;
1124 }
1125
1126 static struct usb_gadgetfs_event *
1127 next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1128 {
1129 struct usb_gadgetfs_event *event;
1130 unsigned i;
1131
1132 switch (type) {
1133 /* these events purge the queue */
1134 case GADGETFS_DISCONNECT:
1135 if (dev->state == STATE_DEV_SETUP)
1136 dev->setup_abort = 1;
1137 // FALL THROUGH
1138 case GADGETFS_CONNECT:
1139 dev->ev_next = 0;
1140 break;
1141 case GADGETFS_SETUP: /* previous request timed out */
1142 case GADGETFS_SUSPEND: /* same effect */
1143 /* these events can't be repeated */
1144 for (i = 0; i != dev->ev_next; i++) {
1145 if (dev->event [i].type != type)
1146 continue;
1147 DBG(dev, "discard old event[%d] %d\n", i, type);
1148 dev->ev_next--;
1149 if (i == dev->ev_next)
1150 break;
1151 /* indices start at zero, for simplicity */
1152 memmove (&dev->event [i], &dev->event [i + 1],
1153 sizeof (struct usb_gadgetfs_event)
1154 * (dev->ev_next - i));
1155 }
1156 break;
1157 default:
1158 BUG ();
1159 }
1160 VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1161 event = &dev->event [dev->ev_next++];
1162 BUG_ON (dev->ev_next > N_EVENT);
1163 memset (event, 0, sizeof *event);
1164 event->type = type;
1165 return event;
1166 }
1167
1168 static ssize_t
1169 ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1170 {
1171 struct dev_data *dev = fd->private_data;
1172 ssize_t retval = -ESRCH;
1173
1174 spin_lock_irq (&dev->lock);
1175
1176 /* report fd mode change before acting on it */
1177 if (dev->setup_abort) {
1178 dev->setup_abort = 0;
1179 retval = -EIDRM;
1180
1181 /* data and/or status stage for control request */
1182 } else if (dev->state == STATE_DEV_SETUP) {
1183
1184 /* IN DATA+STATUS caller makes len <= wLength */
1185 if (dev->setup_in) {
1186 retval = setup_req (dev->gadget->ep0, dev->req, len);
1187 if (retval == 0) {
1188 dev->state = STATE_DEV_CONNECTED;
1189 spin_unlock_irq (&dev->lock);
1190 if (copy_from_user (dev->req->buf, buf, len))
1191 retval = -EFAULT;
1192 else {
1193 if (len < dev->setup_wLength)
1194 dev->req->zero = 1;
1195 retval = usb_ep_queue (
1196 dev->gadget->ep0, dev->req,
1197 GFP_KERNEL);
1198 }
1199 if (retval < 0) {
1200 spin_lock_irq (&dev->lock);
1201 clean_req (dev->gadget->ep0, dev->req);
1202 spin_unlock_irq (&dev->lock);
1203 } else
1204 retval = len;
1205
1206 return retval;
1207 }
1208
1209 /* can stall some OUT transfers */
1210 } else if (dev->setup_can_stall) {
1211 VDEBUG(dev, "ep0out stall\n");
1212 (void) usb_ep_set_halt (dev->gadget->ep0);
1213 retval = -EL2HLT;
1214 dev->state = STATE_DEV_CONNECTED;
1215 } else {
1216 DBG(dev, "bogus ep0out stall!\n");
1217 }
1218 } else
1219 DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1220
1221 spin_unlock_irq (&dev->lock);
1222 return retval;
1223 }
1224
1225 static int
1226 ep0_fasync (int f, struct file *fd, int on)
1227 {
1228 struct dev_data *dev = fd->private_data;
1229 // caller must F_SETOWN before signal delivery happens
1230 VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1231 return fasync_helper (f, fd, on, &dev->fasync);
1232 }
1233
1234 static struct usb_gadget_driver gadgetfs_driver;
1235
1236 static int
1237 dev_release (struct inode *inode, struct file *fd)
1238 {
1239 struct dev_data *dev = fd->private_data;
1240
1241 /* closing ep0 === shutdown all */
1242
1243 usb_gadget_unregister_driver (&gadgetfs_driver);
1244
1245 /* at this point "good" hardware has disconnected the
1246 * device from USB; the host won't see it any more.
1247 * alternatively, all host requests will time out.
1248 */
1249
1250 kfree (dev->buf);
1251 dev->buf = NULL;
1252 put_dev (dev);
1253
1254 /* other endpoints were all decoupled from this device */
1255 spin_lock_irq(&dev->lock);
1256 dev->state = STATE_DEV_DISABLED;
1257 spin_unlock_irq(&dev->lock);
1258 return 0;
1259 }
1260
1261 static unsigned int
1262 ep0_poll (struct file *fd, poll_table *wait)
1263 {
1264 struct dev_data *dev = fd->private_data;
1265 int mask = 0;
1266
1267 poll_wait(fd, &dev->wait, wait);
1268
1269 spin_lock_irq (&dev->lock);
1270
1271 /* report fd mode change before acting on it */
1272 if (dev->setup_abort) {
1273 dev->setup_abort = 0;
1274 mask = POLLHUP;
1275 goto out;
1276 }
1277
1278 if (dev->state == STATE_DEV_SETUP) {
1279 if (dev->setup_in || dev->setup_can_stall)
1280 mask = POLLOUT;
1281 } else {
1282 if (dev->ev_next != 0)
1283 mask = POLLIN;
1284 }
1285 out:
1286 spin_unlock_irq(&dev->lock);
1287 return mask;
1288 }
1289
1290 static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1291 {
1292 struct dev_data *dev = fd->private_data;
1293 struct usb_gadget *gadget = dev->gadget;
1294 long ret = -ENOTTY;
1295
1296 if (gadget->ops->ioctl)
1297 ret = gadget->ops->ioctl (gadget, code, value);
1298
1299 return ret;
1300 }
1301
1302 /* used after device configuration */
1303 static const struct file_operations ep0_io_operations = {
1304 .owner = THIS_MODULE,
1305 .llseek = no_llseek,
1306
1307 .read = ep0_read,
1308 .write = ep0_write,
1309 .fasync = ep0_fasync,
1310 .poll = ep0_poll,
1311 .unlocked_ioctl = dev_ioctl,
1312 .release = dev_release,
1313 };
1314
1315 /*----------------------------------------------------------------------*/
1316
1317 /* The in-kernel gadget driver handles most ep0 issues, in particular
1318 * enumerating the single configuration (as provided from user space).
1319 *
1320 * Unrecognized ep0 requests may be handled in user space.
1321 */
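/* When such a request is delegated (GADGETFS_SETUP event), user space
 * answers it on ep0 itself.  A hedged sketch; "reply" and "reply_len" are
 * assumed to be prepared by the caller:
 *
 *	#include <endian.h>
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	struct usb_ctrlrequest *setup = &ev.u.setup;
 *	unsigned wlen = le16toh(setup->wLength);
 *
 *	if (setup->bRequestType & USB_DIR_IN) {
 *		if (reply_len > wlen)
 *			reply_len = wlen;
 *		write(ep0, reply, reply_len);	// IN data + status stages
 *	} else {
 *		read(ep0, reply, wlen);		// collect OUT data (or ack
 *						// with a zero length read)
 *	}
 *	// issuing the "wrong direction" call instead stalls ep0
 */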
1322
1323 static void make_qualifier (struct dev_data *dev)
1324 {
1325 struct usb_qualifier_descriptor qual;
1326 struct usb_device_descriptor *desc;
1327
1328 qual.bLength = sizeof qual;
1329 qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1330 qual.bcdUSB = cpu_to_le16 (0x0200);
1331
1332 desc = dev->dev;
1333 qual.bDeviceClass = desc->bDeviceClass;
1334 qual.bDeviceSubClass = desc->bDeviceSubClass;
1335 qual.bDeviceProtocol = desc->bDeviceProtocol;
1336
1337 /* assumes ep0 uses the same value for both speeds ... */
1338 qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1339
1340 qual.bNumConfigurations = 1;
1341 qual.bRESERVED = 0;
1342
1343 memcpy (dev->rbuf, &qual, sizeof qual);
1344 }
1345
1346 static int
1347 config_buf (struct dev_data *dev, u8 type, unsigned index)
1348 {
1349 int len;
1350 int hs = 0;
1351
1352 /* only one configuration */
1353 if (index > 0)
1354 return -EINVAL;
1355
1356 if (gadget_is_dualspeed(dev->gadget)) {
1357 hs = (dev->gadget->speed == USB_SPEED_HIGH);
1358 if (type == USB_DT_OTHER_SPEED_CONFIG)
1359 hs = !hs;
1360 }
1361 if (hs) {
1362 dev->req->buf = dev->hs_config;
1363 len = le16_to_cpu(dev->hs_config->wTotalLength);
1364 } else {
1365 dev->req->buf = dev->config;
1366 len = le16_to_cpu(dev->config->wTotalLength);
1367 }
1368 ((u8 *)dev->req->buf) [1] = type;
1369 return len;
1370 }
1371
1372 static int
1373 gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1374 {
1375 struct dev_data *dev = get_gadget_data (gadget);
1376 struct usb_request *req = dev->req;
1377 int value = -EOPNOTSUPP;
1378 struct usb_gadgetfs_event *event;
1379 u16 w_value = le16_to_cpu(ctrl->wValue);
1380 u16 w_length = le16_to_cpu(ctrl->wLength);
1381
1382 spin_lock (&dev->lock);
1383 dev->setup_abort = 0;
1384 if (dev->state == STATE_DEV_UNCONNECTED) {
1385 if (gadget_is_dualspeed(gadget)
1386 && gadget->speed == USB_SPEED_HIGH
1387 && dev->hs_config == NULL) {
1388 spin_unlock(&dev->lock);
1389 ERROR (dev, "no high speed config??\n");
1390 return -EINVAL;
1391 }
1392
1393 dev->state = STATE_DEV_CONNECTED;
1394
1395 INFO (dev, "connected\n");
1396 event = next_event (dev, GADGETFS_CONNECT);
1397 event->u.speed = gadget->speed;
1398 ep0_readable (dev);
1399
1400 /* host may have given up waiting for response. we can miss control
1401 * requests handled lower down (device/endpoint status and features);
1402 * then ep0_{read,write} will report the wrong status. controller
1403 * driver will have aborted pending i/o.
1404 */
1405 } else if (dev->state == STATE_DEV_SETUP)
1406 dev->setup_abort = 1;
1407
1408 req->buf = dev->rbuf;
1409 req->context = NULL;
1410 value = -EOPNOTSUPP;
1411 switch (ctrl->bRequest) {
1412
1413 case USB_REQ_GET_DESCRIPTOR:
1414 if (ctrl->bRequestType != USB_DIR_IN)
1415 goto unrecognized;
1416 switch (w_value >> 8) {
1417
1418 case USB_DT_DEVICE:
1419 value = min (w_length, (u16) sizeof *dev->dev);
1420 dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1421 req->buf = dev->dev;
1422 break;
1423 case USB_DT_DEVICE_QUALIFIER:
1424 if (!dev->hs_config)
1425 break;
1426 value = min (w_length, (u16)
1427 sizeof (struct usb_qualifier_descriptor));
1428 make_qualifier (dev);
1429 break;
1430 case USB_DT_OTHER_SPEED_CONFIG:
1431 // FALLTHROUGH
1432 case USB_DT_CONFIG:
1433 value = config_buf (dev,
1434 w_value >> 8,
1435 w_value & 0xff);
1436 if (value >= 0)
1437 value = min (w_length, (u16) value);
1438 break;
1439 case USB_DT_STRING:
1440 goto unrecognized;
1441
1442 default: // all others are errors
1443 break;
1444 }
1445 break;
1446
1447 /* currently one config, two speeds */
1448 case USB_REQ_SET_CONFIGURATION:
1449 if (ctrl->bRequestType != 0)
1450 goto unrecognized;
1451 if (0 == (u8) w_value) {
1452 value = 0;
1453 dev->current_config = 0;
1454 usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1455 // user mode expected to disable endpoints
1456 } else {
1457 u8 config, power;
1458
1459 if (gadget_is_dualspeed(gadget)
1460 && gadget->speed == USB_SPEED_HIGH) {
1461 config = dev->hs_config->bConfigurationValue;
1462 power = dev->hs_config->bMaxPower;
1463 } else {
1464 config = dev->config->bConfigurationValue;
1465 power = dev->config->bMaxPower;
1466 }
1467
1468 if (config == (u8) w_value) {
1469 value = 0;
1470 dev->current_config = config;
1471 usb_gadget_vbus_draw(gadget, 2 * power);
1472 }
1473 }
1474
1475 /* report SET_CONFIGURATION like any other control request,
1476 * except that usermode may not stall this. the next
1477          * request mustn't be allowed to start until this finishes:
1478 * endpoints and threads set up, etc.
1479 *
1480 * NOTE: older PXA hardware (before PXA 255: without UDCCFR)
1481          * has bad/racy automagic that prevents synchronizing here.
1482 * even kernel mode drivers often miss them.
1483 */
1484 if (value == 0) {
1485 INFO (dev, "configuration #%d\n", dev->current_config);
1486 if (dev->usermode_setup) {
1487 dev->setup_can_stall = 0;
1488 goto delegate;
1489 }
1490 }
1491 break;
1492
1493 #ifndef CONFIG_USB_GADGET_PXA25X
1494 /* PXA automagically handles this request too */
1495 case USB_REQ_GET_CONFIGURATION:
1496 if (ctrl->bRequestType != 0x80)
1497 goto unrecognized;
1498 *(u8 *)req->buf = dev->current_config;
1499 value = min (w_length, (u16) 1);
1500 break;
1501 #endif
1502
1503 default:
1504 unrecognized:
1505 VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1506 dev->usermode_setup ? "delegate" : "fail",
1507 ctrl->bRequestType, ctrl->bRequest,
1508 w_value, le16_to_cpu(ctrl->wIndex), w_length);
1509
1510 /* if there's an ep0 reader, don't stall */
1511 if (dev->usermode_setup) {
1512 dev->setup_can_stall = 1;
1513 delegate:
1514 dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1515 ? 1 : 0;
1516 dev->setup_wLength = w_length;
1517 dev->setup_out_ready = 0;
1518 dev->setup_out_error = 0;
1519 value = 0;
1520
1521 /* read DATA stage for OUT right away */
1522 if (unlikely (!dev->setup_in && w_length)) {
1523 value = setup_req (gadget->ep0, dev->req,
1524 w_length);
1525 if (value < 0)
1526 break;
1527 value = usb_ep_queue (gadget->ep0, dev->req,
1528 GFP_ATOMIC);
1529 if (value < 0) {
1530 clean_req (gadget->ep0, dev->req);
1531 break;
1532 }
1533
1534 /* we can't currently stall these */
1535 dev->setup_can_stall = 0;
1536 }
1537
1538 /* state changes when reader collects event */
1539 event = next_event (dev, GADGETFS_SETUP);
1540 event->u.setup = *ctrl;
1541 ep0_readable (dev);
1542 spin_unlock (&dev->lock);
1543 return 0;
1544 }
1545 }
1546
1547 /* proceed with data transfer and status phases? */
1548 if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1549 req->length = value;
1550 req->zero = value < w_length;
1551 value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
1552 if (value < 0) {
1553 DBG (dev, "ep_queue --> %d\n", value);
1554 req->status = 0;
1555 }
1556 }
1557
1558 /* device stalls when value < 0 */
1559 spin_unlock (&dev->lock);
1560 return value;
1561 }
1562
1563 static void destroy_ep_files (struct dev_data *dev)
1564 {
1565 DBG (dev, "%s %d\n", __func__, dev->state);
1566
1567 /* dev->state must prevent interference */
1568 spin_lock_irq (&dev->lock);
1569 while (!list_empty(&dev->epfiles)) {
1570 struct ep_data *ep;
1571 struct inode *parent;
1572 struct dentry *dentry;
1573
1574 /* break link to FS */
1575 ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1576 list_del_init (&ep->epfiles);
1577 dentry = ep->dentry;
1578 ep->dentry = NULL;
1579 parent = dentry->d_parent->d_inode;
1580
1581 /* break link to controller */
1582 if (ep->state == STATE_EP_ENABLED)
1583 (void) usb_ep_disable (ep->ep);
1584 ep->state = STATE_EP_UNBOUND;
1585 usb_ep_free_request (ep->ep, ep->req);
1586 ep->ep = NULL;
1587 wake_up (&ep->wait);
1588 put_ep (ep);
1589
1590 spin_unlock_irq (&dev->lock);
1591
1592 /* break link to dcache */
1593 mutex_lock (&parent->i_mutex);
1594 d_delete (dentry);
1595 dput (dentry);
1596 mutex_unlock (&parent->i_mutex);
1597
1598 spin_lock_irq (&dev->lock);
1599 }
1600 spin_unlock_irq (&dev->lock);
1601 }
1602
1603
1604 static struct inode *
1605 gadgetfs_create_file (struct super_block *sb, char const *name,
1606 void *data, const struct file_operations *fops,
1607 struct dentry **dentry_p);
1608
1609 static int activate_ep_files (struct dev_data *dev)
1610 {
1611 struct usb_ep *ep;
1612 struct ep_data *data;
1613
1614 gadget_for_each_ep (ep, dev->gadget) {
1615
1616 data = kzalloc(sizeof(*data), GFP_KERNEL);
1617 if (!data)
1618 goto enomem0;
1619 data->state = STATE_EP_DISABLED;
1620 mutex_init(&data->lock);
1621 init_waitqueue_head (&data->wait);
1622
1623 strncpy (data->name, ep->name, sizeof (data->name) - 1);
1624 atomic_set (&data->count, 1);
1625 data->dev = dev;
1626 get_dev (dev);
1627
1628 data->ep = ep;
1629 ep->driver_data = data;
1630
1631 data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1632 if (!data->req)
1633 goto enomem1;
1634
1635 data->inode = gadgetfs_create_file (dev->sb, data->name,
1636 data, &ep_config_operations,
1637 &data->dentry);
1638 if (!data->inode)
1639 goto enomem2;
1640 list_add_tail (&data->epfiles, &dev->epfiles);
1641 }
1642 return 0;
1643
1644 enomem2:
1645 usb_ep_free_request (ep, data->req);
1646 enomem1:
1647 put_dev (dev);
1648 kfree (data);
1649 enomem0:
1650 DBG (dev, "%s enomem\n", __func__);
1651 destroy_ep_files (dev);
1652 return -ENOMEM;
1653 }
1654
1655 static void
1656 gadgetfs_unbind (struct usb_gadget *gadget)
1657 {
1658 struct dev_data *dev = get_gadget_data (gadget);
1659
1660 DBG (dev, "%s\n", __func__);
1661
1662 spin_lock_irq (&dev->lock);
1663 dev->state = STATE_DEV_UNBOUND;
1664 spin_unlock_irq (&dev->lock);
1665
1666 destroy_ep_files (dev);
1667 gadget->ep0->driver_data = NULL;
1668 set_gadget_data (gadget, NULL);
1669
1670 /* we've already been disconnected ... no i/o is active */
1671 if (dev->req)
1672 usb_ep_free_request (gadget->ep0, dev->req);
1673 DBG (dev, "%s done\n", __func__);
1674 put_dev (dev);
1675 }
1676
1677 static struct dev_data *the_device;
1678
1679 static int gadgetfs_bind(struct usb_gadget *gadget,
1680 struct usb_gadget_driver *driver)
1681 {
1682 struct dev_data *dev = the_device;
1683
1684 if (!dev)
1685 return -ESRCH;
1686 if (0 != strcmp (CHIP, gadget->name)) {
1687 pr_err("%s expected %s controller not %s\n",
1688 shortname, CHIP, gadget->name);
1689 return -ENODEV;
1690 }
1691
1692 set_gadget_data (gadget, dev);
1693 dev->gadget = gadget;
1694 gadget->ep0->driver_data = dev;
1695
1696 /* preallocate control response and buffer */
1697 dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1698 if (!dev->req)
1699 goto enomem;
1700 dev->req->context = NULL;
1701 dev->req->complete = epio_complete;
1702
1703 if (activate_ep_files (dev) < 0)
1704 goto enomem;
1705
1706 INFO (dev, "bound to %s driver\n", gadget->name);
1707 spin_lock_irq(&dev->lock);
1708 dev->state = STATE_DEV_UNCONNECTED;
1709 spin_unlock_irq(&dev->lock);
1710 get_dev (dev);
1711 return 0;
1712
1713 enomem:
1714 gadgetfs_unbind (gadget);
1715 return -ENOMEM;
1716 }
1717
1718 static void
1719 gadgetfs_disconnect (struct usb_gadget *gadget)
1720 {
1721 struct dev_data *dev = get_gadget_data (gadget);
1722 unsigned long flags;
1723
1724 spin_lock_irqsave (&dev->lock, flags);
1725 if (dev->state == STATE_DEV_UNCONNECTED)
1726 goto exit;
1727 dev->state = STATE_DEV_UNCONNECTED;
1728
1729 INFO (dev, "disconnected\n");
1730 next_event (dev, GADGETFS_DISCONNECT);
1731 ep0_readable (dev);
1732 exit:
1733 spin_unlock_irqrestore (&dev->lock, flags);
1734 }
1735
1736 static void
1737 gadgetfs_suspend (struct usb_gadget *gadget)
1738 {
1739 struct dev_data *dev = get_gadget_data (gadget);
1740
1741 INFO (dev, "suspended from state %d\n", dev->state);
1742 spin_lock (&dev->lock);
1743 switch (dev->state) {
1744 case STATE_DEV_SETUP: // VERY odd... host died??
1745 case STATE_DEV_CONNECTED:
1746 case STATE_DEV_UNCONNECTED:
1747 next_event (dev, GADGETFS_SUSPEND);
1748 ep0_readable (dev);
1749 /* FALLTHROUGH */
1750 default:
1751 break;
1752 }
1753 spin_unlock (&dev->lock);
1754 }
1755
1756 static struct usb_gadget_driver gadgetfs_driver = {
1757 .function = (char *) driver_desc,
1758 .bind = gadgetfs_bind,
1759 .unbind = gadgetfs_unbind,
1760 .setup = gadgetfs_setup,
1761 .disconnect = gadgetfs_disconnect,
1762 .suspend = gadgetfs_suspend,
1763
1764 .driver = {
1765 .name = (char *) shortname,
1766 },
1767 };
1768
1769 /*----------------------------------------------------------------------*/
1770
1771 static void gadgetfs_nop(struct usb_gadget *arg) { }
1772
1773 static int gadgetfs_probe(struct usb_gadget *gadget,
1774 struct usb_gadget_driver *driver)
1775 {
1776 CHIP = gadget->name;
1777 return -EISNAM;
1778 }
1779
1780 static struct usb_gadget_driver probe_driver = {
1781 .max_speed = USB_SPEED_HIGH,
1782 .bind = gadgetfs_probe,
1783 .unbind = gadgetfs_nop,
1784 .setup = (void *)gadgetfs_nop,
1785 .disconnect = gadgetfs_nop,
1786 .driver = {
1787 .name = "nop",
1788 },
1789 };
1790
1791
1792 /* DEVICE INITIALIZATION
1793 *
1794 * fd = open ("/dev/gadget/$CHIP", O_RDWR)
1795 * status = write (fd, descriptors, sizeof descriptors)
1796 *
1797 * That write establishes the device configuration, so the kernel can
1798 * bind to the controller ... guaranteeing it can handle enumeration
1799 * at all necessary speeds. Descriptor order is:
1800 *
1801 * . message tag (u32, host order) ... for now, must be zero; it
1802 * would change to support features like multi-config devices
1803 * . full/low speed config ... all wTotalLength bytes (with interface,
1804 * class, altsetting, endpoint, and other descriptors)
1805 * . high speed config ... all descriptors, for high speed operation;
1806 * this one's optional except for high-speed hardware
1807 * . device descriptor
1808 *
1809 * Endpoints are not yet enabled. Drivers must wait until device
1810 * configuration and interface altsetting changes create
1811 * the need to configure (or unconfigure) them.
1812 *
1813 * After initialization, the device stays active for as long as that
1814 * $CHIP file is open. Events must then be read from that descriptor,
1815 * such as configuration notifications.
1816 */
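/* A hedged user-space sketch of that write; fs_config/hs_config (complete
 * wTotalLength blobs), device_desc, and ep0 are assumed to be prepared
 * elsewhere:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	__u32 tag = 0;
 *	char buf[4096], *p = buf;
 *
 *	memcpy(p, &tag, 4);				p += 4;
 *	memcpy(p, fs_config, fs_config_len);		p += fs_config_len;
 *	memcpy(p, hs_config, hs_config_len);		p += hs_config_len;	// omit if not dual-speed
 *	memcpy(p, &device_desc, USB_DT_DEVICE_SIZE);	p += USB_DT_DEVICE_SIZE;
 *	if (write(ep0, buf, p - buf) < 0)
 *		perror("gadgetfs device config");
 */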
1817
1818 static int is_valid_config (struct usb_config_descriptor *config)
1819 {
1820 return config->bDescriptorType == USB_DT_CONFIG
1821 && config->bLength == USB_DT_CONFIG_SIZE
1822 && config->bConfigurationValue != 0
1823 && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1824 && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1825 /* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1826 /* FIXME check lengths: walk to end */
1827 }
1828
1829 static ssize_t
1830 dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1831 {
1832 struct dev_data *dev = fd->private_data;
1833 ssize_t value = len, length = len;
1834 unsigned total;
1835 u32 tag;
1836 char *kbuf;
1837
1838 if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
1839 return -EINVAL;
1840
1841 /* we might need to change message format someday */
1842 if (copy_from_user (&tag, buf, 4))
1843 return -EFAULT;
1844 if (tag != 0)
1845 return -EINVAL;
1846 buf += 4;
1847 length -= 4;
1848
1849 kbuf = memdup_user(buf, length);
1850 if (IS_ERR(kbuf))
1851 return PTR_ERR(kbuf);
1852
1853 spin_lock_irq (&dev->lock);
1854 value = -EINVAL;
1855 if (dev->buf)
1856 goto fail;
1857 dev->buf = kbuf;
1858
1859 /* full or low speed config */
1860 dev->config = (void *) kbuf;
1861 total = le16_to_cpu(dev->config->wTotalLength);
1862 if (!is_valid_config (dev->config) || total >= length)
1863 goto fail;
1864 kbuf += total;
1865 length -= total;
1866
1867 /* optional high speed config */
1868 if (kbuf [1] == USB_DT_CONFIG) {
1869 dev->hs_config = (void *) kbuf;
1870 total = le16_to_cpu(dev->hs_config->wTotalLength);
1871 if (!is_valid_config (dev->hs_config) || total >= length)
1872 goto fail;
1873 kbuf += total;
1874 length -= total;
1875 }
1876
1877 /* could support multiple configs, using another encoding! */
1878
1879 /* device descriptor (tweaked for paranoia) */
1880 if (length != USB_DT_DEVICE_SIZE)
1881 goto fail;
1882 dev->dev = (void *)kbuf;
1883 if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1884 || dev->dev->bDescriptorType != USB_DT_DEVICE
1885 || dev->dev->bNumConfigurations != 1)
1886 goto fail;
1887 dev->dev->bNumConfigurations = 1;
1888 dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1889
1890 /* triggers gadgetfs_bind(); then we can enumerate. */
1891 spin_unlock_irq (&dev->lock);
1892 if (dev->hs_config)
1893 gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1894 else
1895 gadgetfs_driver.max_speed = USB_SPEED_FULL;
1896
1897 value = usb_gadget_probe_driver(&gadgetfs_driver);
1898 if (value != 0) {
1899 kfree (dev->buf);
1900 dev->buf = NULL;
1901 } else {
1902 /* at this point "good" hardware has for the first time
1903                  * let the host see us over USB. alternatively, if users
1904 * unplug/replug that will clear all the error state.
1905 *
1906 * note: everything running before here was guaranteed
1907 * to choke driver model style diagnostics. from here
1908 * on, they can work ... except in cleanup paths that
1909 * kick in after the ep0 descriptor is closed.
1910 */
1911 fd->f_op = &ep0_io_operations;
1912 value = len;
1913 }
1914 return value;
1915
1916 fail:
1917 spin_unlock_irq (&dev->lock);
1918 pr_debug ("%s: %s fail %Zd, %p\n", shortname, __func__, value, dev);
1919 kfree (dev->buf);
1920 dev->buf = NULL;
1921 return value;
1922 }
1923
1924 static int
1925 dev_open (struct inode *inode, struct file *fd)
1926 {
1927 struct dev_data *dev = inode->i_private;
1928 int value = -EBUSY;
1929
1930 spin_lock_irq(&dev->lock);
1931 if (dev->state == STATE_DEV_DISABLED) {
1932 dev->ev_next = 0;
1933 dev->state = STATE_DEV_OPENED;
1934 fd->private_data = dev;
1935 get_dev (dev);
1936 value = 0;
1937 }
1938 spin_unlock_irq(&dev->lock);
1939 return value;
1940 }
1941
1942 static const struct file_operations dev_init_operations = {
1943 .owner = THIS_MODULE,
1944 .llseek = no_llseek,
1945
1946 .open = dev_open,
1947 .write = dev_config,
1948 .fasync = ep0_fasync,
1949 .unlocked_ioctl = dev_ioctl,
1950 .release = dev_release,
1951 };
1952
1953 /*----------------------------------------------------------------------*/
1954
1955 /* FILESYSTEM AND SUPERBLOCK OPERATIONS
1956 *
1957 * Mounting the filesystem creates a controller file, used first for
1958 * device configuration then later for event monitoring.
1959 */
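/* From user space that usually looks like "mkdir /dev/gadget; mount -t
 * gadgetfs gadgetfs /dev/gadget", or equivalently (a sketch; the mount
 * point is only a convention):
 *
 *	#include <sys/mount.h>
 *	#include <sys/stat.h>
 *
 *	mkdir("/dev/gadget", 0700);
 *	mount("gadgetfs", "/dev/gadget", "gadgetfs", 0, NULL);
 */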
1960
1961
1962 /* FIXME PAM etc could set this security policy without mount options
1963 * if epfiles inherited ownership and permissons from ep0 ...
1964 */
1965
1966 static unsigned default_uid;
1967 static unsigned default_gid;
1968 static unsigned default_perm = S_IRUSR | S_IWUSR;
1969
1970 module_param (default_uid, uint, 0644);
1971 module_param (default_gid, uint, 0644);
1972 module_param (default_perm, uint, 0644);
1973
1974
1975 static struct inode *
1976 gadgetfs_make_inode (struct super_block *sb,
1977 void *data, const struct file_operations *fops,
1978 int mode)
1979 {
1980 struct inode *inode = new_inode (sb);
1981
1982 if (inode) {
1983 inode->i_ino = get_next_ino();
1984 inode->i_mode = mode;
1985 inode->i_uid = make_kuid(&init_user_ns, default_uid);
1986 inode->i_gid = make_kgid(&init_user_ns, default_gid);
1987 inode->i_atime = inode->i_mtime = inode->i_ctime
1988 = CURRENT_TIME;
1989 inode->i_private = data;
1990 inode->i_fop = fops;
1991 }
1992 return inode;
1993 }
1994
1995 /* creates in fs root directory, so non-renamable and non-linkable.
1996 * so inode and dentry are paired, until device reconfig.
1997 */
1998 static struct inode *
1999 gadgetfs_create_file (struct super_block *sb, char const *name,
2000 void *data, const struct file_operations *fops,
2001 struct dentry **dentry_p)
2002 {
2003 struct dentry *dentry;
2004 struct inode *inode;
2005
2006 dentry = d_alloc_name(sb->s_root, name);
2007 if (!dentry)
2008 return NULL;
2009
2010 inode = gadgetfs_make_inode (sb, data, fops,
2011 S_IFREG | (default_perm & S_IRWXUGO));
2012 if (!inode) {
2013 dput(dentry);
2014 return NULL;
2015 }
2016 d_add (dentry, inode);
2017 *dentry_p = dentry;
2018 return inode;
2019 }
2020
2021 static const struct super_operations gadget_fs_operations = {
2022 .statfs = simple_statfs,
2023 .drop_inode = generic_delete_inode,
2024 };
2025
2026 static int
2027 gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
2028 {
2029 struct inode *inode;
2030 struct dev_data *dev;
2031
2032 if (the_device)
2033 return -ESRCH;
2034
2035 /* fake probe to determine $CHIP */
2036 usb_gadget_probe_driver(&probe_driver);
2037 if (!CHIP)
2038 return -ENODEV;
2039
2040 /* superblock */
2041 sb->s_blocksize = PAGE_CACHE_SIZE;
2042 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2043 sb->s_magic = GADGETFS_MAGIC;
2044 sb->s_op = &gadget_fs_operations;
2045 sb->s_time_gran = 1;
2046
2047 /* root inode */
2048 inode = gadgetfs_make_inode (sb,
2049 NULL, &simple_dir_operations,
2050 S_IFDIR | S_IRUGO | S_IXUGO);
2051 if (!inode)
2052 goto Enomem;
2053 inode->i_op = &simple_dir_inode_operations;
2054 if (!(sb->s_root = d_make_root (inode)))
2055 goto Enomem;
2056
2057 /* the ep0 file is named after the controller we expect;
2058 * user mode code can use it for sanity checks, like we do.
2059 */
2060 dev = dev_new ();
2061 if (!dev)
2062 goto Enomem;
2063
2064 dev->sb = sb;
2065 if (!gadgetfs_create_file (sb, CHIP,
2066 dev, &dev_init_operations,
2067 &dev->dentry)) {
2068 put_dev(dev);
2069 goto Enomem;
2070 }
2071
2072 /* other endpoint files are available after hardware setup,
2073 * from binding to a controller.
2074 */
2075 the_device = dev;
2076 return 0;
2077
2078 Enomem:
2079 return -ENOMEM;
2080 }
2081
2082 /* "mount -t gadgetfs path /dev/gadget" ends up here */
2083 static struct dentry *
2084 gadgetfs_mount (struct file_system_type *t, int flags,
2085 const char *path, void *opts)
2086 {
2087 return mount_single (t, flags, opts, gadgetfs_fill_super);
2088 }
2089
2090 static void
2091 gadgetfs_kill_sb (struct super_block *sb)
2092 {
2093 kill_litter_super (sb);
2094 if (the_device) {
2095 put_dev (the_device);
2096 the_device = NULL;
2097 }
2098 }
2099
2100 /*----------------------------------------------------------------------*/
2101
2102 static struct file_system_type gadgetfs_type = {
2103 .owner = THIS_MODULE,
2104 .name = shortname,
2105 .mount = gadgetfs_mount,
2106 .kill_sb = gadgetfs_kill_sb,
2107 };
2108
2109 /*----------------------------------------------------------------------*/
2110
2111 static int __init init (void)
2112 {
2113 int status;
2114
2115 status = register_filesystem (&gadgetfs_type);
2116 if (status == 0)
2117 pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2118 shortname, driver_desc);
2119 return status;
2120 }
2121 module_init (init);
2122
2123 static void __exit cleanup (void)
2124 {
2125 pr_debug ("unregister %s\n", shortname);
2126 unregister_filesystem (&gadgetfs_type);
2127 }
2128 module_exit (cleanup);
2129