/*
 * inode.c -- user mode filesystem api for usb gadget controllers
 *
 * Copyright (C) 2003-2004 David Brownell
 * Copyright (C) 2003 Agilent Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */
16 #include <linux/init.h>
17 #include <linux/module.h>
19 #include <linux/pagemap.h>
20 #include <linux/uts.h>
21 #include <linux/wait.h>
22 #include <linux/compiler.h>
23 #include <asm/uaccess.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/poll.h>
27 #include <linux/mmu_context.h>
28 #include <linux/aio.h>
30 #include <linux/device.h>
31 #include <linux/moduleparam.h>
33 #include <linux/usb/gadgetfs.h>
34 #include <linux/usb/gadget.h>
38 * The gadgetfs API maps each endpoint to a file descriptor so that you
39 * can use standard synchronous read/write calls for I/O. There's some
40 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support. Example usermode
41 * drivers show how this works in practice. You can also use AIO to
42 * eliminate I/O gaps between requests, to help when streaming data.
44 * Key parts that must be USB-specific are protocols defining how the
45 * read/write operations relate to the hardware state machines. There
46 * are two types of files. One type is for the device, implementing ep0.
47 * The other type is for each IN or OUT endpoint. In both cases, the
48 * user mode driver must configure the hardware before using it.
50 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
51 * (by writing configuration and device descriptors). Afterwards it
52 * may serve as a source of device events, used to handle all control
53 * requests other than basic enumeration.
55 * - Then, after a SET_CONFIGURATION control request, ep_config() is
56 * called when each /dev/gadget/ep* file is configured (by writing
57 * endpoint descriptors). Afterwards these files are used to write()
58 * IN data or to read() OUT data. To halt the endpoint, a "wrong
59 * direction" request is issued (like reading an IN endpoint).
61 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
62 * not possible on all hardware. For example, precise fault handling with
63 * respect to data left in endpoint fifos after aborted operations; or
64 * selective clearing of endpoint halts, to implement SET_INTERFACE.
67 #define DRIVER_DESC "USB Gadget filesystem"
68 #define DRIVER_VERSION "24 Aug 2004"
70 static const char driver_desc
[] = DRIVER_DESC
;
71 static const char shortname
[] = "gadgetfs";
73 MODULE_DESCRIPTION (DRIVER_DESC
);
74 MODULE_AUTHOR ("David Brownell");
75 MODULE_LICENSE ("GPL");
77 static int ep_open(struct inode
*, struct file
*);
80 /*----------------------------------------------------------------------*/
82 #define GADGETFS_MAGIC 0xaee71ee7
84 /* /dev/gadget/$CHIP represents ep0 and the whole device */
86 /* DISBLED is the initial state.
88 STATE_DEV_DISABLED
= 0,
90 /* Only one open() of /dev/gadget/$CHIP; only one file tracks
91 * ep0/device i/o modes and binding to the controller. Driver
92 * must always write descriptors to initialize the device, then
93 * the device becomes UNCONNECTED until enumeration.
97 /* From then on, ep0 fd is in either of two basic modes:
98 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
99 * - SETUP: read/write will transfer control data and succeed;
100 * or if "wrong direction", performs protocol stall
102 STATE_DEV_UNCONNECTED
,
106 /* UNBOUND means the driver closed ep0, so the device won't be
107 * accessible again (DEV_DISABLED) until all fds are closed.
112 /* enough for the whole queue: most events invalidate others */
118 enum ep0_state state
; /* P: lock */
119 struct usb_gadgetfs_event event
[N_EVENT
];
121 struct fasync_struct
*fasync
;
124 /* drivers reading ep0 MUST handle control requests (SETUP)
125 * reported that way; else the host will time out.
127 unsigned usermode_setup
: 1,
133 unsigned setup_wLength
;
135 /* the rest is basically write-once */
136 struct usb_config_descriptor
*config
, *hs_config
;
137 struct usb_device_descriptor
*dev
;
138 struct usb_request
*req
;
139 struct usb_gadget
*gadget
;
140 struct list_head epfiles
;
142 wait_queue_head_t wait
;
143 struct super_block
*sb
;
144 struct dentry
*dentry
;
146 /* except this scratch i/o buffer for ep0 */
150 static inline void get_dev (struct dev_data
*data
)
152 atomic_inc (&data
->count
);
155 static void put_dev (struct dev_data
*data
)
157 if (likely (!atomic_dec_and_test (&data
->count
)))
159 /* needs no more cleanup */
160 BUG_ON (waitqueue_active (&data
->wait
));
164 static struct dev_data
*dev_new (void)
166 struct dev_data
*dev
;
168 dev
= kzalloc(sizeof(*dev
), GFP_KERNEL
);
171 dev
->state
= STATE_DEV_DISABLED
;
172 atomic_set (&dev
->count
, 1);
173 spin_lock_init (&dev
->lock
);
174 INIT_LIST_HEAD (&dev
->epfiles
);
175 init_waitqueue_head (&dev
->wait
);
179 /*----------------------------------------------------------------------*/
181 /* other /dev/gadget/$ENDPOINT files represent endpoints */
183 STATE_EP_DISABLED
= 0,
193 struct dev_data
*dev
;
194 /* must hold dev->lock before accessing ep or req */
196 struct usb_request
*req
;
199 struct usb_endpoint_descriptor desc
, hs_desc
;
200 struct list_head epfiles
;
201 wait_queue_head_t wait
;
202 struct dentry
*dentry
;
205 static inline void get_ep (struct ep_data
*data
)
207 atomic_inc (&data
->count
);
210 static void put_ep (struct ep_data
*data
)
212 if (likely (!atomic_dec_and_test (&data
->count
)))
215 /* needs no more cleanup */
216 BUG_ON (!list_empty (&data
->epfiles
));
217 BUG_ON (waitqueue_active (&data
->wait
));
221 /*----------------------------------------------------------------------*/
223 /* most "how to use the hardware" policy choices are in userspace:
224 * mapping endpoint roles (which the driver needs) to the capabilities
225 * which the usb controller has. most of those capabilities are exposed
226 * implicitly, starting with the driver name and then endpoint names.
/* name of the controller chip, set when binding to the UDC */
static const char *CHIP;
231 /*----------------------------------------------------------------------*/
233 /* NOTE: don't use dev_printk calls before binding to the gadget
234 * at the end of ep0 configuration, or after unbind.
237 /* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
#define xprintk(d,level,fmt,args...) \
	printk(level "%s: " fmt , shortname , ## args)

#ifdef DEBUG
#define DBG(dev,fmt,args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev,fmt,args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDEBUG	DBG
#else
#define VDEBUG(dev,fmt,args...) \
	do { } while (0)
#endif /* DEBUG */

#define ERROR(dev,fmt,args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev,fmt,args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)
262 /*----------------------------------------------------------------------*/
264 /* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
266 * After opening, configure non-control endpoints. Then use normal
267 * stream read() and write() requests; and maybe ioctl() to get more
268 * precise FIFO status when recovering from cancellation.
271 static void epio_complete (struct usb_ep
*ep
, struct usb_request
*req
)
273 struct ep_data
*epdata
= ep
->driver_data
;
278 epdata
->status
= req
->status
;
280 epdata
->status
= req
->actual
;
281 complete ((struct completion
*)req
->context
);
284 /* tasklock endpoint, returning when it's connected.
285 * still need dev->lock to use epdata->ep.
288 get_ready_ep (unsigned f_flags
, struct ep_data
*epdata
, bool is_write
)
292 if (f_flags
& O_NONBLOCK
) {
293 if (!mutex_trylock(&epdata
->lock
))
295 if (epdata
->state
!= STATE_EP_ENABLED
&&
296 (!is_write
|| epdata
->state
!= STATE_EP_READY
)) {
297 mutex_unlock(&epdata
->lock
);
305 val
= mutex_lock_interruptible(&epdata
->lock
);
309 switch (epdata
->state
) {
310 case STATE_EP_ENABLED
:
312 case STATE_EP_READY
: /* not configured yet */
316 case STATE_EP_UNBOUND
: /* clean disconnect */
318 // case STATE_EP_DISABLED: /* "can't happen" */
319 default: /* error! */
320 pr_debug ("%s: ep %p not available, state %d\n",
321 shortname
, epdata
, epdata
->state
);
323 mutex_unlock(&epdata
->lock
);
328 ep_io (struct ep_data
*epdata
, void *buf
, unsigned len
)
330 DECLARE_COMPLETION_ONSTACK (done
);
333 spin_lock_irq (&epdata
->dev
->lock
);
334 if (likely (epdata
->ep
!= NULL
)) {
335 struct usb_request
*req
= epdata
->req
;
337 req
->context
= &done
;
338 req
->complete
= epio_complete
;
341 value
= usb_ep_queue (epdata
->ep
, req
, GFP_ATOMIC
);
344 spin_unlock_irq (&epdata
->dev
->lock
);
346 if (likely (value
== 0)) {
347 value
= wait_event_interruptible (done
.wait
, done
.done
);
349 spin_lock_irq (&epdata
->dev
->lock
);
350 if (likely (epdata
->ep
!= NULL
)) {
351 DBG (epdata
->dev
, "%s i/o interrupted\n",
353 usb_ep_dequeue (epdata
->ep
, epdata
->req
);
354 spin_unlock_irq (&epdata
->dev
->lock
);
356 wait_event (done
.wait
, done
.done
);
357 if (epdata
->status
== -ECONNRESET
)
358 epdata
->status
= -EINTR
;
360 spin_unlock_irq (&epdata
->dev
->lock
);
362 DBG (epdata
->dev
, "endpoint gone\n");
363 epdata
->status
= -ENODEV
;
366 return epdata
->status
;
372 ep_release (struct inode
*inode
, struct file
*fd
)
374 struct ep_data
*data
= fd
->private_data
;
377 value
= mutex_lock_interruptible(&data
->lock
);
381 /* clean up if this can be reopened */
382 if (data
->state
!= STATE_EP_UNBOUND
) {
383 data
->state
= STATE_EP_DISABLED
;
384 data
->desc
.bDescriptorType
= 0;
385 data
->hs_desc
.bDescriptorType
= 0;
386 usb_ep_disable(data
->ep
);
388 mutex_unlock(&data
->lock
);
393 static long ep_ioctl(struct file
*fd
, unsigned code
, unsigned long value
)
395 struct ep_data
*data
= fd
->private_data
;
398 if ((status
= get_ready_ep (fd
->f_flags
, data
, false)) < 0)
401 spin_lock_irq (&data
->dev
->lock
);
402 if (likely (data
->ep
!= NULL
)) {
404 case GADGETFS_FIFO_STATUS
:
405 status
= usb_ep_fifo_status (data
->ep
);
407 case GADGETFS_FIFO_FLUSH
:
408 usb_ep_fifo_flush (data
->ep
);
410 case GADGETFS_CLEAR_HALT
:
411 status
= usb_ep_clear_halt (data
->ep
);
418 spin_unlock_irq (&data
->dev
->lock
);
419 mutex_unlock(&data
->lock
);
423 /*----------------------------------------------------------------------*/
425 /* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
428 struct usb_request
*req
;
429 struct ep_data
*epdata
;
431 struct mm_struct
*mm
;
432 struct work_struct work
;
439 static int ep_aio_cancel(struct kiocb
*iocb
)
441 struct kiocb_priv
*priv
= iocb
->private;
442 struct ep_data
*epdata
;
446 epdata
= priv
->epdata
;
447 // spin_lock(&epdata->dev->lock);
448 if (likely(epdata
&& epdata
->ep
&& priv
->req
))
449 value
= usb_ep_dequeue (epdata
->ep
, priv
->req
);
452 // spin_unlock(&epdata->dev->lock);
458 static void ep_user_copy_worker(struct work_struct
*work
)
460 struct kiocb_priv
*priv
= container_of(work
, struct kiocb_priv
, work
);
461 struct mm_struct
*mm
= priv
->mm
;
462 struct kiocb
*iocb
= priv
->iocb
;
466 ret
= copy_to_iter(priv
->buf
, priv
->actual
, &priv
->to
);
471 /* completing the iocb can drop the ctx and mm, don't touch mm after */
472 iocb
->ki_complete(iocb
, ret
, ret
);
475 kfree(priv
->to_free
);
479 static void ep_aio_complete(struct usb_ep
*ep
, struct usb_request
*req
)
481 struct kiocb
*iocb
= req
->context
;
482 struct kiocb_priv
*priv
= iocb
->private;
483 struct ep_data
*epdata
= priv
->epdata
;
485 /* lock against disconnect (and ideally, cancel) */
486 spin_lock(&epdata
->dev
->lock
);
490 /* if this was a write or a read returning no data then we
491 * don't need to copy anything to userspace, so we can
492 * complete the aio request immediately.
494 if (priv
->to_free
== NULL
|| unlikely(req
->actual
== 0)) {
496 kfree(priv
->to_free
);
498 iocb
->private = NULL
;
499 /* aio_complete() reports bytes-transferred _and_ faults */
501 iocb
->ki_complete(iocb
, req
->actual
? req
->actual
: req
->status
,
504 /* ep_copy_to_user() won't report both; we hide some faults */
505 if (unlikely(0 != req
->status
))
506 DBG(epdata
->dev
, "%s fault %d len %d\n",
507 ep
->name
, req
->status
, req
->actual
);
509 priv
->buf
= req
->buf
;
510 priv
->actual
= req
->actual
;
511 INIT_WORK(&priv
->work
, ep_user_copy_worker
);
512 schedule_work(&priv
->work
);
514 spin_unlock(&epdata
->dev
->lock
);
516 usb_ep_free_request(ep
, req
);
520 static ssize_t
ep_aio(struct kiocb
*iocb
,
521 struct kiocb_priv
*priv
,
522 struct ep_data
*epdata
,
526 struct usb_request
*req
;
529 iocb
->private = priv
;
532 kiocb_set_cancel_fn(iocb
, ep_aio_cancel
);
534 priv
->epdata
= epdata
;
536 priv
->mm
= current
->mm
; /* mm teardown waits for iocbs in exit_aio() */
538 /* each kiocb is coupled to one usb_request, but we can't
539 * allocate or submit those if the host disconnected.
541 spin_lock_irq(&epdata
->dev
->lock
);
543 if (unlikely(epdata
->ep
))
546 req
= usb_ep_alloc_request(epdata
->ep
, GFP_ATOMIC
);
554 req
->complete
= ep_aio_complete
;
556 value
= usb_ep_queue(epdata
->ep
, req
, GFP_ATOMIC
);
557 if (unlikely(0 != value
)) {
558 usb_ep_free_request(epdata
->ep
, req
);
561 spin_unlock_irq(&epdata
->dev
->lock
);
565 spin_unlock_irq(&epdata
->dev
->lock
);
566 kfree(priv
->to_free
);
573 ep_read_iter(struct kiocb
*iocb
, struct iov_iter
*to
)
575 struct file
*file
= iocb
->ki_filp
;
576 struct ep_data
*epdata
= file
->private_data
;
577 size_t len
= iov_iter_count(to
);
581 if ((value
= get_ready_ep(file
->f_flags
, epdata
, false)) < 0)
584 /* halt any endpoint by doing a "wrong direction" i/o call */
585 if (usb_endpoint_dir_in(&epdata
->desc
)) {
586 if (usb_endpoint_xfer_isoc(&epdata
->desc
) ||
587 !is_sync_kiocb(iocb
)) {
588 mutex_unlock(&epdata
->lock
);
591 DBG (epdata
->dev
, "%s halt\n", epdata
->name
);
592 spin_lock_irq(&epdata
->dev
->lock
);
593 if (likely(epdata
->ep
!= NULL
))
594 usb_ep_set_halt(epdata
->ep
);
595 spin_unlock_irq(&epdata
->dev
->lock
);
596 mutex_unlock(&epdata
->lock
);
600 buf
= kmalloc(len
, GFP_KERNEL
);
601 if (unlikely(!buf
)) {
602 mutex_unlock(&epdata
->lock
);
605 if (is_sync_kiocb(iocb
)) {
606 value
= ep_io(epdata
, buf
, len
);
607 if (value
>= 0 && copy_to_iter(buf
, value
, to
))
610 struct kiocb_priv
*priv
= kzalloc(sizeof *priv
, GFP_KERNEL
);
614 priv
->to_free
= dup_iter(&priv
->to
, to
, GFP_KERNEL
);
615 if (!priv
->to_free
) {
619 value
= ep_aio(iocb
, priv
, epdata
, buf
, len
);
620 if (value
== -EIOCBQUEUED
)
625 mutex_unlock(&epdata
->lock
);
629 static ssize_t
ep_config(struct ep_data
*, const char *, size_t);
632 ep_write_iter(struct kiocb
*iocb
, struct iov_iter
*from
)
634 struct file
*file
= iocb
->ki_filp
;
635 struct ep_data
*epdata
= file
->private_data
;
636 size_t len
= iov_iter_count(from
);
641 if ((value
= get_ready_ep(file
->f_flags
, epdata
, true)) < 0)
644 configured
= epdata
->state
== STATE_EP_ENABLED
;
646 /* halt any endpoint by doing a "wrong direction" i/o call */
647 if (configured
&& !usb_endpoint_dir_in(&epdata
->desc
)) {
648 if (usb_endpoint_xfer_isoc(&epdata
->desc
) ||
649 !is_sync_kiocb(iocb
)) {
650 mutex_unlock(&epdata
->lock
);
653 DBG (epdata
->dev
, "%s halt\n", epdata
->name
);
654 spin_lock_irq(&epdata
->dev
->lock
);
655 if (likely(epdata
->ep
!= NULL
))
656 usb_ep_set_halt(epdata
->ep
);
657 spin_unlock_irq(&epdata
->dev
->lock
);
658 mutex_unlock(&epdata
->lock
);
662 buf
= kmalloc(len
, GFP_KERNEL
);
663 if (unlikely(!buf
)) {
664 mutex_unlock(&epdata
->lock
);
668 if (unlikely(copy_from_iter(buf
, len
, from
) != len
)) {
673 if (unlikely(!configured
)) {
674 value
= ep_config(epdata
, buf
, len
);
675 } else if (is_sync_kiocb(iocb
)) {
676 value
= ep_io(epdata
, buf
, len
);
678 struct kiocb_priv
*priv
= kzalloc(sizeof *priv
, GFP_KERNEL
);
681 value
= ep_aio(iocb
, priv
, epdata
, buf
, len
);
682 if (value
== -EIOCBQUEUED
)
688 mutex_unlock(&epdata
->lock
);
692 /*----------------------------------------------------------------------*/
694 /* used after endpoint configuration */
695 static const struct file_operations ep_io_operations
= {
696 .owner
= THIS_MODULE
,
699 .release
= ep_release
,
701 .read
= new_sync_read
,
702 .write
= new_sync_write
,
703 .unlocked_ioctl
= ep_ioctl
,
704 .read_iter
= ep_read_iter
,
705 .write_iter
= ep_write_iter
,
708 /* ENDPOINT INITIALIZATION
710 * fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
711 * status = write (fd, descriptors, sizeof descriptors)
713 * That write establishes the endpoint configuration, configuring
714 * the controller to process bulk, interrupt, or isochronous transfers
715 * at the right maxpacket size, and so on.
717 * The descriptors are message type 1, identified by a host order u32
718 * at the beginning of what's written. Descriptor order is: full/low
719 * speed descriptor, then optional high speed descriptor.
722 ep_config (struct ep_data
*data
, const char *buf
, size_t len
)
726 int value
, length
= len
;
728 if (data
->state
!= STATE_EP_READY
) {
734 if (len
< USB_DT_ENDPOINT_SIZE
+ 4)
737 /* we might need to change message format someday */
738 memcpy(&tag
, buf
, 4);
740 DBG(data
->dev
, "config %s, bad tag %d\n", data
->name
, tag
);
746 /* NOTE: audio endpoint extensions not accepted here;
747 * just don't include the extra bytes.
750 /* full/low speed descriptor, then high speed */
751 memcpy(&data
->desc
, buf
, USB_DT_ENDPOINT_SIZE
);
752 if (data
->desc
.bLength
!= USB_DT_ENDPOINT_SIZE
753 || data
->desc
.bDescriptorType
!= USB_DT_ENDPOINT
)
755 if (len
!= USB_DT_ENDPOINT_SIZE
) {
756 if (len
!= 2 * USB_DT_ENDPOINT_SIZE
)
758 memcpy(&data
->hs_desc
, buf
+ USB_DT_ENDPOINT_SIZE
,
759 USB_DT_ENDPOINT_SIZE
);
760 if (data
->hs_desc
.bLength
!= USB_DT_ENDPOINT_SIZE
761 || data
->hs_desc
.bDescriptorType
762 != USB_DT_ENDPOINT
) {
763 DBG(data
->dev
, "config %s, bad hs length or type\n",
769 spin_lock_irq (&data
->dev
->lock
);
770 if (data
->dev
->state
== STATE_DEV_UNBOUND
) {
773 } else if ((ep
= data
->ep
) == NULL
) {
777 switch (data
->dev
->gadget
->speed
) {
780 ep
->desc
= &data
->desc
;
783 /* fails if caller didn't provide that descriptor... */
784 ep
->desc
= &data
->hs_desc
;
787 DBG(data
->dev
, "unconnected, %s init abandoned\n",
792 value
= usb_ep_enable(ep
);
794 data
->state
= STATE_EP_ENABLED
;
798 spin_unlock_irq (&data
->dev
->lock
);
801 data
->desc
.bDescriptorType
= 0;
802 data
->hs_desc
.bDescriptorType
= 0;
811 ep_open (struct inode
*inode
, struct file
*fd
)
813 struct ep_data
*data
= inode
->i_private
;
816 if (mutex_lock_interruptible(&data
->lock
) != 0)
818 spin_lock_irq (&data
->dev
->lock
);
819 if (data
->dev
->state
== STATE_DEV_UNBOUND
)
821 else if (data
->state
== STATE_EP_DISABLED
) {
823 data
->state
= STATE_EP_READY
;
825 fd
->private_data
= data
;
826 VDEBUG (data
->dev
, "%s ready\n", data
->name
);
828 DBG (data
->dev
, "%s state %d\n",
829 data
->name
, data
->state
);
830 spin_unlock_irq (&data
->dev
->lock
);
831 mutex_unlock(&data
->lock
);
835 /*----------------------------------------------------------------------*/
837 /* EP0 IMPLEMENTATION can be partly in userspace.
839 * Drivers that use this facility receive various events, including
840 * control requests the kernel doesn't handle. Drivers that don't
841 * use this facility may be too simple-minded for real applications.
844 static inline void ep0_readable (struct dev_data
*dev
)
846 wake_up (&dev
->wait
);
847 kill_fasync (&dev
->fasync
, SIGIO
, POLL_IN
);
850 static void clean_req (struct usb_ep
*ep
, struct usb_request
*req
)
852 struct dev_data
*dev
= ep
->driver_data
;
854 if (req
->buf
!= dev
->rbuf
) {
856 req
->buf
= dev
->rbuf
;
858 req
->complete
= epio_complete
;
859 dev
->setup_out_ready
= 0;
862 static void ep0_complete (struct usb_ep
*ep
, struct usb_request
*req
)
864 struct dev_data
*dev
= ep
->driver_data
;
868 /* for control OUT, data must still get to userspace */
869 spin_lock_irqsave(&dev
->lock
, flags
);
870 if (!dev
->setup_in
) {
871 dev
->setup_out_error
= (req
->status
!= 0);
872 if (!dev
->setup_out_error
)
874 dev
->setup_out_ready
= 1;
878 /* clean up as appropriate */
879 if (free
&& req
->buf
!= &dev
->rbuf
)
881 req
->complete
= epio_complete
;
882 spin_unlock_irqrestore(&dev
->lock
, flags
);
885 static int setup_req (struct usb_ep
*ep
, struct usb_request
*req
, u16 len
)
887 struct dev_data
*dev
= ep
->driver_data
;
889 if (dev
->setup_out_ready
) {
890 DBG (dev
, "ep0 request busy!\n");
893 if (len
> sizeof (dev
->rbuf
))
894 req
->buf
= kmalloc(len
, GFP_ATOMIC
);
895 if (req
->buf
== NULL
) {
896 req
->buf
= dev
->rbuf
;
899 req
->complete
= ep0_complete
;
906 ep0_read (struct file
*fd
, char __user
*buf
, size_t len
, loff_t
*ptr
)
908 struct dev_data
*dev
= fd
->private_data
;
910 enum ep0_state state
;
912 spin_lock_irq (&dev
->lock
);
913 if (dev
->state
<= STATE_DEV_OPENED
) {
918 /* report fd mode change before acting on it */
919 if (dev
->setup_abort
) {
920 dev
->setup_abort
= 0;
925 /* control DATA stage */
926 if ((state
= dev
->state
) == STATE_DEV_SETUP
) {
928 if (dev
->setup_in
) { /* stall IN */
929 VDEBUG(dev
, "ep0in stall\n");
930 (void) usb_ep_set_halt (dev
->gadget
->ep0
);
932 dev
->state
= STATE_DEV_CONNECTED
;
934 } else if (len
== 0) { /* ack SET_CONFIGURATION etc */
935 struct usb_ep
*ep
= dev
->gadget
->ep0
;
936 struct usb_request
*req
= dev
->req
;
938 if ((retval
= setup_req (ep
, req
, 0)) == 0)
939 retval
= usb_ep_queue (ep
, req
, GFP_ATOMIC
);
940 dev
->state
= STATE_DEV_CONNECTED
;
942 /* assume that was SET_CONFIGURATION */
943 if (dev
->current_config
) {
946 if (gadget_is_dualspeed(dev
->gadget
)
947 && (dev
->gadget
->speed
949 power
= dev
->hs_config
->bMaxPower
;
951 power
= dev
->config
->bMaxPower
;
952 usb_gadget_vbus_draw(dev
->gadget
, 2 * power
);
955 } else { /* collect OUT data */
956 if ((fd
->f_flags
& O_NONBLOCK
) != 0
957 && !dev
->setup_out_ready
) {
961 spin_unlock_irq (&dev
->lock
);
962 retval
= wait_event_interruptible (dev
->wait
,
963 dev
->setup_out_ready
!= 0);
965 /* FIXME state could change from under us */
966 spin_lock_irq (&dev
->lock
);
970 if (dev
->state
!= STATE_DEV_SETUP
) {
974 dev
->state
= STATE_DEV_CONNECTED
;
976 if (dev
->setup_out_error
)
979 len
= min (len
, (size_t)dev
->req
->actual
);
980 // FIXME don't call this with the spinlock held ...
981 if (copy_to_user (buf
, dev
->req
->buf
, len
))
985 clean_req (dev
->gadget
->ep0
, dev
->req
);
986 /* NOTE userspace can't yet choose to stall */
992 /* else normal: return event data */
993 if (len
< sizeof dev
->event
[0]) {
997 len
-= len
% sizeof (struct usb_gadgetfs_event
);
998 dev
->usermode_setup
= 1;
1001 /* return queued events right away */
1002 if (dev
->ev_next
!= 0) {
1005 n
= len
/ sizeof (struct usb_gadgetfs_event
);
1006 if (dev
->ev_next
< n
)
1009 /* ep0 i/o has special semantics during STATE_DEV_SETUP */
1010 for (i
= 0; i
< n
; i
++) {
1011 if (dev
->event
[i
].type
== GADGETFS_SETUP
) {
1012 dev
->state
= STATE_DEV_SETUP
;
1017 spin_unlock_irq (&dev
->lock
);
1018 len
= n
* sizeof (struct usb_gadgetfs_event
);
1019 if (copy_to_user (buf
, &dev
->event
, len
))
1024 /* NOTE this doesn't guard against broken drivers;
1025 * concurrent ep0 readers may lose events.
1027 spin_lock_irq (&dev
->lock
);
1028 if (dev
->ev_next
> n
) {
1029 memmove(&dev
->event
[0], &dev
->event
[n
],
1030 sizeof (struct usb_gadgetfs_event
)
1031 * (dev
->ev_next
- n
));
1034 spin_unlock_irq (&dev
->lock
);
1038 if (fd
->f_flags
& O_NONBLOCK
) {
1045 DBG (dev
, "fail %s, state %d\n", __func__
, state
);
1048 case STATE_DEV_UNCONNECTED
:
1049 case STATE_DEV_CONNECTED
:
1050 spin_unlock_irq (&dev
->lock
);
1051 DBG (dev
, "%s wait\n", __func__
);
1053 /* wait for events */
1054 retval
= wait_event_interruptible (dev
->wait
,
1058 spin_lock_irq (&dev
->lock
);
1063 spin_unlock_irq (&dev
->lock
);
1067 static struct usb_gadgetfs_event
*
1068 next_event (struct dev_data
*dev
, enum usb_gadgetfs_event_type type
)
1070 struct usb_gadgetfs_event
*event
;
1074 /* these events purge the queue */
1075 case GADGETFS_DISCONNECT
:
1076 if (dev
->state
== STATE_DEV_SETUP
)
1077 dev
->setup_abort
= 1;
1079 case GADGETFS_CONNECT
:
1082 case GADGETFS_SETUP
: /* previous request timed out */
1083 case GADGETFS_SUSPEND
: /* same effect */
1084 /* these events can't be repeated */
1085 for (i
= 0; i
!= dev
->ev_next
; i
++) {
1086 if (dev
->event
[i
].type
!= type
)
1088 DBG(dev
, "discard old event[%d] %d\n", i
, type
);
1090 if (i
== dev
->ev_next
)
1092 /* indices start at zero, for simplicity */
1093 memmove (&dev
->event
[i
], &dev
->event
[i
+ 1],
1094 sizeof (struct usb_gadgetfs_event
)
1095 * (dev
->ev_next
- i
));
1101 VDEBUG(dev
, "event[%d] = %d\n", dev
->ev_next
, type
);
1102 event
= &dev
->event
[dev
->ev_next
++];
1103 BUG_ON (dev
->ev_next
> N_EVENT
);
1104 memset (event
, 0, sizeof *event
);
1110 ep0_write (struct file
*fd
, const char __user
*buf
, size_t len
, loff_t
*ptr
)
1112 struct dev_data
*dev
= fd
->private_data
;
1113 ssize_t retval
= -ESRCH
;
1115 /* report fd mode change before acting on it */
1116 if (dev
->setup_abort
) {
1117 dev
->setup_abort
= 0;
1120 /* data and/or status stage for control request */
1121 } else if (dev
->state
== STATE_DEV_SETUP
) {
1123 /* IN DATA+STATUS caller makes len <= wLength */
1124 if (dev
->setup_in
) {
1125 retval
= setup_req (dev
->gadget
->ep0
, dev
->req
, len
);
1127 dev
->state
= STATE_DEV_CONNECTED
;
1128 spin_unlock_irq (&dev
->lock
);
1129 if (copy_from_user (dev
->req
->buf
, buf
, len
))
1132 if (len
< dev
->setup_wLength
)
1134 retval
= usb_ep_queue (
1135 dev
->gadget
->ep0
, dev
->req
,
1139 spin_lock_irq (&dev
->lock
);
1140 clean_req (dev
->gadget
->ep0
, dev
->req
);
1141 spin_unlock_irq (&dev
->lock
);
1148 /* can stall some OUT transfers */
1149 } else if (dev
->setup_can_stall
) {
1150 VDEBUG(dev
, "ep0out stall\n");
1151 (void) usb_ep_set_halt (dev
->gadget
->ep0
);
1153 dev
->state
= STATE_DEV_CONNECTED
;
1155 DBG(dev
, "bogus ep0out stall!\n");
1158 DBG (dev
, "fail %s, state %d\n", __func__
, dev
->state
);
1164 ep0_fasync (int f
, struct file
*fd
, int on
)
1166 struct dev_data
*dev
= fd
->private_data
;
1167 // caller must F_SETOWN before signal delivery happens
1168 VDEBUG (dev
, "%s %s\n", __func__
, on
? "on" : "off");
1169 return fasync_helper (f
, fd
, on
, &dev
->fasync
);
1172 static struct usb_gadget_driver gadgetfs_driver
;
1175 dev_release (struct inode
*inode
, struct file
*fd
)
1177 struct dev_data
*dev
= fd
->private_data
;
1179 /* closing ep0 === shutdown all */
1181 usb_gadget_unregister_driver (&gadgetfs_driver
);
1183 /* at this point "good" hardware has disconnected the
1184 * device from USB; the host won't see it any more.
1185 * alternatively, all host requests will time out.
1191 /* other endpoints were all decoupled from this device */
1192 spin_lock_irq(&dev
->lock
);
1193 dev
->state
= STATE_DEV_DISABLED
;
1194 spin_unlock_irq(&dev
->lock
);
1201 ep0_poll (struct file
*fd
, poll_table
*wait
)
1203 struct dev_data
*dev
= fd
->private_data
;
1206 if (dev
->state
<= STATE_DEV_OPENED
)
1207 return DEFAULT_POLLMASK
;
1209 poll_wait(fd
, &dev
->wait
, wait
);
1211 spin_lock_irq (&dev
->lock
);
1213 /* report fd mode change before acting on it */
1214 if (dev
->setup_abort
) {
1215 dev
->setup_abort
= 0;
1220 if (dev
->state
== STATE_DEV_SETUP
) {
1221 if (dev
->setup_in
|| dev
->setup_can_stall
)
1224 if (dev
->ev_next
!= 0)
1228 spin_unlock_irq(&dev
->lock
);
1232 static long dev_ioctl (struct file
*fd
, unsigned code
, unsigned long value
)
1234 struct dev_data
*dev
= fd
->private_data
;
1235 struct usb_gadget
*gadget
= dev
->gadget
;
1238 if (gadget
->ops
->ioctl
)
1239 ret
= gadget
->ops
->ioctl (gadget
, code
, value
);
1244 /*----------------------------------------------------------------------*/
1246 /* The in-kernel gadget driver handles most ep0 issues, in particular
1247 * enumerating the single configuration (as provided from user space).
1249 * Unrecognized ep0 requests may be handled in user space.
1252 static void make_qualifier (struct dev_data
*dev
)
1254 struct usb_qualifier_descriptor qual
;
1255 struct usb_device_descriptor
*desc
;
1257 qual
.bLength
= sizeof qual
;
1258 qual
.bDescriptorType
= USB_DT_DEVICE_QUALIFIER
;
1259 qual
.bcdUSB
= cpu_to_le16 (0x0200);
1262 qual
.bDeviceClass
= desc
->bDeviceClass
;
1263 qual
.bDeviceSubClass
= desc
->bDeviceSubClass
;
1264 qual
.bDeviceProtocol
= desc
->bDeviceProtocol
;
1266 /* assumes ep0 uses the same value for both speeds ... */
1267 qual
.bMaxPacketSize0
= dev
->gadget
->ep0
->maxpacket
;
1269 qual
.bNumConfigurations
= 1;
1272 memcpy (dev
->rbuf
, &qual
, sizeof qual
);
1276 config_buf (struct dev_data
*dev
, u8 type
, unsigned index
)
1281 /* only one configuration */
1285 if (gadget_is_dualspeed(dev
->gadget
)) {
1286 hs
= (dev
->gadget
->speed
== USB_SPEED_HIGH
);
1287 if (type
== USB_DT_OTHER_SPEED_CONFIG
)
1291 dev
->req
->buf
= dev
->hs_config
;
1292 len
= le16_to_cpu(dev
->hs_config
->wTotalLength
);
1294 dev
->req
->buf
= dev
->config
;
1295 len
= le16_to_cpu(dev
->config
->wTotalLength
);
1297 ((u8
*)dev
->req
->buf
) [1] = type
;
1302 gadgetfs_setup (struct usb_gadget
*gadget
, const struct usb_ctrlrequest
*ctrl
)
1304 struct dev_data
*dev
= get_gadget_data (gadget
);
1305 struct usb_request
*req
= dev
->req
;
1306 int value
= -EOPNOTSUPP
;
1307 struct usb_gadgetfs_event
*event
;
1308 u16 w_value
= le16_to_cpu(ctrl
->wValue
);
1309 u16 w_length
= le16_to_cpu(ctrl
->wLength
);
1311 spin_lock (&dev
->lock
);
1312 dev
->setup_abort
= 0;
1313 if (dev
->state
== STATE_DEV_UNCONNECTED
) {
1314 if (gadget_is_dualspeed(gadget
)
1315 && gadget
->speed
== USB_SPEED_HIGH
1316 && dev
->hs_config
== NULL
) {
1317 spin_unlock(&dev
->lock
);
1318 ERROR (dev
, "no high speed config??\n");
1322 dev
->state
= STATE_DEV_CONNECTED
;
1324 INFO (dev
, "connected\n");
1325 event
= next_event (dev
, GADGETFS_CONNECT
);
1326 event
->u
.speed
= gadget
->speed
;
1329 /* host may have given up waiting for response. we can miss control
1330 * requests handled lower down (device/endpoint status and features);
1331 * then ep0_{read,write} will report the wrong status. controller
1332 * driver will have aborted pending i/o.
1334 } else if (dev
->state
== STATE_DEV_SETUP
)
1335 dev
->setup_abort
= 1;
1337 req
->buf
= dev
->rbuf
;
1338 req
->context
= NULL
;
1339 value
= -EOPNOTSUPP
;
1340 switch (ctrl
->bRequest
) {
1342 case USB_REQ_GET_DESCRIPTOR
:
1343 if (ctrl
->bRequestType
!= USB_DIR_IN
)
1345 switch (w_value
>> 8) {
1348 value
= min (w_length
, (u16
) sizeof *dev
->dev
);
1349 dev
->dev
->bMaxPacketSize0
= dev
->gadget
->ep0
->maxpacket
;
1350 req
->buf
= dev
->dev
;
1352 case USB_DT_DEVICE_QUALIFIER
:
1353 if (!dev
->hs_config
)
1355 value
= min (w_length
, (u16
)
1356 sizeof (struct usb_qualifier_descriptor
));
1357 make_qualifier (dev
);
1359 case USB_DT_OTHER_SPEED_CONFIG
:
1362 value
= config_buf (dev
,
1366 value
= min (w_length
, (u16
) value
);
1371 default: // all others are errors
1376 /* currently one config, two speeds */
1377 case USB_REQ_SET_CONFIGURATION
:
1378 if (ctrl
->bRequestType
!= 0)
1380 if (0 == (u8
) w_value
) {
1382 dev
->current_config
= 0;
1383 usb_gadget_vbus_draw(gadget
, 8 /* mA */ );
1384 // user mode expected to disable endpoints
1388 if (gadget_is_dualspeed(gadget
)
1389 && gadget
->speed
== USB_SPEED_HIGH
) {
1390 config
= dev
->hs_config
->bConfigurationValue
;
1391 power
= dev
->hs_config
->bMaxPower
;
1393 config
= dev
->config
->bConfigurationValue
;
1394 power
= dev
->config
->bMaxPower
;
1397 if (config
== (u8
) w_value
) {
1399 dev
->current_config
= config
;
1400 usb_gadget_vbus_draw(gadget
, 2 * power
);
1404 /* report SET_CONFIGURATION like any other control request,
1405 * except that usermode may not stall this. the next
1406 * request mustn't be allowed start until this finishes:
1407 * endpoints and threads set up, etc.
1409 * NOTE: older PXA hardware (before PXA 255: without UDCCFR)
1410 * has bad/racey automagic that prevents synchronizing here.
1411 * even kernel mode drivers often miss them.
1414 INFO (dev
, "configuration #%d\n", dev
->current_config
);
1415 usb_gadget_set_state(gadget
, USB_STATE_CONFIGURED
);
1416 if (dev
->usermode_setup
) {
1417 dev
->setup_can_stall
= 0;
1423 #ifndef CONFIG_USB_PXA25X
1424 /* PXA automagically handles this request too */
1425 case USB_REQ_GET_CONFIGURATION
:
1426 if (ctrl
->bRequestType
!= 0x80)
1428 *(u8
*)req
->buf
= dev
->current_config
;
1429 value
= min (w_length
, (u16
) 1);
1435 VDEBUG (dev
, "%s req%02x.%02x v%04x i%04x l%d\n",
1436 dev
->usermode_setup
? "delegate" : "fail",
1437 ctrl
->bRequestType
, ctrl
->bRequest
,
1438 w_value
, le16_to_cpu(ctrl
->wIndex
), w_length
);
1440 /* if there's an ep0 reader, don't stall */
1441 if (dev
->usermode_setup
) {
1442 dev
->setup_can_stall
= 1;
1444 dev
->setup_in
= (ctrl
->bRequestType
& USB_DIR_IN
)
1446 dev
->setup_wLength
= w_length
;
1447 dev
->setup_out_ready
= 0;
1448 dev
->setup_out_error
= 0;
1451 /* read DATA stage for OUT right away */
1452 if (unlikely (!dev
->setup_in
&& w_length
)) {
1453 value
= setup_req (gadget
->ep0
, dev
->req
,
1457 value
= usb_ep_queue (gadget
->ep0
, dev
->req
,
1460 clean_req (gadget
->ep0
, dev
->req
);
1464 /* we can't currently stall these */
1465 dev
->setup_can_stall
= 0;
1468 /* state changes when reader collects event */
1469 event
= next_event (dev
, GADGETFS_SETUP
);
1470 event
->u
.setup
= *ctrl
;
1472 spin_unlock (&dev
->lock
);
1477 /* proceed with data transfer and status phases? */
1478 if (value
>= 0 && dev
->state
!= STATE_DEV_SETUP
) {
1479 req
->length
= value
;
1480 req
->zero
= value
< w_length
;
1481 value
= usb_ep_queue (gadget
->ep0
, req
, GFP_ATOMIC
);
1483 DBG (dev
, "ep_queue --> %d\n", value
);
1488 /* device stalls when value < 0 */
1489 spin_unlock (&dev
->lock
);
1493 static void destroy_ep_files (struct dev_data
*dev
)
1495 DBG (dev
, "%s %d\n", __func__
, dev
->state
);
1497 /* dev->state must prevent interference */
1498 spin_lock_irq (&dev
->lock
);
1499 while (!list_empty(&dev
->epfiles
)) {
1501 struct inode
*parent
;
1502 struct dentry
*dentry
;
1504 /* break link to FS */
1505 ep
= list_first_entry (&dev
->epfiles
, struct ep_data
, epfiles
);
1506 list_del_init (&ep
->epfiles
);
1507 dentry
= ep
->dentry
;
1509 parent
= dentry
->d_parent
->d_inode
;
1511 /* break link to controller */
1512 if (ep
->state
== STATE_EP_ENABLED
)
1513 (void) usb_ep_disable (ep
->ep
);
1514 ep
->state
= STATE_EP_UNBOUND
;
1515 usb_ep_free_request (ep
->ep
, ep
->req
);
1517 wake_up (&ep
->wait
);
1520 spin_unlock_irq (&dev
->lock
);
1522 /* break link to dcache */
1523 mutex_lock (&parent
->i_mutex
);
1526 mutex_unlock (&parent
->i_mutex
);
1528 spin_lock_irq (&dev
->lock
);
1530 spin_unlock_irq (&dev
->lock
);
1534 static struct dentry
*
1535 gadgetfs_create_file (struct super_block
*sb
, char const *name
,
1536 void *data
, const struct file_operations
*fops
);
1538 static int activate_ep_files (struct dev_data
*dev
)
1541 struct ep_data
*data
;
1543 gadget_for_each_ep (ep
, dev
->gadget
) {
1545 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
1548 data
->state
= STATE_EP_DISABLED
;
1549 mutex_init(&data
->lock
);
1550 init_waitqueue_head (&data
->wait
);
1552 strncpy (data
->name
, ep
->name
, sizeof (data
->name
) - 1);
1553 atomic_set (&data
->count
, 1);
1558 ep
->driver_data
= data
;
1560 data
->req
= usb_ep_alloc_request (ep
, GFP_KERNEL
);
1564 data
->dentry
= gadgetfs_create_file (dev
->sb
, data
->name
,
1565 data
, &ep_io_operations
);
1568 list_add_tail (&data
->epfiles
, &dev
->epfiles
);
1573 usb_ep_free_request (ep
, data
->req
);
1578 DBG (dev
, "%s enomem\n", __func__
);
1579 destroy_ep_files (dev
);
1584 gadgetfs_unbind (struct usb_gadget
*gadget
)
1586 struct dev_data
*dev
= get_gadget_data (gadget
);
1588 DBG (dev
, "%s\n", __func__
);
1590 spin_lock_irq (&dev
->lock
);
1591 dev
->state
= STATE_DEV_UNBOUND
;
1592 spin_unlock_irq (&dev
->lock
);
1594 destroy_ep_files (dev
);
1595 gadget
->ep0
->driver_data
= NULL
;
1596 set_gadget_data (gadget
, NULL
);
1598 /* we've already been disconnected ... no i/o is active */
1600 usb_ep_free_request (gadget
->ep0
, dev
->req
);
1601 DBG (dev
, "%s done\n", __func__
);
1605 static struct dev_data
*the_device
;
1607 static int gadgetfs_bind(struct usb_gadget
*gadget
,
1608 struct usb_gadget_driver
*driver
)
1610 struct dev_data
*dev
= the_device
;
1614 if (0 != strcmp (CHIP
, gadget
->name
)) {
1615 pr_err("%s expected %s controller not %s\n",
1616 shortname
, CHIP
, gadget
->name
);
1620 set_gadget_data (gadget
, dev
);
1621 dev
->gadget
= gadget
;
1622 gadget
->ep0
->driver_data
= dev
;
1624 /* preallocate control response and buffer */
1625 dev
->req
= usb_ep_alloc_request (gadget
->ep0
, GFP_KERNEL
);
1628 dev
->req
->context
= NULL
;
1629 dev
->req
->complete
= epio_complete
;
1631 if (activate_ep_files (dev
) < 0)
1634 INFO (dev
, "bound to %s driver\n", gadget
->name
);
1635 spin_lock_irq(&dev
->lock
);
1636 dev
->state
= STATE_DEV_UNCONNECTED
;
1637 spin_unlock_irq(&dev
->lock
);
1642 gadgetfs_unbind (gadget
);
1647 gadgetfs_disconnect (struct usb_gadget
*gadget
)
1649 struct dev_data
*dev
= get_gadget_data (gadget
);
1650 unsigned long flags
;
1652 spin_lock_irqsave (&dev
->lock
, flags
);
1653 if (dev
->state
== STATE_DEV_UNCONNECTED
)
1655 dev
->state
= STATE_DEV_UNCONNECTED
;
1657 INFO (dev
, "disconnected\n");
1658 next_event (dev
, GADGETFS_DISCONNECT
);
1661 spin_unlock_irqrestore (&dev
->lock
, flags
);
1665 gadgetfs_suspend (struct usb_gadget
*gadget
)
1667 struct dev_data
*dev
= get_gadget_data (gadget
);
1669 INFO (dev
, "suspended from state %d\n", dev
->state
);
1670 spin_lock (&dev
->lock
);
1671 switch (dev
->state
) {
1672 case STATE_DEV_SETUP
: // VERY odd... host died??
1673 case STATE_DEV_CONNECTED
:
1674 case STATE_DEV_UNCONNECTED
:
1675 next_event (dev
, GADGETFS_SUSPEND
);
1681 spin_unlock (&dev
->lock
);
1684 static struct usb_gadget_driver gadgetfs_driver
= {
1685 .function
= (char *) driver_desc
,
1686 .bind
= gadgetfs_bind
,
1687 .unbind
= gadgetfs_unbind
,
1688 .setup
= gadgetfs_setup
,
1689 .reset
= gadgetfs_disconnect
,
1690 .disconnect
= gadgetfs_disconnect
,
1691 .suspend
= gadgetfs_suspend
,
1694 .name
= (char *) shortname
,
1698 /*----------------------------------------------------------------------*/
/* no-op callback used to satisfy the probe driver's required hooks */
static void gadgetfs_nop(struct usb_gadget *arg) { }
1702 static int gadgetfs_probe(struct usb_gadget
*gadget
,
1703 struct usb_gadget_driver
*driver
)
1705 CHIP
= gadget
->name
;
1709 static struct usb_gadget_driver probe_driver
= {
1710 .max_speed
= USB_SPEED_HIGH
,
1711 .bind
= gadgetfs_probe
,
1712 .unbind
= gadgetfs_nop
,
1713 .setup
= (void *)gadgetfs_nop
,
1714 .disconnect
= gadgetfs_nop
,
/* DEVICE INITIALIZATION
 *
 *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
 *     status = write (fd, descriptors, sizeof descriptors)
 *
 * That write establishes the device configuration, so the kernel can
 * bind to the controller ... guaranteeing it can handle enumeration
 * at all necessary speeds.  Descriptor order is:
 *
 * . message tag (u32, host order) ... for now, must be zero; it
 *   would change to support features like multi-config devices
 * . full/low speed config ... all wTotalLength bytes (with interface,
 *   class, altsetting, endpoint, and other descriptors)
 * . high speed config ... all descriptors, for high speed operation;
 *   this one's optional except for high-speed hardware
 * . device descriptor
 *
 * Endpoints are not yet enabled.  Drivers must wait until device
 * configuration and interface altsetting changes create
 * the need to configure (or unconfigure) them.
 *
 * After initialization, the device stays active for as long as that
 * $CHIP file is open.  Events must then be read from that descriptor,
 * such as configuration notifications.
 */
1747 static int is_valid_config (struct usb_config_descriptor
*config
)
1749 return config
->bDescriptorType
== USB_DT_CONFIG
1750 && config
->bLength
== USB_DT_CONFIG_SIZE
1751 && config
->bConfigurationValue
!= 0
1752 && (config
->bmAttributes
& USB_CONFIG_ATT_ONE
) != 0
1753 && (config
->bmAttributes
& USB_CONFIG_ATT_WAKEUP
) == 0;
1754 /* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1755 /* FIXME check lengths: walk to end */
1759 dev_config (struct file
*fd
, const char __user
*buf
, size_t len
, loff_t
*ptr
)
1761 struct dev_data
*dev
= fd
->private_data
;
1762 ssize_t value
= len
, length
= len
;
1767 spin_lock_irq(&dev
->lock
);
1768 if (dev
->state
> STATE_DEV_OPENED
) {
1769 value
= ep0_write(fd
, buf
, len
, ptr
);
1770 spin_unlock_irq(&dev
->lock
);
1773 spin_unlock_irq(&dev
->lock
);
1775 if (len
< (USB_DT_CONFIG_SIZE
+ USB_DT_DEVICE_SIZE
+ 4))
1778 /* we might need to change message format someday */
1779 if (copy_from_user (&tag
, buf
, 4))
1786 kbuf
= memdup_user(buf
, length
);
1788 return PTR_ERR(kbuf
);
1790 spin_lock_irq (&dev
->lock
);
1796 /* full or low speed config */
1797 dev
->config
= (void *) kbuf
;
1798 total
= le16_to_cpu(dev
->config
->wTotalLength
);
1799 if (!is_valid_config (dev
->config
) || total
>= length
)
1804 /* optional high speed config */
1805 if (kbuf
[1] == USB_DT_CONFIG
) {
1806 dev
->hs_config
= (void *) kbuf
;
1807 total
= le16_to_cpu(dev
->hs_config
->wTotalLength
);
1808 if (!is_valid_config (dev
->hs_config
) || total
>= length
)
1814 /* could support multiple configs, using another encoding! */
1816 /* device descriptor (tweaked for paranoia) */
1817 if (length
!= USB_DT_DEVICE_SIZE
)
1819 dev
->dev
= (void *)kbuf
;
1820 if (dev
->dev
->bLength
!= USB_DT_DEVICE_SIZE
1821 || dev
->dev
->bDescriptorType
!= USB_DT_DEVICE
1822 || dev
->dev
->bNumConfigurations
!= 1)
1824 dev
->dev
->bNumConfigurations
= 1;
1825 dev
->dev
->bcdUSB
= cpu_to_le16 (0x0200);
1827 /* triggers gadgetfs_bind(); then we can enumerate. */
1828 spin_unlock_irq (&dev
->lock
);
1830 gadgetfs_driver
.max_speed
= USB_SPEED_HIGH
;
1832 gadgetfs_driver
.max_speed
= USB_SPEED_FULL
;
1834 value
= usb_gadget_probe_driver(&gadgetfs_driver
);
1839 /* at this point "good" hardware has for the first time
1840 * let the USB the host see us. alternatively, if users
1841 * unplug/replug that will clear all the error state.
1843 * note: everything running before here was guaranteed
1844 * to choke driver model style diagnostics. from here
1845 * on, they can work ... except in cleanup paths that
1846 * kick in after the ep0 descriptor is closed.
1853 spin_unlock_irq (&dev
->lock
);
1854 pr_debug ("%s: %s fail %Zd, %p\n", shortname
, __func__
, value
, dev
);
1861 dev_open (struct inode
*inode
, struct file
*fd
)
1863 struct dev_data
*dev
= inode
->i_private
;
1866 spin_lock_irq(&dev
->lock
);
1867 if (dev
->state
== STATE_DEV_DISABLED
) {
1869 dev
->state
= STATE_DEV_OPENED
;
1870 fd
->private_data
= dev
;
1874 spin_unlock_irq(&dev
->lock
);
1878 static const struct file_operations ep0_operations
= {
1879 .llseek
= no_llseek
,
1883 .write
= dev_config
,
1884 .fasync
= ep0_fasync
,
1886 .unlocked_ioctl
= dev_ioctl
,
1887 .release
= dev_release
,
/*----------------------------------------------------------------------*/

/* FILESYSTEM AND SUPERBLOCK OPERATIONS
 *
 * Mounting the filesystem creates a controller file, used first for
 * device configuration then later for event monitoring.
 */
1899 /* FIXME PAM etc could set this security policy without mount options
1900 * if epfiles inherited ownership and permissons from ep0 ...
1903 static unsigned default_uid
;
1904 static unsigned default_gid
;
1905 static unsigned default_perm
= S_IRUSR
| S_IWUSR
;
1907 module_param (default_uid
, uint
, 0644);
1908 module_param (default_gid
, uint
, 0644);
1909 module_param (default_perm
, uint
, 0644);
1912 static struct inode
*
1913 gadgetfs_make_inode (struct super_block
*sb
,
1914 void *data
, const struct file_operations
*fops
,
1917 struct inode
*inode
= new_inode (sb
);
1920 inode
->i_ino
= get_next_ino();
1921 inode
->i_mode
= mode
;
1922 inode
->i_uid
= make_kuid(&init_user_ns
, default_uid
);
1923 inode
->i_gid
= make_kgid(&init_user_ns
, default_gid
);
1924 inode
->i_atime
= inode
->i_mtime
= inode
->i_ctime
1926 inode
->i_private
= data
;
1927 inode
->i_fop
= fops
;
1932 /* creates in fs root directory, so non-renamable and non-linkable.
1933 * so inode and dentry are paired, until device reconfig.
1935 static struct dentry
*
1936 gadgetfs_create_file (struct super_block
*sb
, char const *name
,
1937 void *data
, const struct file_operations
*fops
)
1939 struct dentry
*dentry
;
1940 struct inode
*inode
;
1942 dentry
= d_alloc_name(sb
->s_root
, name
);
1946 inode
= gadgetfs_make_inode (sb
, data
, fops
,
1947 S_IFREG
| (default_perm
& S_IRWXUGO
));
1952 d_add (dentry
, inode
);
1956 static const struct super_operations gadget_fs_operations
= {
1957 .statfs
= simple_statfs
,
1958 .drop_inode
= generic_delete_inode
,
1962 gadgetfs_fill_super (struct super_block
*sb
, void *opts
, int silent
)
1964 struct inode
*inode
;
1965 struct dev_data
*dev
;
1970 /* fake probe to determine $CHIP */
1972 usb_gadget_probe_driver(&probe_driver
);
1977 sb
->s_blocksize
= PAGE_CACHE_SIZE
;
1978 sb
->s_blocksize_bits
= PAGE_CACHE_SHIFT
;
1979 sb
->s_magic
= GADGETFS_MAGIC
;
1980 sb
->s_op
= &gadget_fs_operations
;
1981 sb
->s_time_gran
= 1;
1984 inode
= gadgetfs_make_inode (sb
,
1985 NULL
, &simple_dir_operations
,
1986 S_IFDIR
| S_IRUGO
| S_IXUGO
);
1989 inode
->i_op
= &simple_dir_inode_operations
;
1990 if (!(sb
->s_root
= d_make_root (inode
)))
1993 /* the ep0 file is named after the controller we expect;
1994 * user mode code can use it for sanity checks, like we do.
2001 dev
->dentry
= gadgetfs_create_file(sb
, CHIP
, dev
, &ep0_operations
);
2007 /* other endpoint files are available after hardware setup,
2008 * from binding to a controller.
2017 /* "mount -t gadgetfs path /dev/gadget" ends up here */
2018 static struct dentry
*
2019 gadgetfs_mount (struct file_system_type
*t
, int flags
,
2020 const char *path
, void *opts
)
2022 return mount_single (t
, flags
, opts
, gadgetfs_fill_super
);
2026 gadgetfs_kill_sb (struct super_block
*sb
)
2028 kill_litter_super (sb
);
2030 put_dev (the_device
);
2035 /*----------------------------------------------------------------------*/
2037 static struct file_system_type gadgetfs_type
= {
2038 .owner
= THIS_MODULE
,
2040 .mount
= gadgetfs_mount
,
2041 .kill_sb
= gadgetfs_kill_sb
,
2043 MODULE_ALIAS_FS("gadgetfs");
2045 /*----------------------------------------------------------------------*/
2047 static int __init
init (void)
2051 status
= register_filesystem (&gadgetfs_type
);
2053 pr_info ("%s: %s, version " DRIVER_VERSION
"\n",
2054 shortname
, driver_desc
);
2059 static void __exit
cleanup (void)
2061 pr_debug ("unregister %s\n", shortname
);
2062 unregister_filesystem (&gadgetfs_type
);
2064 module_exit (cleanup
);