drivers/usb/gadget/function/f_fs.c
1 /*
2 * f_fs.c -- user mode file system API for USB composite function controllers
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 * Author: Michal Nazarewicz <mina86@mina86.com>
6 *
7 * Based on inode.c (GadgetFS) which was:
8 * Copyright (C) 2003-2004 David Brownell
9 * Copyright (C) 2003 Agilent Technologies
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17
18 /* #define DEBUG */
19 /* #define VERBOSE_DEBUG */
20
21 #include <linux/blkdev.h>
22 #include <linux/pagemap.h>
23 #include <linux/export.h>
24 #include <linux/hid.h>
25 #include <linux/module.h>
26 #include <asm/unaligned.h>
27
28 #include <linux/usb/composite.h>
29 #include <linux/usb/functionfs.h>
30
31 #include <linux/aio.h>
32 #include <linux/mmu_context.h>
33 #include <linux/poll.h>
34 #include <linux/eventfd.h>
35
36 #include "u_fs.h"
37 #include "u_f.h"
38 #include "u_os_desc.h"
39 #include "configfs.h"
40
41 #define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by an honest dice roll ;) */
42
43 /* Reference counter handling */
44 static void ffs_data_get(struct ffs_data *ffs);
45 static void ffs_data_put(struct ffs_data *ffs);
46 /* Creates new ffs_data object. */
47 static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
48
49 /* Opened counter handling. */
50 static void ffs_data_opened(struct ffs_data *ffs);
51 static void ffs_data_closed(struct ffs_data *ffs);
52
53 /* Called with ffs->mutex held; take over ownership of data. */
54 static int __must_check
55 __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
56 static int __must_check
57 __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
58
59
60 /* The function structure ***************************************************/
61
62 struct ffs_ep;
63
64 struct ffs_function {
65 struct usb_configuration *conf;
66 struct usb_gadget *gadget;
67 struct ffs_data *ffs;
68
69 struct ffs_ep *eps;
70 u8 eps_revmap[16];
71 short *interfaces_nums;
72
73 struct usb_function function;
74 };
75
76
77 static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
78 {
79 return container_of(f, struct ffs_function, function);
80 }
81
82
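/*
 * Atomically replace FFS_SETUP_CANCELLED with FFS_NO_SETUP (leaving any other
 * state untouched) and return the setup state observed before the exchange,
 * so that callers both detect and acknowledge a cancelled setup in one step.
 */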
83 static inline enum ffs_setup_state
84 ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
85 {
86 return (enum ffs_setup_state)
87 cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
88 }
89
90
91 static void ffs_func_eps_disable(struct ffs_function *func);
92 static int __must_check ffs_func_eps_enable(struct ffs_function *func);
93
94 static int ffs_func_bind(struct usb_configuration *,
95 struct usb_function *);
96 static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
97 static void ffs_func_disable(struct usb_function *);
98 static int ffs_func_setup(struct usb_function *,
99 const struct usb_ctrlrequest *);
100 static void ffs_func_suspend(struct usb_function *);
101 static void ffs_func_resume(struct usb_function *);
102
103
104 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
105 static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
106
107
108 /* The endpoints structures *************************************************/
109
110 struct ffs_ep {
111 struct usb_ep *ep; /* P: ffs->eps_lock */
112 struct usb_request *req; /* P: epfile->mutex */
113
114 /* [0]: full speed, [1]: high speed, [2]: super speed */
115 struct usb_endpoint_descriptor *descs[3];
116
117 u8 num;
118
119 int status; /* P: epfile->mutex */
120 };
121
122 struct ffs_epfile {
123 /* Protects ep->ep and ep->req. */
124 struct mutex mutex;
125 wait_queue_head_t wait;
126
127 struct ffs_data *ffs;
128 struct ffs_ep *ep; /* P: ffs->eps_lock */
129
130 struct dentry *dentry;
131
132 char name[5];
133
134 unsigned char in; /* P: ffs->eps_lock */
135 unsigned char isoc; /* P: ffs->eps_lock */
136
137 unsigned char _pad;
138 };
139
140 /* ffs_io_data structure ***************************************************/
141
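/*
 * Describes a single I/O operation on an endpoint file.  'aio' distinguishes
 * the asynchronous paths (aio_read/aio_write) from plain read/write; for aio
 * reads 'iovec' points to a kmalloc'ed copy of the caller's vector which is
 * freed by ffs_user_copy_worker() after the data has been copied to user
 * space under the saved 'mm'.
 */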
142 struct ffs_io_data {
143 bool aio;
144 bool read;
145
146 struct kiocb *kiocb;
147 const struct iovec *iovec;
148 unsigned long nr_segs;
149 char __user *buf;
150 size_t len;
151
152 struct mm_struct *mm;
153 struct work_struct work;
154
155 struct usb_ep *ep;
156 struct usb_request *req;
157
158 struct ffs_data *ffs;
159 };
160
161 struct ffs_desc_helper {
162 struct ffs_data *ffs;
163 unsigned interfaces_count;
164 unsigned eps_count;
165 };
166
167 static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
168 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
169
170 static struct dentry *
171 ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
172 const struct file_operations *fops);
173
174 /* Devices management *******************************************************/
175
176 DEFINE_MUTEX(ffs_lock);
177 EXPORT_SYMBOL_GPL(ffs_lock);
178
179 static struct ffs_dev *_ffs_find_dev(const char *name);
180 static struct ffs_dev *_ffs_alloc_dev(void);
181 static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
182 static void _ffs_free_dev(struct ffs_dev *dev);
183 static void *ffs_acquire_dev(const char *dev_name);
184 static void ffs_release_dev(struct ffs_data *ffs_data);
185 static int ffs_ready(struct ffs_data *ffs);
186 static void ffs_closed(struct ffs_data *ffs);
187
188 /* Misc helper functions ****************************************************/
189
190 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
191 __attribute__((warn_unused_result, nonnull));
192 static char *ffs_prepare_buffer(const char __user *buf, size_t len)
193 __attribute__((warn_unused_result, nonnull));
194
195
196 /* Control file aka ep0 *****************************************************/
197
198 static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
199 {
200 struct ffs_data *ffs = req->context;
201
202 complete_all(&ffs->ep0req_completion);
203 }
204
205 static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
206 {
207 struct usb_request *req = ffs->ep0req;
208 int ret;
209
210 req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
211
212 spin_unlock_irq(&ffs->ev.waitq.lock);
213
214 req->buf = data;
215 req->length = len;
216
217 /*
218 * The UDC layer requires a buffer to be provided even for a ZLP, but it
219 * should not actually use it. Let's provide a poisoned pointer to catch
220 * a possible bug in the driver.
221 */
222 if (req->buf == NULL)
223 req->buf = (void *)0xDEADBABE;
224
225 reinit_completion(&ffs->ep0req_completion);
226
227 ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
228 if (unlikely(ret < 0))
229 return ret;
230
231 ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
232 if (unlikely(ret)) {
233 usb_ep_dequeue(ffs->gadget->ep0, req);
234 return -EINTR;
235 }
236
237 ffs->setup_state = FFS_NO_SETUP;
238 return req->status ? req->status : req->actual;
239 }
240
241 static int __ffs_ep0_stall(struct ffs_data *ffs)
242 {
243 if (ffs->ev.can_stall) {
244 pr_vdebug("ep0 stall\n");
245 usb_ep_set_halt(ffs->gadget->ep0);
246 ffs->setup_state = FFS_NO_SETUP;
247 return -EL2HLT;
248 } else {
249 pr_debug("bogus ep0 stall!\n");
250 return -ESRCH;
251 }
252 }
253
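/*
 * Write on ep0.  In FFS_READ_DESCRIPTORS and FFS_READ_STRINGS states the
 * payload is the descriptors respectively strings blob; once both have been
 * accepted the endpoint files are created and ffs_ready() is called
 * (FFS_ACTIVE).  In FFS_ACTIVE state a write supplies the data stage of a
 * pending device-to-host (IN) setup request; if the pending request is
 * host-to-device, ep0 is stalled instead.
 */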
254 static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
255 size_t len, loff_t *ptr)
256 {
257 struct ffs_data *ffs = file->private_data;
258 ssize_t ret;
259 char *data;
260
261 ENTER();
262
263 /* Fast check if setup was canceled */
264 if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
265 return -EIDRM;
266
267 /* Acquire mutex */
268 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
269 if (unlikely(ret < 0))
270 return ret;
271
272 /* Check state */
273 switch (ffs->state) {
274 case FFS_READ_DESCRIPTORS:
275 case FFS_READ_STRINGS:
276 /* Copy data */
277 if (unlikely(len < 16)) {
278 ret = -EINVAL;
279 break;
280 }
281
282 data = ffs_prepare_buffer(buf, len);
283 if (IS_ERR(data)) {
284 ret = PTR_ERR(data);
285 break;
286 }
287
288 /* Handle data */
289 if (ffs->state == FFS_READ_DESCRIPTORS) {
290 pr_info("read descriptors\n");
291 ret = __ffs_data_got_descs(ffs, data, len);
292 if (unlikely(ret < 0))
293 break;
294
295 ffs->state = FFS_READ_STRINGS;
296 ret = len;
297 } else {
298 pr_info("read strings\n");
299 ret = __ffs_data_got_strings(ffs, data, len);
300 if (unlikely(ret < 0))
301 break;
302
303 ret = ffs_epfiles_create(ffs);
304 if (unlikely(ret)) {
305 ffs->state = FFS_CLOSING;
306 break;
307 }
308
309 ffs->state = FFS_ACTIVE;
310 mutex_unlock(&ffs->mutex);
311
312 ret = ffs_ready(ffs);
313 if (unlikely(ret < 0)) {
314 ffs->state = FFS_CLOSING;
315 return ret;
316 }
317
318 set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
319 return len;
320 }
321 break;
322
323 case FFS_ACTIVE:
324 data = NULL;
325 /*
326 * We're called from user space, so we can use _irq
327 * rather than _irqsave
328 */
329 spin_lock_irq(&ffs->ev.waitq.lock);
330 switch (ffs_setup_state_clear_cancelled(ffs)) {
331 case FFS_SETUP_CANCELLED:
332 ret = -EIDRM;
333 goto done_spin;
334
335 case FFS_NO_SETUP:
336 ret = -ESRCH;
337 goto done_spin;
338
339 case FFS_SETUP_PENDING:
340 break;
341 }
342
343 /* FFS_SETUP_PENDING */
344 if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
345 spin_unlock_irq(&ffs->ev.waitq.lock);
346 ret = __ffs_ep0_stall(ffs);
347 break;
348 }
349
350 /* FFS_SETUP_PENDING and not stall */
351 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
352
353 spin_unlock_irq(&ffs->ev.waitq.lock);
354
355 data = ffs_prepare_buffer(buf, len);
356 if (IS_ERR(data)) {
357 ret = PTR_ERR(data);
358 break;
359 }
360
361 spin_lock_irq(&ffs->ev.waitq.lock);
362
363 /*
364 * We are guaranteed to be still in FFS_ACTIVE state
365 * but the state of setup could have changed from
366 * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
367 * to check for that. If that happened we copied data
368 * from user space in vain but it's unlikely.
369 *
370 * For sure we are not in FFS_NO_SETUP since this is
371 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
372 * transition can be performed and it's protected by
373 * mutex.
374 */
375 if (ffs_setup_state_clear_cancelled(ffs) ==
376 FFS_SETUP_CANCELLED) {
377 ret = -EIDRM;
378 done_spin:
379 spin_unlock_irq(&ffs->ev.waitq.lock);
380 } else {
381 /* unlocks spinlock */
382 ret = __ffs_ep0_queue_wait(ffs, data, len);
383 }
384 kfree(data);
385 break;
386
387 default:
388 ret = -EBADFD;
389 break;
390 }
391
392 mutex_unlock(&ffs->mutex);
393 return ret;
394 }
395
396 /* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
397 static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
398 size_t n)
399 {
400 /*
401 * n cannot be bigger than ffs->ev.count, which cannot be bigger than
402 * size of ffs->ev.types array (which is four) so that's how much space
403 * we reserve.
404 */
405 struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
406 const size_t size = n * sizeof *events;
407 unsigned i = 0;
408
409 memset(events, 0, size);
410
411 do {
412 events[i].type = ffs->ev.types[i];
413 if (events[i].type == FUNCTIONFS_SETUP) {
414 events[i].u.setup = ffs->ev.setup;
415 ffs->setup_state = FFS_SETUP_PENDING;
416 }
417 } while (++i < n);
418
419 ffs->ev.count -= n;
420 if (ffs->ev.count)
421 memmove(ffs->ev.types, ffs->ev.types + n,
422 ffs->ev.count * sizeof *ffs->ev.types);
423
424 spin_unlock_irq(&ffs->ev.waitq.lock);
425 mutex_unlock(&ffs->mutex);
426
427 return unlikely(__copy_to_user(buf, events, size)) ? -EFAULT : size;
428 }
429
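/*
 * Read on ep0.  With no setup request pending this returns up to
 * len / sizeof(struct usb_functionfs_event) queued events.  With a setup
 * request pending it returns the data stage of a host-to-device (OUT)
 * request; reading while a device-to-host request is pending stalls ep0.
 */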
430 static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
431 size_t len, loff_t *ptr)
432 {
433 struct ffs_data *ffs = file->private_data;
434 char *data = NULL;
435 size_t n;
436 int ret;
437
438 ENTER();
439
440 /* Fast check if setup was canceled */
441 if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
442 return -EIDRM;
443
444 /* Acquire mutex */
445 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
446 if (unlikely(ret < 0))
447 return ret;
448
449 /* Check state */
450 if (ffs->state != FFS_ACTIVE) {
451 ret = -EBADFD;
452 goto done_mutex;
453 }
454
455 /*
456 * We're called from user space, so we can use _irq rather than
457 * _irqsave
458 */
459 spin_lock_irq(&ffs->ev.waitq.lock);
460
461 switch (ffs_setup_state_clear_cancelled(ffs)) {
462 case FFS_SETUP_CANCELLED:
463 ret = -EIDRM;
464 break;
465
466 case FFS_NO_SETUP:
467 n = len / sizeof(struct usb_functionfs_event);
468 if (unlikely(!n)) {
469 ret = -EINVAL;
470 break;
471 }
472
473 if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
474 ret = -EAGAIN;
475 break;
476 }
477
478 if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
479 ffs->ev.count)) {
480 ret = -EINTR;
481 break;
482 }
483
484 return __ffs_ep0_read_events(ffs, buf,
485 min(n, (size_t)ffs->ev.count));
486
487 case FFS_SETUP_PENDING:
488 if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
489 spin_unlock_irq(&ffs->ev.waitq.lock);
490 ret = __ffs_ep0_stall(ffs);
491 goto done_mutex;
492 }
493
494 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
495
496 spin_unlock_irq(&ffs->ev.waitq.lock);
497
498 if (likely(len)) {
499 data = kmalloc(len, GFP_KERNEL);
500 if (unlikely(!data)) {
501 ret = -ENOMEM;
502 goto done_mutex;
503 }
504 }
505
506 spin_lock_irq(&ffs->ev.waitq.lock);
507
508 /* See ffs_ep0_write() */
509 if (ffs_setup_state_clear_cancelled(ffs) ==
510 FFS_SETUP_CANCELLED) {
511 ret = -EIDRM;
512 break;
513 }
514
515 /* unlocks spinlock */
516 ret = __ffs_ep0_queue_wait(ffs, data, len);
517 if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
518 ret = -EFAULT;
519 goto done_mutex;
520
521 default:
522 ret = -EBADFD;
523 break;
524 }
525
526 spin_unlock_irq(&ffs->ev.waitq.lock);
527 done_mutex:
528 mutex_unlock(&ffs->mutex);
529 kfree(data);
530 return ret;
531 }
532
533 static int ffs_ep0_open(struct inode *inode, struct file *file)
534 {
535 struct ffs_data *ffs = inode->i_private;
536
537 ENTER();
538
539 if (unlikely(ffs->state == FFS_CLOSING))
540 return -EBUSY;
541
542 file->private_data = ffs;
543 ffs_data_opened(ffs);
544
545 return 0;
546 }
547
548 static int ffs_ep0_release(struct inode *inode, struct file *file)
549 {
550 struct ffs_data *ffs = file->private_data;
551
552 ENTER();
553
554 ffs_data_closed(ffs);
555
556 return 0;
557 }
558
559 static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
560 {
561 struct ffs_data *ffs = file->private_data;
562 struct usb_gadget *gadget = ffs->gadget;
563 long ret;
564
565 ENTER();
566
567 if (code == FUNCTIONFS_INTERFACE_REVMAP) {
568 struct ffs_function *func = ffs->func;
569 ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
570 } else if (gadget && gadget->ops->ioctl) {
571 ret = gadget->ops->ioctl(gadget, code, value);
572 } else {
573 ret = -ENOTTY;
574 }
575
576 return ret;
577 }
578
579 static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
580 {
581 struct ffs_data *ffs = file->private_data;
582 unsigned int mask = POLLWRNORM;
583 int ret;
584
585 poll_wait(file, &ffs->ev.waitq, wait);
586
587 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
588 if (unlikely(ret < 0))
589 return mask;
590
591 switch (ffs->state) {
592 case FFS_READ_DESCRIPTORS:
593 case FFS_READ_STRINGS:
594 mask |= POLLOUT;
595 break;
596
597 case FFS_ACTIVE:
598 switch (ffs->setup_state) {
599 case FFS_NO_SETUP:
600 if (ffs->ev.count)
601 mask |= POLLIN;
602 break;
603
604 case FFS_SETUP_PENDING:
605 case FFS_SETUP_CANCELLED:
606 mask |= (POLLIN | POLLOUT);
607 break;
608 }
609 case FFS_CLOSING:
610 break;
611 case FFS_DEACTIVATED:
612 break;
613 }
614
615 mutex_unlock(&ffs->mutex);
616
617 return mask;
618 }
619
620 static const struct file_operations ffs_ep0_operations = {
621 .llseek = no_llseek,
622
623 .open = ffs_ep0_open,
624 .write = ffs_ep0_write,
625 .read = ffs_ep0_read,
626 .release = ffs_ep0_release,
627 .unlocked_ioctl = ffs_ep0_ioctl,
628 .poll = ffs_ep0_poll,
629 };
630
631
632 /* "Normal" endpoints operations ********************************************/
633
634 static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
635 {
636 ENTER();
637 if (likely(req->context)) {
638 struct ffs_ep *ep = _ep->driver_data;
639 ep->status = req->status ? req->status : req->actual;
640 complete(req->context);
641 }
642 }
643
644 static void ffs_user_copy_worker(struct work_struct *work)
645 {
646 struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
647 work);
648 int ret = io_data->req->status ? io_data->req->status :
649 io_data->req->actual;
650
651 if (io_data->read && ret > 0) {
652 int i;
653 size_t pos = 0;
654
655 /*
656 * Since req->length may be bigger than io_data->len (after
657 * being rounded up to maxpacketsize), we may end up with more
658 * data than user space has space for.
659 */
660 ret = min_t(int, ret, io_data->len);
661
662 use_mm(io_data->mm);
663 for (i = 0; i < io_data->nr_segs; i++) {
664 size_t len = min_t(size_t, ret - pos,
665 io_data->iovec[i].iov_len);
666 if (!len)
667 break;
668 if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
669 &io_data->buf[pos], len))) {
670 ret = -EFAULT;
671 break;
672 }
673 pos += len;
674 }
675 unuse_mm(io_data->mm);
676 }
677
678 aio_complete(io_data->kiocb, ret, ret);
679
680 if (io_data->ffs->ffs_eventfd && !io_data->kiocb->ki_eventfd)
681 eventfd_signal(io_data->ffs->ffs_eventfd, 1);
682
683 usb_ep_free_request(io_data->ep, io_data->req);
684
685 io_data->kiocb->private = NULL;
686 if (io_data->read)
687 kfree(io_data->iovec);
688 kfree(io_data->buf);
689 kfree(io_data);
690 }
691
692 static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
693 struct usb_request *req)
694 {
695 struct ffs_io_data *io_data = req->context;
696
697 ENTER();
698
699 INIT_WORK(&io_data->work, ffs_user_copy_worker);
700 schedule_work(&io_data->work);
701 }
702
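/*
 * Common I/O path for endpoint files; both the synchronous read/write and the
 * aio entry points end up here.  Unless O_NONBLOCK is set it waits for the
 * endpoint to be enabled, bounces the data through a kernel buffer (for reads
 * possibly rounded up to the endpoint's maxpacketsize), and either queues an
 * asynchronous request completed by ffs_user_copy_worker() or queues and
 * waits synchronously.  An I/O in the "wrong" direction (e.g. writing to a
 * file backing an OUT endpoint) halts the endpoint and returns -EBADMSG.
 */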
703 static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
704 {
705 struct ffs_epfile *epfile = file->private_data;
706 struct ffs_ep *ep;
707 char *data = NULL;
708 ssize_t ret, data_len = -EINVAL;
709 int halt;
710
711 /* Are we still active? */
712 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
713 ret = -ENODEV;
714 goto error;
715 }
716
717 /* Wait for endpoint to be enabled */
718 ep = epfile->ep;
719 if (!ep) {
720 if (file->f_flags & O_NONBLOCK) {
721 ret = -EAGAIN;
722 goto error;
723 }
724
725 ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
726 if (ret) {
727 ret = -EINTR;
728 goto error;
729 }
730 }
731
732 /* Do we halt? */
733 halt = (!io_data->read == !epfile->in);
734 if (halt && epfile->isoc) {
735 ret = -EINVAL;
736 goto error;
737 }
738
739 /* Allocate & copy */
740 if (!halt) {
741 /*
742 * if we _do_ wait above, the epfile->ffs->gadget might be NULL
743 * before the waiting completes, so do not assign to 'gadget' earlier
744 */
745 struct usb_gadget *gadget = epfile->ffs->gadget;
746
747 spin_lock_irq(&epfile->ffs->eps_lock);
748 /* In the meantime, endpoint got disabled or changed. */
749 if (epfile->ep != ep) {
750 spin_unlock_irq(&epfile->ffs->eps_lock);
751 return -ESHUTDOWN;
752 }
753 /*
754 * Controller may require buffer size to be aligned to
755 * maxpacketsize of an out endpoint.
756 */
757 data_len = io_data->read ?
758 usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
759 io_data->len;
760 spin_unlock_irq(&epfile->ffs->eps_lock);
761
762 data = kmalloc(data_len, GFP_KERNEL);
763 if (unlikely(!data))
764 return -ENOMEM;
765 if (io_data->aio && !io_data->read) {
766 int i;
767 size_t pos = 0;
768 for (i = 0; i < io_data->nr_segs; i++) {
769 if (unlikely(copy_from_user(&data[pos],
770 io_data->iovec[i].iov_base,
771 io_data->iovec[i].iov_len))) {
772 ret = -EFAULT;
773 goto error;
774 }
775 pos += io_data->iovec[i].iov_len;
776 }
777 } else {
778 if (!io_data->read &&
779 unlikely(__copy_from_user(data, io_data->buf,
780 io_data->len))) {
781 ret = -EFAULT;
782 goto error;
783 }
784 }
785 }
786
787 /* We will be using request */
788 ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
789 if (unlikely(ret))
790 goto error;
791
792 spin_lock_irq(&epfile->ffs->eps_lock);
793
794 if (epfile->ep != ep) {
795 /* In the meantime, endpoint got disabled or changed. */
796 ret = -ESHUTDOWN;
797 spin_unlock_irq(&epfile->ffs->eps_lock);
798 } else if (halt) {
799 /* Halt */
800 if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
801 usb_ep_set_halt(ep->ep);
802 spin_unlock_irq(&epfile->ffs->eps_lock);
803 ret = -EBADMSG;
804 } else {
805 /* Fire the request */
806 struct usb_request *req;
807
808 /*
809 * Sanity Check: even though data_len can't be used
810 * uninitialized at the time I write this comment, some
811 * compilers complain about this situation.
812 * In order to keep the code free of warnings, data_len is
813 * initialized to -EINVAL at its declaration, which means we can
814 * no longer rely on the compiler to warn if a future change
815 * results in data_len being used uninitialized.
816 * For that reason, we're adding this redundant sanity check
817 * here.
818 */
819 if (unlikely(data_len == -EINVAL)) {
820 WARN(1, "%s: data_len == -EINVAL\n", __func__);
821 ret = -EINVAL;
822 goto error_lock;
823 }
824
825 if (io_data->aio) {
826 req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
827 if (unlikely(!req)) {
ret = -ENOMEM;
828 goto error_lock;
}
829
830 req->buf = data;
831 req->length = data_len;
832
833 io_data->buf = data;
834 io_data->ep = ep->ep;
835 io_data->req = req;
836 io_data->ffs = epfile->ffs;
837
838 req->context = io_data;
839 req->complete = ffs_epfile_async_io_complete;
840
841 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
842 if (unlikely(ret)) {
843 usb_ep_free_request(ep->ep, req);
844 goto error_lock;
845 }
846 ret = -EIOCBQUEUED;
847
848 spin_unlock_irq(&epfile->ffs->eps_lock);
849 } else {
850 DECLARE_COMPLETION_ONSTACK(done);
851
852 req = ep->req;
853 req->buf = data;
854 req->length = data_len;
855
856 req->context = &done;
857 req->complete = ffs_epfile_io_complete;
858
859 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
860
861 spin_unlock_irq(&epfile->ffs->eps_lock);
862
863 if (unlikely(ret < 0)) {
864 /* nop */
865 } else if (unlikely(
866 wait_for_completion_interruptible(&done))) {
867 ret = -EINTR;
868 usb_ep_dequeue(ep->ep, req);
869 } else {
870 /*
871 * XXX We may end up silently dropping data
872 * here. Since data_len (i.e. req->length) may
873 * be bigger than len (after being rounded up
874 * to maxpacketsize), we may end up with more
875 * data than user space has space for.
876 */
877 ret = ep->status;
878 if (io_data->read && ret > 0) {
879 ret = min_t(size_t, ret, io_data->len);
880
881 if (unlikely(copy_to_user(io_data->buf,
882 data, ret)))
883 ret = -EFAULT;
884 }
885 }
886 kfree(data);
887 }
888 }
889
890 mutex_unlock(&epfile->mutex);
891 return ret;
892
893 error_lock:
894 spin_unlock_irq(&epfile->ffs->eps_lock);
895 mutex_unlock(&epfile->mutex);
896 error:
897 kfree(data);
898 return ret;
899 }
900
901 static ssize_t
902 ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
903 loff_t *ptr)
904 {
905 struct ffs_io_data io_data;
906
907 ENTER();
908
909 io_data.aio = false;
910 io_data.read = false;
911 io_data.buf = (char __user *)buf;
912 io_data.len = len;
913
914 return ffs_epfile_io(file, &io_data);
915 }
916
917 static ssize_t
918 ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
919 {
920 struct ffs_io_data io_data;
921
922 ENTER();
923
924 io_data.aio = false;
925 io_data.read = true;
926 io_data.buf = buf;
927 io_data.len = len;
928
929 return ffs_epfile_io(file, &io_data);
930 }
931
932 static int
933 ffs_epfile_open(struct inode *inode, struct file *file)
934 {
935 struct ffs_epfile *epfile = inode->i_private;
936
937 ENTER();
938
939 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
940 return -ENODEV;
941
942 file->private_data = epfile;
943 ffs_data_opened(epfile->ffs);
944
945 return 0;
946 }
947
948 static int ffs_aio_cancel(struct kiocb *kiocb)
949 {
950 struct ffs_io_data *io_data = kiocb->private;
951 struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
952 int value;
953
954 ENTER();
955
956 spin_lock_irq(&epfile->ffs->eps_lock);
957
958 if (likely(io_data && io_data->ep && io_data->req))
959 value = usb_ep_dequeue(io_data->ep, io_data->req);
960 else
961 value = -EINVAL;
962
963 spin_unlock_irq(&epfile->ffs->eps_lock);
964
965 return value;
966 }
967
968 static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb,
969 const struct iovec *iovec,
970 unsigned long nr_segs, loff_t loff)
971 {
972 struct ffs_io_data *io_data;
973
974 ENTER();
975
976 io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
977 if (unlikely(!io_data))
978 return -ENOMEM;
979
980 io_data->aio = true;
981 io_data->read = false;
982 io_data->kiocb = kiocb;
983 io_data->iovec = iovec;
984 io_data->nr_segs = nr_segs;
985 io_data->len = kiocb->ki_nbytes;
986 io_data->mm = current->mm;
987
988 kiocb->private = io_data;
989
990 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
991
992 return ffs_epfile_io(kiocb->ki_filp, io_data);
993 }
994
995 static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb,
996 const struct iovec *iovec,
997 unsigned long nr_segs, loff_t loff)
998 {
999 struct ffs_io_data *io_data;
1000 struct iovec *iovec_copy;
1001
1002 ENTER();
1003
1004 iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
1005 if (unlikely(!iovec_copy))
1006 return -ENOMEM;
1007
1008 memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);
1009
1010 io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
1011 if (unlikely(!io_data)) {
1012 kfree(iovec_copy);
1013 return -ENOMEM;
1014 }
1015
1016 io_data->aio = true;
1017 io_data->read = true;
1018 io_data->kiocb = kiocb;
1019 io_data->iovec = iovec_copy;
1020 io_data->nr_segs = nr_segs;
1021 io_data->len = kiocb->ki_nbytes;
1022 io_data->mm = current->mm;
1023
1024 kiocb->private = io_data;
1025
1026 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
1027
1028 return ffs_epfile_io(kiocb->ki_filp, io_data);
1029 }
1030
1031 static int
1032 ffs_epfile_release(struct inode *inode, struct file *file)
1033 {
1034 struct ffs_epfile *epfile = inode->i_private;
1035
1036 ENTER();
1037
1038 ffs_data_closed(epfile->ffs);
1039
1040 return 0;
1041 }
1042
1043 static long ffs_epfile_ioctl(struct file *file, unsigned code,
1044 unsigned long value)
1045 {
1046 struct ffs_epfile *epfile = file->private_data;
1047 int ret;
1048
1049 ENTER();
1050
1051 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1052 return -ENODEV;
1053
1054 spin_lock_irq(&epfile->ffs->eps_lock);
1055 if (likely(epfile->ep)) {
1056 switch (code) {
1057 case FUNCTIONFS_FIFO_STATUS:
1058 ret = usb_ep_fifo_status(epfile->ep->ep);
1059 break;
1060 case FUNCTIONFS_FIFO_FLUSH:
1061 usb_ep_fifo_flush(epfile->ep->ep);
1062 ret = 0;
1063 break;
1064 case FUNCTIONFS_CLEAR_HALT:
1065 ret = usb_ep_clear_halt(epfile->ep->ep);
1066 break;
1067 case FUNCTIONFS_ENDPOINT_REVMAP:
1068 ret = epfile->ep->num;
1069 break;
1070 case FUNCTIONFS_ENDPOINT_DESC:
1071 {
1072 int desc_idx;
1073 struct usb_endpoint_descriptor *desc;
1074
1075 switch (epfile->ffs->gadget->speed) {
1076 case USB_SPEED_SUPER:
1077 desc_idx = 2;
1078 break;
1079 case USB_SPEED_HIGH:
1080 desc_idx = 1;
1081 break;
1082 default:
1083 desc_idx = 0;
1084 }
1085 desc = epfile->ep->descs[desc_idx];
1086
1087 spin_unlock_irq(&epfile->ffs->eps_lock);
1088 ret = copy_to_user((void __user *)value, desc, sizeof(*desc));
1089 if (ret)
1090 ret = -EFAULT;
1091 return ret;
1092 }
1093 default:
1094 ret = -ENOTTY;
1095 }
1096 } else {
1097 ret = -ENODEV;
1098 }
1099 spin_unlock_irq(&epfile->ffs->eps_lock);
1100
1101 return ret;
1102 }
1103
1104 static const struct file_operations ffs_epfile_operations = {
1105 .llseek = no_llseek,
1106
1107 .open = ffs_epfile_open,
1108 .write = ffs_epfile_write,
1109 .read = ffs_epfile_read,
1110 .aio_write = ffs_epfile_aio_write,
1111 .aio_read = ffs_epfile_aio_read,
1112 .release = ffs_epfile_release,
1113 .unlocked_ioctl = ffs_epfile_ioctl,
1114 };
1115
1116
1117 /* File system and super block operations ***********************************/
1118
1119 /*
1120 * Mounting the file system creates a controller file, used first for
1121 * function configuration then later for event monitoring.
1122 */
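/*
 * A sketch of the typical user space sequence (illustrative only):
 *
 *   mount -t functionfs <dev_name> <mountpoint>
 *   open  <mountpoint>/ep0
 *   write(ep0, descriptors blob)   -> consumed in FFS_READ_DESCRIPTORS
 *   write(ep0, strings blob)       -> endpoint files appear, FFS_ACTIVE
 *   open/read/write <mountpoint>/ep1, ep2, ...  -> endpoint I/O
 *   read(ep0)                      -> events and setup data stages
 */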
1123
1124 static struct inode *__must_check
1125 ffs_sb_make_inode(struct super_block *sb, void *data,
1126 const struct file_operations *fops,
1127 const struct inode_operations *iops,
1128 struct ffs_file_perms *perms)
1129 {
1130 struct inode *inode;
1131
1132 ENTER();
1133
1134 inode = new_inode(sb);
1135
1136 if (likely(inode)) {
1137 struct timespec current_time = CURRENT_TIME;
1138
1139 inode->i_ino = get_next_ino();
1140 inode->i_mode = perms->mode;
1141 inode->i_uid = perms->uid;
1142 inode->i_gid = perms->gid;
1143 inode->i_atime = current_time;
1144 inode->i_mtime = current_time;
1145 inode->i_ctime = current_time;
1146 inode->i_private = data;
1147 if (fops)
1148 inode->i_fop = fops;
1149 if (iops)
1150 inode->i_op = iops;
1151 }
1152
1153 return inode;
1154 }
1155
1156 /* Create "regular" file */
1157 static struct dentry *ffs_sb_create_file(struct super_block *sb,
1158 const char *name, void *data,
1159 const struct file_operations *fops)
1160 {
1161 struct ffs_data *ffs = sb->s_fs_info;
1162 struct dentry *dentry;
1163 struct inode *inode;
1164
1165 ENTER();
1166
1167 dentry = d_alloc_name(sb->s_root, name);
1168 if (unlikely(!dentry))
1169 return NULL;
1170
1171 inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
1172 if (unlikely(!inode)) {
1173 dput(dentry);
1174 return NULL;
1175 }
1176
1177 d_add(dentry, inode);
1178 return dentry;
1179 }
1180
1181 /* Super block */
1182 static const struct super_operations ffs_sb_operations = {
1183 .statfs = simple_statfs,
1184 .drop_inode = generic_delete_inode,
1185 };
1186
1187 struct ffs_sb_fill_data {
1188 struct ffs_file_perms perms;
1189 umode_t root_mode;
1190 const char *dev_name;
1191 bool no_disconnect;
1192 struct ffs_data *ffs_data;
1193 };
1194
1195 static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1196 {
1197 struct ffs_sb_fill_data *data = _data;
1198 struct inode *inode;
1199 struct ffs_data *ffs = data->ffs_data;
1200
1201 ENTER();
1202
1203 ffs->sb = sb;
1204 data->ffs_data = NULL;
1205 sb->s_fs_info = ffs;
1206 sb->s_blocksize = PAGE_CACHE_SIZE;
1207 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
1208 sb->s_magic = FUNCTIONFS_MAGIC;
1209 sb->s_op = &ffs_sb_operations;
1210 sb->s_time_gran = 1;
1211
1212 /* Root inode */
1213 data->perms.mode = data->root_mode;
1214 inode = ffs_sb_make_inode(sb, NULL,
1215 &simple_dir_operations,
1216 &simple_dir_inode_operations,
1217 &data->perms);
1218 sb->s_root = d_make_root(inode);
1219 if (unlikely(!sb->s_root))
1220 return -ENOMEM;
1221
1222 /* EP0 file */
1223 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
1224 &ffs_ep0_operations)))
1225 return -ENOMEM;
1226
1227 return 0;
1228 }
1229
1230 static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
1231 {
1232 ENTER();
1233
1234 if (!opts || !*opts)
1235 return 0;
1236
1237 for (;;) {
1238 unsigned long value;
1239 char *eq, *comma;
1240
1241 /* Option limit */
1242 comma = strchr(opts, ',');
1243 if (comma)
1244 *comma = 0;
1245
1246 /* Value limit */
1247 eq = strchr(opts, '=');
1248 if (unlikely(!eq)) {
1249 pr_err("'=' missing in %s\n", opts);
1250 return -EINVAL;
1251 }
1252 *eq = 0;
1253
1254 /* Parse value */
1255 if (kstrtoul(eq + 1, 0, &value)) {
1256 pr_err("%s: invalid value: %s\n", opts, eq + 1);
1257 return -EINVAL;
1258 }
1259
1260 /* Interpret option */
1261 switch (eq - opts) {
1262 case 13:
1263 if (!memcmp(opts, "no_disconnect", 13))
1264 data->no_disconnect = !!value;
1265 else
1266 goto invalid;
1267 break;
1268 case 5:
1269 if (!memcmp(opts, "rmode", 5))
1270 data->root_mode = (value & 0555) | S_IFDIR;
1271 else if (!memcmp(opts, "fmode", 5))
1272 data->perms.mode = (value & 0666) | S_IFREG;
1273 else
1274 goto invalid;
1275 break;
1276
1277 case 4:
1278 if (!memcmp(opts, "mode", 4)) {
1279 data->root_mode = (value & 0555) | S_IFDIR;
1280 data->perms.mode = (value & 0666) | S_IFREG;
1281 } else {
1282 goto invalid;
1283 }
1284 break;
1285
1286 case 3:
1287 if (!memcmp(opts, "uid", 3)) {
1288 data->perms.uid = make_kuid(current_user_ns(), value);
1289 if (!uid_valid(data->perms.uid)) {
1290 pr_err("%s: unmapped value: %lu\n", opts, value);
1291 return -EINVAL;
1292 }
1293 } else if (!memcmp(opts, "gid", 3)) {
1294 data->perms.gid = make_kgid(current_user_ns(), value);
1295 if (!gid_valid(data->perms.gid)) {
1296 pr_err("%s: unmapped value: %lu\n", opts, value);
1297 return -EINVAL;
1298 }
1299 } else {
1300 goto invalid;
1301 }
1302 break;
1303
1304 default:
1305 invalid:
1306 pr_err("%s: invalid option\n", opts);
1307 return -EINVAL;
1308 }
1309
1310 /* Next iteration */
1311 if (!comma)
1312 break;
1313 opts = comma + 1;
1314 }
1315
1316 return 0;
1317 }
1318
1319 /* "mount -t functionfs dev_name /dev/function" ends up here */
1320
1321 static struct dentry *
1322 ffs_fs_mount(struct file_system_type *t, int flags,
1323 const char *dev_name, void *opts)
1324 {
1325 struct ffs_sb_fill_data data = {
1326 .perms = {
1327 .mode = S_IFREG | 0600,
1328 .uid = GLOBAL_ROOT_UID,
1329 .gid = GLOBAL_ROOT_GID,
1330 },
1331 .root_mode = S_IFDIR | 0500,
1332 .no_disconnect = false,
1333 };
1334 struct dentry *rv;
1335 int ret;
1336 void *ffs_dev;
1337 struct ffs_data *ffs;
1338
1339 ENTER();
1340
1341 ret = ffs_fs_parse_opts(&data, opts);
1342 if (unlikely(ret < 0))
1343 return ERR_PTR(ret);
1344
1345 ffs = ffs_data_new();
1346 if (unlikely(!ffs))
1347 return ERR_PTR(-ENOMEM);
1348 ffs->file_perms = data.perms;
1349 ffs->no_disconnect = data.no_disconnect;
1350
1351 ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
1352 if (unlikely(!ffs->dev_name)) {
1353 ffs_data_put(ffs);
1354 return ERR_PTR(-ENOMEM);
1355 }
1356
1357 ffs_dev = ffs_acquire_dev(dev_name);
1358 if (IS_ERR(ffs_dev)) {
1359 ffs_data_put(ffs);
1360 return ERR_CAST(ffs_dev);
1361 }
1362 ffs->private_data = ffs_dev;
1363 data.ffs_data = ffs;
1364
1365 rv = mount_nodev(t, flags, &data, ffs_sb_fill);
1366 if (IS_ERR(rv) && data.ffs_data) {
1367 ffs_release_dev(data.ffs_data);
1368 ffs_data_put(data.ffs_data);
1369 }
1370 return rv;
1371 }
1372
1373 static void
1374 ffs_fs_kill_sb(struct super_block *sb)
1375 {
1376 ENTER();
1377
1378 kill_litter_super(sb);
1379 if (sb->s_fs_info) {
1380 ffs_release_dev(sb->s_fs_info);
1381 ffs_data_closed(sb->s_fs_info);
1382 ffs_data_put(sb->s_fs_info);
1383 }
1384 }
1385
1386 static struct file_system_type ffs_fs_type = {
1387 .owner = THIS_MODULE,
1388 .name = "functionfs",
1389 .mount = ffs_fs_mount,
1390 .kill_sb = ffs_fs_kill_sb,
1391 };
1392 MODULE_ALIAS_FS("functionfs");
1393
1394
1395 /* Driver's main init/cleanup functions *************************************/
1396
1397 static int functionfs_init(void)
1398 {
1399 int ret;
1400
1401 ENTER();
1402
1403 ret = register_filesystem(&ffs_fs_type);
1404 if (likely(!ret))
1405 pr_info("file system registered\n");
1406 else
1407 pr_err("failed registering file system (%d)\n", ret);
1408
1409 return ret;
1410 }
1411
1412 static void functionfs_cleanup(void)
1413 {
1414 ENTER();
1415
1416 pr_info("unloading\n");
1417 unregister_filesystem(&ffs_fs_type);
1418 }
1419
1420
1421 /* ffs_data and ffs_function construction and destruction code **************/
1422
1423 static void ffs_data_clear(struct ffs_data *ffs);
1424 static void ffs_data_reset(struct ffs_data *ffs);
1425
1426 static void ffs_data_get(struct ffs_data *ffs)
1427 {
1428 ENTER();
1429
1430 atomic_inc(&ffs->ref);
1431 }
1432
1433 static void ffs_data_opened(struct ffs_data *ffs)
1434 {
1435 ENTER();
1436
1437 atomic_inc(&ffs->ref);
1438 if (atomic_add_return(1, &ffs->opened) == 1 &&
1439 ffs->state == FFS_DEACTIVATED) {
1440 ffs->state = FFS_CLOSING;
1441 ffs_data_reset(ffs);
1442 }
1443 }
1444
1445 static void ffs_data_put(struct ffs_data *ffs)
1446 {
1447 ENTER();
1448
1449 if (unlikely(atomic_dec_and_test(&ffs->ref))) {
1450 pr_info("%s(): freeing\n", __func__);
1451 ffs_data_clear(ffs);
1452 BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
1453 waitqueue_active(&ffs->ep0req_completion.wait));
1454 kfree(ffs->dev_name);
1455 kfree(ffs);
1456 }
1457 }
1458
1459 static void ffs_data_closed(struct ffs_data *ffs)
1460 {
1461 ENTER();
1462
1463 if (atomic_dec_and_test(&ffs->opened)) {
1464 if (ffs->no_disconnect) {
1465 ffs->state = FFS_DEACTIVATED;
1466 if (ffs->epfiles) {
1467 ffs_epfiles_destroy(ffs->epfiles,
1468 ffs->eps_count);
1469 ffs->epfiles = NULL;
1470 }
1471 if (ffs->setup_state == FFS_SETUP_PENDING)
1472 __ffs_ep0_stall(ffs);
1473 } else {
1474 ffs->state = FFS_CLOSING;
1475 ffs_data_reset(ffs);
1476 }
1477 }
1478 if (atomic_read(&ffs->opened) < 0) {
1479 ffs->state = FFS_CLOSING;
1480 ffs_data_reset(ffs);
1481 }
1482
1483 ffs_data_put(ffs);
1484 }
1485
1486 static struct ffs_data *ffs_data_new(void)
1487 {
1488 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1489 if (unlikely(!ffs))
1490 return NULL;
1491
1492 ENTER();
1493
1494 atomic_set(&ffs->ref, 1);
1495 atomic_set(&ffs->opened, 0);
1496 ffs->state = FFS_READ_DESCRIPTORS;
1497 mutex_init(&ffs->mutex);
1498 spin_lock_init(&ffs->eps_lock);
1499 init_waitqueue_head(&ffs->ev.waitq);
1500 init_completion(&ffs->ep0req_completion);
1501
1502 /* XXX REVISIT need to update it in some places, or do we? */
1503 ffs->ev.can_stall = 1;
1504
1505 return ffs;
1506 }
1507
1508 static void ffs_data_clear(struct ffs_data *ffs)
1509 {
1510 ENTER();
1511
1512 if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
1513 ffs_closed(ffs);
1514
1515 BUG_ON(ffs->gadget);
1516
1517 if (ffs->epfiles)
1518 ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
1519
1520 if (ffs->ffs_eventfd)
1521 eventfd_ctx_put(ffs->ffs_eventfd);
1522
1523 kfree(ffs->raw_descs_data);
1524 kfree(ffs->raw_strings);
1525 kfree(ffs->stringtabs);
1526 }
1527
1528 static void ffs_data_reset(struct ffs_data *ffs)
1529 {
1530 ENTER();
1531
1532 ffs_data_clear(ffs);
1533
1534 ffs->epfiles = NULL;
1535 ffs->raw_descs_data = NULL;
1536 ffs->raw_descs = NULL;
1537 ffs->raw_strings = NULL;
1538 ffs->stringtabs = NULL;
1539
1540 ffs->raw_descs_length = 0;
1541 ffs->fs_descs_count = 0;
1542 ffs->hs_descs_count = 0;
1543 ffs->ss_descs_count = 0;
1544
1545 ffs->strings_count = 0;
1546 ffs->interfaces_count = 0;
1547 ffs->eps_count = 0;
1548
1549 ffs->ev.count = 0;
1550
1551 ffs->state = FFS_READ_DESCRIPTORS;
1552 ffs->setup_state = FFS_NO_SETUP;
1553 ffs->flags = 0;
1554 }
1555
1556
1557 static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
1558 {
1559 struct usb_gadget_strings **lang;
1560 int first_id;
1561
1562 ENTER();
1563
1564 if (WARN_ON(ffs->state != FFS_ACTIVE
1565 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
1566 return -EBADFD;
1567
1568 first_id = usb_string_ids_n(cdev, ffs->strings_count);
1569 if (unlikely(first_id < 0))
1570 return first_id;
1571
1572 ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
1573 if (unlikely(!ffs->ep0req))
1574 return -ENOMEM;
1575 ffs->ep0req->complete = ffs_ep0_complete;
1576 ffs->ep0req->context = ffs;
1577
1578 lang = ffs->stringtabs;
1579 if (lang) {
1580 for (; *lang; ++lang) {
1581 struct usb_string *str = (*lang)->strings;
1582 int id = first_id;
1583 for (; str->s; ++id, ++str)
1584 str->id = id;
1585 }
1586 }
1587
1588 ffs->gadget = cdev->gadget;
1589 ffs_data_get(ffs);
1590 return 0;
1591 }
1592
1593 static void functionfs_unbind(struct ffs_data *ffs)
1594 {
1595 ENTER();
1596
1597 if (!WARN_ON(!ffs->gadget)) {
1598 usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
1599 ffs->ep0req = NULL;
1600 ffs->gadget = NULL;
1601 clear_bit(FFS_FL_BOUND, &ffs->flags);
1602 ffs_data_put(ffs);
1603 }
1604 }
1605
1606 static int ffs_epfiles_create(struct ffs_data *ffs)
1607 {
1608 struct ffs_epfile *epfile, *epfiles;
1609 unsigned i, count;
1610
1611 ENTER();
1612
1613 count = ffs->eps_count;
1614 epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
1615 if (!epfiles)
1616 return -ENOMEM;
1617
1618 epfile = epfiles;
1619 for (i = 1; i <= count; ++i, ++epfile) {
1620 epfile->ffs = ffs;
1621 mutex_init(&epfile->mutex);
1622 init_waitqueue_head(&epfile->wait);
1623 if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
1624 sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
1625 else
1626 sprintf(epfile->name, "ep%u", i);
1627 epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
1628 epfile,
1629 &ffs_epfile_operations);
1630 if (unlikely(!epfile->dentry)) {
1631 ffs_epfiles_destroy(epfiles, i - 1);
1632 return -ENOMEM;
1633 }
1634 }
1635
1636 ffs->epfiles = epfiles;
1637 return 0;
1638 }
1639
1640 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
1641 {
1642 struct ffs_epfile *epfile = epfiles;
1643
1644 ENTER();
1645
1646 for (; count; --count, ++epfile) {
1647 BUG_ON(mutex_is_locked(&epfile->mutex) ||
1648 waitqueue_active(&epfile->wait));
1649 if (epfile->dentry) {
1650 d_delete(epfile->dentry);
1651 dput(epfile->dentry);
1652 epfile->dentry = NULL;
1653 }
1654 }
1655
1656 kfree(epfiles);
1657 }
1658
1659 static void ffs_func_eps_disable(struct ffs_function *func)
1660 {
1661 struct ffs_ep *ep = func->eps;
1662 struct ffs_epfile *epfile = func->ffs->epfiles;
1663 unsigned count = func->ffs->eps_count;
1664 unsigned long flags;
1665
1666 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1667 do {
1668 /* pending requests get nuked */
1669 if (likely(ep->ep))
1670 usb_ep_disable(ep->ep);
1671 ++ep;
1672
1673 if (epfile) {
1674 epfile->ep = NULL;
1675 ++epfile;
1676 }
1677 } while (--count);
1678 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1679 }
1680
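/*
 * Enable all of the function's endpoints: pick the descriptor matching the
 * current gadget speed (falling back to a lower-speed one if it is missing),
 * enable the endpoint and wake up any reader/writer sleeping in
 * ffs_epfile_io() waiting for the endpoint file to become usable.
 */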
1681 static int ffs_func_eps_enable(struct ffs_function *func)
1682 {
1683 struct ffs_data *ffs = func->ffs;
1684 struct ffs_ep *ep = func->eps;
1685 struct ffs_epfile *epfile = ffs->epfiles;
1686 unsigned count = ffs->eps_count;
1687 unsigned long flags;
1688 int ret = 0;
1689
1690 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1691 do {
1692 struct usb_endpoint_descriptor *ds;
1693 int desc_idx;
1694
1695 if (ffs->gadget->speed == USB_SPEED_SUPER)
1696 desc_idx = 2;
1697 else if (ffs->gadget->speed == USB_SPEED_HIGH)
1698 desc_idx = 1;
1699 else
1700 desc_idx = 0;
1701
1702 /* fall-back to lower speed if desc missing for current speed */
1703 do {
1704 ds = ep->descs[desc_idx];
1705 } while (!ds && --desc_idx >= 0);
1706
1707 if (!ds) {
1708 ret = -EINVAL;
1709 break;
1710 }
1711
1712 ep->ep->driver_data = ep;
1713 ep->ep->desc = ds;
1714 ret = usb_ep_enable(ep->ep);
1715 if (likely(!ret)) {
1716 epfile->ep = ep;
1717 epfile->in = usb_endpoint_dir_in(ds);
1718 epfile->isoc = usb_endpoint_xfer_isoc(ds);
1719 } else {
1720 break;
1721 }
1722
1723 wake_up(&epfile->wait);
1724
1725 ++ep;
1726 ++epfile;
1727 } while (--count);
1728 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1729
1730 return ret;
1731 }
1732
1733
1734 /* Parsing and building descriptors and strings *****************************/
1735
1736 /*
1737 * This validates whether the data pointed to by 'data' is a valid USB
1738 * descriptor, and records how many interfaces, endpoints and strings are
1739 * required by the given configuration. Returns the descriptor's length
1740 * on success or a negative error code if the data is invalid.
1741 */
1742
1743 enum ffs_entity_type {
1744 FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
1745 };
1746
1747 enum ffs_os_desc_type {
1748 FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
1749 };
1750
1751 typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
1752 u8 *valuep,
1753 struct usb_descriptor_header *desc,
1754 void *priv);
1755
1756 typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
1757 struct usb_os_desc_header *h, void *data,
1758 unsigned len, void *priv);
1759
1760 static int __must_check ffs_do_single_desc(char *data, unsigned len,
1761 ffs_entity_callback entity,
1762 void *priv)
1763 {
1764 struct usb_descriptor_header *_ds = (void *)data;
1765 u8 length;
1766 int ret;
1767
1768 ENTER();
1769
1770 /* At least two bytes are required: length and type */
1771 if (len < 2) {
1772 pr_vdebug("descriptor too short\n");
1773 return -EINVAL;
1774 }
1775
1776 /* Do we have at least as many bytes as the descriptor takes? */
1777 length = _ds->bLength;
1778 if (len < length) {
1779 pr_vdebug("descriptor longer then available data\n");
1780 return -EINVAL;
1781 }
1782
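/*
 * The __entity*() helpers below validate the value carried by a descriptor
 * entity (interface number, string id, endpoint address) and forward it to
 * the caller-supplied callback, propagating any error it returns.
 */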
1783 #define __entity_check_INTERFACE(val) 1
1784 #define __entity_check_STRING(val) (val)
1785 #define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
1786 #define __entity(type, val) do { \
1787 pr_vdebug("entity " #type "(%02x)\n", (val)); \
1788 if (unlikely(!__entity_check_ ##type(val))) { \
1789 pr_vdebug("invalid entity's value\n"); \
1790 return -EINVAL; \
1791 } \
1792 ret = entity(FFS_ ##type, &val, _ds, priv); \
1793 if (unlikely(ret < 0)) { \
1794 pr_debug("entity " #type "(%02x); ret = %d\n", \
1795 (val), ret); \
1796 return ret; \
1797 } \
1798 } while (0)
1799
1800 /* Parse descriptor depending on type. */
1801 switch (_ds->bDescriptorType) {
1802 case USB_DT_DEVICE:
1803 case USB_DT_CONFIG:
1804 case USB_DT_STRING:
1805 case USB_DT_DEVICE_QUALIFIER:
1806 /* function can't have any of those */
1807 pr_vdebug("descriptor reserved for gadget: %d\n",
1808 _ds->bDescriptorType);
1809 return -EINVAL;
1810
1811 case USB_DT_INTERFACE: {
1812 struct usb_interface_descriptor *ds = (void *)_ds;
1813 pr_vdebug("interface descriptor\n");
1814 if (length != sizeof *ds)
1815 goto inv_length;
1816
1817 __entity(INTERFACE, ds->bInterfaceNumber);
1818 if (ds->iInterface)
1819 __entity(STRING, ds->iInterface);
1820 }
1821 break;
1822
1823 case USB_DT_ENDPOINT: {
1824 struct usb_endpoint_descriptor *ds = (void *)_ds;
1825 pr_vdebug("endpoint descriptor\n");
1826 if (length != USB_DT_ENDPOINT_SIZE &&
1827 length != USB_DT_ENDPOINT_AUDIO_SIZE)
1828 goto inv_length;
1829 __entity(ENDPOINT, ds->bEndpointAddress);
1830 }
1831 break;
1832
1833 case HID_DT_HID:
1834 pr_vdebug("hid descriptor\n");
1835 if (length != sizeof(struct hid_descriptor))
1836 goto inv_length;
1837 break;
1838
1839 case USB_DT_OTG:
1840 if (length != sizeof(struct usb_otg_descriptor))
1841 goto inv_length;
1842 break;
1843
1844 case USB_DT_INTERFACE_ASSOCIATION: {
1845 struct usb_interface_assoc_descriptor *ds = (void *)_ds;
1846 pr_vdebug("interface association descriptor\n");
1847 if (length != sizeof *ds)
1848 goto inv_length;
1849 if (ds->iFunction)
1850 __entity(STRING, ds->iFunction);
1851 }
1852 break;
1853
1854 case USB_DT_SS_ENDPOINT_COMP:
1855 pr_vdebug("EP SS companion descriptor\n");
1856 if (length != sizeof(struct usb_ss_ep_comp_descriptor))
1857 goto inv_length;
1858 break;
1859
1860 case USB_DT_OTHER_SPEED_CONFIG:
1861 case USB_DT_INTERFACE_POWER:
1862 case USB_DT_DEBUG:
1863 case USB_DT_SECURITY:
1864 case USB_DT_CS_RADIO_CONTROL:
1865 /* TODO */
1866 pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
1867 return -EINVAL;
1868
1869 default:
1870 /* We should never be here */
1871 pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
1872 return -EINVAL;
1873
1874 inv_length:
1875 pr_vdebug("invalid length: %d (descriptor %d)\n",
1876 _ds->bLength, _ds->bDescriptorType);
1877 return -EINVAL;
1878 }
1879
1880 #undef __entity
1881 #undef __entity_check_DESCRIPTOR
1882 #undef __entity_check_INTERFACE
1883 #undef __entity_check_STRING
1884 #undef __entity_check_ENDPOINT
1885
1886 return length;
1887 }
1888
1889 static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
1890 ffs_entity_callback entity, void *priv)
1891 {
1892 const unsigned _len = len;
1893 unsigned long num = 0;
1894
1895 ENTER();
1896
1897 for (;;) {
1898 int ret;
1899
1900 if (num == count)
1901 data = NULL;
1902
1903 /* Record "descriptor" entity */
1904 ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
1905 if (unlikely(ret < 0)) {
1906 pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
1907 num, ret);
1908 return ret;
1909 }
1910
1911 if (!data)
1912 return _len - len;
1913
1914 ret = ffs_do_single_desc(data, len, entity, priv);
1915 if (unlikely(ret < 0)) {
1916 pr_debug("%s returns %d\n", __func__, ret);
1917 return ret;
1918 }
1919
1920 len -= ret;
1921 data += ret;
1922 ++num;
1923 }
1924 }
1925
1926 static int __ffs_data_do_entity(enum ffs_entity_type type,
1927 u8 *valuep, struct usb_descriptor_header *desc,
1928 void *priv)
1929 {
1930 struct ffs_desc_helper *helper = priv;
1931 struct usb_endpoint_descriptor *d;
1932
1933 ENTER();
1934
1935 switch (type) {
1936 case FFS_DESCRIPTOR:
1937 break;
1938
1939 case FFS_INTERFACE:
1940 /*
1941 * Interfaces are indexed from zero so if we
1942 * encountered interface "n" then there are at least
1943 * "n+1" interfaces.
1944 */
1945 if (*valuep >= helper->interfaces_count)
1946 helper->interfaces_count = *valuep + 1;
1947 break;
1948
1949 case FFS_STRING:
1950 /*
1951 * Strings are indexed from 1 (0 is magic ;) reserved
1952 * for languages list or some such)
1953 */
1954 if (*valuep > helper->ffs->strings_count)
1955 helper->ffs->strings_count = *valuep;
1956 break;
1957
1958 case FFS_ENDPOINT:
1959 d = (void *)desc;
1960 helper->eps_count++;
1961 if (helper->eps_count >= 15)
1962 return -EINVAL;
1963 /* Check if descriptors for any speed were already parsed */
1964 if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
1965 helper->ffs->eps_addrmap[helper->eps_count] =
1966 d->bEndpointAddress;
1967 else if (helper->ffs->eps_addrmap[helper->eps_count] !=
1968 d->bEndpointAddress)
1969 return -EINVAL;
1970 break;
1971 }
1972
1973 return 0;
1974 }
1975
1976 static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
1977 struct usb_os_desc_header *desc)
1978 {
1979 u16 bcd_version = le16_to_cpu(desc->bcdVersion);
1980 u16 w_index = le16_to_cpu(desc->wIndex);
1981
1982 if (bcd_version != 1) {
1983 pr_vdebug("unsupported os descriptors version: %d",
1984 bcd_version);
1985 return -EINVAL;
1986 }
1987 switch (w_index) {
1988 case 0x4:
1989 *next_type = FFS_OS_DESC_EXT_COMPAT;
1990 break;
1991 case 0x5:
1992 *next_type = FFS_OS_DESC_EXT_PROP;
1993 break;
1994 default:
1995 pr_vdebug("unsupported os descriptor type: %d", w_index);
1996 return -EINVAL;
1997 }
1998
1999 return sizeof(*desc);
2000 }
2001
2002 /*
2003 * Process all extended compatibility/extended property descriptors
2004 * of a feature descriptor
2005 */
2006 static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
2007 enum ffs_os_desc_type type,
2008 u16 feature_count,
2009 ffs_os_desc_callback entity,
2010 void *priv,
2011 struct usb_os_desc_header *h)
2012 {
2013 int ret;
2014 const unsigned _len = len;
2015
2016 ENTER();
2017
2018 /* loop over all ext compat/ext prop descriptors */
2019 while (feature_count--) {
2020 ret = entity(type, h, data, len, priv);
2021 if (unlikely(ret < 0)) {
2022 pr_debug("bad OS descriptor, type: %d\n", type);
2023 return ret;
2024 }
2025 data += ret;
2026 len -= ret;
2027 }
2028 return _len - len;
2029 }
2030
2031 /* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
2032 static int __must_check ffs_do_os_descs(unsigned count,
2033 char *data, unsigned len,
2034 ffs_os_desc_callback entity, void *priv)
2035 {
2036 const unsigned _len = len;
2037 unsigned long num = 0;
2038
2039 ENTER();
2040
2041 for (num = 0; num < count; ++num) {
2042 int ret;
2043 enum ffs_os_desc_type type;
2044 u16 feature_count;
2045 struct usb_os_desc_header *desc = (void *)data;
2046
2047 if (len < sizeof(*desc))
2048 return -EINVAL;
2049
2050 /*
2051 * Record "descriptor" entity.
2052 * Process dwLength, bcdVersion, wIndex, get b/wCount.
2053 * Move the data pointer to the beginning of extended
2054 * compatibilities proper or extended properties proper
2055 * portions of the data
2056 */
2057 if (le32_to_cpu(desc->dwLength) > len)
2058 return -EINVAL;
2059
2060 ret = __ffs_do_os_desc_header(&type, desc);
2061 if (unlikely(ret < 0)) {
2062 pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
2063 num, ret);
2064 return ret;
2065 }
2066 /*
2067 * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??"
2068 */
2069 feature_count = le16_to_cpu(desc->wCount);
2070 if (type == FFS_OS_DESC_EXT_COMPAT &&
2071 (feature_count > 255 || desc->Reserved))
2072 return -EINVAL;
2073 len -= ret;
2074 data += ret;
2075
2076 /*
2077 * Process all function/property descriptors
2078 * of this Feature Descriptor
2079 */
2080 ret = ffs_do_single_os_desc(data, len, type,
2081 feature_count, entity, priv, desc);
2082 if (unlikely(ret < 0)) {
2083 pr_debug("%s returns %d\n", __func__, ret);
2084 return ret;
2085 }
2086
2087 len -= ret;
2088 data += ret;
2089 }
2090 return _len - len;
2091 }
2092
2093 /**
2094 * Validate contents of the buffer from userspace related to OS descriptors.
2095 */
2096 static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2097 struct usb_os_desc_header *h, void *data,
2098 unsigned len, void *priv)
2099 {
2100 struct ffs_data *ffs = priv;
2101 u8 length;
2102
2103 ENTER();
2104
2105 switch (type) {
2106 case FFS_OS_DESC_EXT_COMPAT: {
2107 struct usb_ext_compat_desc *d = data;
2108 int i;
2109
2110 if (len < sizeof(*d) ||
2111 d->bFirstInterfaceNumber >= ffs->interfaces_count ||
2112 d->Reserved1)
2113 return -EINVAL;
2114 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2115 if (d->Reserved2[i])
2116 return -EINVAL;
2117
2118 length = sizeof(struct usb_ext_compat_desc);
2119 }
2120 break;
2121 case FFS_OS_DESC_EXT_PROP: {
2122 struct usb_ext_prop_desc *d = data;
2123 u32 type, pdl;
2124 u16 pnl;
2125
2126 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
2127 return -EINVAL;
2128 length = le32_to_cpu(d->dwSize);
2129 type = le32_to_cpu(d->dwPropertyDataType);
2130 if (type < USB_EXT_PROP_UNICODE ||
2131 type > USB_EXT_PROP_UNICODE_MULTI) {
2132 pr_vdebug("unsupported os descriptor property type: %d",
2133 type);
2134 return -EINVAL;
2135 }
2136 pnl = le16_to_cpu(d->wPropertyNameLength);
2137 pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
2138 if (length != 14 + pnl + pdl) {
2139 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
2140 length, pnl, pdl, type);
2141 return -EINVAL;
2142 }
2143 ++ffs->ms_os_descs_ext_prop_count;
2144 /* property name reported to the host as "WCHAR"s */
2145 ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
2146 ffs->ms_os_descs_ext_prop_data_len += pdl;
2147 }
2148 break;
2149 default:
2150 pr_vdebug("unknown descriptor: %d\n", type);
2151 return -EINVAL;
2152 }
2153 return length;
2154 }
2155
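/*
 * Layout of the descriptors blob accepted below:
 *
 *   le32 magic     FUNCTIONFS_DESCRIPTORS_MAGIC or FUNCTIONFS_DESCRIPTORS_MAGIC_V2
 *   le32 length    total length of the blob
 *   le32 flags     (v2 only) FUNCTIONFS_HAS_{FS,HS,SS}_DESC, HAS_MS_OS_DESC,
 *                  VIRTUAL_ADDR, EVENTFD
 *   le32 eventfd   (only if FUNCTIONFS_EVENTFD is set)
 *   le32 counts    one count per speed whose HAS_*_DESC flag is set, plus an
 *                  OS descriptors count if HAS_MS_OS_DESC is set
 *   descriptors    full-, high- and super-speed descriptors followed by the
 *                  OS descriptors, in that order
 */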
2156 static int __ffs_data_got_descs(struct ffs_data *ffs,
2157 char *const _data, size_t len)
2158 {
2159 char *data = _data, *raw_descs;
2160 unsigned os_descs_count = 0, counts[3], flags;
2161 int ret = -EINVAL, i;
2162 struct ffs_desc_helper helper;
2163
2164 ENTER();
2165
2166 if (get_unaligned_le32(data + 4) != len)
2167 goto error;
2168
2169 switch (get_unaligned_le32(data)) {
2170 case FUNCTIONFS_DESCRIPTORS_MAGIC:
2171 flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
2172 data += 8;
2173 len -= 8;
2174 break;
2175 case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
2176 flags = get_unaligned_le32(data + 8);
2177 ffs->user_flags = flags;
2178 if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
2179 FUNCTIONFS_HAS_HS_DESC |
2180 FUNCTIONFS_HAS_SS_DESC |
2181 FUNCTIONFS_HAS_MS_OS_DESC |
2182 FUNCTIONFS_VIRTUAL_ADDR |
2183 FUNCTIONFS_EVENTFD)) {
2184 ret = -ENOSYS;
2185 goto error;
2186 }
2187 data += 12;
2188 len -= 12;
2189 break;
2190 default:
2191 goto error;
2192 }
2193
2194 if (flags & FUNCTIONFS_EVENTFD) {
2195 if (len < 4)
2196 goto error;
2197 ffs->ffs_eventfd =
2198 eventfd_ctx_fdget((int)get_unaligned_le32(data));
2199 if (IS_ERR(ffs->ffs_eventfd)) {
2200 ret = PTR_ERR(ffs->ffs_eventfd);
2201 ffs->ffs_eventfd = NULL;
2202 goto error;
2203 }
2204 data += 4;
2205 len -= 4;
2206 }
2207
2208 /* Read fs_count, hs_count and ss_count (if present) */
2209 for (i = 0; i < 3; ++i) {
2210 if (!(flags & (1 << i))) {
2211 counts[i] = 0;
2212 } else if (len < 4) {
2213 goto error;
2214 } else {
2215 counts[i] = get_unaligned_le32(data);
2216 data += 4;
2217 len -= 4;
2218 }
2219 }
2220 if (flags & (1 << i)) {
if (len < 4)
goto error;
2221 os_descs_count = get_unaligned_le32(data);
2222 data += 4;
2223 len -= 4;
2224 }
2225
2226 /* Read descriptors */
2227 raw_descs = data;
2228 helper.ffs = ffs;
2229 for (i = 0; i < 3; ++i) {
2230 if (!counts[i])
2231 continue;
2232 helper.interfaces_count = 0;
2233 helper.eps_count = 0;
2234 ret = ffs_do_descs(counts[i], data, len,
2235 __ffs_data_do_entity, &helper);
2236 if (ret < 0)
2237 goto error;
2238 if (!ffs->eps_count && !ffs->interfaces_count) {
2239 ffs->eps_count = helper.eps_count;
2240 ffs->interfaces_count = helper.interfaces_count;
2241 } else {
2242 if (ffs->eps_count != helper.eps_count) {
2243 ret = -EINVAL;
2244 goto error;
2245 }
2246 if (ffs->interfaces_count != helper.interfaces_count) {
2247 ret = -EINVAL;
2248 goto error;
2249 }
2250 }
2251 data += ret;
2252 len -= ret;
2253 }
2254 if (os_descs_count) {
2255 ret = ffs_do_os_descs(os_descs_count, data, len,
2256 __ffs_data_do_os_desc, ffs);
2257 if (ret < 0)
2258 goto error;
2259 data += ret;
2260 len -= ret;
2261 }
2262
2263 if (raw_descs == data || len) {
2264 ret = -EINVAL;
2265 goto error;
2266 }
2267
2268 ffs->raw_descs_data = _data;
2269 ffs->raw_descs = raw_descs;
2270 ffs->raw_descs_length = data - raw_descs;
2271 ffs->fs_descs_count = counts[0];
2272 ffs->hs_descs_count = counts[1];
2273 ffs->ss_descs_count = counts[2];
2274 ffs->ms_os_descs_count = os_descs_count;
2275
2276 return 0;
2277
2278 error:
2279 kfree(_data);
2280 return ret;
2281 }
2282
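/*
 * Layout of the strings blob accepted by the function below:
 *
 *   le32 magic (FUNCTIONFS_STRINGS_MAGIC), le32 length
 *   le32 str_count, le32 lang_count
 *   then, for each language: le16 language id followed by str_count
 *   NUL-terminated UTF-8 strings
 */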
2283 static int __ffs_data_got_strings(struct ffs_data *ffs,
2284 char *const _data, size_t len)
2285 {
2286 u32 str_count, needed_count, lang_count;
2287 struct usb_gadget_strings **stringtabs, *t;
2288 struct usb_string *strings, *s;
2289 const char *data = _data;
2290
2291 ENTER();
2292
2293 if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
2294 get_unaligned_le32(data + 4) != len))
2295 goto error;
2296 str_count = get_unaligned_le32(data + 8);
2297 lang_count = get_unaligned_le32(data + 12);
2298
2299 /* if one is zero the other must be zero */
2300 if (unlikely(!str_count != !lang_count))
2301 goto error;
2302
2303 /* Do we have at least as many strings as descriptors need? */
2304 needed_count = ffs->strings_count;
2305 if (unlikely(str_count < needed_count))
2306 goto error;
2307
2308 /*
2309 * If we don't need any strings just return and free all
2310 * memory.
2311 */
2312 if (!needed_count) {
2313 kfree(_data);
2314 return 0;
2315 }
2316
2317 /* Allocate everything in one chunk so there's less maintenance. */
2318 {
2319 unsigned i = 0;
2320 vla_group(d);
2321 vla_item(d, struct usb_gadget_strings *, stringtabs,
2322 lang_count + 1);
2323 vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
2324 vla_item(d, struct usb_string, strings,
2325 lang_count*(needed_count+1));
2326
2327 char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
2328
2329 if (unlikely(!vlabuf)) {
2330 kfree(_data);
2331 return -ENOMEM;
2332 }
2333
2334 /* Initialize the VLA pointers */
2335 stringtabs = vla_ptr(vlabuf, d, stringtabs);
2336 t = vla_ptr(vlabuf, d, stringtab);
2337 i = lang_count;
2338 do {
2339 *stringtabs++ = t++;
2340 } while (--i);
2341 *stringtabs = NULL;
2342
2343 /* stringtabs = vlabuf = d_stringtabs for later kfree */
2344 stringtabs = vla_ptr(vlabuf, d, stringtabs);
2345 t = vla_ptr(vlabuf, d, stringtab);
2346 s = vla_ptr(vlabuf, d, strings);
2347 strings = s;
2348 }
2349
2350 /* For each language */
2351 data += 16;
2352 len -= 16;
2353
2354 do { /* lang_count > 0 so we can use do-while */
2355 unsigned needed = needed_count;
u32 str_per_lang = str_count; /* str_count strings follow for each language */
2356
2357 if (unlikely(len < 3))
2358 goto error_free;
2359 t->language = get_unaligned_le16(data);
2360 t->strings = s;
2361 ++t;
2362
2363 data += 2;
2364 len -= 2;
2365
2366 /* For each string */
2367 do { /* str_count > 0 so we can use do-while */
2368 size_t length = strnlen(data, len);
2369
2370 if (unlikely(length == len))
2371 goto error_free;
2372
2373 /*
2374 * User may provide more strings than we need,
2375 * if that's the case we simply ignore the
2376 * rest
2377 */
2378 if (likely(needed)) {
2379 /*
2380 * s->id will be set while adding
2381 * function to configuration so for
2382 * now just leave garbage here.
2383 */
2384 s->s = data;
2385 --needed;
2386 ++s;
2387 }
2388
2389 data += length + 1;
2390 len -= length + 1;
2391 } while (--str_per_lang);
2392
2393 s->id = 0; /* terminator */
2394 s->s = NULL;
2395 ++s;
2396
2397 } while (--lang_count);
2398
2399 /* Some garbage left? */
2400 if (unlikely(len))
2401 goto error_free;
2402
2403 /* Done! */
2404 ffs->stringtabs = stringtabs;
2405 ffs->raw_strings = _data;
2406
2407 return 0;
2408
2409 error_free:
2410 kfree(stringtabs);
2411 error:
2412 kfree(_data);
2413 return -EINVAL;
2414 }
2415
2416
2417 /* Events handling and management *******************************************/
2418
2419 static void __ffs_event_add(struct ffs_data *ffs,
2420 enum usb_functionfs_event_type type)
2421 {
2422 enum usb_functionfs_event_type rem_type1, rem_type2 = type;
2423 int neg = 0;
2424
2425 /*
2426 * Abort any unhandled setup
2427 *
2428 * We do not need to worry about some cmpxchg() changing value
2429 * of ffs->setup_state without holding the lock because when
2430 * state is FFS_SETUP_PENDING cmpxchg() in several places in
2431 * the source does nothing.
2432 */
2433 if (ffs->setup_state == FFS_SETUP_PENDING)
2434 ffs->setup_state = FFS_SETUP_CANCELLED;
2435
2436 /*
2437 * Logic of this function guarantees that there are at most four pending
2438 * events on ffs->ev.types queue. This is important because the queue
2439 * has space for four elements only and __ffs_ep0_read_events function
2440 * depends on that limit as well. If more event types are added, those
2441 * limits have to be revisited or guaranteed to still hold.
2442 */
2443 switch (type) {
2444 case FUNCTIONFS_RESUME:
2445 rem_type2 = FUNCTIONFS_SUSPEND;
2446 /* FALL THROUGH */
2447 case FUNCTIONFS_SUSPEND:
2448 case FUNCTIONFS_SETUP:
2449 rem_type1 = type;
2450 /* Discard all similar events */
2451 break;
2452
2453 case FUNCTIONFS_BIND:
2454 case FUNCTIONFS_UNBIND:
2455 case FUNCTIONFS_DISABLE:
2456 case FUNCTIONFS_ENABLE:
2457 /* Discard everything other than power management. */
2458 rem_type1 = FUNCTIONFS_SUSPEND;
2459 rem_type2 = FUNCTIONFS_RESUME;
2460 neg = 1;
2461 break;
2462
2463 default:
2464 WARN(1, "%d: unknown event, this should not happen\n", type);
2465 return;
2466 }
2467
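/*
 * Compact the event queue in place: drop events of type rem_type1 or
 * rem_type2, or, when neg is set, drop everything except them.
 */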
2468 {
2469 u8 *ev = ffs->ev.types, *out = ev;
2470 unsigned n = ffs->ev.count;
2471 for (; n; --n, ++ev)
2472 if ((*ev == rem_type1 || *ev == rem_type2) == neg)
2473 *out++ = *ev;
2474 else
2475 pr_vdebug("purging event %d\n", *ev);
2476 ffs->ev.count = out - ffs->ev.types;
2477 }
2478
2479 pr_vdebug("adding event %d\n", type);
2480 ffs->ev.types[ffs->ev.count++] = type;
2481 wake_up_locked(&ffs->ev.waitq);
2482 if (ffs->ffs_eventfd)
2483 eventfd_signal(ffs->ffs_eventfd, 1);
2484 }
2485
2486 static void ffs_event_add(struct ffs_data *ffs,
2487 enum usb_functionfs_event_type type)
2488 {
2489 unsigned long flags;
2490 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2491 __ffs_event_add(ffs, type);
2492 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2493 }
2494
2495 /* Bind/unbind USB function hooks *******************************************/
2496
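/*
 * Map an endpoint address from the user-supplied descriptors to its
 * (1-based) index in ffs->eps_addrmap; returns -ENOENT for an unknown
 * address.
 */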
2497 static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
2498 {
2499 int i;
2500
2501 for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
2502 if (ffs->eps_addrmap[i] == endpoint_address)
2503 return i;
2504 return -ENOENT;
2505 }
2506
2507 static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
2508 struct usb_descriptor_header *desc,
2509 void *priv)
2510 {
2511 struct usb_endpoint_descriptor *ds = (void *)desc;
2512 struct ffs_function *func = priv;
2513 struct ffs_ep *ffs_ep;
2514 unsigned ep_desc_id;
2515 int idx;
2516 static const char *speed_names[] = { "full", "high", "super" };
2517
2518 if (type != FFS_DESCRIPTOR)
2519 return 0;
2520
2521 /*
2522 * If ss_descriptors is not NULL, we are reading super speed
2523 * descriptors; if hs_descriptors is not NULL, we are reading high
2524 * speed descriptors; otherwise, we are reading full speed
2525 * descriptors.
2526 */
2527 if (func->function.ss_descriptors) {
2528 ep_desc_id = 2;
2529 func->function.ss_descriptors[(long)valuep] = desc;
2530 } else if (func->function.hs_descriptors) {
2531 ep_desc_id = 1;
2532 func->function.hs_descriptors[(long)valuep] = desc;
2533 } else {
2534 ep_desc_id = 0;
2535 func->function.fs_descriptors[(long)valuep] = desc;
2536 }
2537
2538 if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
2539 return 0;
2540
2541 idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
2542 if (idx < 0)
2543 return idx;
2544
2545 ffs_ep = func->eps + idx;
2546
2547 if (unlikely(ffs_ep->descs[ep_desc_id])) {
2548 pr_err("two %sspeed descriptors for EP %d\n",
2549 speed_names[ep_desc_id],
2550 ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2551 return -EINVAL;
2552 }
2553 ffs_ep->descs[ep_desc_id] = ds;
2554
2555 ffs_dump_mem(": Original ep desc", ds, ds->bLength);
2556 if (ffs_ep->ep) {
2557 ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
2558 if (!ds->wMaxPacketSize)
2559 ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
2560 } else {
2561 struct usb_request *req;
2562 struct usb_ep *ep;
2563 u8 bEndpointAddress;
2564
2565 /*
2566 * We back up bEndpointAddress because autoconfig overwrites
2567 * it with physical endpoint address.
2568 */
2569 bEndpointAddress = ds->bEndpointAddress;
2570 pr_vdebug("autoconfig\n");
2571 ep = usb_ep_autoconfig(func->gadget, ds);
2572 if (unlikely(!ep))
2573 return -ENOTSUPP;
2574 ep->driver_data = func->eps + idx;
2575
2576 req = usb_ep_alloc_request(ep, GFP_KERNEL);
2577 if (unlikely(!req))
2578 return -ENOMEM;
2579
2580 ffs_ep->ep = ep;
2581 ffs_ep->req = req;
2582 func->eps_revmap[ds->bEndpointAddress &
2583 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
2584 /*
2585 * If we use virtual address mapping, we restore
2586 * original bEndpointAddress value.
2587 */
2588 if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
2589 ds->bEndpointAddress = bEndpointAddress;
2590 }
2591 ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
2592
2593 return 0;
2594 }
2595
2596 static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
2597 struct usb_descriptor_header *desc,
2598 void *priv)
2599 {
2600 struct ffs_function *func = priv;
2601 unsigned idx;
2602 u8 newValue;
2603
2604 switch (type) {
2605 default:
2606 case FFS_DESCRIPTOR:
2607 /* Handled in previous pass by __ffs_func_bind_do_descs() */
2608 return 0;
2609
2610 case FFS_INTERFACE:
2611 idx = *valuep;
2612 if (func->interfaces_nums[idx] < 0) {
2613 int id = usb_interface_id(func->conf, &func->function);
2614 if (unlikely(id < 0))
2615 return id;
2616 func->interfaces_nums[idx] = id;
2617 }
2618 newValue = func->interfaces_nums[idx];
2619 break;
2620
2621 case FFS_STRING:
2622 /* String IDs are allocated when ffs_data is bound to cdev */
2623 newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
2624 break;
2625
2626 case FFS_ENDPOINT:
2627 /*
2628 * USB_DT_ENDPOINT descriptors are handled in
2629 * __ffs_func_bind_do_descs().
2630 */
2631 if (desc->bDescriptorType == USB_DT_ENDPOINT)
2632 return 0;
2633
2634 idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
2635 if (unlikely(!func->eps[idx].ep))
2636 return -EINVAL;
2637
2638 {
2639 struct usb_endpoint_descriptor **descs;
2640 descs = func->eps[idx].descs;
2641 newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
2642 }
2643 break;
2644 }
2645
2646 pr_vdebug("%02x -> %02x\n", *valuep, newValue);
2647 *valuep = newValue;
2648 return 0;
2649 }
2650
2651 static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
2652 struct usb_os_desc_header *h, void *data,
2653 unsigned len, void *priv)
2654 {
2655 struct ffs_function *func = priv;
2656 u8 length = 0;
2657
2658 switch (type) {
2659 case FFS_OS_DESC_EXT_COMPAT: {
2660 struct usb_ext_compat_desc *desc = data;
2661 struct usb_os_desc_table *t;
2662
2663 t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
2664 t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
2665 memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
2666 ARRAY_SIZE(desc->CompatibleID) +
2667 ARRAY_SIZE(desc->SubCompatibleID));
2668 length = sizeof(*desc);
2669 }
2670 break;
2671 case FFS_OS_DESC_EXT_PROP: {
2672 struct usb_ext_prop_desc *desc = data;
2673 struct usb_os_desc_table *t;
2674 struct usb_os_desc_ext_prop *ext_prop;
2675 char *ext_prop_name;
2676 char *ext_prop_data;
2677
2678 t = &func->function.os_desc_table[h->interface];
2679 t->if_id = func->interfaces_nums[h->interface];
2680
2681 ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
2682 func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);
2683
2684 ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
2685 ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
2686 ext_prop->data_len = le32_to_cpu(*(u32 *)
2687 usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
2688 length = ext_prop->name_len + ext_prop->data_len + 14;
2689
2690 ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
2691 func->ffs->ms_os_descs_ext_prop_name_avail +=
2692 ext_prop->name_len;
2693
2694 ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
2695 func->ffs->ms_os_descs_ext_prop_data_avail +=
2696 ext_prop->data_len;
2697 memcpy(ext_prop_data,
2698 usb_ext_prop_data_ptr(data, ext_prop->name_len),
2699 ext_prop->data_len);
2700 /* unicode data reported to the host as "WCHAR"s */
2701 switch (ext_prop->type) {
2702 case USB_EXT_PROP_UNICODE:
2703 case USB_EXT_PROP_UNICODE_ENV:
2704 case USB_EXT_PROP_UNICODE_LINK:
2705 case USB_EXT_PROP_UNICODE_MULTI:
2706 ext_prop->data_len *= 2;
2707 break;
2708 }
2709 ext_prop->data = ext_prop_data;
2710
2711 memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
2712 ext_prop->name_len);
2713 /* property name reported to the host as "WCHAR"s */
2714 ext_prop->name_len *= 2;
2715 ext_prop->name = ext_prop_name;
2716
2717 t->os_desc->ext_prop_len +=
2718 ext_prop->name_len + ext_prop->data_len + 14;
2719 ++t->os_desc->ext_prop_count;
2720 list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
2721 }
2722 break;
2723 default:
2724 pr_vdebug("unknown descriptor: %d\n", type);
2725 }
2726
2727 return length;
2728 }
2729
2730 static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
2731 struct usb_configuration *c)
2732 {
2733 struct ffs_function *func = ffs_func_from_usb(f);
2734 struct f_fs_opts *ffs_opts =
2735 container_of(f->fi, struct f_fs_opts, func_inst);
2736 int ret;
2737
2738 ENTER();
2739
2740 /*
2741 * Legacy gadget triggers binding in functionfs_ready_callback,
2742 * which already uses locking; taking the same lock here would
2743 * cause a deadlock.
2744 *
2745 * Configfs-enabled gadgets however do need ffs_dev_lock.
2746 */
2747 if (!ffs_opts->no_configfs)
2748 ffs_dev_lock();
2749 ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
2750 func->ffs = ffs_opts->dev->ffs_data;
2751 if (!ffs_opts->no_configfs)
2752 ffs_dev_unlock();
2753 if (ret)
2754 return ERR_PTR(ret);
2755
2756 func->conf = c;
2757 func->gadget = c->cdev->gadget;
2758
2759 /*
2760 * In drivers/usb/gadget/configfs.c:configfs_composite_bind()
2761 * configurations are bound in sequence with list_for_each_entry,
2762 * and within each configuration its functions are bound in
2763 * sequence as well, so we assume there is no race condition
2764 * with regard to ffs_opts->refcnt access.
2765 */
2766 if (!ffs_opts->refcnt) {
2767 ret = functionfs_bind(func->ffs, c->cdev);
2768 if (ret)
2769 return ERR_PTR(ret);
2770 }
2771 ffs_opts->refcnt++;
2772 func->function.strings = func->ffs->stringtabs;
2773
2774 return ffs_opts;
2775 }
2776
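/*
 * Allocate one chunk (vlabuf) holding the ffs_ep array, the per-speed
 * descriptor pointer arrays, interface numbers, OS descriptor tables and
 * a private copy of the raw descriptors, then walk that copy: one
 * ffs_do_descs() pass per supported speed to claim endpoints, one
 * combined pass to assign interface numbers and rewrite endpoint
 * addresses, and finally ffs_do_os_descs() for the MS OS descriptors.
 */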
2777 static int _ffs_func_bind(struct usb_configuration *c,
2778 struct usb_function *f)
2779 {
2780 struct ffs_function *func = ffs_func_from_usb(f);
2781 struct ffs_data *ffs = func->ffs;
2782
2783 const int full = !!func->ffs->fs_descs_count;
2784 const int high = gadget_is_dualspeed(func->gadget) &&
2785 func->ffs->hs_descs_count;
2786 const int super = gadget_is_superspeed(func->gadget) &&
2787 func->ffs->ss_descs_count;
2788
2789 int fs_len, hs_len, ss_len, ret, i;
2790
2791 /* Make it a single chunk, less management later on */
2792 vla_group(d);
2793 vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
2794 vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
2795 full ? ffs->fs_descs_count + 1 : 0);
2796 vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
2797 high ? ffs->hs_descs_count + 1 : 0);
2798 vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
2799 super ? ffs->ss_descs_count + 1 : 0);
2800 vla_item_with_sz(d, short, inums, ffs->interfaces_count);
2801 vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
2802 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2803 vla_item_with_sz(d, char[16], ext_compat,
2804 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2805 vla_item_with_sz(d, struct usb_os_desc, os_desc,
2806 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2807 vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
2808 ffs->ms_os_descs_ext_prop_count);
2809 vla_item_with_sz(d, char, ext_prop_name,
2810 ffs->ms_os_descs_ext_prop_name_len);
2811 vla_item_with_sz(d, char, ext_prop_data,
2812 ffs->ms_os_descs_ext_prop_data_len);
2813 vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
2814 char *vlabuf;
2815
2816 ENTER();
2817
2818 /* Fail if the function has descriptors only for speeds the gadget does not support */
2819 if (unlikely(!(full | high | super)))
2820 return -ENOTSUPP;
2821
2822 /* Allocate a single chunk, less management later on */
2823 vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
2824 if (unlikely(!vlabuf))
2825 return -ENOMEM;
2826
2827 ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
2828 ffs->ms_os_descs_ext_prop_name_avail =
2829 vla_ptr(vlabuf, d, ext_prop_name);
2830 ffs->ms_os_descs_ext_prop_data_avail =
2831 vla_ptr(vlabuf, d, ext_prop_data);
2832
2833 /* Copy descriptors */
2834 memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
2835 ffs->raw_descs_length);
2836
2837 memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
2838 for (ret = ffs->eps_count; ret; --ret) {
2839 struct ffs_ep *ptr;
2840
2841 ptr = vla_ptr(vlabuf, d, eps);
2842 ptr[ret - 1].num = -1; /* eps slots run 0..eps_count-1 */
2843 }
2844
2845 /* Save pointers
2846 * d_eps == vlabuf, func->eps used to kfree vlabuf later
2847 */
2848 func->eps = vla_ptr(vlabuf, d, eps);
2849 func->interfaces_nums = vla_ptr(vlabuf, d, inums);
2850
2851 /*
2852 * Go through all the endpoint descriptors and allocate
2853 * endpoints first, so that later we can rewrite the endpoint
2854 * numbers without worrying that it may be described later on.
2855 */
2856 if (likely(full)) {
2857 func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
2858 fs_len = ffs_do_descs(ffs->fs_descs_count,
2859 vla_ptr(vlabuf, d, raw_descs),
2860 d_raw_descs__sz,
2861 __ffs_func_bind_do_descs, func);
2862 if (unlikely(fs_len < 0)) {
2863 ret = fs_len;
2864 goto error;
2865 }
2866 } else {
2867 fs_len = 0;
2868 }
2869
2870 if (likely(high)) {
2871 func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
2872 hs_len = ffs_do_descs(ffs->hs_descs_count,
2873 vla_ptr(vlabuf, d, raw_descs) + fs_len,
2874 d_raw_descs__sz - fs_len,
2875 __ffs_func_bind_do_descs, func);
2876 if (unlikely(hs_len < 0)) {
2877 ret = hs_len;
2878 goto error;
2879 }
2880 } else {
2881 hs_len = 0;
2882 }
2883
2884 if (likely(super)) {
2885 func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
2886 ss_len = ffs_do_descs(ffs->ss_descs_count,
2887 vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
2888 d_raw_descs__sz - fs_len - hs_len,
2889 __ffs_func_bind_do_descs, func);
2890 if (unlikely(ss_len < 0)) {
2891 ret = ss_len;
2892 goto error;
2893 }
2894 } else {
2895 ss_len = 0;
2896 }
2897
2898 /*
2899 * Now handle interface numbers allocation and interface and
2900 * endpoint numbers rewriting. We can do that in one go
2901 * now.
2902 */
2903 ret = ffs_do_descs(ffs->fs_descs_count +
2904 (high ? ffs->hs_descs_count : 0) +
2905 (super ? ffs->ss_descs_count : 0),
2906 vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
2907 __ffs_func_bind_do_nums, func);
2908 if (unlikely(ret < 0))
2909 goto error;
2910
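/*
 * When the gadget uses OS string descriptors, give each interface its
 * own usb_os_desc table entry; the ext_compat ids and ext_prop lists
 * are filled in by __ffs_func_bind_do_os_desc() below.
 */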
2911 func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
2912 if (c->cdev->use_os_string)
2913 for (i = 0; i < ffs->interfaces_count; ++i) {
2914 struct usb_os_desc *desc;
2915
2916 desc = func->function.os_desc_table[i].os_desc =
2917 vla_ptr(vlabuf, d, os_desc) +
2918 i * sizeof(struct usb_os_desc);
2919 desc->ext_compat_id =
2920 vla_ptr(vlabuf, d, ext_compat) + i * 16;
2921 INIT_LIST_HEAD(&desc->ext_prop);
2922 }
2923 ret = ffs_do_os_descs(ffs->ms_os_descs_count,
2924 vla_ptr(vlabuf, d, raw_descs) +
2925 fs_len + hs_len + ss_len,
2926 d_raw_descs__sz - fs_len - hs_len - ss_len,
2927 __ffs_func_bind_do_os_desc, func);
2928 if (unlikely(ret < 0))
2929 goto error;
2930 func->function.os_desc_n =
2931 c->cdev->use_os_string ? ffs->interfaces_count : 0;
2932
2933 /* And we're done */
2934 ffs_event_add(ffs, FUNCTIONFS_BIND);
2935 return 0;
2936
2937 error:
2938 /* XXX Do we need to release all claimed endpoints here? */
2939 return ret;
2940 }
2941
2942 static int ffs_func_bind(struct usb_configuration *c,
2943 struct usb_function *f)
2944 {
2945 struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
2946
2947 if (IS_ERR(ffs_opts))
2948 return PTR_ERR(ffs_opts);
2949
2950 return _ffs_func_bind(c, f);
2951 }
2952
2953
2954 /* Other USB function hooks *************************************************/
2955
2956 static void ffs_reset_work(struct work_struct *work)
2957 {
2958 struct ffs_data *ffs = container_of(work,
2959 struct ffs_data, reset_work);
2960 ffs_data_reset(ffs);
2961 }
2962
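/*
 * set_alt doubles as the disable path: ffs_func_disable() calls it with
 * alt == (unsigned)-1, which clears ffs->func and queues
 * FUNCTIONFS_DISABLE instead of enabling the endpoints.
 */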
2963 static int ffs_func_set_alt(struct usb_function *f,
2964 unsigned interface, unsigned alt)
2965 {
2966 struct ffs_function *func = ffs_func_from_usb(f);
2967 struct ffs_data *ffs = func->ffs;
2968 int ret = 0, intf;
2969
2970 if (alt != (unsigned)-1) {
2971 intf = ffs_func_revmap_intf(func, interface);
2972 if (unlikely(intf < 0))
2973 return intf;
2974 }
2975
2976 if (ffs->func)
2977 ffs_func_eps_disable(ffs->func);
2978
2979 if (ffs->state == FFS_DEACTIVATED) {
2980 ffs->state = FFS_CLOSING;
2981 INIT_WORK(&ffs->reset_work, ffs_reset_work);
2982 schedule_work(&ffs->reset_work);
2983 return -ENODEV;
2984 }
2985
2986 if (ffs->state != FFS_ACTIVE)
2987 return -ENODEV;
2988
2989 if (alt == (unsigned)-1) {
2990 ffs->func = NULL;
2991 ffs_event_add(ffs, FUNCTIONFS_DISABLE);
2992 return 0;
2993 }
2994
2995 ffs->func = func;
2996 ret = ffs_func_eps_enable(func);
2997 if (likely(ret >= 0))
2998 ffs_event_add(ffs, FUNCTIONFS_ENABLE);
2999 return ret;
3000 }
3001
3002 static void ffs_func_disable(struct usb_function *f)
3003 {
3004 ffs_func_set_alt(f, 0, (unsigned)-1);
3005 }
3006
3007 static int ffs_func_setup(struct usb_function *f,
3008 const struct usb_ctrlrequest *creq)
3009 {
3010 struct ffs_function *func = ffs_func_from_usb(f);
3011 struct ffs_data *ffs = func->ffs;
3012 unsigned long flags;
3013 int ret;
3014
3015 ENTER();
3016
3017 pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
3018 pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
3019 pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
3020 pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
3021 pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));
3022
3023 /*
3024 * Most requests directed to an interface go through here
3025 * (notable exceptions are set/get interface), so we need to
3026 * handle them. All others are either handled by the composite
3027 * layer or passed to usb_configuration->setup() (if one is set).
3028 * We handle requests directed to an endpoint here as well,
3029 * as that is straightforward, but it is not obvious what to do
3030 * with any other request.
3031 */
3032 if (ffs->state != FFS_ACTIVE)
3033 return -ENODEV;
3034
3035 switch (creq->bRequestType & USB_RECIP_MASK) {
3036 case USB_RECIP_INTERFACE:
3037 ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
3038 if (unlikely(ret < 0))
3039 return ret;
3040 break;
3041
3042 case USB_RECIP_ENDPOINT:
3043 ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
3044 if (unlikely(ret < 0))
3045 return ret;
3046 if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
3047 ret = func->ffs->eps_addrmap[ret];
3048 break;
3049
3050 default:
3051 return -EOPNOTSUPP;
3052 }
3053
3054 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
3055 ffs->ev.setup = *creq;
3056 ffs->ev.setup.wIndex = cpu_to_le16(ret);
3057 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
3058 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
3059
3060 return 0;
3061 }
3062
3063 static void ffs_func_suspend(struct usb_function *f)
3064 {
3065 ENTER();
3066 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
3067 }
3068
3069 static void ffs_func_resume(struct usb_function *f)
3070 {
3071 ENTER();
3072 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
3073 }
3074
3075
3076 /* Endpoint and interface numbers reverse mapping ***************************/
3077
3078 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
3079 {
3080 num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
3081 return num ? num : -EDOM;
3082 }
3083
3084 static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
3085 {
3086 short *nums = func->interfaces_nums;
3087 unsigned count = func->ffs->interfaces_count;
3088
3089 for (; count; --count, ++nums) {
3090 if (*nums >= 0 && *nums == intf)
3091 return nums - func->interfaces_nums;
3092 }
3093
3094 return -EDOM;
3095 }
3096
3097
3098 /* Devices management *******************************************************/
3099
3100 static LIST_HEAD(ffs_devices);
3101
3102 static struct ffs_dev *_ffs_do_find_dev(const char *name)
3103 {
3104 struct ffs_dev *dev;
3105
3106 list_for_each_entry(dev, &ffs_devices, entry) {
3107 if (!dev->name || !name)
3108 continue;
3109 if (strcmp(dev->name, name) == 0)
3110 return dev;
3111 }
3112
3113 return NULL;
3114 }
3115
3116 /*
3117 * ffs_lock must be taken by the caller of this function
3118 */
3119 static struct ffs_dev *_ffs_get_single_dev(void)
3120 {
3121 struct ffs_dev *dev;
3122
3123 if (list_is_singular(&ffs_devices)) {
3124 dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
3125 if (dev->single)
3126 return dev;
3127 }
3128
3129 return NULL;
3130 }
3131
3132 /*
3133 * ffs_lock must be taken by the caller of this function
3134 */
3135 static struct ffs_dev *_ffs_find_dev(const char *name)
3136 {
3137 struct ffs_dev *dev;
3138
3139 dev = _ffs_get_single_dev();
3140 if (dev)
3141 return dev;
3142
3143 return _ffs_do_find_dev(name);
3144 }
3145
3146 /* Configfs support *********************************************************/
3147
3148 static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
3149 {
3150 return container_of(to_config_group(item), struct f_fs_opts,
3151 func_inst.group);
3152 }
3153
3154 static void ffs_attr_release(struct config_item *item)
3155 {
3156 struct f_fs_opts *opts = to_ffs_opts(item);
3157
3158 usb_put_function_instance(&opts->func_inst);
3159 }
3160
3161 static struct configfs_item_operations ffs_item_ops = {
3162 .release = ffs_attr_release,
3163 };
3164
3165 static struct config_item_type ffs_func_type = {
3166 .ct_item_ops = &ffs_item_ops,
3167 .ct_owner = THIS_MODULE,
3168 };
3169
3170
3171 /* Function registration interface ******************************************/
3172
3173 static void ffs_free_inst(struct usb_function_instance *f)
3174 {
3175 struct f_fs_opts *opts;
3176
3177 opts = to_f_fs_opts(f);
3178 ffs_dev_lock();
3179 _ffs_free_dev(opts->dev);
3180 ffs_dev_unlock();
3181 kfree(opts);
3182 }
3183
3184 #define MAX_INST_NAME_LEN 40
3185
3186 static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
3187 {
3188 struct f_fs_opts *opts;
3189 char *ptr;
3190 const char *tmp;
3191 int name_len, ret;
3192
3193 name_len = strlen(name) + 1;
3194 if (name_len > MAX_INST_NAME_LEN)
3195 return -ENAMETOOLONG;
3196
3197 ptr = kstrndup(name, name_len, GFP_KERNEL);
3198 if (!ptr)
3199 return -ENOMEM;
3200
3201 opts = to_f_fs_opts(fi);
3202 tmp = NULL;
3203
3204 ffs_dev_lock();
3205
3206 tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
3207 ret = _ffs_name_dev(opts->dev, ptr);
3208 if (ret) {
3209 kfree(ptr);
3210 ffs_dev_unlock();
3211 return ret;
3212 }
3213 opts->dev->name_allocated = true;
3214
3215 ffs_dev_unlock();
3216
3217 kfree(tmp);
3218
3219 return 0;
3220 }
3221
3222 static struct usb_function_instance *ffs_alloc_inst(void)
3223 {
3224 struct f_fs_opts *opts;
3225 struct ffs_dev *dev;
3226
3227 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
3228 if (!opts)
3229 return ERR_PTR(-ENOMEM);
3230
3231 opts->func_inst.set_inst_name = ffs_set_inst_name;
3232 opts->func_inst.free_func_inst = ffs_free_inst;
3233 ffs_dev_lock();
3234 dev = _ffs_alloc_dev();
3235 ffs_dev_unlock();
3236 if (IS_ERR(dev)) {
3237 kfree(opts);
3238 return ERR_CAST(dev);
3239 }
3240 opts->dev = dev;
3241 dev->opts = opts;
3242
3243 config_group_init_type_name(&opts->func_inst.group, "",
3244 &ffs_func_type);
3245 return &opts->func_inst;
3246 }
3247
3248 static void ffs_free(struct usb_function *f)
3249 {
3250 kfree(ffs_func_from_usb(f));
3251 }
3252
3253 static void ffs_func_unbind(struct usb_configuration *c,
3254 struct usb_function *f)
3255 {
3256 struct ffs_function *func = ffs_func_from_usb(f);
3257 struct ffs_data *ffs = func->ffs;
3258 struct f_fs_opts *opts =
3259 container_of(f->fi, struct f_fs_opts, func_inst);
3260 struct ffs_ep *ep = func->eps;
3261 unsigned count = ffs->eps_count;
3262 unsigned long flags;
3263
3264 ENTER();
3265 if (ffs->func == func) {
3266 ffs_func_eps_disable(func);
3267 ffs->func = NULL;
3268 }
3269
3270 if (!--opts->refcnt)
3271 functionfs_unbind(ffs);
3272
3273 /* cleanup after autoconfig */
3274 spin_lock_irqsave(&func->ffs->eps_lock, flags);
3275 do {
3276 if (ep->ep && ep->req)
3277 usb_ep_free_request(ep->ep, ep->req);
3278 ep->req = NULL;
3279 ++ep;
3280 } while (--count);
3281 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
3282 kfree(func->eps);
3283 func->eps = NULL;
3284 /*
3285 * eps, descriptors and interfaces_nums are allocated in the
3286 * same chunk so only one free is required.
3287 */
3288 func->function.fs_descriptors = NULL;
3289 func->function.hs_descriptors = NULL;
3290 func->function.ss_descriptors = NULL;
3291 func->interfaces_nums = NULL;
3292
3293 ffs_event_add(ffs, FUNCTIONFS_UNBIND);
3294 }
3295
3296 static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
3297 {
3298 struct ffs_function *func;
3299
3300 ENTER();
3301
3302 func = kzalloc(sizeof(*func), GFP_KERNEL);
3303 if (unlikely(!func))
3304 return ERR_PTR(-ENOMEM);
3305
3306 func->function.name = "Function FS Gadget";
3307
3308 func->function.bind = ffs_func_bind;
3309 func->function.unbind = ffs_func_unbind;
3310 func->function.set_alt = ffs_func_set_alt;
3311 func->function.disable = ffs_func_disable;
3312 func->function.setup = ffs_func_setup;
3313 func->function.suspend = ffs_func_suspend;
3314 func->function.resume = ffs_func_resume;
3315 func->function.free_func = ffs_free;
3316
3317 return &func->function;
3318 }
3319
3320 /*
3321 * ffs_lock must be taken by the caller of this function
3322 */
3323 static struct ffs_dev *_ffs_alloc_dev(void)
3324 {
3325 struct ffs_dev *dev;
3326 int ret;
3327
3328 if (_ffs_get_single_dev())
3329 return ERR_PTR(-EBUSY);
3330
3331 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3332 if (!dev)
3333 return ERR_PTR(-ENOMEM);
3334
3335 if (list_empty(&ffs_devices)) {
3336 ret = functionfs_init();
3337 if (ret) {
3338 kfree(dev);
3339 return ERR_PTR(ret);
3340 }
3341 }
3342
3343 list_add(&dev->entry, &ffs_devices);
3344
3345 return dev;
3346 }
3347
3348 /*
3349 * ffs_lock must be taken by the caller of this function
3350 * The caller is responsible for "name" being available whenever f_fs needs it
3351 */
3352 static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
3353 {
3354 struct ffs_dev *existing;
3355
3356 existing = _ffs_do_find_dev(name);
3357 if (existing)
3358 return -EBUSY;
3359
3360 dev->name = name;
3361
3362 return 0;
3363 }
3364
3365 /*
3366 * The caller is responsible for "name" being available whenever f_fs needs it
3367 */
3368 int ffs_name_dev(struct ffs_dev *dev, const char *name)
3369 {
3370 int ret;
3371
3372 ffs_dev_lock();
3373 ret = _ffs_name_dev(dev, name);
3374 ffs_dev_unlock();
3375
3376 return ret;
3377 }
3378 EXPORT_SYMBOL_GPL(ffs_name_dev);
3379
3380 int ffs_single_dev(struct ffs_dev *dev)
3381 {
3382 int ret;
3383
3384 ret = 0;
3385 ffs_dev_lock();
3386
3387 if (!list_is_singular(&ffs_devices))
3388 ret = -EBUSY;
3389 else
3390 dev->single = true;
3391
3392 ffs_dev_unlock();
3393 return ret;
3394 }
3395 EXPORT_SYMBOL_GPL(ffs_single_dev);
3396
3397 /*
3398 * ffs_lock must be taken by the caller of this function
3399 */
3400 static void _ffs_free_dev(struct ffs_dev *dev)
3401 {
3402 list_del(&dev->entry);
3403 if (dev->name_allocated)
3404 kfree(dev->name);
3405 kfree(dev);
3406 if (list_empty(&ffs_devices))
3407 functionfs_cleanup();
3408 }
3409
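/*
 * Look up the named device (or the single registered one), refuse if it
 * is already marked mounted, run its optional ffs_acquire_dev_callback
 * and mark it mounted; ffs_release_dev() below undoes this.
 */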
3410 static void *ffs_acquire_dev(const char *dev_name)
3411 {
3412 struct ffs_dev *ffs_dev;
3413
3414 ENTER();
3415 ffs_dev_lock();
3416
3417 ffs_dev = _ffs_find_dev(dev_name);
3418 if (!ffs_dev)
3419 ffs_dev = ERR_PTR(-ENOENT);
3420 else if (ffs_dev->mounted)
3421 ffs_dev = ERR_PTR(-EBUSY);
3422 else if (ffs_dev->ffs_acquire_dev_callback &&
3423 ffs_dev->ffs_acquire_dev_callback(ffs_dev))
3424 ffs_dev = ERR_PTR(-ENOENT);
3425 else
3426 ffs_dev->mounted = true;
3427
3428 ffs_dev_unlock();
3429 return ffs_dev;
3430 }
3431
3432 static void ffs_release_dev(struct ffs_data *ffs_data)
3433 {
3434 struct ffs_dev *ffs_dev;
3435
3436 ENTER();
3437 ffs_dev_lock();
3438
3439 ffs_dev = ffs_data->private_data;
3440 if (ffs_dev) {
3441 ffs_dev->mounted = false;
3442
3443 if (ffs_dev->ffs_release_dev_callback)
3444 ffs_dev->ffs_release_dev_callback(ffs_dev);
3445 }
3446
3447 ffs_dev_unlock();
3448 }
3449
3450 static int ffs_ready(struct ffs_data *ffs)
3451 {
3452 struct ffs_dev *ffs_obj;
3453 int ret = 0;
3454
3455 ENTER();
3456 ffs_dev_lock();
3457
3458 ffs_obj = ffs->private_data;
3459 if (!ffs_obj) {
3460 ret = -EINVAL;
3461 goto done;
3462 }
3463 if (WARN_ON(ffs_obj->desc_ready)) {
3464 ret = -EBUSY;
3465 goto done;
3466 }
3467
3468 ffs_obj->desc_ready = true;
3469 ffs_obj->ffs_data = ffs;
3470
3471 if (ffs_obj->ffs_ready_callback)
3472 ret = ffs_obj->ffs_ready_callback(ffs);
3473
3474 done:
3475 ffs_dev_unlock();
3476 return ret;
3477 }
3478
3479 static void ffs_closed(struct ffs_data *ffs)
3480 {
3481 struct ffs_dev *ffs_obj;
3482
3483 ENTER();
3484 ffs_dev_lock();
3485
3486 ffs_obj = ffs->private_data;
3487 if (!ffs_obj)
3488 goto done;
3489
3490 ffs_obj->desc_ready = false;
3491
3492 if (ffs_obj->ffs_closed_callback)
3493 ffs_obj->ffs_closed_callback(ffs);
3494
3495 if (!ffs_obj->opts || ffs_obj->opts->no_configfs
3496 || !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
3497 goto done;
3498
3499 unregister_gadget_item(ffs_obj->opts->
3500 func_inst.group.cg_item.ci_parent->ci_parent);
3501 done:
3502 ffs_dev_unlock();
3503 }
3504
3505 /* Misc helper functions ****************************************************/
3506
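/* Take the mutex; in non-blocking mode return -EAGAIN instead of sleeping. */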
3507 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
3508 {
3509 return nonblock
3510 ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
3511 : mutex_lock_interruptible(mutex);
3512 }
3513
3514 static char *ffs_prepare_buffer(const char __user *buf, size_t len)
3515 {
3516 char *data;
3517
3518 if (unlikely(!len))
3519 return NULL;
3520
3521 data = kmalloc(len, GFP_KERNEL);
3522 if (unlikely(!data))
3523 return ERR_PTR(-ENOMEM);
3524
3525 if (unlikely(__copy_from_user(data, buf, len))) {
3526 kfree(data);
3527 return ERR_PTR(-EFAULT);
3528 }
3529
3530 pr_vdebug("Buffer from user space:\n");
3531 ffs_dump_mem("", data, len);
3532
3533 return data;
3534 }
3535
3536 DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
3537 MODULE_LICENSE("GPL");
3538 MODULE_AUTHOR("Michal Nazarewicz");