drivers/usb/gadget/f_fs.c
1 /*
2 * f_fs.c -- user mode file system API for USB composite function controllers
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 * Author: Michal Nazarewicz <mina86@mina86.com>
6 *
7 * Based on inode.c (GadgetFS) which was:
8 * Copyright (C) 2003-2004 David Brownell
9 * Copyright (C) 2003 Agilent Technologies
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17
18 /* #define DEBUG */
19 /* #define VERBOSE_DEBUG */
20
21 #include <linux/blkdev.h>
22 #include <linux/pagemap.h>
23 #include <linux/export.h>
24 #include <linux/hid.h>
25 #include <asm/unaligned.h>
26
27 #include <linux/usb/composite.h>
28 #include <linux/usb/functionfs.h>
29
30
31 #define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by an honest dice roll ;) */
32
33
34 /* Debugging ****************************************************************/
35
36 #ifdef VERBOSE_DEBUG
37 #ifndef pr_vdebug
38 # define pr_vdebug pr_debug
39 #endif /* pr_vdebug */
40 # define ffs_dump_mem(prefix, ptr, len) \
41 print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len)
42 #else
43 #ifndef pr_vdebug
44 # define pr_vdebug(...) do { } while (0)
45 #endif /* pr_vdebug */
46 # define ffs_dump_mem(prefix, ptr, len) do { } while (0)
47 #endif /* VERBOSE_DEBUG */
48
49 #define ENTER() pr_vdebug("%s()\n", __func__)
50
51
52 /* The data structure and setup file ****************************************/
53
54 enum ffs_state {
55 /*
56 * Waiting for descriptors and strings.
57 *
58 * In this state no open(2), read(2) or write(2) on epfiles
59 * may succeed (which should not be a problem, as no such
60 * files should be open in the first place).
61 */
62 FFS_READ_DESCRIPTORS,
63 FFS_READ_STRINGS,
64
65 /*
66 * We've got descriptors and strings. We are or have called
67 * functionfs_ready_callback(). functionfs_bind() may have
68 * been called but we don't know.
69 *
70 * This is the only state in which operations on epfiles may
71 * succeed.
72 */
73 FFS_ACTIVE,
74
75 /*
76 * All endpoints have been closed. This state is also set if
77 * we encounter an unrecoverable error. The only
78 * unrecoverable error is the situation when, after reading
79 * strings from user space, we fail to initialise epfiles or
80 * functionfs_ready_callback() returns an error (<0).
81 *
82 * In this state no open(2), read(2) or write(2) (on ep0 as
83 * well as on epfiles) may succeed (at this point epfiles are
84 * unlinked and all closed, so this is not a problem; ep0 is
85 * also closed, but the ep0 file still exists, so open(2) on
86 * ep0 must fail).
87 */
88 FFS_CLOSING
89 };
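/*
 * A minimal user-space sketch of the lifecycle described above (purely
 * illustrative; the mount point and the "descriptors"/"strings" blobs are
 * assumptions of this example -- see the sketches near
 * __ffs_data_got_descs() and __ffs_data_got_strings() below for their
 * layout):
 *
 *	int ep0 = open("/dev/usb-ffs/my_func/ep0", O_RDWR);
 *
 *	write(ep0, &descriptors, sizeof descriptors);	// FFS_READ_DESCRIPTORS
 *	write(ep0, &strings, sizeof strings);		// FFS_READ_STRINGS
 *
 *	// State is now FFS_ACTIVE: the epN files exist and events
 *	// (FUNCTIONFS_BIND, FUNCTIONFS_ENABLE, ...) can be read from ep0.
 *	struct usb_functionfs_event event;
 *	read(ep0, &event, sizeof event);
 */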
90
91
92 enum ffs_setup_state {
93 /* There is no setup request pending. */
94 FFS_NO_SETUP,
95 /*
96 * User has read events and there was a setup request event
97 * among them. The next read/write on ep0 will handle the
98 * request.
99 */
100 FFS_SETUP_PENDING,
101 /*
102 * There was a setup request event pending, but before user
103 * space handled it another event arrived which canceled the
104 * existing setup. If this state is set, read/write on ep0
105 * returns -EIDRM. This state is only set when adding an event.
106 */
107 FFS_SETUP_CANCELED
108 };
109
110
111
112 struct ffs_epfile;
113 struct ffs_function;
114
115 struct ffs_data {
116 struct usb_gadget *gadget;
117
118 /*
119 * Protects read/write operations; only one read/write at
120 * a time. As a consequence it also protects ep0req and
121 * company. Held while a setup request is being processed
122 * (queued).
123 */
124 struct mutex mutex;
125
126 /*
127 * Protects access to endpoint-related structures (basically
128 * usb_ep_queue(), usb_ep_dequeue(), etc. calls), except for
129 * endpoint zero.
130 */
131 spinlock_t eps_lock;
132
133 /*
134 * XXX REVISIT do we need our own request? Since we are not
135 * handling setup requests immediately user space may be so
136 * slow that another setup will be sent to the gadget but this
137 * time not to us but another function and then there could be
138 * a race. Is that the case? Or maybe we can use cdev->req
139 * after all, maybe we just need some spinlock for that?
140 */
141 struct usb_request *ep0req; /* P: mutex */
142 struct completion ep0req_completion; /* P: mutex */
143 int ep0req_status; /* P: mutex */
144
145 /* reference counter */
146 atomic_t ref;
147 /* how many files are opened (EP0 and others) */
148 atomic_t opened;
149
150 /* EP0 state */
151 enum ffs_state state;
152
153 /*
154 * Possible transitions:
155 * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock
156 * happens only in ep0 read which is P: mutex
157 * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock
158 * happens only in ep0 i/o which is P: mutex
159 * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELED -- P: ev.waitq.lock
160 * + FFS_SETUP_CANCELED -> FFS_NO_SETUP -- cmpxchg
161 */
162 enum ffs_setup_state setup_state;
163
164 #define FFS_SETUP_STATE(ffs) \
165 ((enum ffs_setup_state)cmpxchg(&(ffs)->setup_state, \
166 FFS_SETUP_CANCELED, FFS_NO_SETUP))
167
168 /* Events & such. */
169 struct {
170 u8 types[4];
171 unsigned short count;
172 /* XXX REVISIT need to update it in some places, or do we? */
173 unsigned short can_stall;
174 struct usb_ctrlrequest setup;
175
176 wait_queue_head_t waitq;
177 } ev; /* the whole structure, P: ev.waitq.lock */
178
179 /* Flags */
180 unsigned long flags;
181 #define FFS_FL_CALL_CLOSED_CALLBACK 0
182 #define FFS_FL_BOUND 1
183
184 /* Active function */
185 struct ffs_function *func;
186
187 /*
188 * Device name, write once when file system is mounted.
189 * Intended for user to read if she wants.
190 */
191 const char *dev_name;
192 /* Private data for our user (ie. gadget). Managed by user. */
193 void *private_data;
194
195 /* filled by __ffs_data_got_descs() */
196 /*
197 * Real descriptors are 16 bytes after raw_descs (so you need
198 * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
199 * first full speed descriptor). raw_descs_length and
200 * raw_fs_descs_length do not have those 16 bytes added.
201 */
202 const void *raw_descs;
203 unsigned raw_descs_length;
204 unsigned raw_fs_descs_length;
205 unsigned fs_descs_count;
206 unsigned hs_descs_count;
207
208 unsigned short strings_count;
209 unsigned short interfaces_count;
210 unsigned short eps_count;
211 unsigned short _pad1;
212
213 /* filled by __ffs_data_got_strings() */
214 /* ids in stringtabs are set in functionfs_bind() */
215 const void *raw_strings;
216 struct usb_gadget_strings **stringtabs;
217
218 /*
219 * File system's super block, write once when file system is
220 * mounted.
221 */
222 struct super_block *sb;
223
224 /* File permissions, written once when fs is mounted */
225 struct ffs_file_perms {
226 umode_t mode;
227 kuid_t uid;
228 kgid_t gid;
229 } file_perms;
230
231 /*
232 * The endpoint files, filled by ffs_epfiles_create(),
233 * destroyed by ffs_epfiles_destroy().
234 */
235 struct ffs_epfile *epfiles;
236 };
237
238 /* Reference counter handling */
239 static void ffs_data_get(struct ffs_data *ffs);
240 static void ffs_data_put(struct ffs_data *ffs);
241 /* Creates new ffs_data object. */
242 static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
243
244 /* Opened counter handling. */
245 static void ffs_data_opened(struct ffs_data *ffs);
246 static void ffs_data_closed(struct ffs_data *ffs);
247
248 /* Called with ffs->mutex held; take over ownership of data. */
249 static int __must_check
250 __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
251 static int __must_check
252 __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
253
254
255 /* The function structure ***************************************************/
256
257 struct ffs_ep;
258
259 struct ffs_function {
260 struct usb_configuration *conf;
261 struct usb_gadget *gadget;
262 struct ffs_data *ffs;
263
264 struct ffs_ep *eps;
265 u8 eps_revmap[16];
266 short *interfaces_nums;
267
268 struct usb_function function;
269 };
270
271
272 static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
273 {
274 return container_of(f, struct ffs_function, function);
275 }
276
277 static void ffs_func_free(struct ffs_function *func);
278
279 static void ffs_func_eps_disable(struct ffs_function *func);
280 static int __must_check ffs_func_eps_enable(struct ffs_function *func);
281
282 static int ffs_func_bind(struct usb_configuration *,
283 struct usb_function *);
284 static void ffs_func_unbind(struct usb_configuration *,
285 struct usb_function *);
286 static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
287 static void ffs_func_disable(struct usb_function *);
288 static int ffs_func_setup(struct usb_function *,
289 const struct usb_ctrlrequest *);
290 static void ffs_func_suspend(struct usb_function *);
291 static void ffs_func_resume(struct usb_function *);
292
293
294 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
295 static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
296
297
298 /* The endpoints structures *************************************************/
299
300 struct ffs_ep {
301 struct usb_ep *ep; /* P: ffs->eps_lock */
302 struct usb_request *req; /* P: epfile->mutex */
303
304 /* [0]: full speed, [1]: high speed */
305 struct usb_endpoint_descriptor *descs[2];
306
307 u8 num;
308
309 int status; /* P: epfile->mutex */
310 };
311
312 struct ffs_epfile {
313 /* Protects ep->ep and ep->req. */
314 struct mutex mutex;
315 wait_queue_head_t wait;
316
317 struct ffs_data *ffs;
318 struct ffs_ep *ep; /* P: ffs->eps_lock */
319
320 struct dentry *dentry;
321
322 char name[5];
323
324 unsigned char in; /* P: ffs->eps_lock */
325 unsigned char isoc; /* P: ffs->eps_lock */
326
327 unsigned char _pad;
328 };
329
330 static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
331 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
332
333 static struct inode *__must_check
334 ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
335 const struct file_operations *fops,
336 struct dentry **dentry_p);
337
338
339 /* Misc helper functions ****************************************************/
340
341 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
342 __attribute__((warn_unused_result, nonnull));
343 static char *ffs_prepare_buffer(const char __user *buf, size_t len)
344 __attribute__((warn_unused_result, nonnull));
345
346
347 /* Control file aka ep0 *****************************************************/
348
349 static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
350 {
351 struct ffs_data *ffs = req->context;
352
353 complete_all(&ffs->ep0req_completion);
354 }
355
356 static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
357 {
358 struct usb_request *req = ffs->ep0req;
359 int ret;
360
361 req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
362
363 spin_unlock_irq(&ffs->ev.waitq.lock);
364
365 req->buf = data;
366 req->length = len;
367
368 /*
369 * The UDC layer requires us to provide a buffer even for a ZLP,
370 * but it should not use it at all. Let's provide a poisoned
371 * pointer to catch a possible bug in the driver.
372 */
373 if (req->buf == NULL)
374 req->buf = (void *)0xDEADBABE;
375
376 INIT_COMPLETION(ffs->ep0req_completion);
377
378 ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
379 if (unlikely(ret < 0))
380 return ret;
381
382 ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
383 if (unlikely(ret)) {
384 usb_ep_dequeue(ffs->gadget->ep0, req);
385 return -EINTR;
386 }
387
388 ffs->setup_state = FFS_NO_SETUP;
389 return ffs->ep0req_status;
390 }
391
392 static int __ffs_ep0_stall(struct ffs_data *ffs)
393 {
394 if (ffs->ev.can_stall) {
395 pr_vdebug("ep0 stall\n");
396 usb_ep_set_halt(ffs->gadget->ep0);
397 ffs->setup_state = FFS_NO_SETUP;
398 return -EL2HLT;
399 } else {
400 pr_debug("bogus ep0 stall!\n");
401 return -ESRCH;
402 }
403 }
404
405 static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
406 size_t len, loff_t *ptr)
407 {
408 struct ffs_data *ffs = file->private_data;
409 ssize_t ret;
410 char *data;
411
412 ENTER();
413
414 /* Fast check if setup was canceled */
415 if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
416 return -EIDRM;
417
418 /* Acquire mutex */
419 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
420 if (unlikely(ret < 0))
421 return ret;
422
423 /* Check state */
424 switch (ffs->state) {
425 case FFS_READ_DESCRIPTORS:
426 case FFS_READ_STRINGS:
427 /* Copy data */
428 if (unlikely(len < 16)) {
429 ret = -EINVAL;
430 break;
431 }
432
433 data = ffs_prepare_buffer(buf, len);
434 if (IS_ERR(data)) {
435 ret = PTR_ERR(data);
436 break;
437 }
438
439 /* Handle data */
440 if (ffs->state == FFS_READ_DESCRIPTORS) {
441 pr_info("read descriptors\n");
442 ret = __ffs_data_got_descs(ffs, data, len);
443 if (unlikely(ret < 0))
444 break;
445
446 ffs->state = FFS_READ_STRINGS;
447 ret = len;
448 } else {
449 pr_info("read strings\n");
450 ret = __ffs_data_got_strings(ffs, data, len);
451 if (unlikely(ret < 0))
452 break;
453
454 ret = ffs_epfiles_create(ffs);
455 if (unlikely(ret)) {
456 ffs->state = FFS_CLOSING;
457 break;
458 }
459
460 ffs->state = FFS_ACTIVE;
461 mutex_unlock(&ffs->mutex);
462
463 ret = functionfs_ready_callback(ffs);
464 if (unlikely(ret < 0)) {
465 ffs->state = FFS_CLOSING;
466 return ret;
467 }
468
469 set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
470 return len;
471 }
472 break;
473
474 case FFS_ACTIVE:
475 data = NULL;
476 /*
477 * We're called from user space, we can use _irq
478 * rather than _irqsave
479 */
480 spin_lock_irq(&ffs->ev.waitq.lock);
481 switch (FFS_SETUP_STATE(ffs)) {
482 case FFS_SETUP_CANCELED:
483 ret = -EIDRM;
484 goto done_spin;
485
486 case FFS_NO_SETUP:
487 ret = -ESRCH;
488 goto done_spin;
489
490 case FFS_SETUP_PENDING:
491 break;
492 }
493
494 /* FFS_SETUP_PENDING */
495 if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
496 spin_unlock_irq(&ffs->ev.waitq.lock);
497 ret = __ffs_ep0_stall(ffs);
498 break;
499 }
500
501 /* FFS_SETUP_PENDING and not stall */
502 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
503
504 spin_unlock_irq(&ffs->ev.waitq.lock);
505
506 data = ffs_prepare_buffer(buf, len);
507 if (IS_ERR(data)) {
508 ret = PTR_ERR(data);
509 break;
510 }
511
512 spin_lock_irq(&ffs->ev.waitq.lock);
513
514 /*
515 * We are guaranteed to be still in FFS_ACTIVE state
516 * but the state of setup could have changed from
517 * FFS_SETUP_PENDING to FFS_SETUP_CANCELED so we need
518 * to check for that. If that happened we copied data
519 * from user space in vain but it's unlikely.
520 *
521 * For sure we are not in FFS_NO_SETUP since this is
522 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
523 * transition can be performed and it's protected by
524 * mutex.
525 */
526 if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
527 ret = -EIDRM;
528 done_spin:
529 spin_unlock_irq(&ffs->ev.waitq.lock);
530 } else {
531 /* unlocks spinlock */
532 ret = __ffs_ep0_queue_wait(ffs, data, len);
533 }
534 kfree(data);
535 break;
536
537 default:
538 ret = -EBADFD;
539 break;
540 }
541
542 mutex_unlock(&ffs->mutex);
543 return ret;
544 }
545
546 static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
547 size_t n)
548 {
549 /*
550 * We are holding ffs->ev.waitq.lock and ffs->mutex and we need
551 * to release them.
552 */
553 struct usb_functionfs_event events[n];
554 unsigned i = 0;
555
556 memset(events, 0, sizeof events);
557
558 do {
559 events[i].type = ffs->ev.types[i];
560 if (events[i].type == FUNCTIONFS_SETUP) {
561 events[i].u.setup = ffs->ev.setup;
562 ffs->setup_state = FFS_SETUP_PENDING;
563 }
564 } while (++i < n);
565
566 if (n < ffs->ev.count) {
567 ffs->ev.count -= n;
568 memmove(ffs->ev.types, ffs->ev.types + n,
569 ffs->ev.count * sizeof *ffs->ev.types);
570 } else {
571 ffs->ev.count = 0;
572 }
573
574 spin_unlock_irq(&ffs->ev.waitq.lock);
575 mutex_unlock(&ffs->mutex);
576
577 return unlikely(__copy_to_user(buf, events, sizeof events))
578 ? -EFAULT : sizeof events;
579 }
580
581 static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
582 size_t len, loff_t *ptr)
583 {
584 struct ffs_data *ffs = file->private_data;
585 char *data = NULL;
586 size_t n;
587 int ret;
588
589 ENTER();
590
591 /* Fast check if setup was canceled */
592 if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
593 return -EIDRM;
594
595 /* Acquire mutex */
596 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
597 if (unlikely(ret < 0))
598 return ret;
599
600 /* Check state */
601 if (ffs->state != FFS_ACTIVE) {
602 ret = -EBADFD;
603 goto done_mutex;
604 }
605
606 /*
607 * We're called from user space, we can use _irq rather than
608 * _irqsave
609 */
610 spin_lock_irq(&ffs->ev.waitq.lock);
611
612 switch (FFS_SETUP_STATE(ffs)) {
613 case FFS_SETUP_CANCELED:
614 ret = -EIDRM;
615 break;
616
617 case FFS_NO_SETUP:
618 n = len / sizeof(struct usb_functionfs_event);
619 if (unlikely(!n)) {
620 ret = -EINVAL;
621 break;
622 }
623
624 if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
625 ret = -EAGAIN;
626 break;
627 }
628
629 if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
630 ffs->ev.count)) {
631 ret = -EINTR;
632 break;
633 }
634
635 return __ffs_ep0_read_events(ffs, buf,
636 min(n, (size_t)ffs->ev.count));
637
638 case FFS_SETUP_PENDING:
639 if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
640 spin_unlock_irq(&ffs->ev.waitq.lock);
641 ret = __ffs_ep0_stall(ffs);
642 goto done_mutex;
643 }
644
645 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
646
647 spin_unlock_irq(&ffs->ev.waitq.lock);
648
649 if (likely(len)) {
650 data = kmalloc(len, GFP_KERNEL);
651 if (unlikely(!data)) {
652 ret = -ENOMEM;
653 goto done_mutex;
654 }
655 }
656
657 spin_lock_irq(&ffs->ev.waitq.lock);
658
659 /* See ffs_ep0_write() */
660 if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
661 ret = -EIDRM;
662 break;
663 }
664
665 /* unlocks spinlock */
666 ret = __ffs_ep0_queue_wait(ffs, data, len);
667 if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
668 ret = -EFAULT;
669 goto done_mutex;
670
671 default:
672 ret = -EBADFD;
673 break;
674 }
675
676 spin_unlock_irq(&ffs->ev.waitq.lock);
677 done_mutex:
678 mutex_unlock(&ffs->mutex);
679 kfree(data);
680 return ret;
681 }
682
683 static int ffs_ep0_open(struct inode *inode, struct file *file)
684 {
685 struct ffs_data *ffs = inode->i_private;
686
687 ENTER();
688
689 if (unlikely(ffs->state == FFS_CLOSING))
690 return -EBUSY;
691
692 file->private_data = ffs;
693 ffs_data_opened(ffs);
694
695 return 0;
696 }
697
698 static int ffs_ep0_release(struct inode *inode, struct file *file)
699 {
700 struct ffs_data *ffs = file->private_data;
701
702 ENTER();
703
704 ffs_data_closed(ffs);
705
706 return 0;
707 }
708
709 static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
710 {
711 struct ffs_data *ffs = file->private_data;
712 struct usb_gadget *gadget = ffs->gadget;
713 long ret;
714
715 ENTER();
716
717 if (code == FUNCTIONFS_INTERFACE_REVMAP) {
718 struct ffs_function *func = ffs->func;
719 ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
720 } else if (gadget && gadget->ops->ioctl) {
721 ret = gadget->ops->ioctl(gadget, code, value);
722 } else {
723 ret = -ENOTTY;
724 }
725
726 return ret;
727 }
728
729 static const struct file_operations ffs_ep0_operations = {
730 .owner = THIS_MODULE,
731 .llseek = no_llseek,
732
733 .open = ffs_ep0_open,
734 .write = ffs_ep0_write,
735 .read = ffs_ep0_read,
736 .release = ffs_ep0_release,
737 .unlocked_ioctl = ffs_ep0_ioctl,
738 };
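/*
 * Illustrative user-space event loop driving the ep0 operations above (a
 * sketch, not part of this driver; "ep0" is an already-open ep0 file
 * descriptor, "buf" is an example buffer, and error handling is omitted):
 *
 *	struct usb_functionfs_event event;
 *	char buf[64];
 *
 *	while (read(ep0, &event, sizeof event) == sizeof event) {
 *		switch (event.type) {
 *		case FUNCTIONFS_ENABLE:
 *			// host selected our configuration; epN I/O may start
 *			break;
 *		case FUNCTIONFS_SETUP:
 *			// answer the control request on ep0 itself
 *			if (event.u.setup.bRequestType & USB_DIR_IN)
 *				write(ep0, buf, le16toh(event.u.setup.wLength));
 *			else
 *				read(ep0, buf, le16toh(event.u.setup.wLength));
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */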
739
740
741 /* "Normal" endpoints operations ********************************************/
742
743 static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
744 {
745 ENTER();
746 if (likely(req->context)) {
747 struct ffs_ep *ep = _ep->driver_data;
748 ep->status = req->status ? req->status : req->actual;
749 complete(req->context);
750 }
751 }
752
753 static ssize_t ffs_epfile_io(struct file *file,
754 char __user *buf, size_t len, int read)
755 {
756 struct ffs_epfile *epfile = file->private_data;
757 struct ffs_ep *ep;
758 char *data = NULL;
759 ssize_t ret;
760 int halt;
761
762 goto first_try;
763 do {
764 spin_unlock_irq(&epfile->ffs->eps_lock);
765 mutex_unlock(&epfile->mutex);
766
767 first_try:
768 /* Are we still active? */
769 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
770 ret = -ENODEV;
771 goto error;
772 }
773
774 /* Wait for endpoint to be enabled */
775 ep = epfile->ep;
776 if (!ep) {
777 if (file->f_flags & O_NONBLOCK) {
778 ret = -EAGAIN;
779 goto error;
780 }
781
782 if (wait_event_interruptible(epfile->wait,
783 (ep = epfile->ep))) {
784 ret = -EINTR;
785 goto error;
786 }
787 }
788
789 /* Do we halt? */
790 halt = !read == !epfile->in;
791 if (halt && epfile->isoc) {
792 ret = -EINVAL;
793 goto error;
794 }
795
796 /* Allocate & copy */
797 if (!halt && !data) {
798 data = kzalloc(len, GFP_KERNEL);
799 if (unlikely(!data))
800 return -ENOMEM;
801
802 if (!read &&
803 unlikely(__copy_from_user(data, buf, len))) {
804 ret = -EFAULT;
805 goto error;
806 }
807 }
808
809 /* We will be using request */
810 ret = ffs_mutex_lock(&epfile->mutex,
811 file->f_flags & O_NONBLOCK);
812 if (unlikely(ret))
813 goto error;
814
815 /*
816 * We're called from user space, we can use _irq rather than
817 * _irqsave
818 */
819 spin_lock_irq(&epfile->ffs->eps_lock);
820
821 /*
822 * Did the endpoint get disabled or changed while we
823 * were acquiring the mutex?
824 */
825 } while (unlikely(epfile->ep != ep));
826
827 /* Halt */
828 if (unlikely(halt)) {
829 if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
830 usb_ep_set_halt(ep->ep);
831 spin_unlock_irq(&epfile->ffs->eps_lock);
832 ret = -EBADMSG;
833 } else {
834 /* Fire the request */
835 DECLARE_COMPLETION_ONSTACK(done);
836
837 struct usb_request *req = ep->req;
838 req->context = &done;
839 req->complete = ffs_epfile_io_complete;
840 req->buf = data;
841 req->length = len;
842
843 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
844
845 spin_unlock_irq(&epfile->ffs->eps_lock);
846
847 if (unlikely(ret < 0)) {
848 /* nop */
849 } else if (unlikely(wait_for_completion_interruptible(&done))) {
850 ret = -EINTR;
851 usb_ep_dequeue(ep->ep, req);
852 } else {
853 ret = ep->status;
854 if (read && ret > 0 &&
855 unlikely(copy_to_user(buf, data, ret)))
856 ret = -EFAULT;
857 }
858 }
859
860 mutex_unlock(&epfile->mutex);
861 error:
862 kfree(data);
863 return ret;
864 }
865
866 static ssize_t
867 ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
868 loff_t *ptr)
869 {
870 ENTER();
871
872 return ffs_epfile_io(file, (char __user *)buf, len, 0);
873 }
874
875 static ssize_t
876 ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
877 {
878 ENTER();
879
880 return ffs_epfile_io(file, buf, len, 1);
881 }
882
883 static int
884 ffs_epfile_open(struct inode *inode, struct file *file)
885 {
886 struct ffs_epfile *epfile = inode->i_private;
887
888 ENTER();
889
890 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
891 return -ENODEV;
892
893 file->private_data = epfile;
894 ffs_data_opened(epfile->ffs);
895
896 return 0;
897 }
898
899 static int
900 ffs_epfile_release(struct inode *inode, struct file *file)
901 {
902 struct ffs_epfile *epfile = inode->i_private;
903
904 ENTER();
905
906 ffs_data_closed(epfile->ffs);
907
908 return 0;
909 }
910
911 static long ffs_epfile_ioctl(struct file *file, unsigned code,
912 unsigned long value)
913 {
914 struct ffs_epfile *epfile = file->private_data;
915 int ret;
916
917 ENTER();
918
919 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
920 return -ENODEV;
921
922 spin_lock_irq(&epfile->ffs->eps_lock);
923 if (likely(epfile->ep)) {
924 switch (code) {
925 case FUNCTIONFS_FIFO_STATUS:
926 ret = usb_ep_fifo_status(epfile->ep->ep);
927 break;
928 case FUNCTIONFS_FIFO_FLUSH:
929 usb_ep_fifo_flush(epfile->ep->ep);
930 ret = 0;
931 break;
932 case FUNCTIONFS_CLEAR_HALT:
933 ret = usb_ep_clear_halt(epfile->ep->ep);
934 break;
935 case FUNCTIONFS_ENDPOINT_REVMAP:
936 ret = epfile->ep->num;
937 break;
938 default:
939 ret = -ENOTTY;
940 }
941 } else {
942 ret = -ENODEV;
943 }
944 spin_unlock_irq(&epfile->ffs->eps_lock);
945
946 return ret;
947 }
948
949 static const struct file_operations ffs_epfile_operations = {
950 .owner = THIS_MODULE,
951 .llseek = no_llseek,
952
953 .open = ffs_epfile_open,
954 .write = ffs_epfile_write,
955 .read = ffs_epfile_read,
956 .release = ffs_epfile_release,
957 .unlocked_ioctl = ffs_epfile_ioctl,
958 };
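/*
 * Illustrative user-space I/O on the endpoint files served by the
 * operations above (a sketch; which epN backs which endpoint depends on
 * the descriptors supplied earlier, so the paths and sizes are example
 * values). write(2) on a file backing an IN endpoint sends data to the
 * host and read(2) on a file backing an OUT endpoint receives data from
 * it; going against a file's direction stalls the endpoint and fails with
 * -EBADMSG:
 *
 *	int in_fd  = open("/dev/usb-ffs/my_func/ep1", O_WRONLY);
 *	int out_fd = open("/dev/usb-ffs/my_func/ep2", O_RDONLY);
 *	char buf[512];
 *
 *	ssize_t n = read(out_fd, buf, sizeof buf);	// waits for host data
 *	if (n > 0)
 *		write(in_fd, buf, n);			// echo it back
 */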
959
960
961 /* File system and super block operations ***********************************/
962
963 /*
964 * Mounting the file system creates a controller file, used first for
965 * function configuration then later for event monitoring.
966 */
967
968 static struct inode *__must_check
969 ffs_sb_make_inode(struct super_block *sb, void *data,
970 const struct file_operations *fops,
971 const struct inode_operations *iops,
972 struct ffs_file_perms *perms)
973 {
974 struct inode *inode;
975
976 ENTER();
977
978 inode = new_inode(sb);
979
980 if (likely(inode)) {
981 struct timespec current_time = CURRENT_TIME;
982
983 inode->i_ino = get_next_ino();
984 inode->i_mode = perms->mode;
985 inode->i_uid = perms->uid;
986 inode->i_gid = perms->gid;
987 inode->i_atime = current_time;
988 inode->i_mtime = current_time;
989 inode->i_ctime = current_time;
990 inode->i_private = data;
991 if (fops)
992 inode->i_fop = fops;
993 if (iops)
994 inode->i_op = iops;
995 }
996
997 return inode;
998 }
999
1000 /* Create "regular" file */
1001 static struct inode *ffs_sb_create_file(struct super_block *sb,
1002 const char *name, void *data,
1003 const struct file_operations *fops,
1004 struct dentry **dentry_p)
1005 {
1006 struct ffs_data *ffs = sb->s_fs_info;
1007 struct dentry *dentry;
1008 struct inode *inode;
1009
1010 ENTER();
1011
1012 dentry = d_alloc_name(sb->s_root, name);
1013 if (unlikely(!dentry))
1014 return NULL;
1015
1016 inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
1017 if (unlikely(!inode)) {
1018 dput(dentry);
1019 return NULL;
1020 }
1021
1022 d_add(dentry, inode);
1023 if (dentry_p)
1024 *dentry_p = dentry;
1025
1026 return inode;
1027 }
1028
1029 /* Super block */
1030 static const struct super_operations ffs_sb_operations = {
1031 .statfs = simple_statfs,
1032 .drop_inode = generic_delete_inode,
1033 };
1034
1035 struct ffs_sb_fill_data {
1036 struct ffs_file_perms perms;
1037 umode_t root_mode;
1038 const char *dev_name;
1039 union {
1040 /* set by ffs_fs_mount(), read by ffs_sb_fill() */
1041 void *private_data;
1042 /* set by ffs_sb_fill(), read by ffs_fs_mount */
1043 struct ffs_data *ffs_data;
1044 };
1045 };
1046
1047 static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1048 {
1049 struct ffs_sb_fill_data *data = _data;
1050 struct inode *inode;
1051 struct ffs_data *ffs;
1052
1053 ENTER();
1054
1055 /* Initialise data */
1056 ffs = ffs_data_new();
1057 if (unlikely(!ffs))
1058 goto Enomem;
1059
1060 ffs->sb = sb;
1061 ffs->dev_name = kstrdup(data->dev_name, GFP_KERNEL);
1062 if (unlikely(!ffs->dev_name))
1063 goto Enomem;
1064 ffs->file_perms = data->perms;
1065 ffs->private_data = data->private_data;
1066
1067 /* used by the caller of this function */
1068 data->ffs_data = ffs;
1069
1070 sb->s_fs_info = ffs;
1071 sb->s_blocksize = PAGE_CACHE_SIZE;
1072 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
1073 sb->s_magic = FUNCTIONFS_MAGIC;
1074 sb->s_op = &ffs_sb_operations;
1075 sb->s_time_gran = 1;
1076
1077 /* Root inode */
1078 data->perms.mode = data->root_mode;
1079 inode = ffs_sb_make_inode(sb, NULL,
1080 &simple_dir_operations,
1081 &simple_dir_inode_operations,
1082 &data->perms);
1083 sb->s_root = d_make_root(inode);
1084 if (unlikely(!sb->s_root))
1085 goto Enomem;
1086
1087 /* EP0 file */
1088 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
1089 &ffs_ep0_operations, NULL)))
1090 goto Enomem;
1091
1092 return 0;
1093
1094 Enomem:
1095 return -ENOMEM;
1096 }
1097
1098 static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
1099 {
1100 ENTER();
1101
1102 if (!opts || !*opts)
1103 return 0;
1104
1105 for (;;) {
1106 char *end, *eq, *comma;
1107 unsigned long value;
1108
1109 /* Option limit */
1110 comma = strchr(opts, ',');
1111 if (comma)
1112 *comma = 0;
1113
1114 /* Value limit */
1115 eq = strchr(opts, '=');
1116 if (unlikely(!eq)) {
1117 pr_err("'=' missing in %s\n", opts);
1118 return -EINVAL;
1119 }
1120 *eq = 0;
1121
1122 /* Parse value */
1123 value = simple_strtoul(eq + 1, &end, 0);
1124 if (unlikely(*end != ',' && *end != 0)) {
1125 pr_err("%s: invalid value: %s\n", opts, eq + 1);
1126 return -EINVAL;
1127 }
1128
1129 /* Interpret option */
1130 switch (eq - opts) {
1131 case 5:
1132 if (!memcmp(opts, "rmode", 5))
1133 data->root_mode = (value & 0555) | S_IFDIR;
1134 else if (!memcmp(opts, "fmode", 5))
1135 data->perms.mode = (value & 0666) | S_IFREG;
1136 else
1137 goto invalid;
1138 break;
1139
1140 case 4:
1141 if (!memcmp(opts, "mode", 4)) {
1142 data->root_mode = (value & 0555) | S_IFDIR;
1143 data->perms.mode = (value & 0666) | S_IFREG;
1144 } else {
1145 goto invalid;
1146 }
1147 break;
1148
1149 case 3:
1150 if (!memcmp(opts, "uid", 3)) {
1151 data->perms.uid = make_kuid(current_user_ns(), value);
1152 if (!uid_valid(data->perms.uid)) {
1153 pr_err("%s: unmapped value: %lu\n", opts, value);
1154 return -EINVAL;
1155 }
1156 } else if (!memcmp(opts, "gid", 3)) {
1157 data->perms.gid = make_kgid(current_user_ns(), value);
1158 if (!gid_valid(data->perms.gid)) {
1159 pr_err("%s: unmapped value: %lu\n", opts, value);
1160 return -EINVAL;
1161 }
1162 } else {
1163 goto invalid;
1164 }
1165 break;
1166
1167 default:
1168 invalid:
1169 pr_err("%s: invalid option\n", opts);
1170 return -EINVAL;
1171 }
1172
1173 /* Next iteration */
1174 if (!comma)
1175 break;
1176 opts = comma + 1;
1177 }
1178
1179 return 0;
1180 }
1181
1182 /* "mount -t functionfs dev_name /dev/function" ends up here */
1183
1184 static struct dentry *
1185 ffs_fs_mount(struct file_system_type *t, int flags,
1186 const char *dev_name, void *opts)
1187 {
1188 struct ffs_sb_fill_data data = {
1189 .perms = {
1190 .mode = S_IFREG | 0600,
1191 .uid = GLOBAL_ROOT_UID,
1192 .gid = GLOBAL_ROOT_GID,
1193 },
1194 .root_mode = S_IFDIR | 0500,
1195 };
1196 struct dentry *rv;
1197 int ret;
1198 void *ffs_dev;
1199
1200 ENTER();
1201
1202 ret = ffs_fs_parse_opts(&data, opts);
1203 if (unlikely(ret < 0))
1204 return ERR_PTR(ret);
1205
1206 ffs_dev = functionfs_acquire_dev_callback(dev_name);
1207 if (IS_ERR(ffs_dev))
1208 return ffs_dev;
1209
1210 data.dev_name = dev_name;
1211 data.private_data = ffs_dev;
1212 rv = mount_nodev(t, flags, &data, ffs_sb_fill);
1213
1214 /* data.ffs_data is set by ffs_sb_fill */
1215 if (IS_ERR(rv))
1216 functionfs_release_dev_callback(data.ffs_data);
1217
1218 return rv;
1219 }
1220
1221 static void
1222 ffs_fs_kill_sb(struct super_block *sb)
1223 {
1224 ENTER();
1225
1226 kill_litter_super(sb);
1227 if (sb->s_fs_info) {
1228 functionfs_release_dev_callback(sb->s_fs_info);
1229 ffs_data_put(sb->s_fs_info);
1230 }
1231 }
1232
1233 static struct file_system_type ffs_fs_type = {
1234 .owner = THIS_MODULE,
1235 .name = "functionfs",
1236 .mount = ffs_fs_mount,
1237 .kill_sb = ffs_fs_kill_sb,
1238 };
1239
1240
1241 /* Driver's main init/cleanup functions *************************************/
1242
1243 static int functionfs_init(void)
1244 {
1245 int ret;
1246
1247 ENTER();
1248
1249 ret = register_filesystem(&ffs_fs_type);
1250 if (likely(!ret))
1251 pr_info("file system registered\n");
1252 else
1253 pr_err("failed registering file system (%d)\n", ret);
1254
1255 return ret;
1256 }
1257
1258 static void functionfs_cleanup(void)
1259 {
1260 ENTER();
1261
1262 pr_info("unloading\n");
1263 unregister_filesystem(&ffs_fs_type);
1264 }
1265
1266
1267 /* ffs_data and ffs_function construction and destruction code **************/
1268
1269 static void ffs_data_clear(struct ffs_data *ffs);
1270 static void ffs_data_reset(struct ffs_data *ffs);
1271
1272 static void ffs_data_get(struct ffs_data *ffs)
1273 {
1274 ENTER();
1275
1276 atomic_inc(&ffs->ref);
1277 }
1278
1279 static void ffs_data_opened(struct ffs_data *ffs)
1280 {
1281 ENTER();
1282
1283 atomic_inc(&ffs->ref);
1284 atomic_inc(&ffs->opened);
1285 }
1286
1287 static void ffs_data_put(struct ffs_data *ffs)
1288 {
1289 ENTER();
1290
1291 if (unlikely(atomic_dec_and_test(&ffs->ref))) {
1292 pr_info("%s(): freeing\n", __func__);
1293 ffs_data_clear(ffs);
1294 BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
1295 waitqueue_active(&ffs->ep0req_completion.wait));
1296 kfree(ffs->dev_name);
1297 kfree(ffs);
1298 }
1299 }
1300
1301 static void ffs_data_closed(struct ffs_data *ffs)
1302 {
1303 ENTER();
1304
1305 if (atomic_dec_and_test(&ffs->opened)) {
1306 ffs->state = FFS_CLOSING;
1307 ffs_data_reset(ffs);
1308 }
1309
1310 ffs_data_put(ffs);
1311 }
1312
1313 static struct ffs_data *ffs_data_new(void)
1314 {
1315 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1316 if (unlikely(!ffs))
1317 return NULL;
1318
1319 ENTER();
1320
1321 atomic_set(&ffs->ref, 1);
1322 atomic_set(&ffs->opened, 0);
1323 ffs->state = FFS_READ_DESCRIPTORS;
1324 mutex_init(&ffs->mutex);
1325 spin_lock_init(&ffs->eps_lock);
1326 init_waitqueue_head(&ffs->ev.waitq);
1327 init_completion(&ffs->ep0req_completion);
1328
1329 /* XXX REVISIT need to update it in some places, or do we? */
1330 ffs->ev.can_stall = 1;
1331
1332 return ffs;
1333 }
1334
1335 static void ffs_data_clear(struct ffs_data *ffs)
1336 {
1337 ENTER();
1338
1339 if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
1340 functionfs_closed_callback(ffs);
1341
1342 BUG_ON(ffs->gadget);
1343
1344 if (ffs->epfiles)
1345 ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
1346
1347 kfree(ffs->raw_descs);
1348 kfree(ffs->raw_strings);
1349 kfree(ffs->stringtabs);
1350 }
1351
1352 static void ffs_data_reset(struct ffs_data *ffs)
1353 {
1354 ENTER();
1355
1356 ffs_data_clear(ffs);
1357
1358 ffs->epfiles = NULL;
1359 ffs->raw_descs = NULL;
1360 ffs->raw_strings = NULL;
1361 ffs->stringtabs = NULL;
1362
1363 ffs->raw_descs_length = 0;
1364 ffs->raw_fs_descs_length = 0;
1365 ffs->fs_descs_count = 0;
1366 ffs->hs_descs_count = 0;
1367
1368 ffs->strings_count = 0;
1369 ffs->interfaces_count = 0;
1370 ffs->eps_count = 0;
1371
1372 ffs->ev.count = 0;
1373
1374 ffs->state = FFS_READ_DESCRIPTORS;
1375 ffs->setup_state = FFS_NO_SETUP;
1376 ffs->flags = 0;
1377 }
1378
1379
1380 static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
1381 {
1382 struct usb_gadget_strings **lang;
1383 int first_id;
1384
1385 ENTER();
1386
1387 if (WARN_ON(ffs->state != FFS_ACTIVE
1388 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
1389 return -EBADFD;
1390
1391 first_id = usb_string_ids_n(cdev, ffs->strings_count);
1392 if (unlikely(first_id < 0))
1393 return first_id;
1394
1395 ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
1396 if (unlikely(!ffs->ep0req))
1397 return -ENOMEM;
1398 ffs->ep0req->complete = ffs_ep0_complete;
1399 ffs->ep0req->context = ffs;
1400
1402 for (lang = ffs->stringtabs; *lang; ++lang) {
1403 struct usb_string *str = (*lang)->strings;
1404 int id = first_id;
1405 for (; str->s; ++id, ++str)
1406 str->id = id;
1407 }
1408
1409 ffs->gadget = cdev->gadget;
1410 ffs_data_get(ffs);
1411 return 0;
1412 }
1413
1414 static void functionfs_unbind(struct ffs_data *ffs)
1415 {
1416 ENTER();
1417
1418 if (!WARN_ON(!ffs->gadget)) {
1419 usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
1420 ffs->ep0req = NULL;
1421 ffs->gadget = NULL;
1422 ffs_data_put(ffs);
1423 clear_bit(FFS_FL_BOUND, &ffs->flags);
1424 }
1425 }
1426
1427 static int ffs_epfiles_create(struct ffs_data *ffs)
1428 {
1429 struct ffs_epfile *epfile, *epfiles;
1430 unsigned i, count;
1431
1432 ENTER();
1433
1434 count = ffs->eps_count;
1435 epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
1436 if (!epfiles)
1437 return -ENOMEM;
1438
1439 epfile = epfiles;
1440 for (i = 1; i <= count; ++i, ++epfile) {
1441 epfile->ffs = ffs;
1442 mutex_init(&epfile->mutex);
1443 init_waitqueue_head(&epfile->wait);
1444 sprintf(epfile->name, "ep%u", i);
1445 if (!unlikely(ffs_sb_create_file(ffs->sb, epfile->name, epfile,
1446 &ffs_epfile_operations,
1447 &epfile->dentry))) {
1448 ffs_epfiles_destroy(epfiles, i - 1);
1449 return -ENOMEM;
1450 }
1451 }
1452
1453 ffs->epfiles = epfiles;
1454 return 0;
1455 }
1456
1457 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
1458 {
1459 struct ffs_epfile *epfile = epfiles;
1460
1461 ENTER();
1462
1463 for (; count; --count, ++epfile) {
1464 BUG_ON(mutex_is_locked(&epfile->mutex) ||
1465 waitqueue_active(&epfile->wait));
1466 if (epfile->dentry) {
1467 d_delete(epfile->dentry);
1468 dput(epfile->dentry);
1469 epfile->dentry = NULL;
1470 }
1471 }
1472
1473 kfree(epfiles);
1474 }
1475
1476 static int functionfs_bind_config(struct usb_composite_dev *cdev,
1477 struct usb_configuration *c,
1478 struct ffs_data *ffs)
1479 {
1480 struct ffs_function *func;
1481 int ret;
1482
1483 ENTER();
1484
1485 func = kzalloc(sizeof *func, GFP_KERNEL);
1486 if (unlikely(!func))
1487 return -ENOMEM;
1488
1489 func->function.name = "Function FS Gadget";
1490 func->function.strings = ffs->stringtabs;
1491
1492 func->function.bind = ffs_func_bind;
1493 func->function.unbind = ffs_func_unbind;
1494 func->function.set_alt = ffs_func_set_alt;
1495 func->function.disable = ffs_func_disable;
1496 func->function.setup = ffs_func_setup;
1497 func->function.suspend = ffs_func_suspend;
1498 func->function.resume = ffs_func_resume;
1499
1500 func->conf = c;
1501 func->gadget = cdev->gadget;
1502 func->ffs = ffs;
1503 ffs_data_get(ffs);
1504
1505 ret = usb_add_function(c, &func->function);
1506 if (unlikely(ret))
1507 ffs_func_free(func);
1508
1509 return ret;
1510 }
1511
1512 static void ffs_func_free(struct ffs_function *func)
1513 {
1514 struct ffs_ep *ep = func->eps;
1515 unsigned count = func->ffs->eps_count;
1516 unsigned long flags;
1517
1518 ENTER();
1519
1520 /* cleanup after autoconfig */
1521 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1522 do {
1523 if (ep->ep && ep->req)
1524 usb_ep_free_request(ep->ep, ep->req);
1525 ep->req = NULL;
1526 ++ep;
1527 } while (--count);
1528 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1529
1530 ffs_data_put(func->ffs);
1531
1532 kfree(func->eps);
1533 /*
1534 * eps and interfaces_nums are allocated in the same chunk so
1535 * only one free is required. Descriptors are also allocated
1536 * in the same chunk.
1537 */
1538
1539 kfree(func);
1540 }
1541
1542 static void ffs_func_eps_disable(struct ffs_function *func)
1543 {
1544 struct ffs_ep *ep = func->eps;
1545 struct ffs_epfile *epfile = func->ffs->epfiles;
1546 unsigned count = func->ffs->eps_count;
1547 unsigned long flags;
1548
1549 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1550 do {
1551 /* pending requests get nuked */
1552 if (likely(ep->ep))
1553 usb_ep_disable(ep->ep);
1554 epfile->ep = NULL;
1555
1556 ++ep;
1557 ++epfile;
1558 } while (--count);
1559 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1560 }
1561
1562 static int ffs_func_eps_enable(struct ffs_function *func)
1563 {
1564 struct ffs_data *ffs = func->ffs;
1565 struct ffs_ep *ep = func->eps;
1566 struct ffs_epfile *epfile = ffs->epfiles;
1567 unsigned count = ffs->eps_count;
1568 unsigned long flags;
1569 int ret = 0;
1570
1571 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1572 do {
1573 struct usb_endpoint_descriptor *ds;
1574 ds = ep->descs[ep->descs[1] ? 1 : 0];
1575
1576 ep->ep->driver_data = ep;
1577 ep->ep->desc = ds;
1578 ret = usb_ep_enable(ep->ep);
1579 if (likely(!ret)) {
1580 epfile->ep = ep;
1581 epfile->in = usb_endpoint_dir_in(ds);
1582 epfile->isoc = usb_endpoint_xfer_isoc(ds);
1583 } else {
1584 break;
1585 }
1586
1587 wake_up(&epfile->wait);
1588
1589 ++ep;
1590 ++epfile;
1591 } while (--count);
1592 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1593
1594 return ret;
1595 }
1596
1597
1598 /* Parsing and building descriptors and strings *****************************/
1599
1600 /*
1601 * This validates that the data pointed to by data is a valid USB
1602 * descriptor and records how many interfaces, endpoints and strings
1603 * are required by the given configuration. Returns the length of the
1604 * descriptor, or a negative error code if the data is invalid.
1605 */
1606
1607 enum ffs_entity_type {
1608 FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
1609 };
1610
1611 typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
1612 u8 *valuep,
1613 struct usb_descriptor_header *desc,
1614 void *priv);
1615
1616 static int __must_check ffs_do_desc(char *data, unsigned len,
1617 ffs_entity_callback entity, void *priv)
1618 {
1619 struct usb_descriptor_header *_ds = (void *)data;
1620 u8 length;
1621 int ret;
1622
1623 ENTER();
1624
1625 /* At least two bytes are required: length and type */
1626 if (len < 2) {
1627 pr_vdebug("descriptor too short\n");
1628 return -EINVAL;
1629 }
1630
1631 /* Do we have at least as many bytes as the descriptor takes? */
1632 length = _ds->bLength;
1633 if (len < length) {
1634 pr_vdebug("descriptor longer then available data\n");
1635 return -EINVAL;
1636 }
1637
1638 #define __entity_check_INTERFACE(val) 1
1639 #define __entity_check_STRING(val) (val)
1640 #define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
1641 #define __entity(type, val) do { \
1642 pr_vdebug("entity " #type "(%02x)\n", (val)); \
1643 if (unlikely(!__entity_check_ ##type(val))) { \
1644 pr_vdebug("invalid entity's value\n"); \
1645 return -EINVAL; \
1646 } \
1647 ret = entity(FFS_ ##type, &val, _ds, priv); \
1648 if (unlikely(ret < 0)) { \
1649 pr_debug("entity " #type "(%02x); ret = %d\n", \
1650 (val), ret); \
1651 return ret; \
1652 } \
1653 } while (0)
1654
1655 /* Parse descriptor depending on type. */
1656 switch (_ds->bDescriptorType) {
1657 case USB_DT_DEVICE:
1658 case USB_DT_CONFIG:
1659 case USB_DT_STRING:
1660 case USB_DT_DEVICE_QUALIFIER:
1661 /* function can't have any of those */
1662 pr_vdebug("descriptor reserved for gadget: %d\n",
1663 _ds->bDescriptorType);
1664 return -EINVAL;
1665
1666 case USB_DT_INTERFACE: {
1667 struct usb_interface_descriptor *ds = (void *)_ds;
1668 pr_vdebug("interface descriptor\n");
1669 if (length != sizeof *ds)
1670 goto inv_length;
1671
1672 __entity(INTERFACE, ds->bInterfaceNumber);
1673 if (ds->iInterface)
1674 __entity(STRING, ds->iInterface);
1675 }
1676 break;
1677
1678 case USB_DT_ENDPOINT: {
1679 struct usb_endpoint_descriptor *ds = (void *)_ds;
1680 pr_vdebug("endpoint descriptor\n");
1681 if (length != USB_DT_ENDPOINT_SIZE &&
1682 length != USB_DT_ENDPOINT_AUDIO_SIZE)
1683 goto inv_length;
1684 __entity(ENDPOINT, ds->bEndpointAddress);
1685 }
1686 break;
1687
1688 case HID_DT_HID:
1689 pr_vdebug("hid descriptor\n");
1690 if (length != sizeof(struct hid_descriptor))
1691 goto inv_length;
1692 break;
1693
1694 case USB_DT_OTG:
1695 if (length != sizeof(struct usb_otg_descriptor))
1696 goto inv_length;
1697 break;
1698
1699 case USB_DT_INTERFACE_ASSOCIATION: {
1700 struct usb_interface_assoc_descriptor *ds = (void *)_ds;
1701 pr_vdebug("interface association descriptor\n");
1702 if (length != sizeof *ds)
1703 goto inv_length;
1704 if (ds->iFunction)
1705 __entity(STRING, ds->iFunction);
1706 }
1707 break;
1708
1709 case USB_DT_OTHER_SPEED_CONFIG:
1710 case USB_DT_INTERFACE_POWER:
1711 case USB_DT_DEBUG:
1712 case USB_DT_SECURITY:
1713 case USB_DT_CS_RADIO_CONTROL:
1714 /* TODO */
1715 pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
1716 return -EINVAL;
1717
1718 default:
1719 /* We should never be here */
1720 pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
1721 return -EINVAL;
1722
1723 inv_length:
1724 pr_vdebug("invalid length: %d (descriptor %d)\n",
1725 _ds->bLength, _ds->bDescriptorType);
1726 return -EINVAL;
1727 }
1728
1729 #undef __entity
1730 #undef __entity_check_DESCRIPTOR
1731 #undef __entity_check_INTERFACE
1732 #undef __entity_check_STRING
1733 #undef __entity_check_ENDPOINT
1734
1735 return length;
1736 }
1737
1738 static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
1739 ffs_entity_callback entity, void *priv)
1740 {
1741 const unsigned _len = len;
1742 unsigned long num = 0;
1743
1744 ENTER();
1745
1746 for (;;) {
1747 int ret;
1748
1749 if (num == count)
1750 data = NULL;
1751
1752 /* Record "descriptor" entity */
1753 ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
1754 if (unlikely(ret < 0)) {
1755 pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
1756 num, ret);
1757 return ret;
1758 }
1759
1760 if (!data)
1761 return _len - len;
1762
1763 ret = ffs_do_desc(data, len, entity, priv);
1764 if (unlikely(ret < 0)) {
1765 pr_debug("%s returns %d\n", __func__, ret);
1766 return ret;
1767 }
1768
1769 len -= ret;
1770 data += ret;
1771 ++num;
1772 }
1773 }
1774
1775 static int __ffs_data_do_entity(enum ffs_entity_type type,
1776 u8 *valuep, struct usb_descriptor_header *desc,
1777 void *priv)
1778 {
1779 struct ffs_data *ffs = priv;
1780
1781 ENTER();
1782
1783 switch (type) {
1784 case FFS_DESCRIPTOR:
1785 break;
1786
1787 case FFS_INTERFACE:
1788 /*
1789 * Interfaces are indexed from zero so if we
1790 * encountered interface "n" then there are at least
1791 * "n+1" interfaces.
1792 */
1793 if (*valuep >= ffs->interfaces_count)
1794 ffs->interfaces_count = *valuep + 1;
1795 break;
1796
1797 case FFS_STRING:
1798 /*
1799 * Strings are indexed from 1 (0 is magic ;) reserved
1800 * for languages list or some such)
1801 */
1802 if (*valuep > ffs->strings_count)
1803 ffs->strings_count = *valuep;
1804 break;
1805
1806 case FFS_ENDPOINT:
1807 /* Endpoints are indexed from 1 as well. */
1808 if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count)
1809 ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK);
1810 break;
1811 }
1812
1813 return 0;
1814 }
1815
1816 static int __ffs_data_got_descs(struct ffs_data *ffs,
1817 char *const _data, size_t len)
1818 {
1819 unsigned fs_count, hs_count;
1820 int fs_len, ret = -EINVAL;
1821 char *data = _data;
1822
1823 ENTER();
1824
1825 if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_DESCRIPTORS_MAGIC ||
1826 get_unaligned_le32(data + 4) != len))
1827 goto error;
1828 fs_count = get_unaligned_le32(data + 8);
1829 hs_count = get_unaligned_le32(data + 12);
1830
1831 if (!fs_count && !hs_count)
1832 goto einval;
1833
1834 data += 16;
1835 len -= 16;
1836
1837 if (likely(fs_count)) {
1838 fs_len = ffs_do_descs(fs_count, data, len,
1839 __ffs_data_do_entity, ffs);
1840 if (unlikely(fs_len < 0)) {
1841 ret = fs_len;
1842 goto error;
1843 }
1844
1845 data += fs_len;
1846 len -= fs_len;
1847 } else {
1848 fs_len = 0;
1849 }
1850
1851 if (likely(hs_count)) {
1852 ret = ffs_do_descs(hs_count, data, len,
1853 __ffs_data_do_entity, ffs);
1854 if (unlikely(ret < 0))
1855 goto error;
1856 } else {
1857 ret = 0;
1858 }
1859
1860 if (unlikely(len != ret))
1861 goto einval;
1862
1863 ffs->raw_fs_descs_length = fs_len;
1864 ffs->raw_descs_length = fs_len + ret;
1865 ffs->raw_descs = _data;
1866 ffs->fs_descs_count = fs_count;
1867 ffs->hs_descs_count = hs_count;
1868
1869 return 0;
1870
1871 einval:
1872 ret = -EINVAL;
1873 error:
1874 kfree(_data);
1875 return ret;
1876 }
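/*
 * A hedged user-space sketch of the blob parsed above, matching the header
 * checks in __ffs_data_got_descs() (the struct names come from
 * <linux/usb/functionfs.h>; the interface/endpoint contents are example
 * values and are elided here):
 *
 *	static const struct {
 *		struct usb_functionfs_descs_head header;
 *		struct {
 *			struct usb_interface_descriptor intf;
 *			struct usb_endpoint_descriptor_no_audio sink;
 *			struct usb_endpoint_descriptor_no_audio source;
 *		} __attribute__((packed)) fs_descs, hs_descs;
 *	} __attribute__((packed)) descriptors = {
 *		.header = {
 *			.magic    = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC),
 *			.length   = htole32(sizeof descriptors),
 *			.fs_count = htole32(3),
 *			.hs_count = htole32(3),
 *		},
 *		// .fs_descs and .hs_descs hold the actual descriptors
 *	};
 *
 * See tools/usb/ffs-test.c in the kernel tree for a complete example.
 */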
1877
1878 static int __ffs_data_got_strings(struct ffs_data *ffs,
1879 char *const _data, size_t len)
1880 {
1881 u32 str_count, needed_count, lang_count;
1882 struct usb_gadget_strings **stringtabs, *t;
1883 struct usb_string *strings, *s;
1884 const char *data = _data;
1885
1886 ENTER();
1887
1888 if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
1889 get_unaligned_le32(data + 4) != len))
1890 goto error;
1891 str_count = get_unaligned_le32(data + 8);
1892 lang_count = get_unaligned_le32(data + 12);
1893
1894 /* if one is zero the other must be zero */
1895 if (unlikely(!str_count != !lang_count))
1896 goto error;
1897
1898 /* Do we have at least as many strings as descriptors need? */
1899 needed_count = ffs->strings_count;
1900 if (unlikely(str_count < needed_count))
1901 goto error;
1902
1903 /*
1904 * If we don't need any strings just return and free all
1905 * memory.
1906 */
1907 if (!needed_count) {
1908 kfree(_data);
1909 return 0;
1910 }
1911
1912 /* Allocate everything in one chunk so there's less maintenance. */
1913 {
1914 struct {
1915 struct usb_gadget_strings *stringtabs[lang_count + 1];
1916 struct usb_gadget_strings stringtab[lang_count];
1917 struct usb_string strings[lang_count*(needed_count+1)];
1918 } *d;
1919 unsigned i = 0;
1920
1921 d = kmalloc(sizeof *d, GFP_KERNEL);
1922 if (unlikely(!d)) {
1923 kfree(_data);
1924 return -ENOMEM;
1925 }
1926
1927 stringtabs = d->stringtabs;
1928 t = d->stringtab;
1929 i = lang_count;
1930 do {
1931 *stringtabs++ = t++;
1932 } while (--i);
1933 *stringtabs = NULL;
1934
1935 stringtabs = d->stringtabs;
1936 t = d->stringtab;
1937 s = d->strings;
1938 strings = s;
1939 }
1940
1941 /* For each language */
1942 data += 16;
1943 len -= 16;
1944
1945 do { /* lang_count > 0 so we can use do-while */
1946 unsigned needed = needed_count;
1947
1948 if (unlikely(len < 3))
1949 goto error_free;
1950 t->language = get_unaligned_le16(data);
1951 t->strings = s;
1952 ++t;
1953
1954 data += 2;
1955 len -= 2;
1956
1957 /* For each string */
1958 do { /* str_count > 0 so we can use do-while */
1959 size_t length = strnlen(data, len);
1960
1961 if (unlikely(length == len))
1962 goto error_free;
1963
1964 /*
1965 * User may provide more strings than we need;
1966 * if that's the case we simply ignore the
1967 * rest.
1968 */
1969 if (likely(needed)) {
1970 /*
1971 * s->id will be set while adding
1972 * function to configuration so for
1973 * now just leave garbage here.
1974 */
1975 s->s = data;
1976 --needed;
1977 ++s;
1978 }
1979
1980 data += length + 1;
1981 len -= length + 1;
1982 } while (--str_count);
1983
1984 s->id = 0; /* terminator */
1985 s->s = NULL;
1986 ++s;
1987
1988 } while (--lang_count);
1989
1990 /* Some garbage left? */
1991 if (unlikely(len))
1992 goto error_free;
1993
1994 /* Done! */
1995 ffs->stringtabs = stringtabs;
1996 ffs->raw_strings = _data;
1997
1998 return 0;
1999
2000 error_free:
2001 kfree(stringtabs);
2002 error:
2003 kfree(_data);
2004 return -EINVAL;
2005 }
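/*
 * A hedged user-space sketch of the strings blob parsed above, matching
 * the checks in __ffs_data_got_strings() (the language code and the string
 * are example values):
 *
 *	static const struct {
 *		struct usb_functionfs_strings_head header;
 *		struct {
 *			__le16 code;
 *			const char str1[sizeof "My Function"];
 *		} __attribute__((packed)) lang0;
 *	} __attribute__((packed)) strings = {
 *		.header = {
 *			.magic      = htole32(FUNCTIONFS_STRINGS_MAGIC),
 *			.length     = htole32(sizeof strings),
 *			.str_count  = htole32(1),
 *			.lang_count = htole32(1),
 *		},
 *		.lang0 = {
 *			.code = htole16(0x0409),	// en-US
 *			.str1 = "My Function",
 *		},
 *	};
 */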
2006
2007
2008 /* Events handling and management *******************************************/
2009
2010 static void __ffs_event_add(struct ffs_data *ffs,
2011 enum usb_functionfs_event_type type)
2012 {
2013 enum usb_functionfs_event_type rem_type1, rem_type2 = type;
2014 int neg = 0;
2015
2016 /*
2017 * Abort any unhandled setup
2018 *
2019 * We do not need to worry about some cmpxchg() changing the
2020 * value of ffs->setup_state without holding the lock, because
2021 * when the state is FFS_SETUP_PENDING the cmpxchg() calls in
2022 * several places in the source do nothing.
2023 */
2024 if (ffs->setup_state == FFS_SETUP_PENDING)
2025 ffs->setup_state = FFS_SETUP_CANCELED;
2026
2027 switch (type) {
2028 case FUNCTIONFS_RESUME:
2029 rem_type2 = FUNCTIONFS_SUSPEND;
2030 /* FALL THROUGH */
2031 case FUNCTIONFS_SUSPEND:
2032 case FUNCTIONFS_SETUP:
2033 rem_type1 = type;
2034 /* Discard all similar events */
2035 break;
2036
2037 case FUNCTIONFS_BIND:
2038 case FUNCTIONFS_UNBIND:
2039 case FUNCTIONFS_DISABLE:
2040 case FUNCTIONFS_ENABLE:
2041 /* Discard everything other than power management. */
2042 rem_type1 = FUNCTIONFS_SUSPEND;
2043 rem_type2 = FUNCTIONFS_RESUME;
2044 neg = 1;
2045 break;
2046
2047 default:
2048 BUG();
2049 }
2050
2051 {
2052 u8 *ev = ffs->ev.types, *out = ev;
2053 unsigned n = ffs->ev.count;
2054 for (; n; --n, ++ev)
2055 if ((*ev == rem_type1 || *ev == rem_type2) == neg)
2056 *out++ = *ev;
2057 else
2058 pr_vdebug("purging event %d\n", *ev);
2059 ffs->ev.count = out - ffs->ev.types;
2060 }
2061
2062 pr_vdebug("adding event %d\n", type);
2063 ffs->ev.types[ffs->ev.count++] = type;
2064 wake_up_locked(&ffs->ev.waitq);
2065 }
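/*
 * A worked example of the purging rules above: if the queue already holds
 * { SUSPEND, SETUP } and FUNCTIONFS_ENABLE is added, SETUP is purged (and
 * a pending setup, if any, is marked FFS_SETUP_CANCELED) while SUSPEND is
 * kept, leaving { SUSPEND, ENABLE }. Conversely, adding FUNCTIONFS_RESUME
 * purges any queued SUSPEND and RESUME events before queueing the new one.
 */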
2066
2067 static void ffs_event_add(struct ffs_data *ffs,
2068 enum usb_functionfs_event_type type)
2069 {
2070 unsigned long flags;
2071 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2072 __ffs_event_add(ffs, type);
2073 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2074 }
2075
2076
2077 /* Bind/unbind USB function hooks *******************************************/
2078
2079 static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
2080 struct usb_descriptor_header *desc,
2081 void *priv)
2082 {
2083 struct usb_endpoint_descriptor *ds = (void *)desc;
2084 struct ffs_function *func = priv;
2085 struct ffs_ep *ffs_ep;
2086
2087 /*
2088 * If hs_descriptors is not NULL then we are reading hs
2089 * descriptors now
2090 */
2091 const int isHS = func->function.hs_descriptors != NULL;
2092 unsigned idx;
2093
2094 if (type != FFS_DESCRIPTOR)
2095 return 0;
2096
2097 if (isHS)
2098 func->function.hs_descriptors[(long)valuep] = desc;
2099 else
2100 func->function.fs_descriptors[(long)valuep] = desc;
2101
2102 if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
2103 return 0;
2104
2105 idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
2106 ffs_ep = func->eps + idx;
2107
2108 if (unlikely(ffs_ep->descs[isHS])) {
2109 pr_vdebug("two %sspeed descriptors for EP %d\n",
2110 isHS ? "high" : "full",
2111 ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2112 return -EINVAL;
2113 }
2114 ffs_ep->descs[isHS] = ds;
2115
2116 ffs_dump_mem(": Original ep desc", ds, ds->bLength);
2117 if (ffs_ep->ep) {
2118 ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
2119 if (!ds->wMaxPacketSize)
2120 ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
2121 } else {
2122 struct usb_request *req;
2123 struct usb_ep *ep;
2124
2125 pr_vdebug("autoconfig\n");
2126 ep = usb_ep_autoconfig(func->gadget, ds);
2127 if (unlikely(!ep))
2128 return -ENOTSUPP;
2129 ep->driver_data = func->eps + idx;
2130
2131 req = usb_ep_alloc_request(ep, GFP_KERNEL);
2132 if (unlikely(!req))
2133 return -ENOMEM;
2134
2135 ffs_ep->ep = ep;
2136 ffs_ep->req = req;
2137 func->eps_revmap[ds->bEndpointAddress &
2138 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
2139 }
2140 ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
2141
2142 return 0;
2143 }
2144
2145 static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
2146 struct usb_descriptor_header *desc,
2147 void *priv)
2148 {
2149 struct ffs_function *func = priv;
2150 unsigned idx;
2151 u8 newValue;
2152
2153 switch (type) {
2154 default:
2155 case FFS_DESCRIPTOR:
2156 /* Handled in previous pass by __ffs_func_bind_do_descs() */
2157 return 0;
2158
2159 case FFS_INTERFACE:
2160 idx = *valuep;
2161 if (func->interfaces_nums[idx] < 0) {
2162 int id = usb_interface_id(func->conf, &func->function);
2163 if (unlikely(id < 0))
2164 return id;
2165 func->interfaces_nums[idx] = id;
2166 }
2167 newValue = func->interfaces_nums[idx];
2168 break;
2169
2170 case FFS_STRING:
2171 /* Strings' IDs are allocated when ffs_data is bound to cdev */
2172 newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
2173 break;
2174
2175 case FFS_ENDPOINT:
2176 /*
2177 * USB_DT_ENDPOINT are handled in
2178 * __ffs_func_bind_do_descs().
2179 */
2180 if (desc->bDescriptorType == USB_DT_ENDPOINT)
2181 return 0;
2182
2183 idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
2184 if (unlikely(!func->eps[idx].ep))
2185 return -EINVAL;
2186
2187 {
2188 struct usb_endpoint_descriptor **descs;
2189 descs = func->eps[idx].descs;
2190 newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
2191 }
2192 break;
2193 }
2194
2195 pr_vdebug("%02x -> %02x\n", *valuep, newValue);
2196 *valuep = newValue;
2197 return 0;
2198 }
2199
2200 static int ffs_func_bind(struct usb_configuration *c,
2201 struct usb_function *f)
2202 {
2203 struct ffs_function *func = ffs_func_from_usb(f);
2204 struct ffs_data *ffs = func->ffs;
2205
2206 const int full = !!func->ffs->fs_descs_count;
2207 const int high = gadget_is_dualspeed(func->gadget) &&
2208 func->ffs->hs_descs_count;
2209
2210 int ret;
2211
2212 /* Make it a single chunk, less management later on */
2213 struct {
2214 struct ffs_ep eps[ffs->eps_count];
2215 struct usb_descriptor_header
2216 *fs_descs[full ? ffs->fs_descs_count + 1 : 0];
2217 struct usb_descriptor_header
2218 *hs_descs[high ? ffs->hs_descs_count + 1 : 0];
2219 short inums[ffs->interfaces_count];
2220 char raw_descs[high ? ffs->raw_descs_length
2221 : ffs->raw_fs_descs_length];
2222 } *data;
2223
2224 ENTER();
2225
2226 /* Only high-speed descriptors provided, but high speed is not supported by the gadget? */
2227 if (unlikely(!(full | high)))
2228 return -ENOTSUPP;
2229
2230 /* Allocate */
2231 data = kmalloc(sizeof *data, GFP_KERNEL);
2232 if (unlikely(!data))
2233 return -ENOMEM;
2234
2235 /* Zero */
2236 memset(data->eps, 0, sizeof data->eps);
2237 memcpy(data->raw_descs, ffs->raw_descs + 16, sizeof data->raw_descs);
2238 memset(data->inums, 0xff, sizeof data->inums);
2239 for (ret = ffs->eps_count; ret; --ret)
2240 data->eps[ret - 1].num = -1;
2241
2242 /* Save pointers */
2243 func->eps = data->eps;
2244 func->interfaces_nums = data->inums;
2245
2246 /*
2247 * Go through all the endpoint descriptors and allocate
2248 * endpoints first, so that later we can rewrite the endpoint
2249 * numbers without worrying that it may be described later on.
2250 */
2251 if (likely(full)) {
2252 func->function.fs_descriptors = data->fs_descs;
2253 ret = ffs_do_descs(ffs->fs_descs_count,
2254 data->raw_descs,
2255 sizeof data->raw_descs,
2256 __ffs_func_bind_do_descs, func);
2257 if (unlikely(ret < 0))
2258 goto error;
2259 } else {
2260 ret = 0;
2261 }
2262
2263 if (likely(high)) {
2264 func->function.hs_descriptors = data->hs_descs;
2265 ret = ffs_do_descs(ffs->hs_descs_count,
2266 data->raw_descs + ret,
2267 (sizeof data->raw_descs) - ret,
2268 __ffs_func_bind_do_descs, func);
2269 }
2270
2271 /*
2272 * Now handle interface numbers allocation and interface and
2273 * endpoint numbers rewriting. We can do that in one go
2274 * now.
2275 */
2276 ret = ffs_do_descs(ffs->fs_descs_count +
2277 (high ? ffs->hs_descs_count : 0),
2278 data->raw_descs, sizeof data->raw_descs,
2279 __ffs_func_bind_do_nums, func);
2280 if (unlikely(ret < 0))
2281 goto error;
2282
2283 /* And we're done */
2284 ffs_event_add(ffs, FUNCTIONFS_BIND);
2285 return 0;
2286
2287 error:
2288 /* XXX Do we need to release all claimed endpoints here? */
2289 return ret;
2290 }
2291
2292
2293 /* Other USB function hooks *************************************************/
2294
2295 static void ffs_func_unbind(struct usb_configuration *c,
2296 struct usb_function *f)
2297 {
2298 struct ffs_function *func = ffs_func_from_usb(f);
2299 struct ffs_data *ffs = func->ffs;
2300
2301 ENTER();
2302
2303 if (ffs->func == func) {
2304 ffs_func_eps_disable(func);
2305 ffs->func = NULL;
2306 }
2307
2308 ffs_event_add(ffs, FUNCTIONFS_UNBIND);
2309
2310 ffs_func_free(func);
2311 }
2312
2313 static int ffs_func_set_alt(struct usb_function *f,
2314 unsigned interface, unsigned alt)
2315 {
2316 struct ffs_function *func = ffs_func_from_usb(f);
2317 struct ffs_data *ffs = func->ffs;
2318 int ret = 0, intf;
2319
2320 if (alt != (unsigned)-1) {
2321 intf = ffs_func_revmap_intf(func, interface);
2322 if (unlikely(intf < 0))
2323 return intf;
2324 }
2325
2326 if (ffs->func)
2327 ffs_func_eps_disable(ffs->func);
2328
2329 if (ffs->state != FFS_ACTIVE)
2330 return -ENODEV;
2331
2332 if (alt == (unsigned)-1) {
2333 ffs->func = NULL;
2334 ffs_event_add(ffs, FUNCTIONFS_DISABLE);
2335 return 0;
2336 }
2337
2338 ffs->func = func;
2339 ret = ffs_func_eps_enable(func);
2340 if (likely(ret >= 0))
2341 ffs_event_add(ffs, FUNCTIONFS_ENABLE);
2342 return ret;
2343 }
2344
2345 static void ffs_func_disable(struct usb_function *f)
2346 {
2347 ffs_func_set_alt(f, 0, (unsigned)-1);
2348 }
2349
2350 static int ffs_func_setup(struct usb_function *f,
2351 const struct usb_ctrlrequest *creq)
2352 {
2353 struct ffs_function *func = ffs_func_from_usb(f);
2354 struct ffs_data *ffs = func->ffs;
2355 unsigned long flags;
2356 int ret;
2357
2358 ENTER();
2359
2360 pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
2361 pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
2362 pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
2363 pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
2364 pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));
2365
2366 /*
2367 * Most requests directed to an interface go through here
2368 * (notable exceptions are set/get interface), so we need to
2369 * handle them. All others are either handled by composite or
2370 * passed to usb_configuration->setup() (if one is set). In any
2371 * case, we will handle requests directed to an endpoint here as
2372 * well (as it's straightforward), but what should we do with any
2373 * other request?
2374 */
2375 if (ffs->state != FFS_ACTIVE)
2376 return -ENODEV;
2377
2378 switch (creq->bRequestType & USB_RECIP_MASK) {
2379 case USB_RECIP_INTERFACE:
2380 ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
2381 if (unlikely(ret < 0))
2382 return ret;
2383 break;
2384
2385 case USB_RECIP_ENDPOINT:
2386 ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
2387 if (unlikely(ret < 0))
2388 return ret;
2389 break;
2390
2391 default:
2392 return -EOPNOTSUPP;
2393 }
2394
2395 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2396 ffs->ev.setup = *creq;
2397 ffs->ev.setup.wIndex = cpu_to_le16(ret);
2398 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
2399 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2400
2401 return 0;
2402 }
2403
2404 static void ffs_func_suspend(struct usb_function *f)
2405 {
2406 ENTER();
2407 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
2408 }
2409
2410 static void ffs_func_resume(struct usb_function *f)
2411 {
2412 ENTER();
2413 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
2414 }
2415
2416
2417 /* Endpoint and interface numbers reverse mapping ***************************/
2418
2419 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
2420 {
2421 num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
2422 return num ? num : -EDOM;
2423 }
2424
2425 static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
2426 {
2427 short *nums = func->interfaces_nums;
2428 unsigned count = func->ffs->interfaces_count;
2429
2430 for (; count; --count, ++nums) {
2431 if (*nums >= 0 && *nums == intf)
2432 return nums - func->interfaces_nums;
2433 }
2434
2435 return -EDOM;
2436 }
2437
2438
2439 /* Misc helper functions ****************************************************/
2440
2441 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
2442 {
2443 return nonblock
2444 ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
2445 : mutex_lock_interruptible(mutex);
2446 }
2447
2448 static char *ffs_prepare_buffer(const char __user *buf, size_t len)
2449 {
2450 char *data;
2451
2452 if (unlikely(!len))
2453 return NULL;
2454
2455 data = kmalloc(len, GFP_KERNEL);
2456 if (unlikely(!data))
2457 return ERR_PTR(-ENOMEM);
2458
2459 if (unlikely(__copy_from_user(data, buf, len))) {
2460 kfree(data);
2461 return ERR_PTR(-EFAULT);
2462 }
2463
2464 pr_vdebug("Buffer from user space:\n");
2465 ffs_dump_mem("", data, len);
2466
2467 return data;
2468 }