1 /*
2 * f_fs.c -- user mode file system API for USB composite function controllers
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 * Author: Michal Nazarewicz <mina86@mina86.com>
6 *
7 * Based on inode.c (GadgetFS) which was:
8 * Copyright (C) 2003-2004 David Brownell
9 * Copyright (C) 2003 Agilent Technologies
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17
18 /* #define DEBUG */
19 /* #define VERBOSE_DEBUG */
20
21 #include <linux/blkdev.h>
22 #include <linux/pagemap.h>
23 #include <linux/export.h>
24 #include <linux/hid.h>
25 #include <linux/module.h>
26 #include <linux/uio.h>
27 #include <asm/unaligned.h>
28
29 #include <linux/usb/composite.h>
30 #include <linux/usb/functionfs.h>
31
32 #include <linux/aio.h>
33 #include <linux/mmu_context.h>
34 #include <linux/poll.h>
35 #include <linux/eventfd.h>
36
37 #include "u_fs.h"
38 #include "u_f.h"
39 #include "u_os_desc.h"
40 #include "configfs.h"
41
42 #define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by an honest dice roll ;) */
43
44 /* Reference counter handling */
45 static void ffs_data_get(struct ffs_data *ffs);
46 static void ffs_data_put(struct ffs_data *ffs);
47 /* Creates new ffs_data object. */
48 static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
49
50 /* Opened counter handling. */
51 static void ffs_data_opened(struct ffs_data *ffs);
52 static void ffs_data_closed(struct ffs_data *ffs);
53
54 /* Called with ffs->mutex held; take over ownership of data. */
55 static int __must_check
56 __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
57 static int __must_check
58 __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
59
60
61 /* The function structure ***************************************************/
62
63 struct ffs_ep;
64
65 struct ffs_function {
66 struct usb_configuration *conf;
67 struct usb_gadget *gadget;
68 struct ffs_data *ffs;
69
70 struct ffs_ep *eps;
71 u8 eps_revmap[16];
72 short *interfaces_nums;
73
74 struct usb_function function;
75 };
76
77
78 static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
79 {
80 return container_of(f, struct ffs_function, function);
81 }
82
83
84 static inline enum ffs_setup_state
85 ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
86 {
87 return (enum ffs_setup_state)
88 cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
89 }
90
91
92 static void ffs_func_eps_disable(struct ffs_function *func);
93 static int __must_check ffs_func_eps_enable(struct ffs_function *func);
94
95 static int ffs_func_bind(struct usb_configuration *,
96 struct usb_function *);
97 static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
98 static void ffs_func_disable(struct usb_function *);
99 static int ffs_func_setup(struct usb_function *,
100 const struct usb_ctrlrequest *);
101 static void ffs_func_suspend(struct usb_function *);
102 static void ffs_func_resume(struct usb_function *);
103
104
105 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
106 static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
107
108
109 /* The endpoints structures *************************************************/
110
111 struct ffs_ep {
112 struct usb_ep *ep; /* P: ffs->eps_lock */
113 struct usb_request *req; /* P: epfile->mutex */
114
115 /* [0]: full speed, [1]: high speed, [2]: super speed */
116 struct usb_endpoint_descriptor *descs[3];
117
118 u8 num;
119
120 int status; /* P: epfile->mutex */
121 };
122
123 struct ffs_epfile {
124 /* Protects ep->ep and ep->req. */
125 struct mutex mutex;
126 wait_queue_head_t wait;
127
128 struct ffs_data *ffs;
129 struct ffs_ep *ep; /* P: ffs->eps_lock */
130
131 struct dentry *dentry;
132
133 char name[5];
134
135 unsigned char in; /* P: ffs->eps_lock */
136 unsigned char isoc; /* P: ffs->eps_lock */
137
138 unsigned char _pad;
139 };
140
141 /* ffs_io_data structure ***************************************************/
142
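/*
 * Per-I/O state for an endpoint file.  For synchronous transfers this
 * structure lives on the caller's stack; for AIO it is kmalloc()ed in
 * ffs_epfile_read_iter()/ffs_epfile_write_iter() and released once the
 * transfer has completed (or failed to be queued).  For AIO reads the
 * iov_iter is duplicated with dup_iter() and 'to_free' keeps the duplicate
 * so that it can be freed from ffs_user_copy_worker().
 */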
143 struct ffs_io_data {
144 bool aio;
145 bool read;
146
147 struct kiocb *kiocb;
148 struct iov_iter data;
149 const void *to_free;
150 char *buf;
151
152 struct mm_struct *mm;
153 struct work_struct work;
154
155 struct usb_ep *ep;
156 struct usb_request *req;
157
158 struct ffs_data *ffs;
159 };
160
161 struct ffs_desc_helper {
162 struct ffs_data *ffs;
163 unsigned interfaces_count;
164 unsigned eps_count;
165 };
166
167 static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
168 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
169
170 static struct dentry *
171 ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
172 const struct file_operations *fops);
173
174 /* Devices management *******************************************************/
175
176 DEFINE_MUTEX(ffs_lock);
177 EXPORT_SYMBOL_GPL(ffs_lock);
178
179 static struct ffs_dev *_ffs_find_dev(const char *name);
180 static struct ffs_dev *_ffs_alloc_dev(void);
181 static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
182 static void _ffs_free_dev(struct ffs_dev *dev);
183 static void *ffs_acquire_dev(const char *dev_name);
184 static void ffs_release_dev(struct ffs_data *ffs_data);
185 static int ffs_ready(struct ffs_data *ffs);
186 static void ffs_closed(struct ffs_data *ffs);
187
188 /* Misc helper functions ****************************************************/
189
190 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
191 __attribute__((warn_unused_result, nonnull));
192 static char *ffs_prepare_buffer(const char __user *buf, size_t len)
193 __attribute__((warn_unused_result, nonnull));
194
195
196 /* Control file aka ep0 *****************************************************/
197
198 static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
199 {
200 struct ffs_data *ffs = req->context;
201
202 complete_all(&ffs->ep0req_completion);
203 }
204
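/*
 * Called with ffs->ev.waitq.lock held.  The lock is dropped before the
 * request is queued on ep0 and the function then sleeps until the request
 * completes; if the wait is interrupted the request is dequeued and -EINTR
 * is returned.
 */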
205 static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
206 {
207 struct usb_request *req = ffs->ep0req;
208 int ret;
209
210 req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
211
212 spin_unlock_irq(&ffs->ev.waitq.lock);
213
214 req->buf = data;
215 req->length = len;
216
217	/*
218	 * The UDC layer requires a buffer to be provided even for a ZLP, but
219	 * it should not use it at all.  Provide a poisoned pointer to catch
220	 * a possible bug in the driver.
221	 */
222 if (req->buf == NULL)
223 req->buf = (void *)0xDEADBABE;
224
225 reinit_completion(&ffs->ep0req_completion);
226
227 ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
228 if (unlikely(ret < 0))
229 return ret;
230
231 ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
232 if (unlikely(ret)) {
233 usb_ep_dequeue(ffs->gadget->ep0, req);
234 return -EINTR;
235 }
236
237 ffs->setup_state = FFS_NO_SETUP;
238 return req->status ? req->status : req->actual;
239 }
240
241 static int __ffs_ep0_stall(struct ffs_data *ffs)
242 {
243 if (ffs->ev.can_stall) {
244 pr_vdebug("ep0 stall\n");
245 usb_ep_set_halt(ffs->gadget->ep0);
246 ffs->setup_state = FFS_NO_SETUP;
247 return -EL2HLT;
248 } else {
249 pr_debug("bogus ep0 stall!\n");
250 return -ESRCH;
251 }
252 }
253
254 static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
255 size_t len, loff_t *ptr)
256 {
257 struct ffs_data *ffs = file->private_data;
258 ssize_t ret;
259 char *data;
260
261 ENTER();
262
263 /* Fast check if setup was canceled */
264 if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
265 return -EIDRM;
266
267 /* Acquire mutex */
268 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
269 if (unlikely(ret < 0))
270 return ret;
271
272 /* Check state */
273 switch (ffs->state) {
274 case FFS_READ_DESCRIPTORS:
275 case FFS_READ_STRINGS:
276 /* Copy data */
277 if (unlikely(len < 16)) {
278 ret = -EINVAL;
279 break;
280 }
281
282 data = ffs_prepare_buffer(buf, len);
283 if (IS_ERR(data)) {
284 ret = PTR_ERR(data);
285 break;
286 }
287
288 /* Handle data */
289 if (ffs->state == FFS_READ_DESCRIPTORS) {
290 pr_info("read descriptors\n");
291 ret = __ffs_data_got_descs(ffs, data, len);
292 if (unlikely(ret < 0))
293 break;
294
295 ffs->state = FFS_READ_STRINGS;
296 ret = len;
297 } else {
298 pr_info("read strings\n");
299 ret = __ffs_data_got_strings(ffs, data, len);
300 if (unlikely(ret < 0))
301 break;
302
303 ret = ffs_epfiles_create(ffs);
304 if (unlikely(ret)) {
305 ffs->state = FFS_CLOSING;
306 break;
307 }
308
309 ffs->state = FFS_ACTIVE;
310 mutex_unlock(&ffs->mutex);
311
312 ret = ffs_ready(ffs);
313 if (unlikely(ret < 0)) {
314 ffs->state = FFS_CLOSING;
315 return ret;
316 }
317
318 set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
319 return len;
320 }
321 break;
322
323 case FFS_ACTIVE:
324 data = NULL;
325		/*
326		 * We're called from user space, so we can use _irq
327		 * rather than _irqsave.
328		 */
329 spin_lock_irq(&ffs->ev.waitq.lock);
330 switch (ffs_setup_state_clear_cancelled(ffs)) {
331 case FFS_SETUP_CANCELLED:
332 ret = -EIDRM;
333 goto done_spin;
334
335 case FFS_NO_SETUP:
336 ret = -ESRCH;
337 goto done_spin;
338
339 case FFS_SETUP_PENDING:
340 break;
341 }
342
343 /* FFS_SETUP_PENDING */
344 if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
345 spin_unlock_irq(&ffs->ev.waitq.lock);
346 ret = __ffs_ep0_stall(ffs);
347 break;
348 }
349
350 /* FFS_SETUP_PENDING and not stall */
351 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
352
353 spin_unlock_irq(&ffs->ev.waitq.lock);
354
355 data = ffs_prepare_buffer(buf, len);
356 if (IS_ERR(data)) {
357 ret = PTR_ERR(data);
358 break;
359 }
360
361 spin_lock_irq(&ffs->ev.waitq.lock);
362
363 /*
364 * We are guaranteed to be still in FFS_ACTIVE state
365 * but the state of setup could have changed from
366 * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
367 * to check for that. If that happened we copied data
368 * from user space in vain but it's unlikely.
369 *
370 * For sure we are not in FFS_NO_SETUP since this is
371 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
372 * transition can be performed and it's protected by
373 * mutex.
374 */
375 if (ffs_setup_state_clear_cancelled(ffs) ==
376 FFS_SETUP_CANCELLED) {
377 ret = -EIDRM;
378 done_spin:
379 spin_unlock_irq(&ffs->ev.waitq.lock);
380 } else {
381 /* unlocks spinlock */
382 ret = __ffs_ep0_queue_wait(ffs, data, len);
383 }
384 kfree(data);
385 break;
386
387 default:
388 ret = -EBADFD;
389 break;
390 }
391
392 mutex_unlock(&ffs->mutex);
393 return ret;
394 }
395
396 /* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
397 static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
398 size_t n)
399 {
400	/*
401	 * n cannot be bigger than ffs->ev.count, which cannot be bigger than
402	 * the size of the ffs->ev.types array (which is four), so that's how
403	 * much space we reserve.
404	 */
405 struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
406 const size_t size = n * sizeof *events;
407 unsigned i = 0;
408
409 memset(events, 0, size);
410
411 do {
412 events[i].type = ffs->ev.types[i];
413 if (events[i].type == FUNCTIONFS_SETUP) {
414 events[i].u.setup = ffs->ev.setup;
415 ffs->setup_state = FFS_SETUP_PENDING;
416 }
417 } while (++i < n);
418
419 ffs->ev.count -= n;
420 if (ffs->ev.count)
421 memmove(ffs->ev.types, ffs->ev.types + n,
422 ffs->ev.count * sizeof *ffs->ev.types);
423
424 spin_unlock_irq(&ffs->ev.waitq.lock);
425 mutex_unlock(&ffs->mutex);
426
427 return unlikely(__copy_to_user(buf, events, size)) ? -EFAULT : size;
428 }
429
430 static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
431 size_t len, loff_t *ptr)
432 {
433 struct ffs_data *ffs = file->private_data;
434 char *data = NULL;
435 size_t n;
436 int ret;
437
438 ENTER();
439
440 /* Fast check if setup was canceled */
441 if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
442 return -EIDRM;
443
444 /* Acquire mutex */
445 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
446 if (unlikely(ret < 0))
447 return ret;
448
449 /* Check state */
450 if (ffs->state != FFS_ACTIVE) {
451 ret = -EBADFD;
452 goto done_mutex;
453 }
454
455	/*
456	 * We're called from user space, so we can use _irq rather than
457	 * _irqsave.
458	 */
459 spin_lock_irq(&ffs->ev.waitq.lock);
460
461 switch (ffs_setup_state_clear_cancelled(ffs)) {
462 case FFS_SETUP_CANCELLED:
463 ret = -EIDRM;
464 break;
465
466 case FFS_NO_SETUP:
467 n = len / sizeof(struct usb_functionfs_event);
468 if (unlikely(!n)) {
469 ret = -EINVAL;
470 break;
471 }
472
473 if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
474 ret = -EAGAIN;
475 break;
476 }
477
478 if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
479 ffs->ev.count)) {
480 ret = -EINTR;
481 break;
482 }
483
484 return __ffs_ep0_read_events(ffs, buf,
485 min(n, (size_t)ffs->ev.count));
486
487 case FFS_SETUP_PENDING:
488 if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
489 spin_unlock_irq(&ffs->ev.waitq.lock);
490 ret = __ffs_ep0_stall(ffs);
491 goto done_mutex;
492 }
493
494 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
495
496 spin_unlock_irq(&ffs->ev.waitq.lock);
497
498 if (likely(len)) {
499 data = kmalloc(len, GFP_KERNEL);
500 if (unlikely(!data)) {
501 ret = -ENOMEM;
502 goto done_mutex;
503 }
504 }
505
506 spin_lock_irq(&ffs->ev.waitq.lock);
507
508 /* See ffs_ep0_write() */
509 if (ffs_setup_state_clear_cancelled(ffs) ==
510 FFS_SETUP_CANCELLED) {
511 ret = -EIDRM;
512 break;
513 }
514
515 /* unlocks spinlock */
516 ret = __ffs_ep0_queue_wait(ffs, data, len);
517 if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
518 ret = -EFAULT;
519 goto done_mutex;
520
521 default:
522 ret = -EBADFD;
523 break;
524 }
525
526 spin_unlock_irq(&ffs->ev.waitq.lock);
527 done_mutex:
528 mutex_unlock(&ffs->mutex);
529 kfree(data);
530 return ret;
531 }
532
533 static int ffs_ep0_open(struct inode *inode, struct file *file)
534 {
535 struct ffs_data *ffs = inode->i_private;
536
537 ENTER();
538
539 if (unlikely(ffs->state == FFS_CLOSING))
540 return -EBUSY;
541
542 file->private_data = ffs;
543 ffs_data_opened(ffs);
544
545 return 0;
546 }
547
548 static int ffs_ep0_release(struct inode *inode, struct file *file)
549 {
550 struct ffs_data *ffs = file->private_data;
551
552 ENTER();
553
554 ffs_data_closed(ffs);
555
556 return 0;
557 }
558
559 static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
560 {
561 struct ffs_data *ffs = file->private_data;
562 struct usb_gadget *gadget = ffs->gadget;
563 long ret;
564
565 ENTER();
566
567 if (code == FUNCTIONFS_INTERFACE_REVMAP) {
568 struct ffs_function *func = ffs->func;
569 ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
570 } else if (gadget && gadget->ops->ioctl) {
571 ret = gadget->ops->ioctl(gadget, code, value);
572 } else {
573 ret = -ENOTTY;
574 }
575
576 return ret;
577 }
578
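/*
 * ep0 poll semantics: POLLWRNORM is always set; POLLOUT is added while
 * descriptors or strings are still expected and when a setup request is
 * pending (or has just been cancelled); POLLIN is added when there are
 * queued events to read or a setup request is pending.
 */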
579 static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
580 {
581 struct ffs_data *ffs = file->private_data;
582 unsigned int mask = POLLWRNORM;
583 int ret;
584
585 poll_wait(file, &ffs->ev.waitq, wait);
586
587 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
588 if (unlikely(ret < 0))
589 return mask;
590
591 switch (ffs->state) {
592 case FFS_READ_DESCRIPTORS:
593 case FFS_READ_STRINGS:
594 mask |= POLLOUT;
595 break;
596
597 case FFS_ACTIVE:
598 switch (ffs->setup_state) {
599 case FFS_NO_SETUP:
600 if (ffs->ev.count)
601 mask |= POLLIN;
602 break;
603
604 case FFS_SETUP_PENDING:
605 case FFS_SETUP_CANCELLED:
606 mask |= (POLLIN | POLLOUT);
607 break;
608 }
609 case FFS_CLOSING:
610 break;
611 case FFS_DEACTIVATED:
612 break;
613 }
614
615 mutex_unlock(&ffs->mutex);
616
617 return mask;
618 }
619
620 static const struct file_operations ffs_ep0_operations = {
621 .llseek = no_llseek,
622
623 .open = ffs_ep0_open,
624 .write = ffs_ep0_write,
625 .read = ffs_ep0_read,
626 .release = ffs_ep0_release,
627 .unlocked_ioctl = ffs_ep0_ioctl,
628 .poll = ffs_ep0_poll,
629 };
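/*
 * Typical user space interaction with the ep0 file (illustrative sketch
 * only; the mount point below is an arbitrary example):
 *
 *	int fd = open("/dev/usb-ffs/func/ep0", O_RDWR);
 *	write(fd, descs_blob, descs_len);     // consumed in FFS_READ_DESCRIPTORS
 *	write(fd, strings_blob, strings_len); // FFS_READ_STRINGS -> FFS_ACTIVE
 *	for (;;) {
 *		struct usb_functionfs_event event;
 *		read(fd, &event, sizeof(event)); // BIND/ENABLE/SETUP/... events
 *	}
 */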
630
631
632 /* "Normal" endpoints operations ********************************************/
633
634 static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
635 {
636 ENTER();
637 if (likely(req->context)) {
638 struct ffs_ep *ep = _ep->driver_data;
639 ep->status = req->status ? req->status : req->actual;
640 complete(req->context);
641 }
642 }
643
644 static void ffs_user_copy_worker(struct work_struct *work)
645 {
646 struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
647 work);
648 int ret = io_data->req->status ? io_data->req->status :
649 io_data->req->actual;
650
651 if (io_data->read && ret > 0) {
652 use_mm(io_data->mm);
653 ret = copy_to_iter(io_data->buf, ret, &io_data->data);
654 if (iov_iter_count(&io_data->data))
655 ret = -EFAULT;
656 unuse_mm(io_data->mm);
657 }
658
659 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
660
661 if (io_data->ffs->ffs_eventfd &&
662 !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
663 eventfd_signal(io_data->ffs->ffs_eventfd, 1);
664
665 usb_ep_free_request(io_data->ep, io_data->req);
666
667 io_data->kiocb->private = NULL;
668 if (io_data->read)
669 kfree(io_data->to_free);
670 kfree(io_data->buf);
671 kfree(io_data);
672 }
673
674 static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
675 struct usb_request *req)
676 {
677 struct ffs_io_data *io_data = req->context;
678
679 ENTER();
680
681 INIT_WORK(&io_data->work, ffs_user_copy_worker);
682 schedule_work(&io_data->work);
683 }
684
685 static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
686 {
687 struct ffs_epfile *epfile = file->private_data;
688 struct ffs_ep *ep;
689 char *data = NULL;
690 ssize_t ret, data_len = -EINVAL;
691 int halt;
692
693 /* Are we still active? */
694 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
695 ret = -ENODEV;
696 goto error;
697 }
698
699 /* Wait for endpoint to be enabled */
700 ep = epfile->ep;
701 if (!ep) {
702 if (file->f_flags & O_NONBLOCK) {
703 ret = -EAGAIN;
704 goto error;
705 }
706
707 ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
708 if (ret) {
709 ret = -EINTR;
710 goto error;
711 }
712 }
713
714 /* Do we halt? */
715 halt = (!io_data->read == !epfile->in);
716 if (halt && epfile->isoc) {
717 ret = -EINVAL;
718 goto error;
719 }
720
721 /* Allocate & copy */
722 if (!halt) {
723		/*
724		 * If we _do_ wait above, epfile->ffs->gadget might become NULL
725		 * before the wait completes, so do not read it into 'gadget' earlier.
726		 */
727 struct usb_gadget *gadget = epfile->ffs->gadget;
728 size_t copied;
729
730 spin_lock_irq(&epfile->ffs->eps_lock);
731 /* In the meantime, endpoint got disabled or changed. */
732 if (epfile->ep != ep) {
733 spin_unlock_irq(&epfile->ffs->eps_lock);
734 return -ESHUTDOWN;
735 }
736 data_len = iov_iter_count(&io_data->data);
737 /*
738 * Controller may require buffer size to be aligned to
739 * maxpacketsize of an out endpoint.
740 */
741 if (io_data->read)
742 data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
743 spin_unlock_irq(&epfile->ffs->eps_lock);
744
745 data = kmalloc(data_len, GFP_KERNEL);
746 if (unlikely(!data))
747 return -ENOMEM;
748 if (!io_data->read) {
749 copied = copy_from_iter(data, data_len, &io_data->data);
750 if (copied != data_len) {
751 ret = -EFAULT;
752 goto error;
753 }
754 }
755 }
756
757 /* We will be using request */
758 ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
759 if (unlikely(ret))
760 goto error;
761
762 spin_lock_irq(&epfile->ffs->eps_lock);
763
764 if (epfile->ep != ep) {
765 /* In the meantime, endpoint got disabled or changed. */
766 ret = -ESHUTDOWN;
767 spin_unlock_irq(&epfile->ffs->eps_lock);
768 } else if (halt) {
769 /* Halt */
770 if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
771 usb_ep_set_halt(ep->ep);
772 spin_unlock_irq(&epfile->ffs->eps_lock);
773 ret = -EBADMSG;
774 } else {
775 /* Fire the request */
776 struct usb_request *req;
777
778		/*
779		 * Sanity check: even though data_len cannot be used
780		 * uninitialized at the time this comment was written, some
781		 * compilers complain about this situation anyway.
782		 * To keep the code free of warnings, data_len is initialized
783		 * to -EINVAL at its declaration, which means we can no longer
784		 * rely on the compiler to warn if a future change ends up
785		 * using data_len uninitialized.
786		 * For that reason we add this redundant runtime sanity check
787		 * here.
788		 */
789 if (unlikely(data_len == -EINVAL)) {
790 WARN(1, "%s: data_len == -EINVAL\n", __func__);
791 ret = -EINVAL;
792 goto error_lock;
793 }
794
795 if (io_data->aio) {
796			req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
797			if (unlikely(!req)) {
798				ret = -ENOMEM;
				goto error_lock;
			}
799
800 req->buf = data;
801 req->length = data_len;
802
803 io_data->buf = data;
804 io_data->ep = ep->ep;
805 io_data->req = req;
806 io_data->ffs = epfile->ffs;
807
808 req->context = io_data;
809 req->complete = ffs_epfile_async_io_complete;
810
811 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
812 if (unlikely(ret)) {
813 usb_ep_free_request(ep->ep, req);
814 goto error_lock;
815 }
816 ret = -EIOCBQUEUED;
817
818 spin_unlock_irq(&epfile->ffs->eps_lock);
819 } else {
820 DECLARE_COMPLETION_ONSTACK(done);
821
822 req = ep->req;
823 req->buf = data;
824 req->length = data_len;
825
826 req->context = &done;
827 req->complete = ffs_epfile_io_complete;
828
829 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
830
831 spin_unlock_irq(&epfile->ffs->eps_lock);
832
833 if (unlikely(ret < 0)) {
834 /* nop */
835 } else if (unlikely(
836 wait_for_completion_interruptible(&done))) {
837 ret = -EINTR;
838 usb_ep_dequeue(ep->ep, req);
839 } else {
840				/*
841				 * XXX We may end up silently dropping data
842				 * here.  Since data_len (i.e. req->length) may
843				 * be bigger than len (after being rounded up
844				 * to maxpacketsize), we may end up with more
845				 * data than user space has room for.
846				 */
847 ret = ep->status;
848 if (io_data->read && ret > 0) {
849 ret = copy_to_iter(data, ret, &io_data->data);
850 if (unlikely(iov_iter_count(&io_data->data)))
851 ret = -EFAULT;
852 }
853 }
854 kfree(data);
855 }
856 }
857
858 mutex_unlock(&epfile->mutex);
859 return ret;
860
861 error_lock:
862 spin_unlock_irq(&epfile->ffs->eps_lock);
863 mutex_unlock(&epfile->mutex);
864 error:
865 kfree(data);
866 return ret;
867 }
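/*
 * Note on the two I/O paths above: synchronous I/O reuses the preallocated
 * ep->req with an on-stack completion and sleeps until the transfer is done,
 * while AIO allocates a fresh request, returns -EIOCBQUEUED immediately and
 * finishes (including copying read data back to user space) in
 * ffs_user_copy_worker().
 */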
868
869 static int
870 ffs_epfile_open(struct inode *inode, struct file *file)
871 {
872 struct ffs_epfile *epfile = inode->i_private;
873
874 ENTER();
875
876 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
877 return -ENODEV;
878
879 file->private_data = epfile;
880 ffs_data_opened(epfile->ffs);
881
882 return 0;
883 }
884
885 static int ffs_aio_cancel(struct kiocb *kiocb)
886 {
887 struct ffs_io_data *io_data = kiocb->private;
888 struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
889 int value;
890
891 ENTER();
892
893 spin_lock_irq(&epfile->ffs->eps_lock);
894
895 if (likely(io_data && io_data->ep && io_data->req))
896 value = usb_ep_dequeue(io_data->ep, io_data->req);
897 else
898 value = -EINVAL;
899
900 spin_unlock_irq(&epfile->ffs->eps_lock);
901
902 return value;
903 }
904
905 static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
906 {
907 struct ffs_io_data io_data, *p = &io_data;
908 ssize_t res;
909
910 ENTER();
911
912 if (!is_sync_kiocb(kiocb)) {
913 p = kmalloc(sizeof(io_data), GFP_KERNEL);
914 if (unlikely(!p))
915 return -ENOMEM;
916 p->aio = true;
917 } else {
918 p->aio = false;
919 }
920
921 p->read = false;
922 p->kiocb = kiocb;
923 p->data = *from;
924 p->mm = current->mm;
925
926 kiocb->private = p;
927
928 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
929
930 res = ffs_epfile_io(kiocb->ki_filp, p);
931 if (res == -EIOCBQUEUED)
932 return res;
933 if (p->aio)
934 kfree(p);
935 else
936 *from = p->data;
937 return res;
938 }
939
940 static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
941 {
942 struct ffs_io_data io_data, *p = &io_data;
943 ssize_t res;
944
945 ENTER();
946
947 if (!is_sync_kiocb(kiocb)) {
948 p = kmalloc(sizeof(io_data), GFP_KERNEL);
949 if (unlikely(!p))
950 return -ENOMEM;
951 p->aio = true;
952 } else {
953 p->aio = false;
954 }
955
956 p->read = true;
957 p->kiocb = kiocb;
958 if (p->aio) {
959 p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
960 if (!p->to_free) {
961 kfree(p);
962 return -ENOMEM;
963 }
964 } else {
965 p->data = *to;
966 p->to_free = NULL;
967 }
968 p->mm = current->mm;
969
970 kiocb->private = p;
971
972 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
973
974 res = ffs_epfile_io(kiocb->ki_filp, p);
975 if (res == -EIOCBQUEUED)
976 return res;
977
978 if (p->aio) {
979 kfree(p->to_free);
980 kfree(p);
981 } else {
982 *to = p->data;
983 }
984 return res;
985 }
986
987 static int
988 ffs_epfile_release(struct inode *inode, struct file *file)
989 {
990 struct ffs_epfile *epfile = inode->i_private;
991
992 ENTER();
993
994 ffs_data_closed(epfile->ffs);
995
996 return 0;
997 }
998
999 static long ffs_epfile_ioctl(struct file *file, unsigned code,
1000 unsigned long value)
1001 {
1002 struct ffs_epfile *epfile = file->private_data;
1003 int ret;
1004
1005 ENTER();
1006
1007 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1008 return -ENODEV;
1009
1010 spin_lock_irq(&epfile->ffs->eps_lock);
1011 if (likely(epfile->ep)) {
1012 switch (code) {
1013 case FUNCTIONFS_FIFO_STATUS:
1014 ret = usb_ep_fifo_status(epfile->ep->ep);
1015 break;
1016 case FUNCTIONFS_FIFO_FLUSH:
1017 usb_ep_fifo_flush(epfile->ep->ep);
1018 ret = 0;
1019 break;
1020 case FUNCTIONFS_CLEAR_HALT:
1021 ret = usb_ep_clear_halt(epfile->ep->ep);
1022 break;
1023 case FUNCTIONFS_ENDPOINT_REVMAP:
1024 ret = epfile->ep->num;
1025 break;
1026 case FUNCTIONFS_ENDPOINT_DESC:
1027 {
1028 int desc_idx;
1029 struct usb_endpoint_descriptor *desc;
1030
1031 switch (epfile->ffs->gadget->speed) {
1032 case USB_SPEED_SUPER:
1033 desc_idx = 2;
1034 break;
1035 case USB_SPEED_HIGH:
1036 desc_idx = 1;
1037 break;
1038 default:
1039 desc_idx = 0;
1040 }
1041 desc = epfile->ep->descs[desc_idx];
1042
1043 spin_unlock_irq(&epfile->ffs->eps_lock);
1044 ret = copy_to_user((void *)value, desc, sizeof(*desc));
1045 if (ret)
1046 ret = -EFAULT;
1047 return ret;
1048 }
1049 default:
1050 ret = -ENOTTY;
1051 }
1052 } else {
1053 ret = -ENODEV;
1054 }
1055 spin_unlock_irq(&epfile->ffs->eps_lock);
1056
1057 return ret;
1058 }
1059
1060 static const struct file_operations ffs_epfile_operations = {
1061 .llseek = no_llseek,
1062
1063 .open = ffs_epfile_open,
1064 .write = new_sync_write,
1065 .read = new_sync_read,
1066 .write_iter = ffs_epfile_write_iter,
1067 .read_iter = ffs_epfile_read_iter,
1068 .release = ffs_epfile_release,
1069 .unlocked_ioctl = ffs_epfile_ioctl,
1070 };
1071
1072
1073 /* File system and super block operations ***********************************/
1074
1075 /*
1076  * Mounting the file system creates a controller file, used first for
1077  * function configuration and later for event monitoring.
1078  */
1079
1080 static struct inode *__must_check
1081 ffs_sb_make_inode(struct super_block *sb, void *data,
1082 const struct file_operations *fops,
1083 const struct inode_operations *iops,
1084 struct ffs_file_perms *perms)
1085 {
1086 struct inode *inode;
1087
1088 ENTER();
1089
1090 inode = new_inode(sb);
1091
1092 if (likely(inode)) {
1093 struct timespec current_time = CURRENT_TIME;
1094
1095 inode->i_ino = get_next_ino();
1096 inode->i_mode = perms->mode;
1097 inode->i_uid = perms->uid;
1098 inode->i_gid = perms->gid;
1099 inode->i_atime = current_time;
1100 inode->i_mtime = current_time;
1101 inode->i_ctime = current_time;
1102 inode->i_private = data;
1103 if (fops)
1104 inode->i_fop = fops;
1105 if (iops)
1106 inode->i_op = iops;
1107 }
1108
1109 return inode;
1110 }
1111
1112 /* Create "regular" file */
1113 static struct dentry *ffs_sb_create_file(struct super_block *sb,
1114 const char *name, void *data,
1115 const struct file_operations *fops)
1116 {
1117 struct ffs_data *ffs = sb->s_fs_info;
1118 struct dentry *dentry;
1119 struct inode *inode;
1120
1121 ENTER();
1122
1123 dentry = d_alloc_name(sb->s_root, name);
1124 if (unlikely(!dentry))
1125 return NULL;
1126
1127 inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
1128 if (unlikely(!inode)) {
1129 dput(dentry);
1130 return NULL;
1131 }
1132
1133 d_add(dentry, inode);
1134 return dentry;
1135 }
1136
1137 /* Super block */
1138 static const struct super_operations ffs_sb_operations = {
1139 .statfs = simple_statfs,
1140 .drop_inode = generic_delete_inode,
1141 };
1142
1143 struct ffs_sb_fill_data {
1144 struct ffs_file_perms perms;
1145 umode_t root_mode;
1146 const char *dev_name;
1147 bool no_disconnect;
1148 struct ffs_data *ffs_data;
1149 };
1150
1151 static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1152 {
1153 struct ffs_sb_fill_data *data = _data;
1154 struct inode *inode;
1155 struct ffs_data *ffs = data->ffs_data;
1156
1157 ENTER();
1158
1159 ffs->sb = sb;
1160 data->ffs_data = NULL;
1161 sb->s_fs_info = ffs;
1162 sb->s_blocksize = PAGE_CACHE_SIZE;
1163 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
1164 sb->s_magic = FUNCTIONFS_MAGIC;
1165 sb->s_op = &ffs_sb_operations;
1166 sb->s_time_gran = 1;
1167
1168 /* Root inode */
1169 data->perms.mode = data->root_mode;
1170 inode = ffs_sb_make_inode(sb, NULL,
1171 &simple_dir_operations,
1172 &simple_dir_inode_operations,
1173 &data->perms);
1174 sb->s_root = d_make_root(inode);
1175 if (unlikely(!sb->s_root))
1176 return -ENOMEM;
1177
1178 /* EP0 file */
1179 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
1180 &ffs_ep0_operations)))
1181 return -ENOMEM;
1182
1183 return 0;
1184 }
1185
1186 static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
1187 {
1188 ENTER();
1189
1190 if (!opts || !*opts)
1191 return 0;
1192
1193 for (;;) {
1194 unsigned long value;
1195 char *eq, *comma;
1196
1197 /* Option limit */
1198 comma = strchr(opts, ',');
1199 if (comma)
1200 *comma = 0;
1201
1202 /* Value limit */
1203 eq = strchr(opts, '=');
1204 if (unlikely(!eq)) {
1205 pr_err("'=' missing in %s\n", opts);
1206 return -EINVAL;
1207 }
1208 *eq = 0;
1209
1210 /* Parse value */
1211 if (kstrtoul(eq + 1, 0, &value)) {
1212 pr_err("%s: invalid value: %s\n", opts, eq + 1);
1213 return -EINVAL;
1214 }
1215
1216 /* Interpret option */
1217 switch (eq - opts) {
1218 case 13:
1219 if (!memcmp(opts, "no_disconnect", 13))
1220 data->no_disconnect = !!value;
1221 else
1222 goto invalid;
1223 break;
1224 case 5:
1225 if (!memcmp(opts, "rmode", 5))
1226 data->root_mode = (value & 0555) | S_IFDIR;
1227 else if (!memcmp(opts, "fmode", 5))
1228 data->perms.mode = (value & 0666) | S_IFREG;
1229 else
1230 goto invalid;
1231 break;
1232
1233 case 4:
1234 if (!memcmp(opts, "mode", 4)) {
1235 data->root_mode = (value & 0555) | S_IFDIR;
1236 data->perms.mode = (value & 0666) | S_IFREG;
1237 } else {
1238 goto invalid;
1239 }
1240 break;
1241
1242 case 3:
1243 if (!memcmp(opts, "uid", 3)) {
1244 data->perms.uid = make_kuid(current_user_ns(), value);
1245 if (!uid_valid(data->perms.uid)) {
1246 pr_err("%s: unmapped value: %lu\n", opts, value);
1247 return -EINVAL;
1248 }
1249 } else if (!memcmp(opts, "gid", 3)) {
1250 data->perms.gid = make_kgid(current_user_ns(), value);
1251 if (!gid_valid(data->perms.gid)) {
1252 pr_err("%s: unmapped value: %lu\n", opts, value);
1253 return -EINVAL;
1254 }
1255 } else {
1256 goto invalid;
1257 }
1258 break;
1259
1260 default:
1261 invalid:
1262 pr_err("%s: invalid option\n", opts);
1263 return -EINVAL;
1264 }
1265
1266 /* Next iteration */
1267 if (!comma)
1268 break;
1269 opts = comma + 1;
1270 }
1271
1272 return 0;
1273 }
1274
1275 /* "mount -t functionfs dev_name /dev/function" ends up here */
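/*
 * An example invocation (the device name and mount point are arbitrary and
 * used here purely for illustration):
 *
 *	mount -t functionfs func0 /dev/usb-ffs/func0 \
 *		-o no_disconnect=1,mode=0660,uid=1000,gid=1000
 *
 * See ffs_fs_parse_opts() above for the full set of recognized options
 * (no_disconnect, rmode, fmode, mode, uid and gid).
 */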
1276
1277 static struct dentry *
1278 ffs_fs_mount(struct file_system_type *t, int flags,
1279 const char *dev_name, void *opts)
1280 {
1281 struct ffs_sb_fill_data data = {
1282 .perms = {
1283 .mode = S_IFREG | 0600,
1284 .uid = GLOBAL_ROOT_UID,
1285 .gid = GLOBAL_ROOT_GID,
1286 },
1287 .root_mode = S_IFDIR | 0500,
1288 .no_disconnect = false,
1289 };
1290 struct dentry *rv;
1291 int ret;
1292 void *ffs_dev;
1293 struct ffs_data *ffs;
1294
1295 ENTER();
1296
1297 ret = ffs_fs_parse_opts(&data, opts);
1298 if (unlikely(ret < 0))
1299 return ERR_PTR(ret);
1300
1301 ffs = ffs_data_new();
1302 if (unlikely(!ffs))
1303 return ERR_PTR(-ENOMEM);
1304 ffs->file_perms = data.perms;
1305 ffs->no_disconnect = data.no_disconnect;
1306
1307 ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
1308 if (unlikely(!ffs->dev_name)) {
1309 ffs_data_put(ffs);
1310 return ERR_PTR(-ENOMEM);
1311 }
1312
1313 ffs_dev = ffs_acquire_dev(dev_name);
1314 if (IS_ERR(ffs_dev)) {
1315 ffs_data_put(ffs);
1316 return ERR_CAST(ffs_dev);
1317 }
1318 ffs->private_data = ffs_dev;
1319 data.ffs_data = ffs;
1320
1321 rv = mount_nodev(t, flags, &data, ffs_sb_fill);
1322 if (IS_ERR(rv) && data.ffs_data) {
1323 ffs_release_dev(data.ffs_data);
1324 ffs_data_put(data.ffs_data);
1325 }
1326 return rv;
1327 }
1328
1329 static void
1330 ffs_fs_kill_sb(struct super_block *sb)
1331 {
1332 ENTER();
1333
1334 kill_litter_super(sb);
1335 if (sb->s_fs_info) {
1336 ffs_release_dev(sb->s_fs_info);
1337 ffs_data_closed(sb->s_fs_info);
1338 ffs_data_put(sb->s_fs_info);
1339 }
1340 }
1341
1342 static struct file_system_type ffs_fs_type = {
1343 .owner = THIS_MODULE,
1344 .name = "functionfs",
1345 .mount = ffs_fs_mount,
1346 .kill_sb = ffs_fs_kill_sb,
1347 };
1348 MODULE_ALIAS_FS("functionfs");
1349
1350
1351 /* Driver's main init/cleanup functions *************************************/
1352
1353 static int functionfs_init(void)
1354 {
1355 int ret;
1356
1357 ENTER();
1358
1359 ret = register_filesystem(&ffs_fs_type);
1360 if (likely(!ret))
1361 pr_info("file system registered\n");
1362 else
1363 pr_err("failed registering file system (%d)\n", ret);
1364
1365 return ret;
1366 }
1367
1368 static void functionfs_cleanup(void)
1369 {
1370 ENTER();
1371
1372 pr_info("unloading\n");
1373 unregister_filesystem(&ffs_fs_type);
1374 }
1375
1376
1377 /* ffs_data and ffs_function construction and destruction code **************/
1378
1379 static void ffs_data_clear(struct ffs_data *ffs);
1380 static void ffs_data_reset(struct ffs_data *ffs);
1381
1382 static void ffs_data_get(struct ffs_data *ffs)
1383 {
1384 ENTER();
1385
1386 atomic_inc(&ffs->ref);
1387 }
1388
1389 static void ffs_data_opened(struct ffs_data *ffs)
1390 {
1391 ENTER();
1392
1393 atomic_inc(&ffs->ref);
1394 if (atomic_add_return(1, &ffs->opened) == 1 &&
1395 ffs->state == FFS_DEACTIVATED) {
1396 ffs->state = FFS_CLOSING;
1397 ffs_data_reset(ffs);
1398 }
1399 }
1400
1401 static void ffs_data_put(struct ffs_data *ffs)
1402 {
1403 ENTER();
1404
1405 if (unlikely(atomic_dec_and_test(&ffs->ref))) {
1406 pr_info("%s(): freeing\n", __func__);
1407 ffs_data_clear(ffs);
1408 BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
1409 waitqueue_active(&ffs->ep0req_completion.wait));
1410 kfree(ffs->dev_name);
1411 kfree(ffs);
1412 }
1413 }
1414
1415 static void ffs_data_closed(struct ffs_data *ffs)
1416 {
1417 ENTER();
1418
1419 if (atomic_dec_and_test(&ffs->opened)) {
1420 if (ffs->no_disconnect) {
1421 ffs->state = FFS_DEACTIVATED;
1422 if (ffs->epfiles) {
1423 ffs_epfiles_destroy(ffs->epfiles,
1424 ffs->eps_count);
1425 ffs->epfiles = NULL;
1426 }
1427 if (ffs->setup_state == FFS_SETUP_PENDING)
1428 __ffs_ep0_stall(ffs);
1429 } else {
1430 ffs->state = FFS_CLOSING;
1431 ffs_data_reset(ffs);
1432 }
1433 }
1434 if (atomic_read(&ffs->opened) < 0) {
1435 ffs->state = FFS_CLOSING;
1436 ffs_data_reset(ffs);
1437 }
1438
1439 ffs_data_put(ffs);
1440 }
1441
1442 static struct ffs_data *ffs_data_new(void)
1443 {
1444 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1445 if (unlikely(!ffs))
1446 return NULL;
1447
1448 ENTER();
1449
1450 atomic_set(&ffs->ref, 1);
1451 atomic_set(&ffs->opened, 0);
1452 ffs->state = FFS_READ_DESCRIPTORS;
1453 mutex_init(&ffs->mutex);
1454 spin_lock_init(&ffs->eps_lock);
1455 init_waitqueue_head(&ffs->ev.waitq);
1456 init_completion(&ffs->ep0req_completion);
1457
1458 /* XXX REVISIT need to update it in some places, or do we? */
1459 ffs->ev.can_stall = 1;
1460
1461 return ffs;
1462 }
1463
1464 static void ffs_data_clear(struct ffs_data *ffs)
1465 {
1466 ENTER();
1467
1468 if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
1469 ffs_closed(ffs);
1470
1471 BUG_ON(ffs->gadget);
1472
1473 if (ffs->epfiles)
1474 ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
1475
1476 if (ffs->ffs_eventfd)
1477 eventfd_ctx_put(ffs->ffs_eventfd);
1478
1479 kfree(ffs->raw_descs_data);
1480 kfree(ffs->raw_strings);
1481 kfree(ffs->stringtabs);
1482 }
1483
1484 static void ffs_data_reset(struct ffs_data *ffs)
1485 {
1486 ENTER();
1487
1488 ffs_data_clear(ffs);
1489
1490 ffs->epfiles = NULL;
1491 ffs->raw_descs_data = NULL;
1492 ffs->raw_descs = NULL;
1493 ffs->raw_strings = NULL;
1494 ffs->stringtabs = NULL;
1495
1496 ffs->raw_descs_length = 0;
1497 ffs->fs_descs_count = 0;
1498 ffs->hs_descs_count = 0;
1499 ffs->ss_descs_count = 0;
1500
1501 ffs->strings_count = 0;
1502 ffs->interfaces_count = 0;
1503 ffs->eps_count = 0;
1504
1505 ffs->ev.count = 0;
1506
1507 ffs->state = FFS_READ_DESCRIPTORS;
1508 ffs->setup_state = FFS_NO_SETUP;
1509 ffs->flags = 0;
1510 }
1511
1512
1513 static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
1514 {
1515 struct usb_gadget_strings **lang;
1516 int first_id;
1517
1518 ENTER();
1519
1520 if (WARN_ON(ffs->state != FFS_ACTIVE
1521 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
1522 return -EBADFD;
1523
1524 first_id = usb_string_ids_n(cdev, ffs->strings_count);
1525 if (unlikely(first_id < 0))
1526 return first_id;
1527
1528 ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
1529 if (unlikely(!ffs->ep0req))
1530 return -ENOMEM;
1531 ffs->ep0req->complete = ffs_ep0_complete;
1532 ffs->ep0req->context = ffs;
1533
1534 lang = ffs->stringtabs;
1535 if (lang) {
1536 for (; *lang; ++lang) {
1537 struct usb_string *str = (*lang)->strings;
1538 int id = first_id;
1539 for (; str->s; ++id, ++str)
1540 str->id = id;
1541 }
1542 }
1543
1544 ffs->gadget = cdev->gadget;
1545 ffs_data_get(ffs);
1546 return 0;
1547 }
1548
1549 static void functionfs_unbind(struct ffs_data *ffs)
1550 {
1551 ENTER();
1552
1553 if (!WARN_ON(!ffs->gadget)) {
1554 usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
1555 ffs->ep0req = NULL;
1556 ffs->gadget = NULL;
1557 clear_bit(FFS_FL_BOUND, &ffs->flags);
1558 ffs_data_put(ffs);
1559 }
1560 }
1561
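/*
 * Endpoint files are created in the root of the functionfs mount next to
 * ep0.  They are named "ep1" .. "epN" by default, or "ep%02x" after the
 * endpoint address taken from the descriptors when the
 * FUNCTIONFS_VIRTUAL_ADDR flag was requested.
 */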
1562 static int ffs_epfiles_create(struct ffs_data *ffs)
1563 {
1564 struct ffs_epfile *epfile, *epfiles;
1565 unsigned i, count;
1566
1567 ENTER();
1568
1569 count = ffs->eps_count;
1570 epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
1571 if (!epfiles)
1572 return -ENOMEM;
1573
1574 epfile = epfiles;
1575 for (i = 1; i <= count; ++i, ++epfile) {
1576 epfile->ffs = ffs;
1577 mutex_init(&epfile->mutex);
1578 init_waitqueue_head(&epfile->wait);
1579 if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
1580 sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
1581 else
1582 sprintf(epfile->name, "ep%u", i);
1583 epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
1584 epfile,
1585 &ffs_epfile_operations);
1586 if (unlikely(!epfile->dentry)) {
1587 ffs_epfiles_destroy(epfiles, i - 1);
1588 return -ENOMEM;
1589 }
1590 }
1591
1592 ffs->epfiles = epfiles;
1593 return 0;
1594 }
1595
1596 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
1597 {
1598 struct ffs_epfile *epfile = epfiles;
1599
1600 ENTER();
1601
1602 for (; count; --count, ++epfile) {
1603 BUG_ON(mutex_is_locked(&epfile->mutex) ||
1604 waitqueue_active(&epfile->wait));
1605 if (epfile->dentry) {
1606 d_delete(epfile->dentry);
1607 dput(epfile->dentry);
1608 epfile->dentry = NULL;
1609 }
1610 }
1611
1612 kfree(epfiles);
1613 }
1614
1615 static void ffs_func_eps_disable(struct ffs_function *func)
1616 {
1617 struct ffs_ep *ep = func->eps;
1618 struct ffs_epfile *epfile = func->ffs->epfiles;
1619 unsigned count = func->ffs->eps_count;
1620 unsigned long flags;
1621
1622 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1623 do {
1624 /* pending requests get nuked */
1625 if (likely(ep->ep))
1626 usb_ep_disable(ep->ep);
1627 ++ep;
1628
1629 if (epfile) {
1630 epfile->ep = NULL;
1631 ++epfile;
1632 }
1633 } while (--count);
1634 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1635 }
1636
1637 static int ffs_func_eps_enable(struct ffs_function *func)
1638 {
1639 struct ffs_data *ffs = func->ffs;
1640 struct ffs_ep *ep = func->eps;
1641 struct ffs_epfile *epfile = ffs->epfiles;
1642 unsigned count = ffs->eps_count;
1643 unsigned long flags;
1644 int ret = 0;
1645
1646 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1647 do {
1648 struct usb_endpoint_descriptor *ds;
1649 int desc_idx;
1650
1651 if (ffs->gadget->speed == USB_SPEED_SUPER)
1652 desc_idx = 2;
1653 else if (ffs->gadget->speed == USB_SPEED_HIGH)
1654 desc_idx = 1;
1655 else
1656 desc_idx = 0;
1657
1658 /* fall-back to lower speed if desc missing for current speed */
1659 do {
1660 ds = ep->descs[desc_idx];
1661 } while (!ds && --desc_idx >= 0);
1662
1663 if (!ds) {
1664 ret = -EINVAL;
1665 break;
1666 }
1667
1668 ep->ep->driver_data = ep;
1669 ep->ep->desc = ds;
1670 ret = usb_ep_enable(ep->ep);
1671 if (likely(!ret)) {
1672 epfile->ep = ep;
1673 epfile->in = usb_endpoint_dir_in(ds);
1674 epfile->isoc = usb_endpoint_xfer_isoc(ds);
1675 } else {
1676 break;
1677 }
1678
1679 wake_up(&epfile->wait);
1680
1681 ++ep;
1682 ++epfile;
1683 } while (--count);
1684 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1685
1686 return ret;
1687 }
1688
1689
1690 /* Parsing and building descriptors and strings *****************************/
1691
1692 /*
1693  * This validates whether the data pointed to by 'data' is a valid USB
1694  * descriptor and records how many interfaces, endpoints and strings the
1695  * given configuration requires.  Returns the length of the descriptor on
1696  * success or a negative error code if the data is invalid.
1697  */
1698
1699 enum ffs_entity_type {
1700 FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
1701 };
1702
1703 enum ffs_os_desc_type {
1704 FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
1705 };
1706
1707 typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
1708 u8 *valuep,
1709 struct usb_descriptor_header *desc,
1710 void *priv);
1711
1712 typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
1713 struct usb_os_desc_header *h, void *data,
1714 unsigned len, void *priv);
1715
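/*
 * The entity callback is invoked once per entity found while parsing:
 * with FFS_DESCRIPTOR for every complete descriptor (valuep carries the
 * ordinal number and desc becomes NULL once all descriptors have been
 * consumed), and with FFS_INTERFACE/FFS_STRING/FFS_ENDPOINT pointing at the
 * interface number, string index or endpoint address within the descriptor.
 * A negative return value aborts parsing and is propagated to the caller.
 */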
1716 static int __must_check ffs_do_single_desc(char *data, unsigned len,
1717 ffs_entity_callback entity,
1718 void *priv)
1719 {
1720 struct usb_descriptor_header *_ds = (void *)data;
1721 u8 length;
1722 int ret;
1723
1724 ENTER();
1725
1726 /* At least two bytes are required: length and type */
1727 if (len < 2) {
1728 pr_vdebug("descriptor too short\n");
1729 return -EINVAL;
1730 }
1731
1732	/* Do we have at least as many bytes as the descriptor takes? */
1733 length = _ds->bLength;
1734 if (len < length) {
1735		pr_vdebug("descriptor longer than available data\n");
1736 return -EINVAL;
1737 }
1738
1739 #define __entity_check_INTERFACE(val) 1
1740 #define __entity_check_STRING(val) (val)
1741 #define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
1742 #define __entity(type, val) do { \
1743 pr_vdebug("entity " #type "(%02x)\n", (val)); \
1744 if (unlikely(!__entity_check_ ##type(val))) { \
1745 pr_vdebug("invalid entity's value\n"); \
1746 return -EINVAL; \
1747 } \
1748 ret = entity(FFS_ ##type, &val, _ds, priv); \
1749 if (unlikely(ret < 0)) { \
1750 pr_debug("entity " #type "(%02x); ret = %d\n", \
1751 (val), ret); \
1752 return ret; \
1753 } \
1754 } while (0)
1755
1756 /* Parse descriptor depending on type. */
1757 switch (_ds->bDescriptorType) {
1758 case USB_DT_DEVICE:
1759 case USB_DT_CONFIG:
1760 case USB_DT_STRING:
1761 case USB_DT_DEVICE_QUALIFIER:
1762 /* function can't have any of those */
1763 pr_vdebug("descriptor reserved for gadget: %d\n",
1764 _ds->bDescriptorType);
1765 return -EINVAL;
1766
1767 case USB_DT_INTERFACE: {
1768 struct usb_interface_descriptor *ds = (void *)_ds;
1769 pr_vdebug("interface descriptor\n");
1770 if (length != sizeof *ds)
1771 goto inv_length;
1772
1773 __entity(INTERFACE, ds->bInterfaceNumber);
1774 if (ds->iInterface)
1775 __entity(STRING, ds->iInterface);
1776 }
1777 break;
1778
1779 case USB_DT_ENDPOINT: {
1780 struct usb_endpoint_descriptor *ds = (void *)_ds;
1781 pr_vdebug("endpoint descriptor\n");
1782 if (length != USB_DT_ENDPOINT_SIZE &&
1783 length != USB_DT_ENDPOINT_AUDIO_SIZE)
1784 goto inv_length;
1785 __entity(ENDPOINT, ds->bEndpointAddress);
1786 }
1787 break;
1788
1789 case HID_DT_HID:
1790 pr_vdebug("hid descriptor\n");
1791 if (length != sizeof(struct hid_descriptor))
1792 goto inv_length;
1793 break;
1794
1795 case USB_DT_OTG:
1796 if (length != sizeof(struct usb_otg_descriptor))
1797 goto inv_length;
1798 break;
1799
1800 case USB_DT_INTERFACE_ASSOCIATION: {
1801 struct usb_interface_assoc_descriptor *ds = (void *)_ds;
1802 pr_vdebug("interface association descriptor\n");
1803 if (length != sizeof *ds)
1804 goto inv_length;
1805 if (ds->iFunction)
1806 __entity(STRING, ds->iFunction);
1807 }
1808 break;
1809
1810 case USB_DT_SS_ENDPOINT_COMP:
1811 pr_vdebug("EP SS companion descriptor\n");
1812 if (length != sizeof(struct usb_ss_ep_comp_descriptor))
1813 goto inv_length;
1814 break;
1815
1816 case USB_DT_OTHER_SPEED_CONFIG:
1817 case USB_DT_INTERFACE_POWER:
1818 case USB_DT_DEBUG:
1819 case USB_DT_SECURITY:
1820 case USB_DT_CS_RADIO_CONTROL:
1821 /* TODO */
1822 pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
1823 return -EINVAL;
1824
1825 default:
1826 /* We should never be here */
1827 pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
1828 return -EINVAL;
1829
1830 inv_length:
1831 pr_vdebug("invalid length: %d (descriptor %d)\n",
1832 _ds->bLength, _ds->bDescriptorType);
1833 return -EINVAL;
1834 }
1835
1836 #undef __entity
1837 #undef __entity_check_DESCRIPTOR
1838 #undef __entity_check_INTERFACE
1839 #undef __entity_check_STRING
1840 #undef __entity_check_ENDPOINT
1841
1842 return length;
1843 }
1844
1845 static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
1846 ffs_entity_callback entity, void *priv)
1847 {
1848 const unsigned _len = len;
1849 unsigned long num = 0;
1850
1851 ENTER();
1852
1853 for (;;) {
1854 int ret;
1855
1856 if (num == count)
1857 data = NULL;
1858
1859 /* Record "descriptor" entity */
1860 ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
1861 if (unlikely(ret < 0)) {
1862 pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
1863 num, ret);
1864 return ret;
1865 }
1866
1867 if (!data)
1868 return _len - len;
1869
1870 ret = ffs_do_single_desc(data, len, entity, priv);
1871 if (unlikely(ret < 0)) {
1872 pr_debug("%s returns %d\n", __func__, ret);
1873 return ret;
1874 }
1875
1876 len -= ret;
1877 data += ret;
1878 ++num;
1879 }
1880 }
1881
1882 static int __ffs_data_do_entity(enum ffs_entity_type type,
1883 u8 *valuep, struct usb_descriptor_header *desc,
1884 void *priv)
1885 {
1886 struct ffs_desc_helper *helper = priv;
1887 struct usb_endpoint_descriptor *d;
1888
1889 ENTER();
1890
1891 switch (type) {
1892 case FFS_DESCRIPTOR:
1893 break;
1894
1895 case FFS_INTERFACE:
1896 /*
1897 * Interfaces are indexed from zero so if we
1898 * encountered interface "n" then there are at least
1899 * "n+1" interfaces.
1900 */
1901 if (*valuep >= helper->interfaces_count)
1902 helper->interfaces_count = *valuep + 1;
1903 break;
1904
1905 case FFS_STRING:
1906		/*
1907		 * Strings are indexed from 1 (index 0 is reserved for
1908		 * the list of language IDs).
1909		 */
1910 if (*valuep > helper->ffs->strings_count)
1911 helper->ffs->strings_count = *valuep;
1912 break;
1913
1914 case FFS_ENDPOINT:
1915 d = (void *)desc;
1916 helper->eps_count++;
1917 if (helper->eps_count >= 15)
1918 return -EINVAL;
1919 /* Check if descriptors for any speed were already parsed */
1920 if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
1921 helper->ffs->eps_addrmap[helper->eps_count] =
1922 d->bEndpointAddress;
1923 else if (helper->ffs->eps_addrmap[helper->eps_count] !=
1924 d->bEndpointAddress)
1925 return -EINVAL;
1926 break;
1927 }
1928
1929 return 0;
1930 }
1931
1932 static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
1933 struct usb_os_desc_header *desc)
1934 {
1935 u16 bcd_version = le16_to_cpu(desc->bcdVersion);
1936 u16 w_index = le16_to_cpu(desc->wIndex);
1937
1938 if (bcd_version != 1) {
1939 pr_vdebug("unsupported os descriptors version: %d",
1940 bcd_version);
1941 return -EINVAL;
1942 }
1943 switch (w_index) {
1944 case 0x4:
1945 *next_type = FFS_OS_DESC_EXT_COMPAT;
1946 break;
1947 case 0x5:
1948 *next_type = FFS_OS_DESC_EXT_PROP;
1949 break;
1950 default:
1951 pr_vdebug("unsupported os descriptor type: %d", w_index);
1952 return -EINVAL;
1953 }
1954
1955 return sizeof(*desc);
1956 }
1957
1958 /*
1959 * Process all extended compatibility/extended property descriptors
1960 * of a feature descriptor
1961 */
1962 static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
1963 enum ffs_os_desc_type type,
1964 u16 feature_count,
1965 ffs_os_desc_callback entity,
1966 void *priv,
1967 struct usb_os_desc_header *h)
1968 {
1969 int ret;
1970 const unsigned _len = len;
1971
1972 ENTER();
1973
1974 /* loop over all ext compat/ext prop descriptors */
1975 while (feature_count--) {
1976 ret = entity(type, h, data, len, priv);
1977 if (unlikely(ret < 0)) {
1978 pr_debug("bad OS descriptor, type: %d\n", type);
1979 return ret;
1980 }
1981 data += ret;
1982 len -= ret;
1983 }
1984 return _len - len;
1985 }
1986
1987 /* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
1988 static int __must_check ffs_do_os_descs(unsigned count,
1989 char *data, unsigned len,
1990 ffs_os_desc_callback entity, void *priv)
1991 {
1992 const unsigned _len = len;
1993 unsigned long num = 0;
1994
1995 ENTER();
1996
1997 for (num = 0; num < count; ++num) {
1998 int ret;
1999 enum ffs_os_desc_type type;
2000 u16 feature_count;
2001 struct usb_os_desc_header *desc = (void *)data;
2002
2003 if (len < sizeof(*desc))
2004 return -EINVAL;
2005
2006		/*
2007		 * Record the "descriptor" entity.
2008		 * Process dwLength, bcdVersion and wIndex, and get b/wCount.
2009		 * Then move the data pointer to the beginning of the
2010		 * extended compatibility or extended properties portion
2011		 * of the data.
2012		 */
2013 if (le32_to_cpu(desc->dwLength) > len)
2014 return -EINVAL;
2015
2016 ret = __ffs_do_os_desc_header(&type, desc);
2017 if (unlikely(ret < 0)) {
2018 pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
2019 num, ret);
2020 return ret;
2021 }
2022 /*
2023 * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??"
2024 */
2025 feature_count = le16_to_cpu(desc->wCount);
2026 if (type == FFS_OS_DESC_EXT_COMPAT &&
2027 (feature_count > 255 || desc->Reserved))
2028 return -EINVAL;
2029 len -= ret;
2030 data += ret;
2031
2032 /*
2033 * Process all function/property descriptors
2034 * of this Feature Descriptor
2035 */
2036 ret = ffs_do_single_os_desc(data, len, type,
2037 feature_count, entity, priv, desc);
2038 if (unlikely(ret < 0)) {
2039 pr_debug("%s returns %d\n", __func__, ret);
2040 return ret;
2041 }
2042
2043 len -= ret;
2044 data += ret;
2045 }
2046 return _len - len;
2047 }
2048
2049 /**
2050 * Validate contents of the buffer from userspace related to OS descriptors.
2051 */
2052 static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2053 struct usb_os_desc_header *h, void *data,
2054 unsigned len, void *priv)
2055 {
2056 struct ffs_data *ffs = priv;
2057 u8 length;
2058
2059 ENTER();
2060
2061 switch (type) {
2062 case FFS_OS_DESC_EXT_COMPAT: {
2063 struct usb_ext_compat_desc *d = data;
2064 int i;
2065
2066 if (len < sizeof(*d) ||
2067 d->bFirstInterfaceNumber >= ffs->interfaces_count ||
2068 d->Reserved1)
2069 return -EINVAL;
2070 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2071 if (d->Reserved2[i])
2072 return -EINVAL;
2073
2074 length = sizeof(struct usb_ext_compat_desc);
2075 }
2076 break;
2077 case FFS_OS_DESC_EXT_PROP: {
2078 struct usb_ext_prop_desc *d = data;
2079 u32 type, pdl;
2080 u16 pnl;
2081
2082 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
2083 return -EINVAL;
2084 length = le32_to_cpu(d->dwSize);
2085 type = le32_to_cpu(d->dwPropertyDataType);
2086 if (type < USB_EXT_PROP_UNICODE ||
2087 type > USB_EXT_PROP_UNICODE_MULTI) {
2088 pr_vdebug("unsupported os descriptor property type: %d",
2089 type);
2090 return -EINVAL;
2091 }
2092 pnl = le16_to_cpu(d->wPropertyNameLength);
2093 pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
2094 if (length != 14 + pnl + pdl) {
2095 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
2096 length, pnl, pdl, type);
2097 return -EINVAL;
2098 }
2099 ++ffs->ms_os_descs_ext_prop_count;
2100 /* property name reported to the host as "WCHAR"s */
2101 ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
2102 ffs->ms_os_descs_ext_prop_data_len += pdl;
2103 }
2104 break;
2105 default:
2106 pr_vdebug("unknown descriptor: %d\n", type);
2107 return -EINVAL;
2108 }
2109 return length;
2110 }
2111
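/*
 * Layout of the descriptors blob written to ep0, as parsed below (all
 * fields little-endian):
 *
 *	le32 magic;	// FUNCTIONFS_DESCRIPTORS_MAGIC or ..._MAGIC_V2
 *	le32 length;	// length of the whole blob
 *	le32 flags;	// V2 only: FUNCTIONFS_HAS_*_DESC | VIRTUAL_ADDR | EVENTFD
 *	le32 eventfd;	// V2 only, present if FUNCTIONFS_EVENTFD is set
 *	le32 fs_count, hs_count, ss_count; // only counts whose HAS_*_DESC flag is set
 *	le32 os_desc_count;	// only if FUNCTIONFS_HAS_MS_OS_DESC is set
 *	...		// the raw (OS) descriptors themselves
 */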
2112 static int __ffs_data_got_descs(struct ffs_data *ffs,
2113 char *const _data, size_t len)
2114 {
2115 char *data = _data, *raw_descs;
2116 unsigned os_descs_count = 0, counts[3], flags;
2117 int ret = -EINVAL, i;
2118 struct ffs_desc_helper helper;
2119
2120 ENTER();
2121
2122 if (get_unaligned_le32(data + 4) != len)
2123 goto error;
2124
2125 switch (get_unaligned_le32(data)) {
2126 case FUNCTIONFS_DESCRIPTORS_MAGIC:
2127 flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
2128 data += 8;
2129 len -= 8;
2130 break;
2131 case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
2132 flags = get_unaligned_le32(data + 8);
2133 ffs->user_flags = flags;
2134 if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
2135 FUNCTIONFS_HAS_HS_DESC |
2136 FUNCTIONFS_HAS_SS_DESC |
2137 FUNCTIONFS_HAS_MS_OS_DESC |
2138 FUNCTIONFS_VIRTUAL_ADDR |
2139 FUNCTIONFS_EVENTFD)) {
2140 ret = -ENOSYS;
2141 goto error;
2142 }
2143 data += 12;
2144 len -= 12;
2145 break;
2146 default:
2147 goto error;
2148 }
2149
2150 if (flags & FUNCTIONFS_EVENTFD) {
2151 if (len < 4)
2152 goto error;
2153 ffs->ffs_eventfd =
2154 eventfd_ctx_fdget((int)get_unaligned_le32(data));
2155 if (IS_ERR(ffs->ffs_eventfd)) {
2156 ret = PTR_ERR(ffs->ffs_eventfd);
2157 ffs->ffs_eventfd = NULL;
2158 goto error;
2159 }
2160 data += 4;
2161 len -= 4;
2162 }
2163
2164 /* Read fs_count, hs_count and ss_count (if present) */
2165 for (i = 0; i < 3; ++i) {
2166 if (!(flags & (1 << i))) {
2167 counts[i] = 0;
2168 } else if (len < 4) {
2169 goto error;
2170 } else {
2171 counts[i] = get_unaligned_le32(data);
2172 data += 4;
2173 len -= 4;
2174 }
2175 }
2176	if (flags & (1 << i)) {
2177		if (len < 4)
			goto error;
2178		os_descs_count = get_unaligned_le32(data);
2179		data += 4;
2180		len -= 4;
	}
2181
2182 /* Read descriptors */
2183 raw_descs = data;
2184 helper.ffs = ffs;
2185 for (i = 0; i < 3; ++i) {
2186 if (!counts[i])
2187 continue;
2188 helper.interfaces_count = 0;
2189 helper.eps_count = 0;
2190 ret = ffs_do_descs(counts[i], data, len,
2191 __ffs_data_do_entity, &helper);
2192 if (ret < 0)
2193 goto error;
2194 if (!ffs->eps_count && !ffs->interfaces_count) {
2195 ffs->eps_count = helper.eps_count;
2196 ffs->interfaces_count = helper.interfaces_count;
2197 } else {
2198 if (ffs->eps_count != helper.eps_count) {
2199 ret = -EINVAL;
2200 goto error;
2201 }
2202 if (ffs->interfaces_count != helper.interfaces_count) {
2203 ret = -EINVAL;
2204 goto error;
2205 }
2206 }
2207 data += ret;
2208 len -= ret;
2209 }
2210 if (os_descs_count) {
2211 ret = ffs_do_os_descs(os_descs_count, data, len,
2212 __ffs_data_do_os_desc, ffs);
2213 if (ret < 0)
2214 goto error;
2215 data += ret;
2216 len -= ret;
2217 }
2218
2219 if (raw_descs == data || len) {
2220 ret = -EINVAL;
2221 goto error;
2222 }
2223
2224 ffs->raw_descs_data = _data;
2225 ffs->raw_descs = raw_descs;
2226 ffs->raw_descs_length = data - raw_descs;
2227 ffs->fs_descs_count = counts[0];
2228 ffs->hs_descs_count = counts[1];
2229 ffs->ss_descs_count = counts[2];
2230 ffs->ms_os_descs_count = os_descs_count;
2231
2232 return 0;
2233
2234 error:
2235 kfree(_data);
2236 return ret;
2237 }
2238
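/*
 * Layout of the strings blob written to ep0, as parsed below (all integer
 * fields little-endian):
 *
 *	le32 magic;		// FUNCTIONFS_STRINGS_MAGIC
 *	le32 length;		// length of the whole blob
 *	le32 str_count;		// number of strings per language
 *	le32 lang_count;	// number of languages
 *	followed, for each language, by a le16 language code and
 *	str_count NUL-terminated strings.
 */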
2239 static int __ffs_data_got_strings(struct ffs_data *ffs,
2240 char *const _data, size_t len)
2241 {
2242 u32 str_count, needed_count, lang_count;
2243 struct usb_gadget_strings **stringtabs, *t;
2244 struct usb_string *strings, *s;
2245 const char *data = _data;
2246
2247 ENTER();
2248
2249 if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
2250 get_unaligned_le32(data + 4) != len))
2251 goto error;
2252 str_count = get_unaligned_le32(data + 8);
2253 lang_count = get_unaligned_le32(data + 12);
2254
2255 /* if one is zero the other must be zero */
2256 if (unlikely(!str_count != !lang_count))
2257 goto error;
2258
2259 /* Do we have at least as many strings as descriptors need? */
2260 needed_count = ffs->strings_count;
2261 if (unlikely(str_count < needed_count))
2262 goto error;
2263
2264 /*
2265 * If we don't need any strings just return and free all
2266 * memory.
2267 */
2268 if (!needed_count) {
2269 kfree(_data);
2270 return 0;
2271 }
2272
2273 /* Allocate everything in one chunk so there's less maintenance. */
2274 {
2275 unsigned i = 0;
2276 vla_group(d);
2277 vla_item(d, struct usb_gadget_strings *, stringtabs,
2278 lang_count + 1);
2279 vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
2280 vla_item(d, struct usb_string, strings,
2281 lang_count*(needed_count+1));
2282
2283 char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
2284
2285 if (unlikely(!vlabuf)) {
2286 kfree(_data);
2287 return -ENOMEM;
2288 }
2289
2290 /* Initialize the VLA pointers */
2291 stringtabs = vla_ptr(vlabuf, d, stringtabs);
2292 t = vla_ptr(vlabuf, d, stringtab);
2293 i = lang_count;
2294 do {
2295 *stringtabs++ = t++;
2296 } while (--i);
2297 *stringtabs = NULL;
2298
2299 /* stringtabs = vlabuf = d_stringtabs for later kfree */
2300 stringtabs = vla_ptr(vlabuf, d, stringtabs);
2301 t = vla_ptr(vlabuf, d, stringtab);
2302 s = vla_ptr(vlabuf, d, strings);
2303 strings = s;
2304 }
2305
2306 /* For each language */
2307 data += 16;
2308 len -= 16;
2309
2310 do { /* lang_count > 0 so we can use do-while */
2311 unsigned needed = needed_count;
2312
2313 if (unlikely(len < 3))
2314 goto error_free;
2315 t->language = get_unaligned_le16(data);
2316 t->strings = s;
2317 ++t;
2318
2319 data += 2;
2320 len -= 2;
2321
2322 /* For each string */
2323 do { /* str_count > 0 so we can use do-while */
2324 size_t length = strnlen(data, len);
2325
2326 if (unlikely(length == len))
2327 goto error_free;
2328
2329 /*
2330 * The user may provide more strings than we need;
2331 * if that's the case we simply ignore the
2332 * rest.
2333 */
2334 if (likely(needed)) {
2335 /*
2336 * s->id will be set while adding
2337 * function to configuration so for
2338 * now just leave garbage here.
2339 */
2340 s->s = data;
2341 --needed;
2342 ++s;
2343 }
2344
2345 data += length + 1;
2346 len -= length + 1;
2347 } while (--str_count);
2348
2349 s->id = 0; /* terminator */
2350 s->s = NULL;
2351 ++s;
2352
2353 } while (--lang_count);
2354
2355 /* Some garbage left? */
2356 if (unlikely(len))
2357 goto error_free;
2358
2359 /* Done! */
2360 ffs->stringtabs = stringtabs;
2361 ffs->raw_strings = _data;
2362
2363 return 0;
2364
2365 error_free:
2366 kfree(stringtabs);
2367 error:
2368 kfree(_data);
2369 return -EINVAL;
2370 }
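/*
 * Illustrative sketch (not part of this driver): the strings blob that
 * __ffs_data_got_strings() above expects starts with a
 * usb_functionfs_strings_head and is followed, for each language, by a
 * 16-bit language code and str_count NUL-terminated UTF-8 strings.  The
 * language code and string below are assumptions chosen for the example.
 */
#if 0	/* example only, never compiled */
#include <endian.h>
#include <unistd.h>
#include <linux/usb/functionfs.h>

static const struct {
	struct usb_functionfs_strings_head header;
	struct {
		__le16 code;
		const char str1[sizeof("FunctionFS example")];
	} __attribute__((packed)) lang0;
} __attribute__((packed)) strings = {
	.header = {
		.magic      = htole32(FUNCTIONFS_STRINGS_MAGIC),
		.length     = htole32(sizeof(strings)),
		.str_count  = htole32(1),
		.lang_count = htole32(1),
	},
	.lang0 = {
		.code = htole16(0x0409),	/* en-US */
		.str1 = "FunctionFS example",
	},
};

/* write(ep0_fd, &strings, sizeof(strings)); */
#endif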
2371
2372
2373 /* Events handling and management *******************************************/
2374
2375 static void __ffs_event_add(struct ffs_data *ffs,
2376 enum usb_functionfs_event_type type)
2377 {
2378 enum usb_functionfs_event_type rem_type1, rem_type2 = type;
2379 int neg = 0;
2380
2381 /*
2382 * Abort any unhandled setup
2383 *
2384 * We do not need to worry about some cmpxchg() changing the value
2385 * of ffs->setup_state without holding the lock, because when the
2386 * state is FFS_SETUP_PENDING the cmpxchg() calls in several places
2387 * in the source do nothing.
2388 */
2389 if (ffs->setup_state == FFS_SETUP_PENDING)
2390 ffs->setup_state = FFS_SETUP_CANCELLED;
2391
2392 /*
2393 * The logic of this function guarantees that there are at most four pending
2394 * events on the ffs->ev.types queue. This is important because the queue
2395 * has space for four elements only and __ffs_ep0_read_events function
2396 * depends on that limit as well. If more event types are added, those
2397 * limits have to be revisited or guaranteed to still hold.
2398 */
2399 switch (type) {
2400 case FUNCTIONFS_RESUME:
2401 rem_type2 = FUNCTIONFS_SUSPEND;
2402 /* FALL THROUGH */
2403 case FUNCTIONFS_SUSPEND:
2404 case FUNCTIONFS_SETUP:
2405 rem_type1 = type;
2406 /* Discard all similar events */
2407 break;
2408
2409 case FUNCTIONFS_BIND:
2410 case FUNCTIONFS_UNBIND:
2411 case FUNCTIONFS_DISABLE:
2412 case FUNCTIONFS_ENABLE:
2413 /* Discard everything other than power management. */
2414 rem_type1 = FUNCTIONFS_SUSPEND;
2415 rem_type2 = FUNCTIONFS_RESUME;
2416 neg = 1;
2417 break;
2418
2419 default:
2420 WARN(1, "%d: unknown event, this should not happen\n", type);
2421 return;
2422 }
2423
2424 {
2425 u8 *ev = ffs->ev.types, *out = ev;
2426 unsigned n = ffs->ev.count;
2427 for (; n; --n, ++ev)
2428 if ((*ev == rem_type1 || *ev == rem_type2) == neg)
2429 *out++ = *ev;
2430 else
2431 pr_vdebug("purging event %d\n", *ev);
2432 ffs->ev.count = out - ffs->ev.types;
2433 }
2434
2435 pr_vdebug("adding event %d\n", type);
2436 ffs->ev.types[ffs->ev.count++] = type;
2437 wake_up_locked(&ffs->ev.waitq);
2438 if (ffs->ffs_eventfd)
2439 eventfd_signal(ffs->ffs_eventfd, 1);
2440 }
2441
2442 static void ffs_event_add(struct ffs_data *ffs,
2443 enum usb_functionfs_event_type type)
2444 {
2445 unsigned long flags;
2446 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2447 __ffs_event_add(ffs, type);
2448 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2449 }
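/*
 * Illustrative sketch (not part of this driver): events queued by
 * __ffs_event_add()/ffs_event_add() above are delivered to userspace as
 * struct usb_functionfs_event records returned by read() on ep0.  The
 * helper name and the handling below are assumptions for the example.
 */
#if 0	/* example only, never compiled */
#include <unistd.h>
#include <linux/usb/functionfs.h>

static void read_one_event(int ep0_fd)
{
	struct usb_functionfs_event event;

	/* Blocks until an event such as BIND/ENABLE/SETUP is queued. */
	if (read(ep0_fd, &event, sizeof(event)) != sizeof(event))
		return;

	switch (event.type) {
	case FUNCTIONFS_SETUP:
		/* event.u.setup holds the control request to answer on ep0 */
		break;
	case FUNCTIONFS_ENABLE:
	case FUNCTIONFS_DISABLE:
	default:
		break;
	}
}
#endif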
2450
2451 /* Bind/unbind USB function hooks *******************************************/
2452
2453 static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
2454 {
2455 int i;
2456
2457 for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
2458 if (ffs->eps_addrmap[i] == endpoint_address)
2459 return i;
2460 return -ENOENT;
2461 }
2462
2463 static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
2464 struct usb_descriptor_header *desc,
2465 void *priv)
2466 {
2467 struct usb_endpoint_descriptor *ds = (void *)desc;
2468 struct ffs_function *func = priv;
2469 struct ffs_ep *ffs_ep;
2470 unsigned ep_desc_id;
2471 int idx;
2472 static const char *speed_names[] = { "full", "high", "super" };
2473
2474 if (type != FFS_DESCRIPTOR)
2475 return 0;
2476
2477 /*
2478 * If ss_descriptors is not NULL, we are reading super speed
2479 * descriptors; if hs_descriptors is not NULL, we are reading high
2480 * speed descriptors; otherwise, we are reading full speed
2481 * descriptors.
2482 */
2483 if (func->function.ss_descriptors) {
2484 ep_desc_id = 2;
2485 func->function.ss_descriptors[(long)valuep] = desc;
2486 } else if (func->function.hs_descriptors) {
2487 ep_desc_id = 1;
2488 func->function.hs_descriptors[(long)valuep] = desc;
2489 } else {
2490 ep_desc_id = 0;
2491 func->function.fs_descriptors[(long)valuep] = desc;
2492 }
2493
2494 if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
2495 return 0;
2496
2497 idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
2498 if (idx < 0)
2499 return idx;
2500
2501 ffs_ep = func->eps + idx;
2502
2503 if (unlikely(ffs_ep->descs[ep_desc_id])) {
2504 pr_err("two %sspeed descriptors for EP %d\n",
2505 speed_names[ep_desc_id],
2506 ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2507 return -EINVAL;
2508 }
2509 ffs_ep->descs[ep_desc_id] = ds;
2510
2511 ffs_dump_mem(": Original ep desc", ds, ds->bLength);
2512 if (ffs_ep->ep) {
2513 ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
2514 if (!ds->wMaxPacketSize)
2515 ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
2516 } else {
2517 struct usb_request *req;
2518 struct usb_ep *ep;
2519 u8 bEndpointAddress;
2520
2521 /*
2522 * We back up bEndpointAddress because autoconfig overwrites
2523 * it with physical endpoint address.
2524 */
2525 bEndpointAddress = ds->bEndpointAddress;
2526 pr_vdebug("autoconfig\n");
2527 ep = usb_ep_autoconfig(func->gadget, ds);
2528 if (unlikely(!ep))
2529 return -ENOTSUPP;
2530 ep->driver_data = func->eps + idx;
2531
2532 req = usb_ep_alloc_request(ep, GFP_KERNEL);
2533 if (unlikely(!req))
2534 return -ENOMEM;
2535
2536 ffs_ep->ep = ep;
2537 ffs_ep->req = req;
2538 func->eps_revmap[ds->bEndpointAddress &
2539 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
2540 /*
2541 * If we use virtual address mapping, we restore
2542 * original bEndpointAddress value.
2543 */
2544 if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
2545 ds->bEndpointAddress = bEndpointAddress;
2546 }
2547 ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
2548
2549 return 0;
2550 }
2551
2552 static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
2553 struct usb_descriptor_header *desc,
2554 void *priv)
2555 {
2556 struct ffs_function *func = priv;
2557 unsigned idx;
2558 u8 newValue;
2559
2560 switch (type) {
2561 default:
2562 case FFS_DESCRIPTOR:
2563 /* Handled in previous pass by __ffs_func_bind_do_descs() */
2564 return 0;
2565
2566 case FFS_INTERFACE:
2567 idx = *valuep;
2568 if (func->interfaces_nums[idx] < 0) {
2569 int id = usb_interface_id(func->conf, &func->function);
2570 if (unlikely(id < 0))
2571 return id;
2572 func->interfaces_nums[idx] = id;
2573 }
2574 newValue = func->interfaces_nums[idx];
2575 break;
2576
2577 case FFS_STRING:
2578 /* String IDs are allocated when ffs_data is bound to cdev */
2579 newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
2580 break;
2581
2582 case FFS_ENDPOINT:
2583 /*
2584 * USB_DT_ENDPOINT are handled in
2585 * __ffs_func_bind_do_descs().
2586 */
2587 if (desc->bDescriptorType == USB_DT_ENDPOINT)
2588 return 0;
2589
2590 idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
2591 if (unlikely(!func->eps[idx].ep))
2592 return -EINVAL;
2593
2594 {
2595 struct usb_endpoint_descriptor **descs;
2596 descs = func->eps[idx].descs;
2597 newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
2598 }
2599 break;
2600 }
2601
2602 pr_vdebug("%02x -> %02x\n", *valuep, newValue);
2603 *valuep = newValue;
2604 return 0;
2605 }
2606
2607 static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
2608 struct usb_os_desc_header *h, void *data,
2609 unsigned len, void *priv)
2610 {
2611 struct ffs_function *func = priv;
2612 u8 length = 0;
2613
2614 switch (type) {
2615 case FFS_OS_DESC_EXT_COMPAT: {
2616 struct usb_ext_compat_desc *desc = data;
2617 struct usb_os_desc_table *t;
2618
2619 t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
2620 t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
2621 memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
2622 ARRAY_SIZE(desc->CompatibleID) +
2623 ARRAY_SIZE(desc->SubCompatibleID));
2624 length = sizeof(*desc);
2625 }
2626 break;
2627 case FFS_OS_DESC_EXT_PROP: {
2628 struct usb_ext_prop_desc *desc = data;
2629 struct usb_os_desc_table *t;
2630 struct usb_os_desc_ext_prop *ext_prop;
2631 char *ext_prop_name;
2632 char *ext_prop_data;
2633
2634 t = &func->function.os_desc_table[h->interface];
2635 t->if_id = func->interfaces_nums[h->interface];
2636
2637 ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
2638 func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);
2639
2640 ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
2641 ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
2642 ext_prop->data_len = le32_to_cpu(*(u32 *)
2643 usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
2644 length = ext_prop->name_len + ext_prop->data_len + 14;
2645
2646 ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
2647 func->ffs->ms_os_descs_ext_prop_name_avail +=
2648 ext_prop->name_len;
2649
2650 ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
2651 func->ffs->ms_os_descs_ext_prop_data_avail +=
2652 ext_prop->data_len;
2653 memcpy(ext_prop_data,
2654 usb_ext_prop_data_ptr(data, ext_prop->name_len),
2655 ext_prop->data_len);
2656 /* unicode data reported to the host as "WCHAR"s */
2657 switch (ext_prop->type) {
2658 case USB_EXT_PROP_UNICODE:
2659 case USB_EXT_PROP_UNICODE_ENV:
2660 case USB_EXT_PROP_UNICODE_LINK:
2661 case USB_EXT_PROP_UNICODE_MULTI:
2662 ext_prop->data_len *= 2;
2663 break;
2664 }
2665 ext_prop->data = ext_prop_data;
2666
2667 memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
2668 ext_prop->name_len);
2669 /* property name reported to the host as "WCHAR"s */
2670 ext_prop->name_len *= 2;
2671 ext_prop->name = ext_prop_name;
2672
2673 t->os_desc->ext_prop_len +=
2674 ext_prop->name_len + ext_prop->data_len + 14;
2675 ++t->os_desc->ext_prop_count;
2676 list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
2677 }
2678 break;
2679 default:
2680 pr_vdebug("unknown descriptor: %d\n", type);
2681 }
2682
2683 return length;
2684 }
2685
2686 static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
2687 struct usb_configuration *c)
2688 {
2689 struct ffs_function *func = ffs_func_from_usb(f);
2690 struct f_fs_opts *ffs_opts =
2691 container_of(f->fi, struct f_fs_opts, func_inst);
2692 int ret;
2693
2694 ENTER();
2695
2696 /*
2697 * Legacy gadget triggers binding in functionfs_ready_callback,
2698 * which already uses locking; taking the same lock here would
2699 * cause a deadlock.
2700 *
2701 * Configfs-enabled gadgets however do need ffs_dev_lock.
2702 */
2703 if (!ffs_opts->no_configfs)
2704 ffs_dev_lock();
2705 ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
2706 func->ffs = ffs_opts->dev->ffs_data;
2707 if (!ffs_opts->no_configfs)
2708 ffs_dev_unlock();
2709 if (ret)
2710 return ERR_PTR(ret);
2711
2712 func->conf = c;
2713 func->gadget = c->cdev->gadget;
2714
2715 /*
2716 * In drivers/usb/gadget/configfs.c:configfs_composite_bind()
2717 * configurations are bound in sequence with list_for_each_entry,
2718 * and within each configuration its functions are bound in
2719 * sequence with list_for_each_entry, so we assume there is no
2720 * race condition with regard to ffs_opts->bound access.
2721 */
2722 if (!ffs_opts->refcnt) {
2723 ret = functionfs_bind(func->ffs, c->cdev);
2724 if (ret)
2725 return ERR_PTR(ret);
2726 }
2727 ffs_opts->refcnt++;
2728 func->function.strings = func->ffs->stringtabs;
2729
2730 return ffs_opts;
2731 }
2732
2733 static int _ffs_func_bind(struct usb_configuration *c,
2734 struct usb_function *f)
2735 {
2736 struct ffs_function *func = ffs_func_from_usb(f);
2737 struct ffs_data *ffs = func->ffs;
2738
2739 const int full = !!func->ffs->fs_descs_count;
2740 const int high = gadget_is_dualspeed(func->gadget) &&
2741 func->ffs->hs_descs_count;
2742 const int super = gadget_is_superspeed(func->gadget) &&
2743 func->ffs->ss_descs_count;
2744
2745 int fs_len, hs_len, ss_len, ret, i;
2746
2747 /* Make it a single chunk, less management later on */
2748 vla_group(d);
2749 vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
2750 vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
2751 full ? ffs->fs_descs_count + 1 : 0);
2752 vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
2753 high ? ffs->hs_descs_count + 1 : 0);
2754 vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
2755 super ? ffs->ss_descs_count + 1 : 0);
2756 vla_item_with_sz(d, short, inums, ffs->interfaces_count);
2757 vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
2758 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2759 vla_item_with_sz(d, char[16], ext_compat,
2760 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2761 vla_item_with_sz(d, struct usb_os_desc, os_desc,
2762 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2763 vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
2764 ffs->ms_os_descs_ext_prop_count);
2765 vla_item_with_sz(d, char, ext_prop_name,
2766 ffs->ms_os_descs_ext_prop_name_len);
2767 vla_item_with_sz(d, char, ext_prop_data,
2768 ffs->ms_os_descs_ext_prop_data_len);
2769 vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
2770 char *vlabuf;
2771
2772 ENTER();
2773
2774 /* Has descriptors only for speeds the gadget does not support */
2775 if (unlikely(!(full | high | super)))
2776 return -ENOTSUPP;
2777
2778 /* Allocate a single chunk, less management later on */
2779 vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
2780 if (unlikely(!vlabuf))
2781 return -ENOMEM;
2782
2783 ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
2784 ffs->ms_os_descs_ext_prop_name_avail =
2785 vla_ptr(vlabuf, d, ext_prop_name);
2786 ffs->ms_os_descs_ext_prop_data_avail =
2787 vla_ptr(vlabuf, d, ext_prop_data);
2788
2789 /* Copy descriptors */
2790 memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
2791 ffs->raw_descs_length);
2792
2793 memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
2794 for (ret = ffs->eps_count; ret; --ret) {
2795 struct ffs_ep *ptr;
2796
2797 ptr = vla_ptr(vlabuf, d, eps);
2798 ptr[ret - 1].num = -1;
2799 }
2800
2801 /* Save pointers
2802 * d_eps == vlabuf, func->eps used to kfree vlabuf later
2803 */
2804 func->eps = vla_ptr(vlabuf, d, eps);
2805 func->interfaces_nums = vla_ptr(vlabuf, d, inums);
2806
2807 /*
2808 * Go through all the endpoint descriptors and allocate
2809 * endpoints first, so that later we can rewrite the endpoint
2810 * numbers without worrying that an endpoint may be described again later.
2811 */
2812 if (likely(full)) {
2813 func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
2814 fs_len = ffs_do_descs(ffs->fs_descs_count,
2815 vla_ptr(vlabuf, d, raw_descs),
2816 d_raw_descs__sz,
2817 __ffs_func_bind_do_descs, func);
2818 if (unlikely(fs_len < 0)) {
2819 ret = fs_len;
2820 goto error;
2821 }
2822 } else {
2823 fs_len = 0;
2824 }
2825
2826 if (likely(high)) {
2827 func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
2828 hs_len = ffs_do_descs(ffs->hs_descs_count,
2829 vla_ptr(vlabuf, d, raw_descs) + fs_len,
2830 d_raw_descs__sz - fs_len,
2831 __ffs_func_bind_do_descs, func);
2832 if (unlikely(hs_len < 0)) {
2833 ret = hs_len;
2834 goto error;
2835 }
2836 } else {
2837 hs_len = 0;
2838 }
2839
2840 if (likely(super)) {
2841 func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
2842 ss_len = ffs_do_descs(ffs->ss_descs_count,
2843 vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
2844 d_raw_descs__sz - fs_len - hs_len,
2845 __ffs_func_bind_do_descs, func);
2846 if (unlikely(ss_len < 0)) {
2847 ret = ss_len;
2848 goto error;
2849 }
2850 } else {
2851 ss_len = 0;
2852 }
2853
2854 /*
2855 * Now handle interface number allocation and interface and
2856 * endpoint number rewriting. We can do all of that in a
2857 * single pass.
2858 */
2859 ret = ffs_do_descs(ffs->fs_descs_count +
2860 (high ? ffs->hs_descs_count : 0) +
2861 (super ? ffs->ss_descs_count : 0),
2862 vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
2863 __ffs_func_bind_do_nums, func);
2864 if (unlikely(ret < 0))
2865 goto error;
2866
2867 func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
2868 if (c->cdev->use_os_string)
2869 for (i = 0; i < ffs->interfaces_count; ++i) {
2870 struct usb_os_desc *desc;
2871
2872 desc = func->function.os_desc_table[i].os_desc =
2873 vla_ptr(vlabuf, d, os_desc) +
2874 i * sizeof(struct usb_os_desc);
2875 desc->ext_compat_id =
2876 vla_ptr(vlabuf, d, ext_compat) + i * 16;
2877 INIT_LIST_HEAD(&desc->ext_prop);
2878 }
2879 ret = ffs_do_os_descs(ffs->ms_os_descs_count,
2880 vla_ptr(vlabuf, d, raw_descs) +
2881 fs_len + hs_len + ss_len,
2882 d_raw_descs__sz - fs_len - hs_len - ss_len,
2883 __ffs_func_bind_do_os_desc, func);
2884 if (unlikely(ret < 0))
2885 goto error;
2886 func->function.os_desc_n =
2887 c->cdev->use_os_string ? ffs->interfaces_count : 0;
2888
2889 /* And we're done */
2890 ffs_event_add(ffs, FUNCTIONFS_BIND);
2891 return 0;
2892
2893 error:
2894 /* XXX Do we need to release all claimed endpoints here? */
2895 return ret;
2896 }
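/*
 * Illustrative sketch (not part of this driver): the vla_*() helpers used
 * in _ffs_func_bind() above (and in __ffs_data_got_strings()) come from
 * u_f.h and lay several variable-length arrays out in a single
 * allocation, so one kfree() releases everything.  A hand-rolled
 * equivalent of the idea (names below are made up for the example) could
 * look like this:
 */
#if 0	/* example only, never compiled */
#include <linux/kernel.h>
#include <linux/slab.h>

struct one_chunk_layout {
	size_t eps_off;
	size_t inums_off;
	size_t total;
};

static void *alloc_one_chunk(struct one_chunk_layout *l,
			     size_t eps_count, size_t intfs_count)
{
	l->eps_off   = 0;
	l->inums_off = ALIGN(l->eps_off + eps_count * sizeof(struct ffs_ep),
			     __alignof__(short));
	l->total     = l->inums_off + intfs_count * sizeof(short);

	/* func->eps and func->interfaces_nums would then point into this. */
	return kzalloc(l->total, GFP_KERNEL);
}
#endif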
2897
2898 static int ffs_func_bind(struct usb_configuration *c,
2899 struct usb_function *f)
2900 {
2901 struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
2902
2903 if (IS_ERR(ffs_opts))
2904 return PTR_ERR(ffs_opts);
2905
2906 return _ffs_func_bind(c, f);
2907 }
2908
2909
2910 /* Other USB function hooks *************************************************/
2911
2912 static void ffs_reset_work(struct work_struct *work)
2913 {
2914 struct ffs_data *ffs = container_of(work,
2915 struct ffs_data, reset_work);
2916 ffs_data_reset(ffs);
2917 }
2918
2919 static int ffs_func_set_alt(struct usb_function *f,
2920 unsigned interface, unsigned alt)
2921 {
2922 struct ffs_function *func = ffs_func_from_usb(f);
2923 struct ffs_data *ffs = func->ffs;
2924 int ret = 0, intf;
2925
2926 if (alt != (unsigned)-1) {
2927 intf = ffs_func_revmap_intf(func, interface);
2928 if (unlikely(intf < 0))
2929 return intf;
2930 }
2931
2932 if (ffs->func)
2933 ffs_func_eps_disable(ffs->func);
2934
2935 if (ffs->state == FFS_DEACTIVATED) {
2936 ffs->state = FFS_CLOSING;
2937 INIT_WORK(&ffs->reset_work, ffs_reset_work);
2938 schedule_work(&ffs->reset_work);
2939 return -ENODEV;
2940 }
2941
2942 if (ffs->state != FFS_ACTIVE)
2943 return -ENODEV;
2944
2945 if (alt == (unsigned)-1) {
2946 ffs->func = NULL;
2947 ffs_event_add(ffs, FUNCTIONFS_DISABLE);
2948 return 0;
2949 }
2950
2951 ffs->func = func;
2952 ret = ffs_func_eps_enable(func);
2953 if (likely(ret >= 0))
2954 ffs_event_add(ffs, FUNCTIONFS_ENABLE);
2955 return ret;
2956 }
2957
2958 static void ffs_func_disable(struct usb_function *f)
2959 {
2960 ffs_func_set_alt(f, 0, (unsigned)-1);
2961 }
2962
2963 static int ffs_func_setup(struct usb_function *f,
2964 const struct usb_ctrlrequest *creq)
2965 {
2966 struct ffs_function *func = ffs_func_from_usb(f);
2967 struct ffs_data *ffs = func->ffs;
2968 unsigned long flags;
2969 int ret;
2970
2971 ENTER();
2972
2973 pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
2974 pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
2975 pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
2976 pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
2977 pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));
2978
2979 /*
2980 * Most requests directed to an interface go through here
2981 * (notable exceptions are set/get interface), so we need to
2982 * handle them. All others are either handled by composite or
2983 * passed to usb_configuration->setup() (if one is set). In any
2984 * case, we handle requests directed to an endpoint here as
2985 * well (as it's straightforward), but what should be done with
2986 * any other request?
2987 */
2988 if (ffs->state != FFS_ACTIVE)
2989 return -ENODEV;
2990
2991 switch (creq->bRequestType & USB_RECIP_MASK) {
2992 case USB_RECIP_INTERFACE:
2993 ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
2994 if (unlikely(ret < 0))
2995 return ret;
2996 break;
2997
2998 case USB_RECIP_ENDPOINT:
2999 ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
3000 if (unlikely(ret < 0))
3001 return ret;
3002 if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
3003 ret = func->ffs->eps_addrmap[ret];
3004 break;
3005
3006 default:
3007 return -EOPNOTSUPP;
3008 }
3009
3010 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
3011 ffs->ev.setup = *creq;
3012 ffs->ev.setup.wIndex = cpu_to_le16(ret);
3013 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
3014 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
3015
3016 return 0;
3017 }
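/*
 * Illustrative sketch (not part of this driver): once ffs_func_setup()
 * above has queued a FUNCTIONFS_SETUP event, userspace answers the
 * control request on ep0: write() supplies the IN data stage, read()
 * absorbs the OUT data stage (a zero-length transfer acknowledges a
 * status-only request).  The helper name and buffer size are assumptions
 * for the example.
 */
#if 0	/* example only, never compiled */
#include <endian.h>
#include <unistd.h>
#include <linux/usb/ch9.h>

static void answer_setup(int ep0_fd, const struct usb_ctrlrequest *setup)
{
	char buf[256] = { 0 };
	size_t len = le16toh(setup->wLength);

	if (len > sizeof(buf))
		len = sizeof(buf);

	if (setup->bRequestType & USB_DIR_IN)
		write(ep0_fd, buf, len);
	else
		read(ep0_fd, buf, len);
}
#endif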
3018
3019 static void ffs_func_suspend(struct usb_function *f)
3020 {
3021 ENTER();
3022 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
3023 }
3024
3025 static void ffs_func_resume(struct usb_function *f)
3026 {
3027 ENTER();
3028 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
3029 }
3030
3031
3032 /* Endpoint and interface numbers reverse mapping ***************************/
3033
3034 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
3035 {
3036 num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
3037 return num ? num : -EDOM;
3038 }
3039
3040 static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
3041 {
3042 short *nums = func->interfaces_nums;
3043 unsigned count = func->ffs->interfaces_count;
3044
3045 for (; count; --count, ++nums) {
3046 if (*nums >= 0 && *nums == intf)
3047 return nums - func->interfaces_nums;
3048 }
3049
3050 return -EDOM;
3051 }
3052
3053
3054 /* Devices management *******************************************************/
3055
3056 static LIST_HEAD(ffs_devices);
3057
3058 static struct ffs_dev *_ffs_do_find_dev(const char *name)
3059 {
3060 struct ffs_dev *dev;
3061
3062 list_for_each_entry(dev, &ffs_devices, entry) {
3063 if (!dev->name || !name)
3064 continue;
3065 if (strcmp(dev->name, name) == 0)
3066 return dev;
3067 }
3068
3069 return NULL;
3070 }
3071
3072 /*
3073 * ffs_lock must be taken by the caller of this function
3074 */
3075 static struct ffs_dev *_ffs_get_single_dev(void)
3076 {
3077 struct ffs_dev *dev;
3078
3079 if (list_is_singular(&ffs_devices)) {
3080 dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
3081 if (dev->single)
3082 return dev;
3083 }
3084
3085 return NULL;
3086 }
3087
3088 /*
3089 * ffs_lock must be taken by the caller of this function
3090 */
3091 static struct ffs_dev *_ffs_find_dev(const char *name)
3092 {
3093 struct ffs_dev *dev;
3094
3095 dev = _ffs_get_single_dev();
3096 if (dev)
3097 return dev;
3098
3099 return _ffs_do_find_dev(name);
3100 }
3101
3102 /* Configfs support *********************************************************/
3103
3104 static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
3105 {
3106 return container_of(to_config_group(item), struct f_fs_opts,
3107 func_inst.group);
3108 }
3109
3110 static void ffs_attr_release(struct config_item *item)
3111 {
3112 struct f_fs_opts *opts = to_ffs_opts(item);
3113
3114 usb_put_function_instance(&opts->func_inst);
3115 }
3116
3117 static struct configfs_item_operations ffs_item_ops = {
3118 .release = ffs_attr_release,
3119 };
3120
3121 static struct config_item_type ffs_func_type = {
3122 .ct_item_ops = &ffs_item_ops,
3123 .ct_owner = THIS_MODULE,
3124 };
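/*
 * Usage note (not part of this driver): with configfs, an instance of
 * this function is typically created by making a functions/ffs.<name>
 * directory under the gadget's configfs tree and then mounting the
 * functionfs filesystem, e.g. "mount -t functionfs <name> <mountpoint>",
 * after which userspace writes the descriptor and string blobs to the
 * ep0 file found there.  The exact paths are an assumption; they depend
 * on where configfs and functionfs are mounted on the system.
 */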
3125
3126
3127 /* Function registration interface ******************************************/
3128
3129 static void ffs_free_inst(struct usb_function_instance *f)
3130 {
3131 struct f_fs_opts *opts;
3132
3133 opts = to_f_fs_opts(f);
3134 ffs_dev_lock();
3135 _ffs_free_dev(opts->dev);
3136 ffs_dev_unlock();
3137 kfree(opts);
3138 }
3139
3140 #define MAX_INST_NAME_LEN 40
3141
3142 static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
3143 {
3144 struct f_fs_opts *opts;
3145 char *ptr;
3146 const char *tmp;
3147 int name_len, ret;
3148
3149 name_len = strlen(name) + 1;
3150 if (name_len > MAX_INST_NAME_LEN)
3151 return -ENAMETOOLONG;
3152
3153 ptr = kstrndup(name, name_len, GFP_KERNEL);
3154 if (!ptr)
3155 return -ENOMEM;
3156
3157 opts = to_f_fs_opts(fi);
3158 tmp = NULL;
3159
3160 ffs_dev_lock();
3161
3162 tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
3163 ret = _ffs_name_dev(opts->dev, ptr);
3164 if (ret) {
3165 kfree(ptr);
3166 ffs_dev_unlock();
3167 return ret;
3168 }
3169 opts->dev->name_allocated = true;
3170
3171 ffs_dev_unlock();
3172
3173 kfree(tmp);
3174
3175 return 0;
3176 }
3177
3178 static struct usb_function_instance *ffs_alloc_inst(void)
3179 {
3180 struct f_fs_opts *opts;
3181 struct ffs_dev *dev;
3182
3183 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
3184 if (!opts)
3185 return ERR_PTR(-ENOMEM);
3186
3187 opts->func_inst.set_inst_name = ffs_set_inst_name;
3188 opts->func_inst.free_func_inst = ffs_free_inst;
3189 ffs_dev_lock();
3190 dev = _ffs_alloc_dev();
3191 ffs_dev_unlock();
3192 if (IS_ERR(dev)) {
3193 kfree(opts);
3194 return ERR_CAST(dev);
3195 }
3196 opts->dev = dev;
3197 dev->opts = opts;
3198
3199 config_group_init_type_name(&opts->func_inst.group, "",
3200 &ffs_func_type);
3201 return &opts->func_inst;
3202 }
3203
3204 static void ffs_free(struct usb_function *f)
3205 {
3206 kfree(ffs_func_from_usb(f));
3207 }
3208
3209 static void ffs_func_unbind(struct usb_configuration *c,
3210 struct usb_function *f)
3211 {
3212 struct ffs_function *func = ffs_func_from_usb(f);
3213 struct ffs_data *ffs = func->ffs;
3214 struct f_fs_opts *opts =
3215 container_of(f->fi, struct f_fs_opts, func_inst);
3216 struct ffs_ep *ep = func->eps;
3217 unsigned count = ffs->eps_count;
3218 unsigned long flags;
3219
3220 ENTER();
3221 if (ffs->func == func) {
3222 ffs_func_eps_disable(func);
3223 ffs->func = NULL;
3224 }
3225
3226 if (!--opts->refcnt)
3227 functionfs_unbind(ffs);
3228
3229 /* cleanup after autoconfig */
3230 spin_lock_irqsave(&func->ffs->eps_lock, flags);
3231 do {
3232 if (ep->ep && ep->req)
3233 usb_ep_free_request(ep->ep, ep->req);
3234 ep->req = NULL;
3235 ++ep;
3236 } while (--count);
3237 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
3238 kfree(func->eps);
3239 func->eps = NULL;
3240 /*
3241 * eps, descriptors and interfaces_nums are allocated in the
3242 * same chunk so only one free is required.
3243 */
3244 func->function.fs_descriptors = NULL;
3245 func->function.hs_descriptors = NULL;
3246 func->function.ss_descriptors = NULL;
3247 func->interfaces_nums = NULL;
3248
3249 ffs_event_add(ffs, FUNCTIONFS_UNBIND);
3250 }
3251
3252 static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
3253 {
3254 struct ffs_function *func;
3255
3256 ENTER();
3257
3258 func = kzalloc(sizeof(*func), GFP_KERNEL);
3259 if (unlikely(!func))
3260 return ERR_PTR(-ENOMEM);
3261
3262 func->function.name = "Function FS Gadget";
3263
3264 func->function.bind = ffs_func_bind;
3265 func->function.unbind = ffs_func_unbind;
3266 func->function.set_alt = ffs_func_set_alt;
3267 func->function.disable = ffs_func_disable;
3268 func->function.setup = ffs_func_setup;
3269 func->function.suspend = ffs_func_suspend;
3270 func->function.resume = ffs_func_resume;
3271 func->function.free_func = ffs_free;
3272
3273 return &func->function;
3274 }
3275
3276 /*
3277 * ffs_lock must be taken by the caller of this function
3278 */
3279 static struct ffs_dev *_ffs_alloc_dev(void)
3280 {
3281 struct ffs_dev *dev;
3282 int ret;
3283
3284 if (_ffs_get_single_dev())
3285 return ERR_PTR(-EBUSY);
3286
3287 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3288 if (!dev)
3289 return ERR_PTR(-ENOMEM);
3290
3291 if (list_empty(&ffs_devices)) {
3292 ret = functionfs_init();
3293 if (ret) {
3294 kfree(dev);
3295 return ERR_PTR(ret);
3296 }
3297 }
3298
3299 list_add(&dev->entry, &ffs_devices);
3300
3301 return dev;
3302 }
3303
3304 /*
3305 * ffs_lock must be taken by the caller of this function
3306 * The caller is responsible for "name" being available whenever f_fs needs it
3307 */
3308 static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
3309 {
3310 struct ffs_dev *existing;
3311
3312 existing = _ffs_do_find_dev(name);
3313 if (existing)
3314 return -EBUSY;
3315
3316 dev->name = name;
3317
3318 return 0;
3319 }
3320
3321 /*
3322 * The caller is responsible for "name" being available whenever f_fs needs it
3323 */
3324 int ffs_name_dev(struct ffs_dev *dev, const char *name)
3325 {
3326 int ret;
3327
3328 ffs_dev_lock();
3329 ret = _ffs_name_dev(dev, name);
3330 ffs_dev_unlock();
3331
3332 return ret;
3333 }
3334 EXPORT_SYMBOL_GPL(ffs_name_dev);
3335
3336 int ffs_single_dev(struct ffs_dev *dev)
3337 {
3338 int ret;
3339
3340 ret = 0;
3341 ffs_dev_lock();
3342
3343 if (!list_is_singular(&ffs_devices))
3344 ret = -EBUSY;
3345 else
3346 dev->single = true;
3347
3348 ffs_dev_unlock();
3349 return ret;
3350 }
3351 EXPORT_SYMBOL_GPL(ffs_single_dev);
3352
3353 /*
3354 * ffs_lock must be taken by the caller of this function
3355 */
3356 static void _ffs_free_dev(struct ffs_dev *dev)
3357 {
3358 list_del(&dev->entry);
3359 if (dev->name_allocated)
3360 kfree(dev->name);
3361 kfree(dev);
3362 if (list_empty(&ffs_devices))
3363 functionfs_cleanup();
3364 }
3365
3366 static void *ffs_acquire_dev(const char *dev_name)
3367 {
3368 struct ffs_dev *ffs_dev;
3369
3370 ENTER();
3371 ffs_dev_lock();
3372
3373 ffs_dev = _ffs_find_dev(dev_name);
3374 if (!ffs_dev)
3375 ffs_dev = ERR_PTR(-ENOENT);
3376 else if (ffs_dev->mounted)
3377 ffs_dev = ERR_PTR(-EBUSY);
3378 else if (ffs_dev->ffs_acquire_dev_callback &&
3379 ffs_dev->ffs_acquire_dev_callback(ffs_dev))
3380 ffs_dev = ERR_PTR(-ENOENT);
3381 else
3382 ffs_dev->mounted = true;
3383
3384 ffs_dev_unlock();
3385 return ffs_dev;
3386 }
3387
3388 static void ffs_release_dev(struct ffs_data *ffs_data)
3389 {
3390 struct ffs_dev *ffs_dev;
3391
3392 ENTER();
3393 ffs_dev_lock();
3394
3395 ffs_dev = ffs_data->private_data;
3396 if (ffs_dev) {
3397 ffs_dev->mounted = false;
3398
3399 if (ffs_dev->ffs_release_dev_callback)
3400 ffs_dev->ffs_release_dev_callback(ffs_dev);
3401 }
3402
3403 ffs_dev_unlock();
3404 }
3405
3406 static int ffs_ready(struct ffs_data *ffs)
3407 {
3408 struct ffs_dev *ffs_obj;
3409 int ret = 0;
3410
3411 ENTER();
3412 ffs_dev_lock();
3413
3414 ffs_obj = ffs->private_data;
3415 if (!ffs_obj) {
3416 ret = -EINVAL;
3417 goto done;
3418 }
3419 if (WARN_ON(ffs_obj->desc_ready)) {
3420 ret = -EBUSY;
3421 goto done;
3422 }
3423
3424 ffs_obj->desc_ready = true;
3425 ffs_obj->ffs_data = ffs;
3426
3427 if (ffs_obj->ffs_ready_callback)
3428 ret = ffs_obj->ffs_ready_callback(ffs);
3429
3430 done:
3431 ffs_dev_unlock();
3432 return ret;
3433 }
3434
3435 static void ffs_closed(struct ffs_data *ffs)
3436 {
3437 struct ffs_dev *ffs_obj;
3438
3439 ENTER();
3440 ffs_dev_lock();
3441
3442 ffs_obj = ffs->private_data;
3443 if (!ffs_obj)
3444 goto done;
3445
3446 ffs_obj->desc_ready = false;
3447
3448 if (ffs_obj->ffs_closed_callback)
3449 ffs_obj->ffs_closed_callback(ffs);
3450
3451 if (!ffs_obj->opts || ffs_obj->opts->no_configfs
3452 || !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
3453 goto done;
3454
3455 unregister_gadget_item(ffs_obj->opts->
3456 func_inst.group.cg_item.ci_parent->ci_parent);
3457 done:
3458 ffs_dev_unlock();
3459 }
3460
3461 /* Misc helper functions ****************************************************/
3462
3463 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
3464 {
3465 return nonblock
3466 ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
3467 : mutex_lock_interruptible(mutex);
3468 }
3469
3470 static char *ffs_prepare_buffer(const char __user *buf, size_t len)
3471 {
3472 char *data;
3473
3474 if (unlikely(!len))
3475 return NULL;
3476
3477 data = kmalloc(len, GFP_KERNEL);
3478 if (unlikely(!data))
3479 return ERR_PTR(-ENOMEM);
3480
3481 if (unlikely(__copy_from_user(data, buf, len))) {
3482 kfree(data);
3483 return ERR_PTR(-EFAULT);
3484 }
3485
3486 pr_vdebug("Buffer from user space:\n");
3487 ffs_dump_mem("", data, len);
3488
3489 return data;
3490 }
3491
3492 DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
3493 MODULE_LICENSE("GPL");
3494 MODULE_AUTHOR("Michal Nazarewicz");