2 * inode.c -- user mode filesystem api for usb gadget controllers
4 * Copyright (C) 2003-2004 David Brownell
5 * Copyright (C) 2003 Agilent Technologies
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
14 /* #define VERBOSE_DEBUG */
16 #include <linux/init.h>
17 #include <linux/module.h>
19 #include <linux/pagemap.h>
20 #include <linux/uts.h>
21 #include <linux/wait.h>
22 #include <linux/compiler.h>
23 #include <linux/uaccess.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/poll.h>
27 #include <linux/mmu_context.h>
28 #include <linux/aio.h>
29 #include <linux/uio.h>
31 #include <linux/device.h>
32 #include <linux/moduleparam.h>
34 #include <linux/usb/gadgetfs.h>
35 #include <linux/usb/gadget.h>
39 * The gadgetfs API maps each endpoint to a file descriptor so that you
40 * can use standard synchronous read/write calls for I/O. There's some
41 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support. Example usermode
42 * drivers show how this works in practice. You can also use AIO to
43 * eliminate I/O gaps between requests, to help when streaming data.
45 * Key parts that must be USB-specific are protocols defining how the
46 * read/write operations relate to the hardware state machines. There
47 * are two types of files. One type is for the device, implementing ep0.
48 * The other type is for each IN or OUT endpoint. In both cases, the
49 * user mode driver must configure the hardware before using it.
51 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
52 * (by writing configuration and device descriptors). Afterwards it
53 * may serve as a source of device events, used to handle all control
54 * requests other than basic enumeration.
56 * - Then, after a SET_CONFIGURATION control request, ep_config() is
57 * called when each /dev/gadget/ep* file is configured (by writing
58 * endpoint descriptors). Afterwards these files are used to write()
59 * IN data or to read() OUT data. To halt the endpoint, a "wrong
60 * direction" request is issued (like reading an IN endpoint).
62 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
63 * not possible on all hardware. For example, precise fault handling with
64 * respect to data left in endpoint fifos after aborted operations; or
65 * selective clearing of endpoint halts, to implement SET_INTERFACE.
68 #define DRIVER_DESC "USB Gadget filesystem"
69 #define DRIVER_VERSION "24 Aug 2004"
71 static const char driver_desc
[] = DRIVER_DESC
;
72 static const char shortname
[] = "gadgetfs";
74 MODULE_DESCRIPTION (DRIVER_DESC
);
75 MODULE_AUTHOR ("David Brownell");
76 MODULE_LICENSE ("GPL");
78 static int ep_open(struct inode
*, struct file
*);
81 /*----------------------------------------------------------------------*/
83 #define GADGETFS_MAGIC 0xaee71ee7
85 /* /dev/gadget/$CHIP represents ep0 and the whole device */
87 /* DISABLED is the initial state. */
88 STATE_DEV_DISABLED
= 0,
90 /* Only one open() of /dev/gadget/$CHIP; only one file tracks
91 * ep0/device i/o modes and binding to the controller. Driver
92 * must always write descriptors to initialize the device, then
93 * the device becomes UNCONNECTED until enumeration.
97 /* From then on, ep0 fd is in either of two basic modes:
98 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
99 * - SETUP: read/write will transfer control data and succeed;
100 * or if "wrong direction", performs protocol stall
102 STATE_DEV_UNCONNECTED
,
106 /* UNBOUND means the driver closed ep0, so the device won't be
107 * accessible again (DEV_DISABLED) until all fds are closed.
112 /* enough for the whole queue: most events invalidate others */
118 enum ep0_state state
; /* P: lock */
119 struct usb_gadgetfs_event event
[N_EVENT
];
121 struct fasync_struct
*fasync
;
124 /* drivers reading ep0 MUST handle control requests (SETUP)
125 * reported that way; else the host will time out.
127 unsigned usermode_setup
: 1,
133 gadget_registered
: 1;
134 unsigned setup_wLength
;
136 /* the rest is basically write-once */
137 struct usb_config_descriptor
*config
, *hs_config
;
138 struct usb_device_descriptor
*dev
;
139 struct usb_request
*req
;
140 struct usb_gadget
*gadget
;
141 struct list_head epfiles
;
143 wait_queue_head_t wait
;
144 struct super_block
*sb
;
145 struct dentry
*dentry
;
147 /* except this scratch i/o buffer for ep0 */
151 static inline void get_dev (struct dev_data
*data
)
153 atomic_inc (&data
->count
);
/* put_dev -- drop one reference on the device state.  On the final put it
 * sanity-checks that nothing is still sleeping on dev->wait before freeing.
 * NOTE(review): the early-return and kfree lines of this function are
 * missing from this extraction fragment -- recover them from the original.
 */
156 static void put_dev (struct dev_data
*data
)
158 if (likely (!atomic_dec_and_test (&data
->count
)))
160 /* needs no more cleanup */
161 BUG_ON (waitqueue_active (&data
->wait
));
/* dev_new -- allocate and initialize the per-device state:
 * zeroed via kzalloc, refcount 1, state STATE_DEV_DISABLED, with the
 * lock, epfiles list, and wait queue initialized.
 * NOTE(review): the kzalloc NULL-check and the "return dev;" line are
 * missing from this extraction fragment.
 */
165 static struct dev_data
*dev_new (void)
167 struct dev_data
*dev
;
169 dev
= kzalloc(sizeof(*dev
), GFP_KERNEL
);
172 dev
->state
= STATE_DEV_DISABLED
;
173 atomic_set (&dev
->count
, 1);
174 spin_lock_init (&dev
->lock
);
175 INIT_LIST_HEAD (&dev
->epfiles
);
176 init_waitqueue_head (&dev
->wait
);
180 /*----------------------------------------------------------------------*/
182 /* other /dev/gadget/$ENDPOINT files represent endpoints */
184 STATE_EP_DISABLED
= 0,
194 struct dev_data
*dev
;
195 /* must hold dev->lock before accessing ep or req */
197 struct usb_request
*req
;
200 struct usb_endpoint_descriptor desc
, hs_desc
;
201 struct list_head epfiles
;
202 wait_queue_head_t wait
;
203 struct dentry
*dentry
;
206 static inline void get_ep (struct ep_data
*data
)
208 atomic_inc (&data
->count
);
/* put_ep -- drop one reference on the endpoint state.  On the final put it
 * asserts the epfiles link is unhooked and nothing still sleeps on wait.
 * NOTE(review): the early-return and kfree lines are missing from this
 * extraction fragment.
 */
211 static void put_ep (struct ep_data
*data
)
213 if (likely (!atomic_dec_and_test (&data
->count
)))
216 /* needs no more cleanup */
217 BUG_ON (!list_empty (&data
->epfiles
));
218 BUG_ON (waitqueue_active (&data
->wait
));
222 /*----------------------------------------------------------------------*/
224 /* most "how to use the hardware" policy choices are in userspace:
225 * mapping endpoint roles (which the driver needs) to the capabilities
226 * which the usb controller has. most of those capabilities are exposed
227 * implicitly, starting with the driver name and then endpoint names.
230 static const char *CHIP
;
232 /*----------------------------------------------------------------------*/
234 /* NOTE: don't use dev_printk calls before binding to the gadget
235 * at the end of ep0 configuration, or after unbind.
238 /* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
239 #define xprintk(d,level,fmt,args...) \
240 printk(level "%s: " fmt , shortname , ## args)
243 #define DBG(dev,fmt,args...) \
244 xprintk(dev , KERN_DEBUG , fmt , ## args)
246 #define DBG(dev,fmt,args...) \
253 #define VDEBUG(dev,fmt,args...) \
257 #define ERROR(dev,fmt,args...) \
258 xprintk(dev , KERN_ERR , fmt , ## args)
259 #define INFO(dev,fmt,args...) \
260 xprintk(dev , KERN_INFO , fmt , ## args)
263 /*----------------------------------------------------------------------*/
265 /* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
267 * After opening, configure non-control endpoints. Then use normal
268 * stream read() and write() requests; and maybe ioctl() to get more
269 * precise FIFO status when recovering from cancellation.
/* epio_complete -- usb_request completion callback for synchronous
 * endpoint I/O.  Records the outcome in epdata->status (req->status, or
 * req->actual -- the if/else lines selecting between the two are missing
 * from this extraction fragment) and then completes the on-stack
 * completion that ep_io() stashed in req->context.
 */
272 static void epio_complete (struct usb_ep
*ep
, struct usb_request
*req
)
274 struct ep_data
*epdata
= ep
->driver_data
;
279 epdata
->status
= req
->status
;
281 epdata
->status
= req
->actual
;
282 complete ((struct completion
*)req
->context
);
285 /* tasklock endpoint, returning when it's connected.
286 * still need dev->lock to use epdata->ep.
/* NOTE(review): the return-type line, several returns, and closing braces
 * are missing from this extraction fragment.  Visible behavior: with
 * O_NONBLOCK the ep mutex is trylocked and the call fails unless the state
 * is ENABLED (or READY when is_write); otherwise the mutex is taken
 * interruptibly and the state switch unlocks and fails for any state other
 * than ENABLED / READY-for-write.
 */
289 get_ready_ep (unsigned f_flags
, struct ep_data
*epdata
, bool is_write
)
293 if (f_flags
& O_NONBLOCK
) {
294 if (!mutex_trylock(&epdata
->lock
))
296 if (epdata
->state
!= STATE_EP_ENABLED
&&
297 (!is_write
|| epdata
->state
!= STATE_EP_READY
)) {
298 mutex_unlock(&epdata
->lock
);
306 val
= mutex_lock_interruptible(&epdata
->lock
);
310 switch (epdata
->state
) {
311 case STATE_EP_ENABLED
:
313 case STATE_EP_READY
: /* not configured yet */
317 case STATE_EP_UNBOUND
: /* clean disconnect */
319 // case STATE_EP_DISABLED: /* "can't happen" */
320 default: /* error! */
321 pr_debug ("%s: ep %p not available, state %d\n",
322 shortname
, epdata
, epdata
->state
);
324 mutex_unlock(&epdata
->lock
);
329 ep_io (struct ep_data
*epdata
, void *buf
, unsigned len
)
331 DECLARE_COMPLETION_ONSTACK (done
);
334 spin_lock_irq (&epdata
->dev
->lock
);
335 if (likely (epdata
->ep
!= NULL
)) {
336 struct usb_request
*req
= epdata
->req
;
338 req
->context
= &done
;
339 req
->complete
= epio_complete
;
342 value
= usb_ep_queue (epdata
->ep
, req
, GFP_ATOMIC
);
345 spin_unlock_irq (&epdata
->dev
->lock
);
347 if (likely (value
== 0)) {
348 value
= wait_event_interruptible (done
.wait
, done
.done
);
350 spin_lock_irq (&epdata
->dev
->lock
);
351 if (likely (epdata
->ep
!= NULL
)) {
352 DBG (epdata
->dev
, "%s i/o interrupted\n",
354 usb_ep_dequeue (epdata
->ep
, epdata
->req
);
355 spin_unlock_irq (&epdata
->dev
->lock
);
357 wait_event (done
.wait
, done
.done
);
358 if (epdata
->status
== -ECONNRESET
)
359 epdata
->status
= -EINTR
;
361 spin_unlock_irq (&epdata
->dev
->lock
);
363 DBG (epdata
->dev
, "endpoint gone\n");
364 epdata
->status
= -ENODEV
;
367 return epdata
->status
;
/* ep_release -- release() for an endpoint file.  Under the ep mutex:
 * unless the endpoint was already unbound, reset it to DISABLED, clear
 * both cached descriptors so the file can be reconfigured on reopen, and
 * disable the hardware endpoint.
 * NOTE(review): the return-type line, error handling for the interruptible
 * lock, and the trailing put_ep/return are missing from this fragment.
 */
373 ep_release (struct inode
*inode
, struct file
*fd
)
375 struct ep_data
*data
= fd
->private_data
;
378 value
= mutex_lock_interruptible(&data
->lock
);
382 /* clean up if this can be reopened */
383 if (data
->state
!= STATE_EP_UNBOUND
) {
384 data
->state
= STATE_EP_DISABLED
;
385 data
->desc
.bDescriptorType
= 0;
386 data
->hs_desc
.bDescriptorType
= 0;
387 usb_ep_disable(data
->ep
);
389 mutex_unlock(&data
->lock
);
/* ep_ioctl -- endpoint ioctls: FIFO status, FIFO flush, clear-halt.
 * The endpoint must first pass get_ready_ep(), and the hardware ep is
 * only touched under data->dev->lock while data->ep is non-NULL.
 * NOTE(review): the switch statement's opening line, break statements,
 * closing braces, and the final return are missing from this fragment.
 */
394 static long ep_ioctl(struct file
*fd
, unsigned code
, unsigned long value
)
396 struct ep_data
*data
= fd
->private_data
;
399 if ((status
= get_ready_ep (fd
->f_flags
, data
, false)) < 0)
402 spin_lock_irq (&data
->dev
->lock
);
403 if (likely (data
->ep
!= NULL
)) {
405 case GADGETFS_FIFO_STATUS
:
406 status
= usb_ep_fifo_status (data
->ep
);
408 case GADGETFS_FIFO_FLUSH
:
409 usb_ep_fifo_flush (data
->ep
);
411 case GADGETFS_CLEAR_HALT
:
412 status
= usb_ep_clear_halt (data
->ep
);
419 spin_unlock_irq (&data
->dev
->lock
);
420 mutex_unlock(&data
->lock
);
424 /*----------------------------------------------------------------------*/
426 /* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
429 struct usb_request
*req
;
430 struct ep_data
*epdata
;
432 struct mm_struct
*mm
;
433 struct work_struct work
;
/* ep_aio_cancel -- kiocb cancellation hook for AIO endpoint I/O.
 * Dequeues the coupled usb_request when the endpoint is still live.
 * The spinlock calls are commented out in the original source.
 * NOTE(review): the "local_irq_disable" style surroundings (if any) and
 * the final return of value are missing from this extraction fragment.
 */
440 static int ep_aio_cancel(struct kiocb
*iocb
)
442 struct kiocb_priv
*priv
= iocb
->private;
443 struct ep_data
*epdata
;
447 epdata
= priv
->epdata
;
448 // spin_lock(&epdata->dev->lock);
449 if (likely(epdata
&& epdata
->ep
&& priv
->req
))
450 value
= usb_ep_dequeue (epdata
->ep
, priv
->req
);
453 // spin_unlock(&epdata->dev->lock);
/* ep_user_copy_worker -- workqueue item that finishes an AIO read in
 * process context: copies the received data into the saved iov_iter,
 * completes the iocb, then frees the duplicated iterator.
 * NOTE(review): priv->mm is captured here, but the lines that switch into
 * that mm (and any error adjustment of ret) are missing from this
 * extraction fragment -- do not assume the copy runs without them.
 */
459 static void ep_user_copy_worker(struct work_struct
*work
)
461 struct kiocb_priv
*priv
= container_of(work
, struct kiocb_priv
, work
);
462 struct mm_struct
*mm
= priv
->mm
;
463 struct kiocb
*iocb
= priv
->iocb
;
467 ret
= copy_to_iter(priv
->buf
, priv
->actual
, &priv
->to
);
472 /* completing the iocb can drop the ctx and mm, don't touch mm after */
473 iocb
->ki_complete(iocb
, ret
, ret
);
476 kfree(priv
->to_free
);
480 static void ep_aio_complete(struct usb_ep
*ep
, struct usb_request
*req
)
482 struct kiocb
*iocb
= req
->context
;
483 struct kiocb_priv
*priv
= iocb
->private;
484 struct ep_data
*epdata
= priv
->epdata
;
486 /* lock against disconnect (and ideally, cancel) */
487 spin_lock(&epdata
->dev
->lock
);
491 /* if this was a write or a read returning no data then we
492 * don't need to copy anything to userspace, so we can
493 * complete the aio request immediately.
495 if (priv
->to_free
== NULL
|| unlikely(req
->actual
== 0)) {
497 kfree(priv
->to_free
);
499 iocb
->private = NULL
;
500 /* aio_complete() reports bytes-transferred _and_ faults */
502 iocb
->ki_complete(iocb
, req
->actual
? req
->actual
: req
->status
,
505 /* ep_copy_to_user() won't report both; we hide some faults */
506 if (unlikely(0 != req
->status
))
507 DBG(epdata
->dev
, "%s fault %d len %d\n",
508 ep
->name
, req
->status
, req
->actual
);
510 priv
->buf
= req
->buf
;
511 priv
->actual
= req
->actual
;
512 INIT_WORK(&priv
->work
, ep_user_copy_worker
);
513 schedule_work(&priv
->work
);
515 spin_unlock(&epdata
->dev
->lock
);
517 usb_ep_free_request(ep
, req
);
521 static ssize_t
ep_aio(struct kiocb
*iocb
,
522 struct kiocb_priv
*priv
,
523 struct ep_data
*epdata
,
527 struct usb_request
*req
;
530 iocb
->private = priv
;
533 kiocb_set_cancel_fn(iocb
, ep_aio_cancel
);
535 priv
->epdata
= epdata
;
537 priv
->mm
= current
->mm
; /* mm teardown waits for iocbs in exit_aio() */
539 /* each kiocb is coupled to one usb_request, but we can't
540 * allocate or submit those if the host disconnected.
542 spin_lock_irq(&epdata
->dev
->lock
);
544 if (unlikely(epdata
->ep
== NULL
))
547 req
= usb_ep_alloc_request(epdata
->ep
, GFP_ATOMIC
);
555 req
->complete
= ep_aio_complete
;
557 value
= usb_ep_queue(epdata
->ep
, req
, GFP_ATOMIC
);
558 if (unlikely(0 != value
)) {
559 usb_ep_free_request(epdata
->ep
, req
);
562 spin_unlock_irq(&epdata
->dev
->lock
);
566 spin_unlock_irq(&epdata
->dev
->lock
);
567 kfree(priv
->to_free
);
574 ep_read_iter(struct kiocb
*iocb
, struct iov_iter
*to
)
576 struct file
*file
= iocb
->ki_filp
;
577 struct ep_data
*epdata
= file
->private_data
;
578 size_t len
= iov_iter_count(to
);
582 if ((value
= get_ready_ep(file
->f_flags
, epdata
, false)) < 0)
585 /* halt any endpoint by doing a "wrong direction" i/o call */
586 if (usb_endpoint_dir_in(&epdata
->desc
)) {
587 if (usb_endpoint_xfer_isoc(&epdata
->desc
) ||
588 !is_sync_kiocb(iocb
)) {
589 mutex_unlock(&epdata
->lock
);
592 DBG (epdata
->dev
, "%s halt\n", epdata
->name
);
593 spin_lock_irq(&epdata
->dev
->lock
);
594 if (likely(epdata
->ep
!= NULL
))
595 usb_ep_set_halt(epdata
->ep
);
596 spin_unlock_irq(&epdata
->dev
->lock
);
597 mutex_unlock(&epdata
->lock
);
601 buf
= kmalloc(len
, GFP_KERNEL
);
602 if (unlikely(!buf
)) {
603 mutex_unlock(&epdata
->lock
);
606 if (is_sync_kiocb(iocb
)) {
607 value
= ep_io(epdata
, buf
, len
);
608 if (value
>= 0 && (copy_to_iter(buf
, value
, to
) != value
))
611 struct kiocb_priv
*priv
= kzalloc(sizeof *priv
, GFP_KERNEL
);
615 priv
->to_free
= dup_iter(&priv
->to
, to
, GFP_KERNEL
);
616 if (!priv
->to_free
) {
620 value
= ep_aio(iocb
, priv
, epdata
, buf
, len
);
621 if (value
== -EIOCBQUEUED
)
626 mutex_unlock(&epdata
->lock
);
630 static ssize_t
ep_config(struct ep_data
*, const char *, size_t);
633 ep_write_iter(struct kiocb
*iocb
, struct iov_iter
*from
)
635 struct file
*file
= iocb
->ki_filp
;
636 struct ep_data
*epdata
= file
->private_data
;
637 size_t len
= iov_iter_count(from
);
642 if ((value
= get_ready_ep(file
->f_flags
, epdata
, true)) < 0)
645 configured
= epdata
->state
== STATE_EP_ENABLED
;
647 /* halt any endpoint by doing a "wrong direction" i/o call */
648 if (configured
&& !usb_endpoint_dir_in(&epdata
->desc
)) {
649 if (usb_endpoint_xfer_isoc(&epdata
->desc
) ||
650 !is_sync_kiocb(iocb
)) {
651 mutex_unlock(&epdata
->lock
);
654 DBG (epdata
->dev
, "%s halt\n", epdata
->name
);
655 spin_lock_irq(&epdata
->dev
->lock
);
656 if (likely(epdata
->ep
!= NULL
))
657 usb_ep_set_halt(epdata
->ep
);
658 spin_unlock_irq(&epdata
->dev
->lock
);
659 mutex_unlock(&epdata
->lock
);
663 buf
= kmalloc(len
, GFP_KERNEL
);
664 if (unlikely(!buf
)) {
665 mutex_unlock(&epdata
->lock
);
669 if (unlikely(!copy_from_iter_full(buf
, len
, from
))) {
674 if (unlikely(!configured
)) {
675 value
= ep_config(epdata
, buf
, len
);
676 } else if (is_sync_kiocb(iocb
)) {
677 value
= ep_io(epdata
, buf
, len
);
679 struct kiocb_priv
*priv
= kzalloc(sizeof *priv
, GFP_KERNEL
);
682 value
= ep_aio(iocb
, priv
, epdata
, buf
, len
);
683 if (value
== -EIOCBQUEUED
)
689 mutex_unlock(&epdata
->lock
);
693 /*----------------------------------------------------------------------*/
695 /* used after endpoint configuration */
696 static const struct file_operations ep_io_operations
= {
697 .owner
= THIS_MODULE
,
700 .release
= ep_release
,
702 .unlocked_ioctl
= ep_ioctl
,
703 .read_iter
= ep_read_iter
,
704 .write_iter
= ep_write_iter
,
707 /* ENDPOINT INITIALIZATION
709 * fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
710 * status = write (fd, descriptors, sizeof descriptors)
712 * That write establishes the endpoint configuration, configuring
713 * the controller to process bulk, interrupt, or isochronous transfers
714 * at the right maxpacket size, and so on.
716 * The descriptors are message type 1, identified by a host order u32
717 * at the beginning of what's written. Descriptor order is: full/low
718 * speed descriptor, then optional high speed descriptor.
721 ep_config (struct ep_data
*data
, const char *buf
, size_t len
)
725 int value
, length
= len
;
727 if (data
->state
!= STATE_EP_READY
) {
733 if (len
< USB_DT_ENDPOINT_SIZE
+ 4)
736 /* we might need to change message format someday */
737 memcpy(&tag
, buf
, 4);
739 DBG(data
->dev
, "config %s, bad tag %d\n", data
->name
, tag
);
745 /* NOTE: audio endpoint extensions not accepted here;
746 * just don't include the extra bytes.
749 /* full/low speed descriptor, then high speed */
750 memcpy(&data
->desc
, buf
, USB_DT_ENDPOINT_SIZE
);
751 if (data
->desc
.bLength
!= USB_DT_ENDPOINT_SIZE
752 || data
->desc
.bDescriptorType
!= USB_DT_ENDPOINT
)
754 if (len
!= USB_DT_ENDPOINT_SIZE
) {
755 if (len
!= 2 * USB_DT_ENDPOINT_SIZE
)
757 memcpy(&data
->hs_desc
, buf
+ USB_DT_ENDPOINT_SIZE
,
758 USB_DT_ENDPOINT_SIZE
);
759 if (data
->hs_desc
.bLength
!= USB_DT_ENDPOINT_SIZE
760 || data
->hs_desc
.bDescriptorType
761 != USB_DT_ENDPOINT
) {
762 DBG(data
->dev
, "config %s, bad hs length or type\n",
768 spin_lock_irq (&data
->dev
->lock
);
769 if (data
->dev
->state
== STATE_DEV_UNBOUND
) {
779 switch (data
->dev
->gadget
->speed
) {
782 ep
->desc
= &data
->desc
;
785 /* fails if caller didn't provide that descriptor... */
786 ep
->desc
= &data
->hs_desc
;
789 DBG(data
->dev
, "unconnected, %s init abandoned\n",
794 value
= usb_ep_enable(ep
);
796 data
->state
= STATE_EP_ENABLED
;
800 spin_unlock_irq (&data
->dev
->lock
);
803 data
->desc
.bDescriptorType
= 0;
804 data
->hs_desc
.bDescriptorType
= 0;
/* ep_open -- open() for /dev/gadget/$ENDPOINT files.  Under the ep mutex
 * and dev->lock: fails when the device is unbound; moves a DISABLED
 * endpoint to READY and stores the ep_data as file private data;
 * any other state is rejected (logged via DBG).
 * NOTE(review): the return-type line, the value variable and its
 * error-code assignments, and the final return are missing from this
 * extraction fragment.
 */
813 ep_open (struct inode
*inode
, struct file
*fd
)
815 struct ep_data
*data
= inode
->i_private
;
818 if (mutex_lock_interruptible(&data
->lock
) != 0)
820 spin_lock_irq (&data
->dev
->lock
);
821 if (data
->dev
->state
== STATE_DEV_UNBOUND
)
823 else if (data
->state
== STATE_EP_DISABLED
) {
825 data
->state
= STATE_EP_READY
;
827 fd
->private_data
= data
;
828 VDEBUG (data
->dev
, "%s ready\n", data
->name
);
830 DBG (data
->dev
, "%s state %d\n",
831 data
->name
, data
->state
);
832 spin_unlock_irq (&data
->dev
->lock
);
833 mutex_unlock(&data
->lock
);
837 /*----------------------------------------------------------------------*/
839 /* EP0 IMPLEMENTATION can be partly in userspace.
841 * Drivers that use this facility receive various events, including
842 * control requests the kernel doesn't handle. Drivers that don't
843 * use this facility may be too simple-minded for real applications.
846 static inline void ep0_readable (struct dev_data
*dev
)
848 wake_up (&dev
->wait
);
849 kill_fasync (&dev
->fasync
, SIGIO
, POLL_IN
);
/* clean_req -- restore the shared ep0 request to its default state:
 * if a larger buffer was substituted, point req->buf back at dev->rbuf
 * (the line freeing the substituted buffer is missing from this
 * extraction fragment), reinstall the synchronous completion handler,
 * and clear the pending-OUT-data flag.
 */
852 static void clean_req (struct usb_ep
*ep
, struct usb_request
*req
)
854 struct dev_data
*dev
= ep
->driver_data
;
856 if (req
->buf
!= dev
->rbuf
) {
858 req
->buf
= dev
->rbuf
;
860 req
->complete
= epio_complete
;
861 dev
->setup_out_ready
= 0;
864 static void ep0_complete (struct usb_ep
*ep
, struct usb_request
*req
)
866 struct dev_data
*dev
= ep
->driver_data
;
870 /* for control OUT, data must still get to userspace */
871 spin_lock_irqsave(&dev
->lock
, flags
);
872 if (!dev
->setup_in
) {
873 dev
->setup_out_error
= (req
->status
!= 0);
874 if (!dev
->setup_out_error
)
876 dev
->setup_out_ready
= 1;
880 /* clean up as appropriate */
881 if (free
&& req
->buf
!= &dev
->rbuf
)
883 req
->complete
= epio_complete
;
884 spin_unlock_irqrestore(&dev
->lock
, flags
);
/* setup_req -- prepare the shared ep0 request for a len-byte transfer.
 * Fails busy while previously collected OUT data is still unconsumed;
 * when len exceeds the scratch rbuf, a bigger buffer is kmalloc'd
 * (GFP_ATOMIC), falling back to rbuf on allocation failure; completion
 * is routed through ep0_complete.
 * NOTE(review): the error returns, req->length assignment, and final
 * "return 0" are missing from this extraction fragment.
 */
887 static int setup_req (struct usb_ep
*ep
, struct usb_request
*req
, u16 len
)
889 struct dev_data
*dev
= ep
->driver_data
;
891 if (dev
->setup_out_ready
) {
892 DBG (dev
, "ep0 request busy!\n");
895 if (len
> sizeof (dev
->rbuf
))
896 req
->buf
= kmalloc(len
, GFP_ATOMIC
);
897 if (req
->buf
== NULL
) {
898 req
->buf
= dev
->rbuf
;
901 req
->complete
= ep0_complete
;
908 ep0_read (struct file
*fd
, char __user
*buf
, size_t len
, loff_t
*ptr
)
910 struct dev_data
*dev
= fd
->private_data
;
912 enum ep0_state state
;
914 spin_lock_irq (&dev
->lock
);
915 if (dev
->state
<= STATE_DEV_OPENED
) {
920 /* report fd mode change before acting on it */
921 if (dev
->setup_abort
) {
922 dev
->setup_abort
= 0;
927 /* control DATA stage */
928 if ((state
= dev
->state
) == STATE_DEV_SETUP
) {
930 if (dev
->setup_in
) { /* stall IN */
931 VDEBUG(dev
, "ep0in stall\n");
932 (void) usb_ep_set_halt (dev
->gadget
->ep0
);
934 dev
->state
= STATE_DEV_CONNECTED
;
936 } else if (len
== 0) { /* ack SET_CONFIGURATION etc */
937 struct usb_ep
*ep
= dev
->gadget
->ep0
;
938 struct usb_request
*req
= dev
->req
;
940 if ((retval
= setup_req (ep
, req
, 0)) == 0) {
941 spin_unlock_irq (&dev
->lock
);
942 retval
= usb_ep_queue (ep
, req
, GFP_KERNEL
);
943 spin_lock_irq (&dev
->lock
);
945 dev
->state
= STATE_DEV_CONNECTED
;
947 /* assume that was SET_CONFIGURATION */
948 if (dev
->current_config
) {
951 if (gadget_is_dualspeed(dev
->gadget
)
952 && (dev
->gadget
->speed
954 power
= dev
->hs_config
->bMaxPower
;
956 power
= dev
->config
->bMaxPower
;
957 usb_gadget_vbus_draw(dev
->gadget
, 2 * power
);
960 } else { /* collect OUT data */
961 if ((fd
->f_flags
& O_NONBLOCK
) != 0
962 && !dev
->setup_out_ready
) {
966 spin_unlock_irq (&dev
->lock
);
967 retval
= wait_event_interruptible (dev
->wait
,
968 dev
->setup_out_ready
!= 0);
970 /* FIXME state could change from under us */
971 spin_lock_irq (&dev
->lock
);
975 if (dev
->state
!= STATE_DEV_SETUP
) {
979 dev
->state
= STATE_DEV_CONNECTED
;
981 if (dev
->setup_out_error
)
984 len
= min (len
, (size_t)dev
->req
->actual
);
985 // FIXME don't call this with the spinlock held ...
986 if (copy_to_user (buf
, dev
->req
->buf
, len
))
990 clean_req (dev
->gadget
->ep0
, dev
->req
);
991 /* NOTE userspace can't yet choose to stall */
997 /* else normal: return event data */
998 if (len
< sizeof dev
->event
[0]) {
1002 len
-= len
% sizeof (struct usb_gadgetfs_event
);
1003 dev
->usermode_setup
= 1;
1006 /* return queued events right away */
1007 if (dev
->ev_next
!= 0) {
1010 n
= len
/ sizeof (struct usb_gadgetfs_event
);
1011 if (dev
->ev_next
< n
)
1014 /* ep0 i/o has special semantics during STATE_DEV_SETUP */
1015 for (i
= 0; i
< n
; i
++) {
1016 if (dev
->event
[i
].type
== GADGETFS_SETUP
) {
1017 dev
->state
= STATE_DEV_SETUP
;
1022 spin_unlock_irq (&dev
->lock
);
1023 len
= n
* sizeof (struct usb_gadgetfs_event
);
1024 if (copy_to_user (buf
, &dev
->event
, len
))
1029 /* NOTE this doesn't guard against broken drivers;
1030 * concurrent ep0 readers may lose events.
1032 spin_lock_irq (&dev
->lock
);
1033 if (dev
->ev_next
> n
) {
1034 memmove(&dev
->event
[0], &dev
->event
[n
],
1035 sizeof (struct usb_gadgetfs_event
)
1036 * (dev
->ev_next
- n
));
1039 spin_unlock_irq (&dev
->lock
);
1043 if (fd
->f_flags
& O_NONBLOCK
) {
1050 DBG (dev
, "fail %s, state %d\n", __func__
, state
);
1053 case STATE_DEV_UNCONNECTED
:
1054 case STATE_DEV_CONNECTED
:
1055 spin_unlock_irq (&dev
->lock
);
1056 DBG (dev
, "%s wait\n", __func__
);
1058 /* wait for events */
1059 retval
= wait_event_interruptible (dev
->wait
,
1063 spin_lock_irq (&dev
->lock
);
1068 spin_unlock_irq (&dev
->lock
);
1072 static struct usb_gadgetfs_event
*
1073 next_event (struct dev_data
*dev
, enum usb_gadgetfs_event_type type
)
1075 struct usb_gadgetfs_event
*event
;
1079 /* these events purge the queue */
1080 case GADGETFS_DISCONNECT
:
1081 if (dev
->state
== STATE_DEV_SETUP
)
1082 dev
->setup_abort
= 1;
1084 case GADGETFS_CONNECT
:
1087 case GADGETFS_SETUP
: /* previous request timed out */
1088 case GADGETFS_SUSPEND
: /* same effect */
1089 /* these events can't be repeated */
1090 for (i
= 0; i
!= dev
->ev_next
; i
++) {
1091 if (dev
->event
[i
].type
!= type
)
1093 DBG(dev
, "discard old event[%d] %d\n", i
, type
);
1095 if (i
== dev
->ev_next
)
1097 /* indices start at zero, for simplicity */
1098 memmove (&dev
->event
[i
], &dev
->event
[i
+ 1],
1099 sizeof (struct usb_gadgetfs_event
)
1100 * (dev
->ev_next
- i
));
1106 VDEBUG(dev
, "event[%d] = %d\n", dev
->ev_next
, type
);
1107 event
= &dev
->event
[dev
->ev_next
++];
1108 BUG_ON (dev
->ev_next
> N_EVENT
);
1109 memset (event
, 0, sizeof *event
);
1115 ep0_write (struct file
*fd
, const char __user
*buf
, size_t len
, loff_t
*ptr
)
1117 struct dev_data
*dev
= fd
->private_data
;
1118 ssize_t retval
= -ESRCH
;
1120 /* report fd mode change before acting on it */
1121 if (dev
->setup_abort
) {
1122 dev
->setup_abort
= 0;
1125 /* data and/or status stage for control request */
1126 } else if (dev
->state
== STATE_DEV_SETUP
) {
1128 len
= min_t(size_t, len
, dev
->setup_wLength
);
1129 if (dev
->setup_in
) {
1130 retval
= setup_req (dev
->gadget
->ep0
, dev
->req
, len
);
1132 dev
->state
= STATE_DEV_CONNECTED
;
1133 spin_unlock_irq (&dev
->lock
);
1134 if (copy_from_user (dev
->req
->buf
, buf
, len
))
1137 if (len
< dev
->setup_wLength
)
1139 retval
= usb_ep_queue (
1140 dev
->gadget
->ep0
, dev
->req
,
1143 spin_lock_irq(&dev
->lock
);
1145 clean_req (dev
->gadget
->ep0
, dev
->req
);
1152 /* can stall some OUT transfers */
1153 } else if (dev
->setup_can_stall
) {
1154 VDEBUG(dev
, "ep0out stall\n");
1155 (void) usb_ep_set_halt (dev
->gadget
->ep0
);
1157 dev
->state
= STATE_DEV_CONNECTED
;
1159 DBG(dev
, "bogus ep0out stall!\n");
1162 DBG (dev
, "fail %s, state %d\n", __func__
, dev
->state
);
/* ep0_fasync -- fasync() hook for ep0: register/unregister this file for
 * SIGIO delivery (see ep0_readable) via the standard fasync_helper.
 * NOTE(review): the return-type line ("static int", presumably) is
 * missing from this extraction fragment.
 */
1168 ep0_fasync (int f
, struct file
*fd
, int on
)
1170 struct dev_data
*dev
= fd
->private_data
;
1171 // caller must F_SETOWN before signal delivery happens
1172 VDEBUG (dev
, "%s %s\n", __func__
, on
? "on" : "off");
1173 return fasync_helper (f
, fd
, on
, &dev
->fasync
);
1176 static struct usb_gadget_driver gadgetfs_driver
;
/* dev_release -- release() for /dev/gadget/$CHIP: closing ep0 shuts the
 * whole device down.  Unregisters the gadget driver if one was
 * registered, then marks the device DISABLED under dev->lock.
 * NOTE(review): the return-type line and the trailing cleanup
 * (fasync teardown / put_dev / return, if present in the original)
 * are missing from this extraction fragment.
 */
1179 dev_release (struct inode
*inode
, struct file
*fd
)
1181 struct dev_data
*dev
= fd
->private_data
;
1183 /* closing ep0 === shutdown all */
1185 if (dev
->gadget_registered
)
1186 usb_gadget_unregister_driver (&gadgetfs_driver
);
1188 /* at this point "good" hardware has disconnected the
1189 * device from USB; the host won't see it any more.
1190 * alternatively, all host requests will time out.
1196 /* other endpoints were all decoupled from this device */
1197 spin_lock_irq(&dev
->lock
);
1198 dev
->state
= STATE_DEV_DISABLED
;
1199 spin_unlock_irq(&dev
->lock
);
/* ep0_poll -- poll() for ep0.  Before the device is configured
 * (state <= OPENED) it reports DEFAULT_POLLMASK.  Otherwise, under
 * dev->lock: a pending setup_abort is reported first; in SETUP state the
 * readiness depends on direction / stall capability; otherwise the fd is
 * readable when events are queued.
 * NOTE(review): the return-type line, the mask variable and its
 * POLLIN/POLLOUT assignments, and the final return are missing from
 * this extraction fragment.
 */
1206 ep0_poll (struct file
*fd
, poll_table
*wait
)
1208 struct dev_data
*dev
= fd
->private_data
;
1211 if (dev
->state
<= STATE_DEV_OPENED
)
1212 return DEFAULT_POLLMASK
;
1214 poll_wait(fd
, &dev
->wait
, wait
);
1216 spin_lock_irq (&dev
->lock
);
1218 /* report fd mode change before acting on it */
1219 if (dev
->setup_abort
) {
1220 dev
->setup_abort
= 0;
1225 if (dev
->state
== STATE_DEV_SETUP
) {
1226 if (dev
->setup_in
|| dev
->setup_can_stall
)
1229 if (dev
->ev_next
!= 0)
1233 spin_unlock_irq(&dev
->lock
);
/* dev_ioctl -- pass unrecognized ioctls straight through to the
 * controller driver's gadget ops->ioctl hook, if it provides one.
 * NOTE(review): the initialization of ret (error default) and the final
 * return are missing from this extraction fragment.
 */
1237 static long dev_ioctl (struct file
*fd
, unsigned code
, unsigned long value
)
1239 struct dev_data
*dev
= fd
->private_data
;
1240 struct usb_gadget
*gadget
= dev
->gadget
;
1243 if (gadget
->ops
->ioctl
)
1244 ret
= gadget
->ops
->ioctl (gadget
, code
, value
);
1249 /*----------------------------------------------------------------------*/
1251 /* The in-kernel gadget driver handles most ep0 issues, in particular
1252 * enumerating the single configuration (as provided from user space).
1254 * Unrecognized ep0 requests may be handled in user space.
1257 static void make_qualifier (struct dev_data
*dev
)
1259 struct usb_qualifier_descriptor qual
;
1260 struct usb_device_descriptor
*desc
;
1262 qual
.bLength
= sizeof qual
;
1263 qual
.bDescriptorType
= USB_DT_DEVICE_QUALIFIER
;
1264 qual
.bcdUSB
= cpu_to_le16 (0x0200);
1267 qual
.bDeviceClass
= desc
->bDeviceClass
;
1268 qual
.bDeviceSubClass
= desc
->bDeviceSubClass
;
1269 qual
.bDeviceProtocol
= desc
->bDeviceProtocol
;
1271 /* assumes ep0 uses the same value for both speeds ... */
1272 qual
.bMaxPacketSize0
= dev
->gadget
->ep0
->maxpacket
;
1274 qual
.bNumConfigurations
= 1;
1277 memcpy (dev
->rbuf
, &qual
, sizeof qual
);
1281 config_buf (struct dev_data
*dev
, u8 type
, unsigned index
)
1286 /* only one configuration */
1290 if (gadget_is_dualspeed(dev
->gadget
)) {
1291 hs
= (dev
->gadget
->speed
== USB_SPEED_HIGH
);
1292 if (type
== USB_DT_OTHER_SPEED_CONFIG
)
1296 dev
->req
->buf
= dev
->hs_config
;
1297 len
= le16_to_cpu(dev
->hs_config
->wTotalLength
);
1299 dev
->req
->buf
= dev
->config
;
1300 len
= le16_to_cpu(dev
->config
->wTotalLength
);
1302 ((u8
*)dev
->req
->buf
) [1] = type
;
1307 gadgetfs_setup (struct usb_gadget
*gadget
, const struct usb_ctrlrequest
*ctrl
)
1309 struct dev_data
*dev
= get_gadget_data (gadget
);
1310 struct usb_request
*req
= dev
->req
;
1311 int value
= -EOPNOTSUPP
;
1312 struct usb_gadgetfs_event
*event
;
1313 u16 w_value
= le16_to_cpu(ctrl
->wValue
);
1314 u16 w_length
= le16_to_cpu(ctrl
->wLength
);
1316 spin_lock (&dev
->lock
);
1317 dev
->setup_abort
= 0;
1318 if (dev
->state
== STATE_DEV_UNCONNECTED
) {
1319 if (gadget_is_dualspeed(gadget
)
1320 && gadget
->speed
== USB_SPEED_HIGH
1321 && dev
->hs_config
== NULL
) {
1322 spin_unlock(&dev
->lock
);
1323 ERROR (dev
, "no high speed config??\n");
1327 dev
->state
= STATE_DEV_CONNECTED
;
1329 INFO (dev
, "connected\n");
1330 event
= next_event (dev
, GADGETFS_CONNECT
);
1331 event
->u
.speed
= gadget
->speed
;
1334 /* host may have given up waiting for response. we can miss control
1335 * requests handled lower down (device/endpoint status and features);
1336 * then ep0_{read,write} will report the wrong status. controller
1337 * driver will have aborted pending i/o.
1339 } else if (dev
->state
== STATE_DEV_SETUP
)
1340 dev
->setup_abort
= 1;
1342 req
->buf
= dev
->rbuf
;
1343 req
->context
= NULL
;
1344 value
= -EOPNOTSUPP
;
1345 switch (ctrl
->bRequest
) {
1347 case USB_REQ_GET_DESCRIPTOR
:
1348 if (ctrl
->bRequestType
!= USB_DIR_IN
)
1350 switch (w_value
>> 8) {
1353 value
= min (w_length
, (u16
) sizeof *dev
->dev
);
1354 dev
->dev
->bMaxPacketSize0
= dev
->gadget
->ep0
->maxpacket
;
1355 req
->buf
= dev
->dev
;
1357 case USB_DT_DEVICE_QUALIFIER
:
1358 if (!dev
->hs_config
)
1360 value
= min (w_length
, (u16
)
1361 sizeof (struct usb_qualifier_descriptor
));
1362 make_qualifier (dev
);
1364 case USB_DT_OTHER_SPEED_CONFIG
:
1367 value
= config_buf (dev
,
1371 value
= min (w_length
, (u16
) value
);
1376 default: // all others are errors
1381 /* currently one config, two speeds */
1382 case USB_REQ_SET_CONFIGURATION
:
1383 if (ctrl
->bRequestType
!= 0)
1385 if (0 == (u8
) w_value
) {
1387 dev
->current_config
= 0;
1388 usb_gadget_vbus_draw(gadget
, 8 /* mA */ );
1389 // user mode expected to disable endpoints
1393 if (gadget_is_dualspeed(gadget
)
1394 && gadget
->speed
== USB_SPEED_HIGH
) {
1395 config
= dev
->hs_config
->bConfigurationValue
;
1396 power
= dev
->hs_config
->bMaxPower
;
1398 config
= dev
->config
->bConfigurationValue
;
1399 power
= dev
->config
->bMaxPower
;
1402 if (config
== (u8
) w_value
) {
1404 dev
->current_config
= config
;
1405 usb_gadget_vbus_draw(gadget
, 2 * power
);
1409 /* report SET_CONFIGURATION like any other control request,
1410 * except that usermode may not stall this. the next
1411 * request mustn't be allowed start until this finishes:
1412 * endpoints and threads set up, etc.
1414 * NOTE: older PXA hardware (before PXA 255: without UDCCFR)
1415 * has bad/racey automagic that prevents synchronizing here.
1416 * even kernel mode drivers often miss them.
1419 INFO (dev
, "configuration #%d\n", dev
->current_config
);
1420 usb_gadget_set_state(gadget
, USB_STATE_CONFIGURED
);
1421 if (dev
->usermode_setup
) {
1422 dev
->setup_can_stall
= 0;
1428 #ifndef CONFIG_USB_PXA25X
1429 /* PXA automagically handles this request too */
1430 case USB_REQ_GET_CONFIGURATION
:
1431 if (ctrl
->bRequestType
!= 0x80)
1433 *(u8
*)req
->buf
= dev
->current_config
;
1434 value
= min (w_length
, (u16
) 1);
1440 VDEBUG (dev
, "%s req%02x.%02x v%04x i%04x l%d\n",
1441 dev
->usermode_setup
? "delegate" : "fail",
1442 ctrl
->bRequestType
, ctrl
->bRequest
,
1443 w_value
, le16_to_cpu(ctrl
->wIndex
), w_length
);
1445 /* if there's an ep0 reader, don't stall */
1446 if (dev
->usermode_setup
) {
1447 dev
->setup_can_stall
= 1;
1449 dev
->setup_in
= (ctrl
->bRequestType
& USB_DIR_IN
)
1451 dev
->setup_wLength
= w_length
;
1452 dev
->setup_out_ready
= 0;
1453 dev
->setup_out_error
= 0;
1456 /* read DATA stage for OUT right away */
1457 if (unlikely (!dev
->setup_in
&& w_length
)) {
1458 value
= setup_req (gadget
->ep0
, dev
->req
,
1463 spin_unlock (&dev
->lock
);
1464 value
= usb_ep_queue (gadget
->ep0
, dev
->req
,
1466 spin_lock (&dev
->lock
);
1468 clean_req (gadget
->ep0
, dev
->req
);
1472 /* we can't currently stall these */
1473 dev
->setup_can_stall
= 0;
1476 /* state changes when reader collects event */
1477 event
= next_event (dev
, GADGETFS_SETUP
);
1478 event
->u
.setup
= *ctrl
;
1480 spin_unlock (&dev
->lock
);
1485 /* proceed with data transfer and status phases? */
1486 if (value
>= 0 && dev
->state
!= STATE_DEV_SETUP
) {
1487 req
->length
= value
;
1488 req
->zero
= value
< w_length
;
1490 spin_unlock (&dev
->lock
);
1491 value
= usb_ep_queue (gadget
->ep0
, req
, GFP_KERNEL
);
1493 DBG (dev
, "ep_queue --> %d\n", value
);
1499 /* device stalls when value < 0 */
1500 spin_unlock (&dev
->lock
);
/* destroy_ep_files -- remove every per-endpoint file from the gadgetfs
 * superblock and sever each ep_data's link to the UDC controller
 * (usb_ep_disable + usb_ep_free_request), all while iterating under
 * dev->lock; the lock is dropped around the dcache operations.
 * dev->state must already prevent new i/o (see comment below).
 *
 * NOTE(review): this extraction is lossy -- the declaration of 'ep',
 * the clearing of ep->dentry, and the inode_lock/d_delete/dput steps
 * paired with inode_unlock() appear to have been dropped; confirm
 * against the upstream drivers/usb/gadget/legacy/inode.c before editing.
 */
1504 static void destroy_ep_files (struct dev_data
*dev
)
1506 DBG (dev
, "%s %d\n", __func__
, dev
->state
);
1508 /* dev->state must prevent interference */
1509 spin_lock_irq (&dev
->lock
);
1510 while (!list_empty(&dev
->epfiles
)) {
1512 struct inode
*parent
;
1513 struct dentry
*dentry
;
1515 /* break link to FS */
1516 ep
= list_first_entry (&dev
->epfiles
, struct ep_data
, epfiles
);
1517 list_del_init (&ep
->epfiles
);
1518 dentry
= ep
->dentry
;
1520 parent
= d_inode(dentry
->d_parent
);
1522 /* break link to controller */
1523 if (ep
->state
== STATE_EP_ENABLED
)
1524 (void) usb_ep_disable (ep
->ep
);
1525 ep
->state
= STATE_EP_UNBOUND
;
1526 usb_ep_free_request (ep
->ep
, ep
->req
);
/* wake any reader/writer blocked on this endpoint */
1528 wake_up (&ep
->wait
);
/* drop the spinlock around the dcache teardown below */
1531 spin_unlock_irq (&dev
->lock
);
1533 /* break link to dcache */
1537 inode_unlock(parent
);
1539 spin_lock_irq (&dev
->lock
);
1541 spin_unlock_irq (&dev
->lock
);
/* forward declaration: the definition lives with the superblock helpers
 * further down; needed here because activate_ep_files() creates one
 * file per endpoint.
 */
1545 static struct dentry
*
1546 gadgetfs_create_file (struct super_block
*sb
, char const *name
,
1547 void *data
, const struct file_operations
*fops
);
/* activate_ep_files -- for each endpoint the UDC exposes, allocate a
 * refcounted ep_data (count starts at 1), preallocate its usb_request,
 * and create the matching /dev/gadget/ep* file via
 * gadgetfs_create_file(). On allocation failure it logs "enomem" and
 * tears everything down with destroy_ep_files().
 *
 * NOTE(review): extraction is lossy -- the NULL checks after kzalloc /
 * usb_ep_alloc_request / gadgetfs_create_file, the unwind labels, and
 * the return statements are not visible here; confirm upstream.
 */
1549 static int activate_ep_files (struct dev_data
*dev
)
1552 struct ep_data
*data
;
1554 gadget_for_each_ep (ep
, dev
->gadget
) {
1556 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
1559 data
->state
= STATE_EP_DISABLED
;
1560 mutex_init(&data
->lock
);
1561 init_waitqueue_head (&data
->wait
);
/* data came from kzalloc, so the bounded copy below stays NUL-terminated */
1563 strncpy (data
->name
, ep
->name
, sizeof (data
->name
) - 1);
1564 atomic_set (&data
->count
, 1);
1569 ep
->driver_data
= data
;
1571 data
->req
= usb_ep_alloc_request (ep
, GFP_KERNEL
);
1575 data
->dentry
= gadgetfs_create_file (dev
->sb
, data
->name
,
1576 data
, &ep_io_operations
);
1579 list_add_tail (&data
->epfiles
, &dev
->epfiles
);
/* error unwind: release the request allocated for this endpoint */
1584 usb_ep_free_request (ep
, data
->req
);
1589 DBG (dev
, "%s enomem\n", __func__
);
1590 destroy_ep_files (dev
);
/* gadgetfs_unbind -- gadget driver unbind callback: mark the device
 * unbound, destroy the per-endpoint files, detach driver data from the
 * gadget, and free the preallocated ep0 request.
 *
 * NOTE(review): upstream guards usb_ep_free_request() with
 * "if (dev->req)" and ends with put_dev(dev); neither line is visible
 * in this extraction -- confirm before editing.
 */
1595 gadgetfs_unbind (struct usb_gadget
*gadget
)
1597 struct dev_data
*dev
= get_gadget_data (gadget
);
1599 DBG (dev
, "%s\n", __func__
);
1601 spin_lock_irq (&dev
->lock
);
1602 dev
->state
= STATE_DEV_UNBOUND
;
1603 spin_unlock_irq (&dev
->lock
);
1605 destroy_ep_files (dev
);
1606 gadget
->ep0
->driver_data
= NULL
;
1607 set_gadget_data (gadget
, NULL
);
1609 /* we've already been disconnected ... no i/o is active */
1611 usb_ep_free_request (gadget
->ep0
, dev
->req
);
1612 DBG (dev
, "%s done\n", __func__
);
/* singleton: gadgetfs drives at most one controller at a time */
1616 static struct dev_data
*the_device
;
/* gadgetfs_bind -- gadget driver bind callback: verify the controller
 * matches the CHIP name the filesystem was created for, attach the
 * singleton dev to the gadget, preallocate the ep0 control request,
 * create the per-endpoint files, then enter STATE_DEV_UNCONNECTED.
 * Failures unwind through gadgetfs_unbind().
 *
 * NOTE(review): extraction is lossy -- NULL checks on dev/dev->req,
 * the error labels, and the return statements are not visible here.
 */
1618 static int gadgetfs_bind(struct usb_gadget
*gadget
,
1619 struct usb_gadget_driver
*driver
)
1621 struct dev_data
*dev
= the_device
;
1625 if (0 != strcmp (CHIP
, gadget
->name
)) {
1626 pr_err("%s expected %s controller not %s\n",
1627 shortname
, CHIP
, gadget
->name
);
1631 set_gadget_data (gadget
, dev
);
1632 dev
->gadget
= gadget
;
1633 gadget
->ep0
->driver_data
= dev
;
1635 /* preallocate control response and buffer */
1636 dev
->req
= usb_ep_alloc_request (gadget
->ep0
, GFP_KERNEL
);
1639 dev
->req
->context
= NULL
;
1640 dev
->req
->complete
= epio_complete
;
1642 if (activate_ep_files (dev
) < 0)
1645 INFO (dev
, "bound to %s driver\n", gadget
->name
);
1646 spin_lock_irq(&dev
->lock
);
1647 dev
->state
= STATE_DEV_UNCONNECTED
;
1648 spin_unlock_irq(&dev
->lock
);
/* error path: undo everything done above */
1653 gadgetfs_unbind (gadget
);
/* gadgetfs_disconnect -- report a bus disconnect to userspace by
 * queueing a GADGETFS_DISCONNECT event, unless the device was already
 * in STATE_DEV_UNCONNECTED. Runs under dev->lock (irqsave variant).
 *
 * NOTE(review): the "goto exit" after the state test and the
 * ep0_readable() wakeup are not visible in this extraction.
 */
1658 gadgetfs_disconnect (struct usb_gadget
*gadget
)
1660 struct dev_data
*dev
= get_gadget_data (gadget
);
1661 unsigned long flags
;
1663 spin_lock_irqsave (&dev
->lock
, flags
);
1664 if (dev
->state
== STATE_DEV_UNCONNECTED
)
1666 dev
->state
= STATE_DEV_UNCONNECTED
;
1668 INFO (dev
, "disconnected\n");
1669 next_event (dev
, GADGETFS_DISCONNECT
);
1672 spin_unlock_irqrestore (&dev
->lock
, flags
);
/* gadgetfs_suspend -- queue a GADGETFS_SUSPEND event for the states in
 * which userspace could be listening (setup/connected/unconnected);
 * all other states fall through the switch untouched.
 *
 * NOTE(review): the default arm of the switch and the ep0_readable()
 * wakeup are not visible in this extraction.
 */
1676 gadgetfs_suspend (struct usb_gadget
*gadget
)
1678 struct dev_data
*dev
= get_gadget_data (gadget
);
1680 INFO (dev
, "suspended from state %d\n", dev
->state
);
1681 spin_lock (&dev
->lock
);
1682 switch (dev
->state
) {
1683 case STATE_DEV_SETUP
: // VERY odd... host died??
1684 case STATE_DEV_CONNECTED
:
1685 case STATE_DEV_UNCONNECTED
:
1686 next_event (dev
, GADGETFS_SUSPEND
);
1692 spin_unlock (&dev
->lock
);
/* gadget driver template registered from dev_config(). Both .reset and
 * .disconnect point at gadgetfs_disconnect, so a bus reset is reported
 * to userspace the same way as a disconnect.
 *
 * NOTE(review): .max_speed is assigned at runtime in dev_config(); any
 * initializers between .suspend and .name are not visible here.
 */
1695 static struct usb_gadget_driver gadgetfs_driver
= {
1696 .function
= (char *) driver_desc
,
1697 .bind
= gadgetfs_bind
,
1698 .unbind
= gadgetfs_unbind
,
1699 .setup
= gadgetfs_setup
,
1700 .reset
= gadgetfs_disconnect
,
1701 .disconnect
= gadgetfs_disconnect
,
1702 .suspend
= gadgetfs_suspend
,
1705 .name
= (char *) shortname
,
1709 /*----------------------------------------------------------------------*/
1710 /* DEVICE INITIALIZATION
1712 * fd = open ("/dev/gadget/$CHIP", O_RDWR)
1713 * status = write (fd, descriptors, sizeof descriptors)
1715 * That write establishes the device configuration, so the kernel can
1716 * bind to the controller ... guaranteeing it can handle enumeration
1717 * at all necessary speeds. Descriptor order is:
1719 * . message tag (u32, host order) ... for now, must be zero; it
1720 * would change to support features like multi-config devices
1721 * . full/low speed config ... all wTotalLength bytes (with interface,
1722 * class, altsetting, endpoint, and other descriptors)
1723 * . high speed config ... all descriptors, for high speed operation;
1724 * this one's optional except for high-speed hardware
1725 * . device descriptor
1727 * Endpoints are not yet enabled. Drivers must wait until device
1728 * configuration and interface altsetting changes create
1729 * the need to configure (or unconfigure) them.
1731 * After initialization, the device stays active for as long as that
1732 * $CHIP file is open. Events must then be read from that descriptor,
1733 * such as configuration notifications.
1736 static int is_valid_config(struct usb_config_descriptor
*config
,
1739 return config
->bDescriptorType
== USB_DT_CONFIG
1740 && config
->bLength
== USB_DT_CONFIG_SIZE
1741 && total
>= USB_DT_CONFIG_SIZE
1742 && config
->bConfigurationValue
!= 0
1743 && (config
->bmAttributes
& USB_CONFIG_ATT_ONE
) != 0
1744 && (config
->bmAttributes
& USB_CONFIG_ATT_WAKEUP
) == 0;
1745 /* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1746 /* FIXME check lengths: walk to end */
/* dev_config -- write() handler for /dev/gadget/$CHIP while the device
 * is still being configured: parses "tag + full/low-speed config
 * [+ high-speed config] + device descriptor" from userspace (see the
 * DEVICE INITIALIZATION comment above), then registers gadgetfs_driver
 * so the UDC can bind and enumerate. Once dev->state has advanced past
 * STATE_DEV_OPENED, writes are forwarded to ep0_write() instead.
 *
 * NOTE(review): extraction is lossy -- declarations of tag/kbuf/total,
 * the IS_ERR() test before PTR_ERR, several error gotos/returns, and
 * the fail-path cleanup are not visible; confirm against upstream.
 */
1750 dev_config (struct file
*fd
, const char __user
*buf
, size_t len
, loff_t
*ptr
)
1752 struct dev_data
*dev
= fd
->private_data
;
1753 ssize_t value
= len
, length
= len
;
1758 spin_lock_irq(&dev
->lock
);
1759 if (dev
->state
> STATE_DEV_OPENED
) {
1760 value
= ep0_write(fd
, buf
, len
, ptr
);
1761 spin_unlock_irq(&dev
->lock
);
1764 spin_unlock_irq(&dev
->lock
);
/* bounds: at least tag + one config + device descriptor, at most 4 pages */
1766 if ((len
< (USB_DT_CONFIG_SIZE
+ USB_DT_DEVICE_SIZE
+ 4)) ||
1767 (len
> PAGE_SIZE
* 4))
1770 /* we might need to change message format someday */
1771 if (copy_from_user (&tag
, buf
, 4))
1778 kbuf
= memdup_user(buf
, length
);
1780 return PTR_ERR(kbuf
);
1782 spin_lock_irq (&dev
->lock
);
1790 /* full or low speed config */
1791 dev
->config
= (void *) kbuf
;
1792 total
= le16_to_cpu(dev
->config
->wTotalLength
);
1793 if (!is_valid_config(dev
->config
, total
) ||
1794 total
> length
- USB_DT_DEVICE_SIZE
)
1799 /* optional high speed config */
1800 if (kbuf
[1] == USB_DT_CONFIG
) {
1801 dev
->hs_config
= (void *) kbuf
;
1802 total
= le16_to_cpu(dev
->hs_config
->wTotalLength
);
1803 if (!is_valid_config(dev
->hs_config
, total
) ||
1804 total
> length
- USB_DT_DEVICE_SIZE
)
1809 dev
->hs_config
= NULL
;
1812 /* could support multiple configs, using another encoding! */
1814 /* device descriptor (tweaked for paranoia) */
1815 if (length
!= USB_DT_DEVICE_SIZE
)
1817 dev
->dev
= (void *)kbuf
;
1818 if (dev
->dev
->bLength
!= USB_DT_DEVICE_SIZE
1819 || dev
->dev
->bDescriptorType
!= USB_DT_DEVICE
1820 || dev
->dev
->bNumConfigurations
!= 1)
1822 dev
->dev
->bcdUSB
= cpu_to_le16 (0x0200);
1824 /* triggers gadgetfs_bind(); then we can enumerate. */
1825 spin_unlock_irq (&dev
->lock
);
1827 gadgetfs_driver
.max_speed
= USB_SPEED_HIGH
;
1829 gadgetfs_driver
.max_speed
= USB_SPEED_FULL
;
1831 value
= usb_gadget_probe_driver(&gadgetfs_driver
);
1836 /* at this point "good" hardware has for the first time
1837 * let the USB host see us. alternatively, if users
1838 * unplug/replug that will clear all the error state.
1840 * note: everything running before here was guaranteed
1841 * to choke driver model style diagnostics. from here
1842 * on, they can work ... except in cleanup paths that
1843 * kick in after the ep0 descriptor is closed.
1846 dev
->gadget_registered
= true;
1851 spin_unlock_irq (&dev
->lock
);
1852 pr_debug ("%s: %s fail %zd, %p\n", shortname
, __func__
, value
, dev
);
/* dev_open -- open() for /dev/gadget/$CHIP: succeeds only from
 * STATE_DEV_DISABLED, advancing to STATE_DEV_OPENED and stashing the
 * dev pointer in file->private_data for later fops.
 *
 * NOTE(review): the default -EBUSY value, get_dev(), and the return
 * statement are not visible in this extraction.
 */
1859 dev_open (struct inode
*inode
, struct file
*fd
)
1861 struct dev_data
*dev
= inode
->i_private
;
1864 spin_lock_irq(&dev
->lock
);
1865 if (dev
->state
== STATE_DEV_DISABLED
) {
1867 dev
->state
= STATE_DEV_OPENED
;
1868 fd
->private_data
= dev
;
1872 spin_unlock_irq(&dev
->lock
);
/* fops for the /dev/gadget/$CHIP (ep0) node: writes go to dev_config(),
 * which handles both initial configuration and later ep0 traffic.
 *
 * NOTE(review): the .open/.read/.poll initializers are not visible in
 * this extraction; confirm against the original file.
 */
1876 static const struct file_operations ep0_operations
= {
1877 .llseek
= no_llseek
,
1881 .write
= dev_config
,
1882 .fasync
= ep0_fasync
,
1884 .unlocked_ioctl
= dev_ioctl
,
1885 .release
= dev_release
,
1888 /*----------------------------------------------------------------------*/
1890 /* FILESYSTEM AND SUPERBLOCK OPERATIONS
1892 * Mounting the filesystem creates a controller file, used first for
1893 * device configuration then later for event monitoring.
1897 /* FIXME PAM etc could set this security policy without mount options
1898 * if epfiles inherited ownership and permissions from ep0 ...
/* mount-wide defaults for the uid/gid/mode of files created under the
 * gadgetfs root; exposed as writable (0644) module parameters.
 */
1901 static unsigned default_uid
;
1902 static unsigned default_gid
;
1903 static unsigned default_perm
= S_IRUSR
| S_IWUSR
;
1905 module_param (default_uid
, uint
, 0644);
1906 module_param (default_gid
, uint
, 0644);
1907 module_param (default_perm
, uint
, 0644);
/* gadgetfs_make_inode -- allocate an inode on 'sb', stamp it with the
 * module-param default uid/gid and current timestamps, stash 'data' in
 * i_private for the fops to retrieve, and install 'fops'.
 *
 * NOTE(review): the trailing 'int mode' parameter line, the
 * "if (inode)" guard, and the return are not visible here.
 */
1910 static struct inode
*
1911 gadgetfs_make_inode (struct super_block
*sb
,
1912 void *data
, const struct file_operations
*fops
,
1915 struct inode
*inode
= new_inode (sb
);
1918 inode
->i_ino
= get_next_ino();
1919 inode
->i_mode
= mode
;
1920 inode
->i_uid
= make_kuid(&init_user_ns
, default_uid
);
1921 inode
->i_gid
= make_kgid(&init_user_ns
, default_gid
);
1922 inode
->i_atime
= inode
->i_mtime
= inode
->i_ctime
1923 = current_time(inode
);
1924 inode
->i_private
= data
;
1925 inode
->i_fop
= fops
;
1930 /* creates in fs root directory, so non-renamable and non-linkable.
1931 * so inode and dentry are paired, until device reconfig.
/* gadgetfs_create_file -- make a regular file named 'name' in the fs
 * root, wiring 'data' and 'fops' into its inode; permissions come from
 * the default_perm module parameter.
 *
 * NOTE(review): the NULL checks after d_alloc_name() and
 * gadgetfs_make_inode() and the return statements are not visible in
 * this extraction.
 */
1933 static struct dentry
*
1934 gadgetfs_create_file (struct super_block
*sb
, char const *name
,
1935 void *data
, const struct file_operations
*fops
)
1937 struct dentry
*dentry
;
1938 struct inode
*inode
;
1940 dentry
= d_alloc_name(sb
->s_root
, name
);
1944 inode
= gadgetfs_make_inode (sb
, data
, fops
,
1945 S_IFREG
| (default_perm
& S_IRWXUGO
));
1950 d_add (dentry
, inode
);
/* minimal super_operations: generic statfs; inodes are deleted as soon
 * as their last reference is dropped (no inode caching).
 */
1954 static const struct super_operations gadget_fs_operations
= {
1955 .statfs
= simple_statfs
,
1956 .drop_inode
= generic_delete_inode
,
/* gadgetfs_fill_super -- build the superblock: block size/magic/ops, a
 * root directory inode, then the ep0 control file named after the UDC
 * (CHIP, obtained from usb_get_gadget_udc_name()).
 *
 * NOTE(review): allocation of 'dev', the error unwinding, and the
 * return statements are not visible in this extraction.
 */
1960 gadgetfs_fill_super (struct super_block
*sb
, void *opts
, int silent
)
1962 struct inode
*inode
;
1963 struct dev_data
*dev
;
1968 CHIP
= usb_get_gadget_udc_name();
1973 sb
->s_blocksize
= PAGE_SIZE
;
1974 sb
->s_blocksize_bits
= PAGE_SHIFT
;
1975 sb
->s_magic
= GADGETFS_MAGIC
;
1976 sb
->s_op
= &gadget_fs_operations
;
1977 sb
->s_time_gran
= 1;
/* root directory */
1980 inode
= gadgetfs_make_inode (sb
,
1981 NULL
, &simple_dir_operations
,
1982 S_IFDIR
| S_IRUGO
| S_IXUGO
);
1985 inode
->i_op
= &simple_dir_inode_operations
;
1986 if (!(sb
->s_root
= d_make_root (inode
)))
1989 /* the ep0 file is named after the controller we expect;
1990 * user mode code can use it for sanity checks, like we do.
1997 dev
->dentry
= gadgetfs_create_file(sb
, CHIP
, dev
, &ep0_operations
);
2003 /* other endpoint files are available after hardware setup,
2004 * from binding to a controller.
2013 /* "mount -t gadgetfs path /dev/gadget" ends up here */
2014 static struct dentry
*
2015 gadgetfs_mount (struct file_system_type
*t
, int flags
,
2016 const char *path
, void *opts
)
2018 return mount_single (t
, flags
, opts
, gadgetfs_fill_super
);
/* gadgetfs_kill_sb -- unmount: tear down the superblock, then drop the
 * reference held on the singleton device.
 *
 * NOTE(review): upstream guards put_dev() with "if (the_device)" and
 * clears the_device afterwards; those lines are not visible here.
 */
2022 gadgetfs_kill_sb (struct super_block
*sb
)
2024 kill_litter_super (sb
);
2026 put_dev (the_device
);
2033 /*----------------------------------------------------------------------*/
/* filesystem type for "mount -t gadgetfs"; MODULE_ALIAS_FS lets the
 * kernel autoload this module when such a mount is requested.
 *
 * NOTE(review): the .name initializer is not visible in this extraction.
 */
2035 static struct file_system_type gadgetfs_type
= {
2036 .owner
= THIS_MODULE
,
2038 .mount
= gadgetfs_mount
,
2039 .kill_sb
= gadgetfs_kill_sb
,
2041 MODULE_ALIAS_FS("gadgetfs");
2043 /*----------------------------------------------------------------------*/
/* module init: register the gadgetfs filesystem type and log the
 * driver banner.
 *
 * NOTE(review): the 'status' declaration, the success check guarding
 * pr_info, the return, and module_init() are not visible here.
 */
2045 static int __init
init (void)
2049 status
= register_filesystem (&gadgetfs_type
);
2051 pr_info ("%s: %s, version " DRIVER_VERSION
"\n",
2052 shortname
, driver_desc
);
2057 static void __exit
cleanup (void)
2059 pr_debug ("unregister %s\n", shortname
);
2060 unregister_filesystem (&gadgetfs_type
);
2062 module_exit (cleanup
);