1 /*
2 * linux/drivers/usb/gadget/pxa2xx_udc.c
3 * Intel PXA25x and IXP4xx on-chip full speed USB device controllers
4 *
5 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
6 * Copyright (C) 2003 Robert Schwebel, Pengutronix
7 * Copyright (C) 2003 Benedikt Spranger, Pengutronix
8 * Copyright (C) 2003 David Brownell
9 * Copyright (C) 2003 Joshua Wise
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 */
26
27 #undef DEBUG
28 // #define VERBOSE DBG_VERBOSE
29
30 #include <linux/device.h>
31 #include <linux/module.h>
32 #include <linux/kernel.h>
33 #include <linux/ioport.h>
34 #include <linux/types.h>
35 #include <linux/errno.h>
36 #include <linux/delay.h>
37 #include <linux/slab.h>
38 #include <linux/init.h>
39 #include <linux/timer.h>
40 #include <linux/list.h>
41 #include <linux/interrupt.h>
42 #include <linux/proc_fs.h>
43 #include <linux/mm.h>
44 #include <linux/platform_device.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/irq.h>
47
48 #include <asm/byteorder.h>
49 #include <asm/dma.h>
50 #include <asm/gpio.h>
51 #include <asm/io.h>
52 #include <asm/system.h>
53 #include <asm/mach-types.h>
54 #include <asm/unaligned.h>
55 #include <asm/hardware.h>
56
57 #include <linux/usb/ch9.h>
58 #include <linux/usb_gadget.h>
59
60 #include <asm/mach/udc_pxa2xx.h>
61
62
63 /*
64 * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
65 * series processors. The UDC for the IXP 4xx series is very similar.
66 * There are fifteen endpoints, in addition to ep0.
67 *
68 * Such controller drivers work with a gadget driver. The gadget driver
69 * returns descriptors, implements configuration and data protocols used
70 * by the host to interact with this device, and allocates endpoints to
71 * the different protocol interfaces. The controller driver virtualizes
72 * usb hardware so that the gadget drivers will be more portable.
73 *
74 * This UDC hardware wants to implement a bit too much USB protocol, so
75 * it constrains the sorts of USB configuration change events that work.
76 * The errata for these chips are misleading; some "fixed" bugs from
77 * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
78 */
79
80 #define DRIVER_VERSION "4-May-2005"
81 #define DRIVER_DESC "PXA 25x USB Device Controller driver"
82
83
84 static const char driver_name [] = "pxa2xx_udc";
85
86 static const char ep0name [] = "ep0";
87
88
89 // #define USE_DMA
90 // #define USE_OUT_DMA
91 // #define DISABLE_TEST_MODE
92
93 #ifdef CONFIG_ARCH_IXP4XX
94 #undef USE_DMA
95
96 /* cpu-specific register addresses are compiled in to this code */
97 #ifdef CONFIG_ARCH_PXA
98 #error "Can't configure both IXP and PXA"
99 #endif
100
101 #endif
102
103 #include "pxa2xx_udc.h"
104
105
106 #ifdef USE_DMA
107 static int use_dma = 1;
108 module_param(use_dma, bool, 0);
109 MODULE_PARM_DESC (use_dma, "true to use dma");
110
111 static void dma_nodesc_handler (int dmach, void *_ep);
112 static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req);
113
114 #ifdef USE_OUT_DMA
115 #define DMASTR " (dma support)"
116 #else
117 #define DMASTR " (dma in)"
118 #endif
119
120 #else /* !USE_DMA */
121 #define DMASTR " (pio only)"
122 #undef USE_OUT_DMA
123 #endif
124
125 #ifdef CONFIG_USB_PXA2XX_SMALL
126 #define SIZE_STR " (small)"
127 #else
128 #define SIZE_STR ""
129 #endif
130
131 #ifdef DISABLE_TEST_MODE
132 /* (mode == 0) == no undocumented chip tweaks
133 * (mode & 1) == double buffer bulk IN
134 * (mode & 2) == double buffer bulk OUT
135 * ... so mode = 3 (or 7, 15, etc) does it for both
136 */
137 static ushort fifo_mode = 0;
138 module_param(fifo_mode, ushort, 0);
139 MODULE_PARM_DESC (fifo_mode, "pxa2xx udc fifo mode");
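/* Illustrative usage (assumption, not from this file): with
 * DISABLE_TEST_MODE defined at build time, the experiment can be
 * requested at load time, e.g. "modprobe pxa2xx_udc fifo_mode=3"
 * to double buffer both bulk IN and bulk OUT endpoints.
 */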
140 #endif
141
142 /* ---------------------------------------------------------------------------
143 * endpoint related parts of the api to the usb controller hardware,
144 * used by gadget driver; and the inner talker-to-hardware core.
145 * ---------------------------------------------------------------------------
146 */
147
148 static void pxa2xx_ep_fifo_flush (struct usb_ep *ep);
149 static void nuke (struct pxa2xx_ep *, int status);
150
151 /* one GPIO should be used to detect VBUS from the host */
152 static int is_vbus_present(void)
153 {
154 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
155
156 if (mach->gpio_vbus)
157 return gpio_get_value(mach->gpio_vbus);
158 if (mach->udc_is_connected)
159 return mach->udc_is_connected();
160 return 1;
161 }
162
163 /* one GPIO should control a D+ pullup, so host sees this device (or not) */
164 static void pullup_off(void)
165 {
166 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
167
168 if (mach->gpio_pullup)
169 gpio_set_value(mach->gpio_pullup, 0);
170 else if (mach->udc_command)
171 mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
172 }
173
174 static void pullup_on(void)
175 {
176 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
177
178 if (mach->gpio_pullup)
179 gpio_set_value(mach->gpio_pullup, 1);
180 else if (mach->udc_command)
181 mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
182 }
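/* Illustrative board hookup (sketch, not from this file): platform
 * code normally supplies the mach info used above, along the lines of
 *
 *	static struct pxa2xx_udc_mach_info my_udc_info = {
 *		.gpio_vbus   = <board's VBUS sense GPIO>,
 *		.gpio_pullup = <board's D+ pullup GPIO>,
 *	};
 *	pxa_set_udc_info(&my_udc_info);
 *
 * where the GPIO numbers are board-specific placeholders and
 * pxa_set_udc_info() is the arch-level registration helper.
 */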
183
184 static void pio_irq_enable(int bEndpointAddress)
185 {
186 bEndpointAddress &= 0xf;
187 if (bEndpointAddress < 8)
188 UICR0 &= ~(1 << bEndpointAddress);
189 else {
190 bEndpointAddress -= 8;
191 UICR1 &= ~(1 << bEndpointAddress);
192 }
193 }
194
195 static void pio_irq_disable(int bEndpointAddress)
196 {
197 bEndpointAddress &= 0xf;
198 if (bEndpointAddress < 8)
199 UICR0 |= 1 << bEndpointAddress;
200 else {
201 bEndpointAddress -= 8;
202 UICR1 |= 1 << bEndpointAddress;
203 }
204 }
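/* e.g. (illustrative): endpoint 5 maps to bit 5 of UICR0, endpoint 11
 * to bit 3 of UICR1; a set bit masks (disables) that endpoint's PIO
 * interrupt, a cleared bit enables it.
 */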
205
206 /* The UDCCR reg contains mask and interrupt status bits,
207 * so using '|=' isn't safe as it may ack an interrupt.
208 */
209 #define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
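/* Worked example (illustrative): if UDCCR_SUSIR happens to be pending,
 * a plain "UDCCR |= UDCCR_UDE" would read that status bit back as 1
 * and write it again, acknowledging (clearing) the suspend interrupt
 * before its handler ever runs.  The helpers below always strip the
 * write-1-to-clear status bits via UDCCR_MASK_BITS first.
 */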
210
211 static inline void udc_set_mask_UDCCR(int mask)
212 {
213 UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
214 }
215
216 static inline void udc_clear_mask_UDCCR(int mask)
217 {
218 UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
219 }
220
221 static inline void udc_ack_int_UDCCR(int mask)
222 {
223         /* udccr contains the bits we don't want to change */
224 __u32 udccr = UDCCR & UDCCR_MASK_BITS;
225
226 UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
227 }
228
229 /*
230 * endpoint enable/disable
231 *
232 * we need to verify the descriptors used to enable endpoints. since pxa2xx
233 * endpoint configurations are fixed, and are pretty much always enabled,
234 * there's not a lot to manage here.
235 *
236 * because pxa2xx can't selectively initialize bulk (or interrupt) endpoints,
237 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
238 * for a single interface (with only the default altsetting) and for gadget
239 * drivers that don't halt endpoints (not reset by set_interface). that also
240 * means that if you use ISO, you must violate the USB spec rule that all
241 * iso endpoints must be in non-default altsettings.
242 */
243 static int pxa2xx_ep_enable (struct usb_ep *_ep,
244 const struct usb_endpoint_descriptor *desc)
245 {
246 struct pxa2xx_ep *ep;
247 struct pxa2xx_udc *dev;
248
249 ep = container_of (_ep, struct pxa2xx_ep, ep);
250 if (!_ep || !desc || ep->desc || _ep->name == ep0name
251 || desc->bDescriptorType != USB_DT_ENDPOINT
252 || ep->bEndpointAddress != desc->bEndpointAddress
253 || ep->fifo_size < le16_to_cpu
254 (desc->wMaxPacketSize)) {
255 DMSG("%s, bad ep or descriptor\n", __FUNCTION__);
256 return -EINVAL;
257 }
258
259 /* xfer types must match, except that interrupt ~= bulk */
260 if (ep->bmAttributes != desc->bmAttributes
261 && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
262 && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
263 DMSG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
264 return -EINVAL;
265 }
266
267 /* hardware _could_ do smaller, but driver doesn't */
268 if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
269 && le16_to_cpu (desc->wMaxPacketSize)
270 != BULK_FIFO_SIZE)
271 || !desc->wMaxPacketSize) {
272 DMSG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
273 return -ERANGE;
274 }
275
276 dev = ep->dev;
277 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
278 DMSG("%s, bogus device state\n", __FUNCTION__);
279 return -ESHUTDOWN;
280 }
281
282 ep->desc = desc;
283 ep->dma = -1;
284 ep->stopped = 0;
285 ep->pio_irqs = ep->dma_irqs = 0;
286 ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);
287
288 /* flush fifo (mostly for OUT buffers) */
289 pxa2xx_ep_fifo_flush (_ep);
290
291 /* ... reset halt state too, if we could ... */
292
293 #ifdef USE_DMA
294 /* for (some) bulk and ISO endpoints, try to get a DMA channel and
295 * bind it to the endpoint. otherwise use PIO.
296 */
297 switch (ep->bmAttributes) {
298 case USB_ENDPOINT_XFER_ISOC:
299 if (le16_to_cpu(desc->wMaxPacketSize) % 32)
300 break;
301 // fall through
302 case USB_ENDPOINT_XFER_BULK:
303 if (!use_dma || !ep->reg_drcmr)
304 break;
305 ep->dma = pxa_request_dma ((char *)_ep->name,
306 (le16_to_cpu (desc->wMaxPacketSize) > 64)
307 ? DMA_PRIO_MEDIUM /* some iso */
308 : DMA_PRIO_LOW,
309 dma_nodesc_handler, ep);
310 if (ep->dma >= 0) {
311 *ep->reg_drcmr = DRCMR_MAPVLD | ep->dma;
312 DMSG("%s using dma%d\n", _ep->name, ep->dma);
313 }
314 }
315 #endif
316
317 DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
318 return 0;
319 }
320
321 static int pxa2xx_ep_disable (struct usb_ep *_ep)
322 {
323 struct pxa2xx_ep *ep;
324 unsigned long flags;
325
326 ep = container_of (_ep, struct pxa2xx_ep, ep);
327 if (!_ep || !ep->desc) {
328 DMSG("%s, %s not enabled\n", __FUNCTION__,
329 _ep ? ep->ep.name : NULL);
330 return -EINVAL;
331 }
332 local_irq_save(flags);
333
334 nuke (ep, -ESHUTDOWN);
335
336 #ifdef USE_DMA
337 if (ep->dma >= 0) {
338 *ep->reg_drcmr = 0;
339 pxa_free_dma (ep->dma);
340 ep->dma = -1;
341 }
342 #endif
343
344 /* flush fifo (mostly for IN buffers) */
345 pxa2xx_ep_fifo_flush (_ep);
346
347 ep->desc = NULL;
348 ep->stopped = 1;
349
350 local_irq_restore(flags);
351 DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
352 return 0;
353 }
354
355 /*-------------------------------------------------------------------------*/
356
357 /* for the pxa2xx, these can just wrap kmalloc/kfree. gadget drivers
358 * must still pass correctly initialized endpoints, since other controller
359 * drivers may care about how it's currently set up (dma issues etc).
360 */
361
362 /*
363 * pxa2xx_ep_alloc_request - allocate a request data structure
364 */
365 static struct usb_request *
366 pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
367 {
368 struct pxa2xx_request *req;
369
370 req = kzalloc(sizeof(*req), gfp_flags);
371 if (!req)
372 return NULL;
373
374 INIT_LIST_HEAD (&req->queue);
375 return &req->req;
376 }
377
378
379 /*
380 * pxa2xx_ep_free_request - deallocate a request data structure
381 */
382 static void
383 pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
384 {
385 struct pxa2xx_request *req;
386
387 req = container_of (_req, struct pxa2xx_request, req);
388 WARN_ON (!list_empty (&req->queue));
389 kfree(req);
390 }
391
392
393 /* PXA cache needs flushing with DMA I/O (it's dma-incoherent), but there's
394 * no device-affinity and the heap works perfectly well for i/o buffers.
395 * It wastes much less memory than dma_alloc_coherent() would, and even
396 * prevents cacheline (32 bytes wide) sharing problems.
397 */
398 static void *
399 pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
400 dma_addr_t *dma, gfp_t gfp_flags)
401 {
402 char *retval;
403
404 retval = kmalloc (bytes, gfp_flags & ~(__GFP_DMA|__GFP_HIGHMEM));
405 if (retval)
406 #ifdef USE_DMA
407 *dma = virt_to_bus (retval);
408 #else
409 *dma = (dma_addr_t)~0;
410 #endif
411 return retval;
412 }
413
414 static void
415 pxa2xx_ep_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma,
416 unsigned bytes)
417 {
418 kfree (buf);
419 }
420
421 /*-------------------------------------------------------------------------*/
422
423 /*
424 * done - retire a request; caller blocked irqs
425 */
426 static void done(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int status)
427 {
428 unsigned stopped = ep->stopped;
429
430 list_del_init(&req->queue);
431
432 if (likely (req->req.status == -EINPROGRESS))
433 req->req.status = status;
434 else
435 status = req->req.status;
436
437 if (status && status != -ESHUTDOWN)
438 DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
439 ep->ep.name, &req->req, status,
440 req->req.actual, req->req.length);
441
442 /* don't modify queue heads during completion callback */
443 ep->stopped = 1;
444 req->req.complete(&ep->ep, &req->req);
445 ep->stopped = stopped;
446 }
447
448
449 static inline void ep0_idle (struct pxa2xx_udc *dev)
450 {
451 dev->ep0state = EP0_IDLE;
452 }
453
454 static int
455 write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
456 {
457 u8 *buf;
458 unsigned length, count;
459
460 buf = req->req.buf + req->req.actual;
461 prefetch(buf);
462
463 /* how big will this packet be? */
464 length = min(req->req.length - req->req.actual, max);
465 req->req.actual += length;
466
467 count = length;
468 while (likely(count--))
469 *uddr = *buf++;
470
471 return length;
472 }
473
474 /*
475 * write to an IN endpoint fifo, as many packets as possible.
476 * irqs will use this to write the rest later.
477 * caller guarantees at least one packet buffer is ready (or a zlp).
478 */
479 static int
480 write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
481 {
482 unsigned max;
483
484 max = le16_to_cpu(ep->desc->wMaxPacketSize);
485 do {
486 unsigned count;
487 int is_last, is_short;
488
489 count = write_packet(ep->reg_uddr, req, max);
490
491 /* last packet is usually short (or a zlp) */
492 if (unlikely (count != max))
493 is_last = is_short = 1;
494 else {
495 if (likely(req->req.length != req->req.actual)
496 || req->req.zero)
497 is_last = 0;
498 else
499 is_last = 1;
500 /* interrupt/iso maxpacket may not fill the fifo */
501 is_short = unlikely (max < ep->fifo_size);
502 }
503
504 DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
505 ep->ep.name, count,
506 is_last ? "/L" : "", is_short ? "/S" : "",
507 req->req.length - req->req.actual, req);
508
509 /* let loose that packet. maybe try writing another one,
510 * double buffering might work. TSP, TPC, and TFS
511 * bit values are the same for all normal IN endpoints.
512 */
513 *ep->reg_udccs = UDCCS_BI_TPC;
514 if (is_short)
515 *ep->reg_udccs = UDCCS_BI_TSP;
516
517 /* requests complete when all IN data is in the FIFO */
518 if (is_last) {
519 done (ep, req, 0);
520 if (list_empty(&ep->queue) || unlikely(ep->dma >= 0)) {
521 pio_irq_disable (ep->bEndpointAddress);
522 #ifdef USE_DMA
523 /* unaligned data and zlps couldn't use dma */
524 if (unlikely(!list_empty(&ep->queue))) {
525 req = list_entry(ep->queue.next,
526 struct pxa2xx_request, queue);
527 kick_dma(ep,req);
528 return 0;
529 }
530 #endif
531 }
532 return 1;
533 }
534
535 // TODO experiment: how robust can fifo mode tweaking be?
536 // double buffering is off in the default fifo mode, which
537 // prevents TFS from being set here.
538
539 } while (*ep->reg_udccs & UDCCS_BI_TFS);
540 return 0;
541 }
542
543 /* caller asserts req->pending (ep0 irq status nyet cleared); starts
544 * ep0 data stage. these chips want very simple state transitions.
545 */
546 static inline
547 void ep0start(struct pxa2xx_udc *dev, u32 flags, const char *tag)
548 {
549 UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
550 USIR0 = USIR0_IR0;
551 dev->req_pending = 0;
552 DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
553 __FUNCTION__, tag, UDCCS0, flags);
554 }
555
556 static int
557 write_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
558 {
559 unsigned count;
560 int is_short;
561
562 count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
563 ep->dev->stats.write.bytes += count;
564
565 /* last packet "must be" short (or a zlp) */
566 is_short = (count != EP0_FIFO_SIZE);
567
568 DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
569 req->req.length - req->req.actual, req);
570
571 if (unlikely (is_short)) {
572 if (ep->dev->req_pending)
573 ep0start(ep->dev, UDCCS0_IPR, "short IN");
574 else
575 UDCCS0 = UDCCS0_IPR;
576
577 count = req->req.length;
578 done (ep, req, 0);
579 ep0_idle(ep->dev);
580 #ifndef CONFIG_ARCH_IXP4XX
581 #if 1
582 /* This seems to get rid of lost status irqs in some cases:
583 * host responds quickly, or next request involves config
584 * change automagic, or should have been hidden, or ...
585 *
586 * FIXME get rid of all udelays possible...
587 */
588 if (count >= EP0_FIFO_SIZE) {
589 count = 100;
590 do {
591 if ((UDCCS0 & UDCCS0_OPR) != 0) {
592 /* clear OPR, generate ack */
593 UDCCS0 = UDCCS0_OPR;
594 break;
595 }
596 count--;
597 udelay(1);
598 } while (count);
599 }
600 #endif
601 #endif
602 } else if (ep->dev->req_pending)
603 ep0start(ep->dev, 0, "IN");
604 return is_short;
605 }
606
607
608 /*
609 * read_fifo - unload packet(s) from the fifo we use for usb OUT
610 * transfers and put them into the request. caller should have made
611 * sure there's at least one packet ready.
612 *
613 * returns true if the request completed because of short packet or the
614 * request buffer having filled (and maybe overran till end-of-packet).
615 */
616 static int
617 read_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
618 {
619 for (;;) {
620 u32 udccs;
621 u8 *buf;
622 unsigned bufferspace, count, is_short;
623
624 /* make sure there's a packet in the FIFO.
625 * UDCCS_{BO,IO}_RPC are all the same bit value.
626 * UDCCS_{BO,IO}_RNE are all the same bit value.
627 */
628 udccs = *ep->reg_udccs;
629 if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
630 break;
631 buf = req->req.buf + req->req.actual;
632 prefetchw(buf);
633 bufferspace = req->req.length - req->req.actual;
634
635 /* read all bytes from this packet */
636 if (likely (udccs & UDCCS_BO_RNE)) {
637 count = 1 + (0x0ff & *ep->reg_ubcr);
638 req->req.actual += min (count, bufferspace);
639 } else /* zlp */
640 count = 0;
641 is_short = (count < ep->ep.maxpacket);
642 DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
643 ep->ep.name, udccs, count,
644 is_short ? "/S" : "",
645 req, req->req.actual, req->req.length);
646 while (likely (count-- != 0)) {
647 u8 byte = (u8) *ep->reg_uddr;
648
649 if (unlikely (bufferspace == 0)) {
650 /* this happens when the driver's buffer
651 * is smaller than what the host sent.
652 * discard the extra data.
653 */
654 if (req->req.status != -EOVERFLOW)
655 DMSG("%s overflow %d\n",
656 ep->ep.name, count);
657 req->req.status = -EOVERFLOW;
658 } else {
659 *buf++ = byte;
660 bufferspace--;
661 }
662 }
663 *ep->reg_udccs = UDCCS_BO_RPC;
664 /* RPC/RSP/RNE could now reflect the other packet buffer */
665
666 /* iso is one request per packet */
667 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
668 if (udccs & UDCCS_IO_ROF)
669 req->req.status = -EHOSTUNREACH;
670 /* more like "is_done" */
671 is_short = 1;
672 }
673
674 /* completion */
675 if (is_short || req->req.actual == req->req.length) {
676 done (ep, req, 0);
677 if (list_empty(&ep->queue))
678 pio_irq_disable (ep->bEndpointAddress);
679 return 1;
680 }
681
682 /* finished that packet. the next one may be waiting... */
683 }
684 return 0;
685 }
686
687 /*
688 * special ep0 version of the above. no UBCR0 or double buffering; status
689 * handshaking is magic. most device protocols don't need control-OUT.
690 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
691 * protocols do use them.
692 */
693 static int
694 read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
695 {
696 u8 *buf, byte;
697 unsigned bufferspace;
698
699 buf = req->req.buf + req->req.actual;
700 bufferspace = req->req.length - req->req.actual;
701
702 while (UDCCS0 & UDCCS0_RNE) {
703 byte = (u8) UDDR0;
704
705 if (unlikely (bufferspace == 0)) {
706 /* this happens when the driver's buffer
707 * is smaller than what the host sent.
708 * discard the extra data.
709 */
710 if (req->req.status != -EOVERFLOW)
711 DMSG("%s overflow\n", ep->ep.name);
712 req->req.status = -EOVERFLOW;
713 } else {
714 *buf++ = byte;
715 req->req.actual++;
716 bufferspace--;
717 }
718 }
719
720 UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;
721
722 /* completion */
723 if (req->req.actual >= req->req.length)
724 return 1;
725
726 /* finished that packet. the next one may be waiting... */
727 return 0;
728 }
729
730 #ifdef USE_DMA
731
732 #define MAX_IN_DMA ((DCMD_LENGTH + 1) - BULK_FIFO_SIZE)
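/* Assuming the usual PXA definitions (DCMD_LENGTH == 0x1fff,
 * BULK_FIFO_SIZE == 64), one descriptor-free IN transfer is capped at
 * 8128 bytes, i.e. exactly 127 full 64-byte bulk packets.
 */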
733
734 static void
735 start_dma_nodesc(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int is_in)
736 {
737 u32 dcmd = req->req.length;
738 u32 buf = req->req.dma;
739 u32 fifo = io_v2p ((u32)ep->reg_uddr);
740
741 /* caller guarantees there's a packet or more remaining
742 * - IN may end with a short packet (TSP set separately),
743 * - OUT is always full length
744 */
745 buf += req->req.actual;
746 dcmd -= req->req.actual;
747 ep->dma_fixup = 0;
748
749 /* no-descriptor mode can be simple for bulk-in, iso-in, iso-out */
750 DCSR(ep->dma) = DCSR_NODESC;
751 if (is_in) {
752 DSADR(ep->dma) = buf;
753 DTADR(ep->dma) = fifo;
754 if (dcmd > MAX_IN_DMA)
755 dcmd = MAX_IN_DMA;
756 else
757 ep->dma_fixup = (dcmd % ep->ep.maxpacket) != 0;
758 dcmd |= DCMD_BURST32 | DCMD_WIDTH1
759 | DCMD_FLOWTRG | DCMD_INCSRCADDR;
760 } else {
761 #ifdef USE_OUT_DMA
762 DSADR(ep->dma) = fifo;
763 DTADR(ep->dma) = buf;
764 if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
765 dcmd = ep->ep.maxpacket;
766 dcmd |= DCMD_BURST32 | DCMD_WIDTH1
767 | DCMD_FLOWSRC | DCMD_INCTRGADDR;
768 #endif
769 }
770 DCMD(ep->dma) = dcmd;
771 DCSR(ep->dma) = DCSR_RUN | DCSR_NODESC
772 | (unlikely(is_in)
773 ? DCSR_STOPIRQEN /* use dma_nodesc_handler() */
774 : 0); /* use handle_ep() */
775 }
776
777 static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req)
778 {
779 int is_in = ep->bEndpointAddress & USB_DIR_IN;
780
781 if (is_in) {
782 /* unaligned tx buffers and zlps only work with PIO */
783 if ((req->req.dma & 0x0f) != 0
784 || unlikely((req->req.length - req->req.actual)
785 == 0)) {
786 pio_irq_enable(ep->bEndpointAddress);
787 if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0)
788 (void) write_fifo(ep, req);
789 } else {
790 start_dma_nodesc(ep, req, USB_DIR_IN);
791 }
792 } else {
793 if ((req->req.length - req->req.actual) < ep->ep.maxpacket) {
794 DMSG("%s short dma read...\n", ep->ep.name);
795 /* we're always set up for pio out */
796 read_fifo (ep, req);
797 } else {
798 *ep->reg_udccs = UDCCS_BO_DME
799 | (*ep->reg_udccs & UDCCS_BO_FST);
800 start_dma_nodesc(ep, req, USB_DIR_OUT);
801 }
802 }
803 }
804
805 static void cancel_dma(struct pxa2xx_ep *ep)
806 {
807 struct pxa2xx_request *req;
808 u32 tmp;
809
810 if (DCSR(ep->dma) == 0 || list_empty(&ep->queue))
811 return;
812
813 DCSR(ep->dma) = 0;
814 while ((DCSR(ep->dma) & DCSR_STOPSTATE) == 0)
815 cpu_relax();
816
817 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
818 tmp = DCMD(ep->dma) & DCMD_LENGTH;
819 req->req.actual = req->req.length - (tmp & DCMD_LENGTH);
820
821 /* the last tx packet may be incomplete, so flush the fifo.
822 * FIXME correct req.actual if we can
823 */
824 if (ep->bEndpointAddress & USB_DIR_IN)
825 *ep->reg_udccs = UDCCS_BI_FTF;
826 }
827
828 /* dma channel stopped ... normal tx end (IN), or on error (IN/OUT) */
829 static void dma_nodesc_handler(int dmach, void *_ep)
830 {
831 struct pxa2xx_ep *ep = _ep;
832 struct pxa2xx_request *req;
833 u32 tmp, completed;
834
835 local_irq_disable();
836
837 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
838
839 ep->dma_irqs++;
840 ep->dev->stats.irqs++;
841 HEX_DISPLAY(ep->dev->stats.irqs);
842
843 /* ack/clear */
844 tmp = DCSR(ep->dma);
845 DCSR(ep->dma) = tmp;
846 if ((tmp & DCSR_STOPSTATE) == 0
847 || (DDADR(ep->dma) & DDADR_STOP) != 0) {
848 DBG(DBG_VERBOSE, "%s, dcsr %08x ddadr %08x\n",
849 ep->ep.name, DCSR(ep->dma), DDADR(ep->dma));
850 goto done;
851 }
852 DCSR(ep->dma) = 0; /* clear DCSR_STOPSTATE */
853
854 /* update transfer status */
855 completed = tmp & DCSR_BUSERR;
856 if (ep->bEndpointAddress & USB_DIR_IN)
857 tmp = DSADR(ep->dma);
858 else
859 tmp = DTADR(ep->dma);
860 req->req.actual = tmp - req->req.dma;
861
862 /* FIXME seems we sometimes see partial transfers... */
863
864 if (unlikely(completed != 0))
865 req->req.status = -EIO;
866 else if (req->req.actual) {
867 /* these registers have zeroes in low bits; they miscount
868 * some (end-of-transfer) short packets: tx 14 as tx 12
869 */
870 if (ep->dma_fixup)
871 req->req.actual = min(req->req.actual + 3,
872 req->req.length);
873
874 tmp = (req->req.length - req->req.actual);
875 completed = (tmp == 0);
876 if (completed && (ep->bEndpointAddress & USB_DIR_IN)) {
877
878 /* maybe validate final short packet ... */
879 if ((req->req.actual % ep->ep.maxpacket) != 0)
880 *ep->reg_udccs = UDCCS_BI_TSP/*|UDCCS_BI_TPC*/;
881
882 /* ... or zlp, using pio fallback */
883 else if (ep->bmAttributes == USB_ENDPOINT_XFER_BULK
884 && req->req.zero) {
885 DMSG("%s zlp terminate ...\n", ep->ep.name);
886 completed = 0;
887 }
888 }
889 }
890
891 if (likely(completed)) {
892 done(ep, req, 0);
893
894 /* maybe re-activate after completion */
895 if (ep->stopped || list_empty(&ep->queue))
896 goto done;
897 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
898 }
899 kick_dma(ep, req);
900 done:
901 local_irq_enable();
902 }
903
904 #endif
905
906 /*-------------------------------------------------------------------------*/
907
908 static int
909 pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
910 {
911 struct pxa2xx_request *req;
912 struct pxa2xx_ep *ep;
913 struct pxa2xx_udc *dev;
914 unsigned long flags;
915
916 req = container_of(_req, struct pxa2xx_request, req);
917 if (unlikely (!_req || !_req->complete || !_req->buf
918 || !list_empty(&req->queue))) {
919 DMSG("%s, bad params\n", __FUNCTION__);
920 return -EINVAL;
921 }
922
923 ep = container_of(_ep, struct pxa2xx_ep, ep);
924 if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
925 DMSG("%s, bad ep\n", __FUNCTION__);
926 return -EINVAL;
927 }
928
929 dev = ep->dev;
930 if (unlikely (!dev->driver
931 || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
932 DMSG("%s, bogus device state\n", __FUNCTION__);
933 return -ESHUTDOWN;
934 }
935
936 /* iso is always one packet per request, that's the only way
937 * we can report per-packet status. that also helps with dma.
938 */
939 if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
940 && req->req.length > le16_to_cpu
941 (ep->desc->wMaxPacketSize)))
942 return -EMSGSIZE;
943
944 #ifdef USE_DMA
945 // FIXME caller may already have done the dma mapping
946 if (ep->dma >= 0) {
947 _req->dma = dma_map_single(dev->dev,
948 _req->buf, _req->length,
949 ((ep->bEndpointAddress & USB_DIR_IN) != 0)
950 ? DMA_TO_DEVICE
951 : DMA_FROM_DEVICE);
952 }
953 #endif
954
955 DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
956 _ep->name, _req, _req->length, _req->buf);
957
958 local_irq_save(flags);
959
960 _req->status = -EINPROGRESS;
961 _req->actual = 0;
962
963 /* kickstart this i/o queue? */
964 if (list_empty(&ep->queue) && !ep->stopped) {
965 if (ep->desc == 0 /* ep0 */) {
966 unsigned length = _req->length;
967
968 switch (dev->ep0state) {
969 case EP0_IN_DATA_PHASE:
970 dev->stats.write.ops++;
971 if (write_ep0_fifo(ep, req))
972 req = NULL;
973 break;
974
975 case EP0_OUT_DATA_PHASE:
976 dev->stats.read.ops++;
977 /* messy ... */
978 if (dev->req_config) {
979 DBG(DBG_VERBOSE, "ep0 config ack%s\n",
980 dev->has_cfr ? "" : " raced");
981 if (dev->has_cfr)
982 UDCCFR = UDCCFR_AREN|UDCCFR_ACM
983 |UDCCFR_MB1;
984 done(ep, req, 0);
985 dev->ep0state = EP0_END_XFER;
986 local_irq_restore (flags);
987 return 0;
988 }
989 if (dev->req_pending)
990 ep0start(dev, UDCCS0_IPR, "OUT");
991 if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
992 && read_ep0_fifo(ep, req))) {
993 ep0_idle(dev);
994 done(ep, req, 0);
995 req = NULL;
996 }
997 break;
998
999 default:
1000 DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
1001 local_irq_restore (flags);
1002 return -EL2HLT;
1003 }
1004 #ifdef USE_DMA
1005 /* either start dma or prime pio pump */
1006 } else if (ep->dma >= 0) {
1007 kick_dma(ep, req);
1008 #endif
1009                 /* can the FIFO satisfy the request immediately? */
1010 } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
1011 if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0
1012 && write_fifo(ep, req))
1013 req = NULL;
1014 } else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
1015 && read_fifo(ep, req)) {
1016 req = NULL;
1017 }
1018
1019 if (likely (req && ep->desc) && ep->dma < 0)
1020 pio_irq_enable(ep->bEndpointAddress);
1021 }
1022
1023 /* pio or dma irq handler advances the queue. */
1024 if (likely (req != 0))
1025 list_add_tail(&req->queue, &ep->queue);
1026 local_irq_restore(flags);
1027
1028 return 0;
1029 }
1030
1031
1032 /*
1033 * nuke - dequeue ALL requests
1034 */
1035 static void nuke(struct pxa2xx_ep *ep, int status)
1036 {
1037 struct pxa2xx_request *req;
1038
1039 /* called with irqs blocked */
1040 #ifdef USE_DMA
1041 if (ep->dma >= 0 && !ep->stopped)
1042 cancel_dma(ep);
1043 #endif
1044 while (!list_empty(&ep->queue)) {
1045 req = list_entry(ep->queue.next,
1046 struct pxa2xx_request,
1047 queue);
1048 done(ep, req, status);
1049 }
1050 if (ep->desc)
1051 pio_irq_disable (ep->bEndpointAddress);
1052 }
1053
1054
1055 /* dequeue JUST ONE request */
1056 static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1057 {
1058 struct pxa2xx_ep *ep;
1059 struct pxa2xx_request *req;
1060 unsigned long flags;
1061
1062 ep = container_of(_ep, struct pxa2xx_ep, ep);
1063 if (!_ep || ep->ep.name == ep0name)
1064 return -EINVAL;
1065
1066 local_irq_save(flags);
1067
1068 /* make sure it's actually queued on this endpoint */
1069 list_for_each_entry (req, &ep->queue, queue) {
1070 if (&req->req == _req)
1071 break;
1072 }
1073 if (&req->req != _req) {
1074 local_irq_restore(flags);
1075 return -EINVAL;
1076 }
1077
1078 #ifdef USE_DMA
1079 if (ep->dma >= 0 && ep->queue.next == &req->queue && !ep->stopped) {
1080 cancel_dma(ep);
1081 done(ep, req, -ECONNRESET);
1082 /* restart i/o */
1083 if (!list_empty(&ep->queue)) {
1084 req = list_entry(ep->queue.next,
1085 struct pxa2xx_request, queue);
1086 kick_dma(ep, req);
1087 }
1088 } else
1089 #endif
1090 done(ep, req, -ECONNRESET);
1091
1092 local_irq_restore(flags);
1093 return 0;
1094 }
1095
1096 /*-------------------------------------------------------------------------*/
1097
1098 static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value)
1099 {
1100 struct pxa2xx_ep *ep;
1101 unsigned long flags;
1102
1103 ep = container_of(_ep, struct pxa2xx_ep, ep);
1104 if (unlikely (!_ep
1105 || (!ep->desc && ep->ep.name != ep0name))
1106 || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
1107 DMSG("%s, bad ep\n", __FUNCTION__);
1108 return -EINVAL;
1109 }
1110 if (value == 0) {
1111 /* this path (reset toggle+halt) is needed to implement
1112 * SET_INTERFACE on normal hardware. but it can't be
1113 * done from software on the PXA UDC, and the hardware
1114 * forgets to do it as part of SET_INTERFACE automagic.
1115 */
1116 DMSG("only host can clear %s halt\n", _ep->name);
1117 return -EROFS;
1118 }
1119
1120 local_irq_save(flags);
1121
1122 if ((ep->bEndpointAddress & USB_DIR_IN) != 0
1123 && ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
1124 || !list_empty(&ep->queue))) {
1125 local_irq_restore(flags);
1126 return -EAGAIN;
1127 }
1128
1129 /* FST bit is the same for control, bulk in, bulk out, interrupt in */
1130 *ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;
1131
1132 /* ep0 needs special care */
1133 if (!ep->desc) {
1134 start_watchdog(ep->dev);
1135 ep->dev->req_pending = 0;
1136 ep->dev->ep0state = EP0_STALL;
1137
1138 /* and bulk/intr endpoints like dropping stalls too */
1139 } else {
1140 unsigned i;
1141 for (i = 0; i < 1000; i += 20) {
1142 if (*ep->reg_udccs & UDCCS_BI_SST)
1143 break;
1144 udelay(20);
1145 }
1146 }
1147 local_irq_restore(flags);
1148
1149 DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
1150 return 0;
1151 }
1152
1153 static int pxa2xx_ep_fifo_status(struct usb_ep *_ep)
1154 {
1155 struct pxa2xx_ep *ep;
1156
1157 ep = container_of(_ep, struct pxa2xx_ep, ep);
1158 if (!_ep) {
1159 DMSG("%s, bad ep\n", __FUNCTION__);
1160 return -ENODEV;
1161 }
1162 /* pxa can't report unclaimed bytes from IN fifos */
1163 if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
1164 return -EOPNOTSUPP;
1165 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
1166 || (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
1167 return 0;
1168 else
1169 return (*ep->reg_ubcr & 0xfff) + 1;
1170 }
1171
1172 static void pxa2xx_ep_fifo_flush(struct usb_ep *_ep)
1173 {
1174 struct pxa2xx_ep *ep;
1175
1176 ep = container_of(_ep, struct pxa2xx_ep, ep);
1177 if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
1178 DMSG("%s, bad ep\n", __FUNCTION__);
1179 return;
1180 }
1181
1182 /* toggle and halt bits stay unchanged */
1183
1184 /* for OUT, just read and discard the FIFO contents. */
1185 if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
1186 while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
1187 (void) *ep->reg_uddr;
1188 return;
1189 }
1190
1191 /* most IN status is the same, but ISO can't stall */
1192 *ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
1193                 | (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
1194                         ? 0 : UDCCS_BI_SST);
1195 }
1196
1197
1198 static struct usb_ep_ops pxa2xx_ep_ops = {
1199 .enable = pxa2xx_ep_enable,
1200 .disable = pxa2xx_ep_disable,
1201
1202 .alloc_request = pxa2xx_ep_alloc_request,
1203 .free_request = pxa2xx_ep_free_request,
1204
1205 .alloc_buffer = pxa2xx_ep_alloc_buffer,
1206 .free_buffer = pxa2xx_ep_free_buffer,
1207
1208 .queue = pxa2xx_ep_queue,
1209 .dequeue = pxa2xx_ep_dequeue,
1210
1211 .set_halt = pxa2xx_ep_set_halt,
1212 .fifo_status = pxa2xx_ep_fifo_status,
1213 .fifo_flush = pxa2xx_ep_fifo_flush,
1214 };
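/* Illustrative use from a gadget driver (sketch, not from this file):
 * these ops are reached through the generic usb_ep calls, roughly
 *
 *	usb_ep_enable(ep, desc);		// -> pxa2xx_ep_enable()
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;		// caller-provided callback
 *	usb_ep_queue(ep, req, GFP_ATOMIC);	// -> pxa2xx_ep_queue()
 *
 * where "buf", "len" and my_complete() are placeholders owned by the
 * gadget driver, not by this file.
 */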
1215
1216
1217 /* ---------------------------------------------------------------------------
1218 * device-scoped parts of the api to the usb controller hardware
1219 * ---------------------------------------------------------------------------
1220 */
1221
1222 static int pxa2xx_udc_get_frame(struct usb_gadget *_gadget)
1223 {
1224 return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
1225 }
1226
1227 static int pxa2xx_udc_wakeup(struct usb_gadget *_gadget)
1228 {
1229 /* host may not have enabled remote wakeup */
1230 if ((UDCCS0 & UDCCS0_DRWF) == 0)
1231 return -EHOSTUNREACH;
1232 udc_set_mask_UDCCR(UDCCR_RSM);
1233 return 0;
1234 }
1235
1236 static void stop_activity(struct pxa2xx_udc *, struct usb_gadget_driver *);
1237 static void udc_enable (struct pxa2xx_udc *);
1238 static void udc_disable(struct pxa2xx_udc *);
1239
1240 /* We disable the UDC -- and its 48 MHz clock -- whenever it's not
1241 * in active use.
1242 */
1243 static int pullup(struct pxa2xx_udc *udc, int is_active)
1244 {
1245 is_active = is_active && udc->vbus && udc->pullup;
1246 DMSG("%s\n", is_active ? "active" : "inactive");
1247 if (is_active)
1248 udc_enable(udc);
1249 else {
1250 if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
1251 DMSG("disconnect %s\n", udc->driver
1252 ? udc->driver->driver.name
1253 : "(no driver)");
1254 stop_activity(udc, udc->driver);
1255 }
1256 udc_disable(udc);
1257 }
1258 return 0;
1259 }
1260
1261 /* VBUS reporting logically comes from a transceiver */
1262 static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1263 {
1264 struct pxa2xx_udc *udc;
1265
1266 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
1267 udc->vbus = is_active = (is_active != 0);
1268 DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
1269 pullup(udc, is_active);
1270 return 0;
1271 }
1272
1273 /* drivers may have software control over D+ pullup */
1274 static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active)
1275 {
1276 struct pxa2xx_udc *udc;
1277
1278 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
1279
1280 /* not all boards support pullup control */
1281 if (!udc->mach->udc_command)
1282 return -EOPNOTSUPP;
1283
1284 is_active = (is_active != 0);
1285 udc->pullup = is_active;
1286 pullup(udc, is_active);
1287 return 0;
1288 }
1289
1290 static const struct usb_gadget_ops pxa2xx_udc_ops = {
1291 .get_frame = pxa2xx_udc_get_frame,
1292 .wakeup = pxa2xx_udc_wakeup,
1293 .vbus_session = pxa2xx_udc_vbus_session,
1294 .pullup = pxa2xx_udc_pullup,
1295
1296 // .vbus_draw ... boards may consume current from VBUS, up to
1297 // 100-500mA based on config. the 500uA suspend ceiling means
1298 // that exclusively vbus-powered PXA designs violate USB specs.
1299 };
1300
1301 /*-------------------------------------------------------------------------*/
1302
1303 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1304
1305 static const char proc_node_name [] = "driver/udc";
1306
1307 static int
1308 udc_proc_read(char *page, char **start, off_t off, int count,
1309 int *eof, void *_dev)
1310 {
1311 char *buf = page;
1312 struct pxa2xx_udc *dev = _dev;
1313 char *next = buf;
1314 unsigned size = count;
1315 unsigned long flags;
1316 int i, t;
1317 u32 tmp;
1318
1319 if (off != 0)
1320 return 0;
1321
1322 local_irq_save(flags);
1323
1324 /* basic device status */
1325 t = scnprintf(next, size, DRIVER_DESC "\n"
1326 "%s version: %s\nGadget driver: %s\nHost %s\n\n",
1327 driver_name, DRIVER_VERSION SIZE_STR DMASTR,
1328 dev->driver ? dev->driver->driver.name : "(none)",
1329 is_vbus_present() ? "full speed" : "disconnected");
1330 size -= t;
1331 next += t;
1332
1333 /* registers for device and ep0 */
1334 t = scnprintf(next, size,
1335 "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
1336 UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
1337 size -= t;
1338 next += t;
1339
1340 tmp = UDCCR;
1341 t = scnprintf(next, size,
1342 "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
1343 (tmp & UDCCR_REM) ? " rem" : "",
1344 (tmp & UDCCR_RSTIR) ? " rstir" : "",
1345 (tmp & UDCCR_SRM) ? " srm" : "",
1346 (tmp & UDCCR_SUSIR) ? " susir" : "",
1347 (tmp & UDCCR_RESIR) ? " resir" : "",
1348 (tmp & UDCCR_RSM) ? " rsm" : "",
1349 (tmp & UDCCR_UDA) ? " uda" : "",
1350 (tmp & UDCCR_UDE) ? " ude" : "");
1351 size -= t;
1352 next += t;
1353
1354 tmp = UDCCS0;
1355 t = scnprintf(next, size,
1356 "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
1357 (tmp & UDCCS0_SA) ? " sa" : "",
1358 (tmp & UDCCS0_RNE) ? " rne" : "",
1359 (tmp & UDCCS0_FST) ? " fst" : "",
1360 (tmp & UDCCS0_SST) ? " sst" : "",
1361                 (tmp & UDCCS0_DRWF) ? " drwf" : "",
1362 (tmp & UDCCS0_FTF) ? " ftf" : "",
1363 (tmp & UDCCS0_IPR) ? " ipr" : "",
1364 (tmp & UDCCS0_OPR) ? " opr" : "");
1365 size -= t;
1366 next += t;
1367
1368 if (dev->has_cfr) {
1369 tmp = UDCCFR;
1370 t = scnprintf(next, size,
1371 "udccfr %02X =%s%s\n", tmp,
1372 (tmp & UDCCFR_AREN) ? " aren" : "",
1373 (tmp & UDCCFR_ACM) ? " acm" : "");
1374 size -= t;
1375 next += t;
1376 }
1377
1378 if (!is_vbus_present() || !dev->driver)
1379 goto done;
1380
1381 t = scnprintf(next, size, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
1382 dev->stats.write.bytes, dev->stats.write.ops,
1383 dev->stats.read.bytes, dev->stats.read.ops,
1384 dev->stats.irqs);
1385 size -= t;
1386 next += t;
1387
1388 /* dump endpoint queues */
1389 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1390 struct pxa2xx_ep *ep = &dev->ep [i];
1391 struct pxa2xx_request *req;
1392 int t;
1393
1394 if (i != 0) {
1395 const struct usb_endpoint_descriptor *d;
1396
1397 d = ep->desc;
1398 if (!d)
1399 continue;
1400 tmp = *dev->ep [i].reg_udccs;
1401 t = scnprintf(next, size,
1402 "%s max %d %s udccs %02x irqs %lu/%lu\n",
1403 ep->ep.name, le16_to_cpu (d->wMaxPacketSize),
1404 (ep->dma >= 0) ? "dma" : "pio", tmp,
1405 ep->pio_irqs, ep->dma_irqs);
1406 /* TODO translate all five groups of udccs bits! */
1407
1408 } else /* ep0 should only have one transfer queued */
1409 t = scnprintf(next, size, "ep0 max 16 pio irqs %lu\n",
1410 ep->pio_irqs);
1411 if (t <= 0 || t > size)
1412 goto done;
1413 size -= t;
1414 next += t;
1415
1416 if (list_empty(&ep->queue)) {
1417 t = scnprintf(next, size, "\t(nothing queued)\n");
1418 if (t <= 0 || t > size)
1419 goto done;
1420 size -= t;
1421 next += t;
1422 continue;
1423 }
1424 list_for_each_entry(req, &ep->queue, queue) {
1425 #ifdef USE_DMA
1426 if (ep->dma >= 0 && req->queue.prev == &ep->queue)
1427 t = scnprintf(next, size,
1428 "\treq %p len %d/%d "
1429 "buf %p (dma%d dcmd %08x)\n",
1430 &req->req, req->req.actual,
1431 req->req.length, req->req.buf,
1432 ep->dma, DCMD(ep->dma)
1433 // low 13 bits == bytes-to-go
1434 );
1435 else
1436 #endif
1437 t = scnprintf(next, size,
1438 "\treq %p len %d/%d buf %p\n",
1439 &req->req, req->req.actual,
1440 req->req.length, req->req.buf);
1441 if (t <= 0 || t > size)
1442 goto done;
1443 size -= t;
1444 next += t;
1445 }
1446 }
1447
1448 done:
1449 local_irq_restore(flags);
1450 *eof = 1;
1451 return count - size;
1452 }
1453
1454 #define create_proc_files() \
1455 create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
1456 #define remove_proc_files() \
1457 remove_proc_entry(proc_node_name, NULL)
1458
1459 #else /* !CONFIG_USB_GADGET_DEBUG_FILES */
1460
1461 #define create_proc_files() do {} while (0)
1462 #define remove_proc_files() do {} while (0)
1463
1464 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */
1465
1466 /*-------------------------------------------------------------------------*/
1467
1468 /*
1469 * udc_disable - disable USB device controller
1470 */
1471 static void udc_disable(struct pxa2xx_udc *dev)
1472 {
1473 /* block all irqs */
1474 udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
1475 UICR0 = UICR1 = 0xff;
1476 UFNRH = UFNRH_SIM;
1477
1478 /* if hardware supports it, disconnect from usb */
1479 pullup_off();
1480
1481 udc_clear_mask_UDCCR(UDCCR_UDE);
1482
1483 #ifdef CONFIG_ARCH_PXA
1484 /* Disable clock for USB device */
1485 pxa_set_cken(CKEN_USB, 0);
1486 #endif
1487
1488 ep0_idle (dev);
1489 dev->gadget.speed = USB_SPEED_UNKNOWN;
1490 LED_CONNECTED_OFF;
1491 }
1492
1493
1494 /*
1495 * udc_reinit - initialize software state
1496 */
1497 static void udc_reinit(struct pxa2xx_udc *dev)
1498 {
1499 u32 i;
1500
1501 /* device/ep0 records init */
1502 INIT_LIST_HEAD (&dev->gadget.ep_list);
1503 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
1504 dev->ep0state = EP0_IDLE;
1505
1506 /* basic endpoint records init */
1507 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1508 struct pxa2xx_ep *ep = &dev->ep[i];
1509
1510 if (i != 0)
1511 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
1512
1513 ep->desc = NULL;
1514 ep->stopped = 0;
1515 INIT_LIST_HEAD (&ep->queue);
1516 ep->pio_irqs = ep->dma_irqs = 0;
1517 }
1518
1519 /* the rest was statically initialized, and is read-only */
1520 }
1521
1522 /* until it's enabled, this UDC should be completely invisible
1523 * to any USB host.
1524 */
1525 static void udc_enable (struct pxa2xx_udc *dev)
1526 {
1527 udc_clear_mask_UDCCR(UDCCR_UDE);
1528
1529 #ifdef CONFIG_ARCH_PXA
1530 /* Enable clock for USB device */
1531 pxa_set_cken(CKEN_USB, 1);
1532 udelay(5);
1533 #endif
1534
1535 /* try to clear these bits before we enable the udc */
1536 udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
1537
1538 ep0_idle(dev);
1539 dev->gadget.speed = USB_SPEED_UNKNOWN;
1540 dev->stats.irqs = 0;
1541
1542 /*
1543 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
1544 * - enable UDC
1545 * - if RESET is already in progress, ack interrupt
1546 * - unmask reset interrupt
1547 */
1548 udc_set_mask_UDCCR(UDCCR_UDE);
1549 if (!(UDCCR & UDCCR_UDA))
1550 udc_ack_int_UDCCR(UDCCR_RSTIR);
1551
1552 if (dev->has_cfr /* UDC_RES2 is defined */) {
1553 /* pxa255 (a0+) can avoid a set_config race that could
1554 * prevent gadget drivers from configuring correctly
1555 */
1556 UDCCFR = UDCCFR_ACM | UDCCFR_MB1;
1557 } else {
1558 /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
1559 * which could result in missing packets and interrupts.
1560 * supposedly one bit per endpoint, controlling whether it
1561 * double buffers or not; ACM/AREN bits fit into the holes.
1562 * zero bits (like USIR0_IRx) disable double buffering.
1563 */
1564 UDC_RES1 = 0x00;
1565 UDC_RES2 = 0x00;
1566 }
1567
1568 #ifdef DISABLE_TEST_MODE
1569 /* "test mode" seems to have become the default in later chip
1570 * revs, preventing double buffering (and invalidating docs).
1571 * this EXPERIMENT enables it for bulk endpoints by tweaking
1572 * undefined/reserved register bits (that other drivers clear).
1573 * Belcarra code comments noted this usage.
1574 */
1575 if (fifo_mode & 1) { /* IN endpoints */
1576 UDC_RES1 |= USIR0_IR1|USIR0_IR6;
1577 UDC_RES2 |= USIR1_IR11;
1578 }
1579 if (fifo_mode & 2) { /* OUT endpoints */
1580 UDC_RES1 |= USIR0_IR2|USIR0_IR7;
1581 UDC_RES2 |= USIR1_IR12;
1582 }
1583 #endif
1584
1585 /* enable suspend/resume and reset irqs */
1586 udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);
1587
1588 /* enable ep0 irqs */
1589 UICR0 &= ~UICR0_IM0;
1590
1591 /* if hardware supports it, pullup D+ and wait for reset */
1592 pullup_on();
1593 }
1594
1595
1596 /* when a driver is successfully registered, it will receive
1597 * control requests including set_configuration(), which enables
1598 * non-control requests. then usb traffic follows until a
1599 * disconnect is reported. then a host may connect again, or
1600 * the driver might get unbound.
1601 */
1602 int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1603 {
1604 struct pxa2xx_udc *dev = the_controller;
1605 int retval;
1606
1607 if (!driver
1608 || driver->speed < USB_SPEED_FULL
1609 || !driver->bind
1610 || !driver->disconnect
1611 || !driver->setup)
1612 return -EINVAL;
1613 if (!dev)
1614 return -ENODEV;
1615 if (dev->driver)
1616 return -EBUSY;
1617
1618 /* first hook up the driver ... */
1619 dev->driver = driver;
1620 dev->gadget.dev.driver = &driver->driver;
1621 dev->pullup = 1;
1622
1623 retval = device_add (&dev->gadget.dev);
1624 if (retval) {
1625 fail:
1626 dev->driver = NULL;
1627 dev->gadget.dev.driver = NULL;
1628 return retval;
1629 }
1630 retval = driver->bind(&dev->gadget);
1631 if (retval) {
1632 DMSG("bind to driver %s --> error %d\n",
1633 driver->driver.name, retval);
1634 device_del (&dev->gadget.dev);
1635 goto fail;
1636 }
1637
1638 /* ... then enable host detection and ep0; and we're ready
1639 * for set_configuration as well as eventual disconnect.
1640 */
1641 DMSG("registered gadget driver '%s'\n", driver->driver.name);
1642 pullup(dev, 1);
1643 dump_state(dev);
1644 return 0;
1645 }
1646 EXPORT_SYMBOL(usb_gadget_register_driver);
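/* Illustrative sketch (assumption, not part of this driver): a minimal
 * gadget driver satisfies the checks above with something like
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.speed      = USB_SPEED_FULL,
 *		.bind       = my_bind,
 *		.setup      = my_setup,
 *		.disconnect = my_disconnect,
 *		.unbind     = my_unbind,
 *		.driver     = { .name = "my_gadget" },
 *	};
 *
 *	status = usb_gadget_register_driver(&my_driver);
 *
 * where the my_*() callbacks are hypothetical functions supplied by
 * that gadget driver.
 */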
1647
1648 static void
1649 stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
1650 {
1651 int i;
1652
1653 /* don't disconnect drivers more than once */
1654 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1655 driver = NULL;
1656 dev->gadget.speed = USB_SPEED_UNKNOWN;
1657
1658 /* prevent new request submissions, kill any outstanding requests */
1659 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1660 struct pxa2xx_ep *ep = &dev->ep[i];
1661
1662 ep->stopped = 1;
1663 nuke(ep, -ESHUTDOWN);
1664 }
1665 del_timer_sync(&dev->timer);
1666
1667 /* report disconnect; the driver is already quiesced */
1668 LED_CONNECTED_OFF;
1669 if (driver)
1670 driver->disconnect(&dev->gadget);
1671
1672 /* re-init driver-visible data structures */
1673 udc_reinit(dev);
1674 }
1675
1676 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1677 {
1678 struct pxa2xx_udc *dev = the_controller;
1679
1680 if (!dev)
1681 return -ENODEV;
1682 if (!driver || driver != dev->driver || !driver->unbind)
1683 return -EINVAL;
1684
1685 local_irq_disable();
1686 pullup(dev, 0);
1687 stop_activity(dev, driver);
1688 local_irq_enable();
1689
1690 driver->unbind(&dev->gadget);
1691 dev->driver = NULL;
1692
1693 device_del (&dev->gadget.dev);
1694
1695 DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
1696 dump_state(dev);
1697 return 0;
1698 }
1699 EXPORT_SYMBOL(usb_gadget_unregister_driver);
1700
1701
1702 /*-------------------------------------------------------------------------*/
1703
1704 #ifdef CONFIG_ARCH_LUBBOCK
1705
1706 /* Lubbock has separate connect and disconnect irqs. More typical designs
1707 * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
1708 */
1709
1710 static irqreturn_t
1711 lubbock_vbus_irq(int irq, void *_dev)
1712 {
1713 struct pxa2xx_udc *dev = _dev;
1714 int vbus;
1715
1716 dev->stats.irqs++;
1717 HEX_DISPLAY(dev->stats.irqs);
1718 switch (irq) {
1719 case LUBBOCK_USB_IRQ:
1720 LED_CONNECTED_ON;
1721 vbus = 1;
1722 disable_irq(LUBBOCK_USB_IRQ);
1723 enable_irq(LUBBOCK_USB_DISC_IRQ);
1724 break;
1725 case LUBBOCK_USB_DISC_IRQ:
1726 LED_CONNECTED_OFF;
1727 vbus = 0;
1728 disable_irq(LUBBOCK_USB_DISC_IRQ);
1729 enable_irq(LUBBOCK_USB_IRQ);
1730 break;
1731 default:
1732 return IRQ_NONE;
1733 }
1734
1735 pxa2xx_udc_vbus_session(&dev->gadget, vbus);
1736 return IRQ_HANDLED;
1737 }
1738
1739 #endif
1740
1741 static irqreturn_t udc_vbus_irq(int irq, void *_dev)
1742 {
1743 struct pxa2xx_udc *dev = _dev;
1744 int vbus = gpio_get_value(dev->mach->gpio_vbus);
1745
1746 pxa2xx_udc_vbus_session(&dev->gadget, vbus);
1747 return IRQ_HANDLED;
1748 }
1749
1750
1751 /*-------------------------------------------------------------------------*/
1752
1753 static inline void clear_ep_state (struct pxa2xx_udc *dev)
1754 {
1755 unsigned i;
1756
1757 /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
1758 * fifos, and pending transactions mustn't be continued in any case.
1759 */
1760 for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
1761 nuke(&dev->ep[i], -ECONNABORTED);
1762 }
1763
1764 static void udc_watchdog(unsigned long _dev)
1765 {
1766 struct pxa2xx_udc *dev = (void *)_dev;
1767
1768 local_irq_disable();
1769 if (dev->ep0state == EP0_STALL
1770 && (UDCCS0 & UDCCS0_FST) == 0
1771 && (UDCCS0 & UDCCS0_SST) == 0) {
1772 UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
1773 DBG(DBG_VERBOSE, "ep0 re-stall\n");
1774 start_watchdog(dev);
1775 }
1776 local_irq_enable();
1777 }
1778
1779 static void handle_ep0 (struct pxa2xx_udc *dev)
1780 {
1781 u32 udccs0 = UDCCS0;
1782 struct pxa2xx_ep *ep = &dev->ep [0];
1783 struct pxa2xx_request *req;
1784 union {
1785 struct usb_ctrlrequest r;
1786 u8 raw [8];
1787 u32 word [2];
1788 } u;
1789
1790 if (list_empty(&ep->queue))
1791 req = NULL;
1792 else
1793 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
1794
1795 /* clear stall status */
1796 if (udccs0 & UDCCS0_SST) {
1797 nuke(ep, -EPIPE);
1798 UDCCS0 = UDCCS0_SST;
1799 del_timer(&dev->timer);
1800 ep0_idle(dev);
1801 }
1802
1803 /* previous request unfinished? non-error iff back-to-back ... */
1804 if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
1805 nuke(ep, 0);
1806 del_timer(&dev->timer);
1807 ep0_idle(dev);
1808 }
1809
1810 switch (dev->ep0state) {
1811 case EP0_IDLE:
1812 /* late-breaking status? */
1813 udccs0 = UDCCS0;
1814
1815 /* start control request? */
1816 if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
1817 == (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
1818 int i;
1819
1820 nuke (ep, -EPROTO);
1821
1822 /* read SETUP packet */
1823 for (i = 0; i < 8; i++) {
1824 if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
1825 bad_setup:
1826 DMSG("SETUP %d!\n", i);
1827 goto stall;
1828 }
1829 u.raw [i] = (u8) UDDR0;
1830 }
1831 if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
1832 goto bad_setup;
1833
1834 got_setup:
1835 DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1836 u.r.bRequestType, u.r.bRequest,
1837 le16_to_cpu(u.r.wValue),
1838 le16_to_cpu(u.r.wIndex),
1839 le16_to_cpu(u.r.wLength));
1840
1841 /* cope with automagic for some standard requests. */
1842 dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
1843 == USB_TYPE_STANDARD;
1844 dev->req_config = 0;
1845 dev->req_pending = 1;
1846 switch (u.r.bRequest) {
1847 /* hardware restricts gadget drivers here! */
1848 case USB_REQ_SET_CONFIGURATION:
1849 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1850 /* reflect hardware's automagic
1851 * up to the gadget driver.
1852 */
1853 config_change:
1854 dev->req_config = 1;
1855 clear_ep_state(dev);
1856 /* if !has_cfr, there's no synch
1857 * else use AREN (later) not SA|OPR
1858 * USIR0_IR0 acts edge sensitive
1859 */
1860 }
1861 break;
1862 /* ... and here, even more ... */
1863 case USB_REQ_SET_INTERFACE:
1864 if (u.r.bRequestType == USB_RECIP_INTERFACE) {
1865 /* udc hardware is broken by design:
1866 * - altsetting may only be zero;
1867 * - hw resets all interfaces' eps;
1868 * - ep reset doesn't include halt(?).
1869 */
1870 DMSG("broken set_interface (%d/%d)\n",
1871 le16_to_cpu(u.r.wIndex),
1872 le16_to_cpu(u.r.wValue));
1873 goto config_change;
1874 }
1875 break;
1876 /* hardware was supposed to hide this */
1877 case USB_REQ_SET_ADDRESS:
1878 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1879 ep0start(dev, 0, "address");
1880 return;
1881 }
1882 break;
1883 }
1884
1885 if (u.r.bRequestType & USB_DIR_IN)
1886 dev->ep0state = EP0_IN_DATA_PHASE;
1887 else
1888 dev->ep0state = EP0_OUT_DATA_PHASE;
1889
1890 i = dev->driver->setup(&dev->gadget, &u.r);
1891 if (i < 0) {
1892 /* hardware automagic preventing STALL... */
1893 if (dev->req_config) {
1894 /* hardware sometimes neglects to tell
1895                                  * us about config change events,
1896 * so later ones may fail...
1897 */
1898 WARN("config change %02x fail %d?\n",
1899 u.r.bRequest, i);
1900 return;
1901 /* TODO experiment: if has_cfr,
1902 * hardware didn't ACK; maybe we
1903 * could actually STALL!
1904 */
1905 }
1906 DBG(DBG_VERBOSE, "protocol STALL, "
1907 "%02x err %d\n", UDCCS0, i);
1908 stall:
1909 /* the watchdog timer helps deal with cases
1910 * where udc seems to clear FST wrongly, and
1911 * then NAKs instead of STALLing.
1912 */
1913 ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
1914 start_watchdog(dev);
1915 dev->ep0state = EP0_STALL;
1916
1917 /* deferred i/o == no response yet */
1918 } else if (dev->req_pending) {
1919 if (likely(dev->ep0state == EP0_IN_DATA_PHASE
1920 || dev->req_std || u.r.wLength))
1921 ep0start(dev, 0, "defer");
1922 else
1923 ep0start(dev, UDCCS0_IPR, "defer/IPR");
1924 }
1925
1926 /* expect at least one data or status stage irq */
1927 return;
1928
1929 } else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
1930 == (UDCCS0_OPR|UDCCS0_SA))) {
1931 unsigned i;
1932
1933 /* pxa210/250 erratum 131 for B0/B1 says RNE lies.
1934 * still observed on a pxa255 a0.
1935 */
1936 DBG(DBG_VERBOSE, "e131\n");
1937 nuke(ep, -EPROTO);
1938
1939 /* read SETUP data, but don't trust it too much */
1940 for (i = 0; i < 8; i++)
1941 u.raw [i] = (u8) UDDR0;
1942 if ((u.r.bRequestType & USB_RECIP_MASK)
1943 > USB_RECIP_OTHER)
1944 goto stall;
1945 if (u.word [0] == 0 && u.word [1] == 0)
1946 goto stall;
1947 goto got_setup;
1948 } else {
1949 /* some random early IRQ:
1950 * - we acked FST
1951 * - IPR cleared
1952 * - OPR got set, without SA (likely status stage)
1953 */
1954 UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
1955 }
1956 break;
1957 case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
1958 if (udccs0 & UDCCS0_OPR) {
1959 UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
1960 DBG(DBG_VERBOSE, "ep0in premature status\n");
1961 if (req)
1962 done(ep, req, 0);
1963 ep0_idle(dev);
1964 } else /* irq was IPR clearing */ {
1965 if (req) {
1966 /* this IN packet might finish the request */
1967 (void) write_ep0_fifo(ep, req);
1968 } /* else IN token before response was written */
1969 }
1970 break;
1971 case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
1972 if (udccs0 & UDCCS0_OPR) {
1973 if (req) {
1974 /* this OUT packet might finish the request */
1975 if (read_ep0_fifo(ep, req))
1976 done(ep, req, 0);
1977 /* else more OUT packets expected */
1978 } /* else OUT token before read was issued */
1979 } else /* irq was IPR clearing */ {
1980 DBG(DBG_VERBOSE, "ep0out premature status\n");
1981 if (req)
1982 done(ep, req, 0);
1983 ep0_idle(dev);
1984 }
1985 break;
1986 case EP0_END_XFER:
1987 if (req)
1988 done(ep, req, 0);
1989 /* ack control-IN status (maybe in-zlp was skipped)
1990 * also appears after some config change events.
1991 */
1992 if (udccs0 & UDCCS0_OPR)
1993 UDCCS0 = UDCCS0_OPR;
1994 ep0_idle(dev);
1995 break;
1996 case EP0_STALL:
1997 UDCCS0 = UDCCS0_FST;
1998 break;
1999 }
2000 USIR0 = USIR0_IR0;
2001 }
2002
2003 static void handle_ep(struct pxa2xx_ep *ep)
2004 {
2005 struct pxa2xx_request *req;
2006 int is_in = ep->bEndpointAddress & USB_DIR_IN;
2007 int completed;
2008 u32 udccs, tmp;
2009
2010 do {
2011 completed = 0;
2012 if (likely (!list_empty(&ep->queue)))
2013 req = list_entry(ep->queue.next,
2014 struct pxa2xx_request, queue);
2015 else
2016 req = NULL;
2017
2018 // TODO check FST handling
2019
2020 udccs = *ep->reg_udccs;
2021 if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */
2022 tmp = UDCCS_BI_TUR;
2023 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
2024 tmp |= UDCCS_BI_SST;
2025 tmp &= udccs;
2026 if (likely (tmp))
2027 *ep->reg_udccs = tmp;
2028 if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
2029 completed = write_fifo(ep, req);
2030
2031 } else { /* irq from RPC (or for ISO, ROF) */
2032 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
2033 tmp = UDCCS_BO_SST | UDCCS_BO_DME;
2034 else
2035 tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
2036 tmp &= udccs;
2037 if (likely(tmp))
2038 *ep->reg_udccs = tmp;
2039
2040 /* fifos can hold packets, ready for reading... */
2041 if (likely(req)) {
2042 #ifdef USE_OUT_DMA
2043 // TODO didn't yet debug out-dma. this approach assumes
2044 // the worst about short packets and RPC; it might be better.
2045
2046 if (likely(ep->dma >= 0)) {
2047 if (!(udccs & UDCCS_BO_RSP)) {
2048 *ep->reg_udccs = UDCCS_BO_RPC;
2049 ep->dma_irqs++;
2050 return;
2051 }
2052 }
2053 #endif
2054 completed = read_fifo(ep, req);
2055 } else
2056 pio_irq_disable (ep->bEndpointAddress);
2057 }
2058 ep->pio_irqs++;
2059 } while (completed);
2060 }
2061
2062 /*
2063 * pxa2xx_udc_irq - interrupt handler
2064 *
2065 * avoid delays in ep0 processing. the control handshaking isn't always
2066 * under software control (pxa250c0 and the pxa255 are better), and delays
2067 * could cause usb protocol errors.
2068 */
2069 static irqreturn_t
2070 pxa2xx_udc_irq(int irq, void *_dev)
2071 {
2072 struct pxa2xx_udc *dev = _dev;
2073 int handled;
2074
2075 dev->stats.irqs++;
2076 HEX_DISPLAY(dev->stats.irqs);
2077 do {
2078 u32 udccr = UDCCR;
2079
2080 handled = 0;
2081
2082 /* SUSpend Interrupt Request */
2083 if (unlikely(udccr & UDCCR_SUSIR)) {
2084 udc_ack_int_UDCCR(UDCCR_SUSIR);
2085 handled = 1;
2086 DBG(DBG_VERBOSE, "USB suspend%s\n", is_vbus_present()
2087 ? "" : "+disconnect");
2088
2089 if (!is_vbus_present())
2090 stop_activity(dev, dev->driver);
2091 else if (dev->gadget.speed != USB_SPEED_UNKNOWN
2092 && dev->driver
2093 && dev->driver->suspend)
2094 dev->driver->suspend(&dev->gadget);
2095 ep0_idle (dev);
2096 }
2097
2098 /* RESume Interrupt Request */
2099 if (unlikely(udccr & UDCCR_RESIR)) {
2100 udc_ack_int_UDCCR(UDCCR_RESIR);
2101 handled = 1;
2102 DBG(DBG_VERBOSE, "USB resume\n");
2103
2104 if (dev->gadget.speed != USB_SPEED_UNKNOWN
2105 && dev->driver
2106 && dev->driver->resume
2107 && is_vbus_present())
2108 dev->driver->resume(&dev->gadget);
2109 }
2110
2111 /* ReSeT Interrupt Request - USB reset */
2112 if (unlikely(udccr & UDCCR_RSTIR)) {
2113 udc_ack_int_UDCCR(UDCCR_RSTIR);
2114 handled = 1;
2115
2116 if ((UDCCR & UDCCR_UDA) == 0) {
2117 DBG(DBG_VERBOSE, "USB reset start\n");
2118
2119 /* reset driver and endpoints,
2120 * in case that's not yet done
2121 */
2122 stop_activity (dev, dev->driver);
2123
2124 } else {
2125 DBG(DBG_VERBOSE, "USB reset end\n");
2126 dev->gadget.speed = USB_SPEED_FULL;
2127 LED_CONNECTED_ON;
2128 memset(&dev->stats, 0, sizeof dev->stats);
2129 /* driver and endpoints are still reset */
2130 }
2131
2132 } else {
2133 u32 usir0 = USIR0 & ~UICR0;
2134 u32 usir1 = USIR1 & ~UICR1;
2135 int i;
2136
2137 if (unlikely (!usir0 && !usir1))
2138 continue;
2139
2140 DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);
2141
2142 /* control traffic */
2143 if (usir0 & USIR0_IR0) {
2144 dev->ep[0].pio_irqs++;
2145 handle_ep0(dev);
2146 handled = 1;
2147 }
2148
2149 /* endpoint data transfers */
2150 for (i = 0; i < 8; i++) {
2151 u32 tmp = 1 << i;
2152
2153 if (i && (usir0 & tmp)) {
2154 handle_ep(&dev->ep[i]);
2155 USIR0 |= tmp;
2156 handled = 1;
2157 }
2158 if (usir1 & tmp) {
2159 handle_ep(&dev->ep[i+8]);
2160 USIR1 |= tmp;
2161 handled = 1;
2162 }
2163 }
2164 }
2165
2166 /* we could also ask for 1 msec SOF (SIR) interrupts */
2167
2168 } while (handled);
2169 return IRQ_HANDLED;
2170 }
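
/* Illustrative sketch (not driver code): the endpoint demux rule used in the
 * loop above, written out as a helper.  Endpoints 1..7 report through USIR0
 * bits 1..7 and endpoints 8..15 through USIR1 bits 0..7; ep0 uses USIR0_IR0
 * and is handled separately.  The helper name is made up for this example.
 */
#if 0
static u32 ep_usir_mask(unsigned ep_num)	/* ep_num in 1..15 */
{
	return (ep_num < 8) ? (1 << ep_num) : (1 << (ep_num - 8));
}
#endif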
2171
2172 /*-------------------------------------------------------------------------*/
2173
2174 static void nop_release (struct device *dev)
2175 {
2176 DMSG("%s %s\n", __FUNCTION__, dev->bus_id);
2177 }
2178
2179 /* this uses load-time allocation and initialization (instead of
2180 * doing it at run-time) to save code, eliminate fault paths, and
2181 * be more obviously correct.
2182 */
2183 static struct pxa2xx_udc memory = {
2184 .gadget = {
2185 .ops = &pxa2xx_udc_ops,
2186 .ep0 = &memory.ep[0].ep,
2187 .name = driver_name,
2188 .dev = {
2189 .bus_id = "gadget",
2190 .release = nop_release,
2191 },
2192 },
2193
2194 /* control endpoint */
2195 .ep[0] = {
2196 .ep = {
2197 .name = ep0name,
2198 .ops = &pxa2xx_ep_ops,
2199 .maxpacket = EP0_FIFO_SIZE,
2200 },
2201 .dev = &memory,
2202 .reg_udccs = &UDCCS0,
2203 .reg_uddr = &UDDR0,
2204 },
2205
2206 /* first group of endpoints */
2207 .ep[1] = {
2208 .ep = {
2209 .name = "ep1in-bulk",
2210 .ops = &pxa2xx_ep_ops,
2211 .maxpacket = BULK_FIFO_SIZE,
2212 },
2213 .dev = &memory,
2214 .fifo_size = BULK_FIFO_SIZE,
2215 .bEndpointAddress = USB_DIR_IN | 1,
2216 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2217 .reg_udccs = &UDCCS1,
2218 .reg_uddr = &UDDR1,
2219 drcmr (25)
2220 },
2221 .ep[2] = {
2222 .ep = {
2223 .name = "ep2out-bulk",
2224 .ops = &pxa2xx_ep_ops,
2225 .maxpacket = BULK_FIFO_SIZE,
2226 },
2227 .dev = &memory,
2228 .fifo_size = BULK_FIFO_SIZE,
2229 .bEndpointAddress = 2,
2230 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2231 .reg_udccs = &UDCCS2,
2232 .reg_ubcr = &UBCR2,
2233 .reg_uddr = &UDDR2,
2234 drcmr (26)
2235 },
2236 #ifndef CONFIG_USB_PXA2XX_SMALL
2237 .ep[3] = {
2238 .ep = {
2239 .name = "ep3in-iso",
2240 .ops = &pxa2xx_ep_ops,
2241 .maxpacket = ISO_FIFO_SIZE,
2242 },
2243 .dev = &memory,
2244 .fifo_size = ISO_FIFO_SIZE,
2245 .bEndpointAddress = USB_DIR_IN | 3,
2246 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2247 .reg_udccs = &UDCCS3,
2248 .reg_uddr = &UDDR3,
2249 drcmr (27)
2250 },
2251 .ep[4] = {
2252 .ep = {
2253 .name = "ep4out-iso",
2254 .ops = &pxa2xx_ep_ops,
2255 .maxpacket = ISO_FIFO_SIZE,
2256 },
2257 .dev = &memory,
2258 .fifo_size = ISO_FIFO_SIZE,
2259 .bEndpointAddress = 4,
2260 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2261 .reg_udccs = &UDCCS4,
2262 .reg_ubcr = &UBCR4,
2263 .reg_uddr = &UDDR4,
2264 drcmr (28)
2265 },
2266 .ep[5] = {
2267 .ep = {
2268 .name = "ep5in-int",
2269 .ops = &pxa2xx_ep_ops,
2270 .maxpacket = INT_FIFO_SIZE,
2271 },
2272 .dev = &memory,
2273 .fifo_size = INT_FIFO_SIZE,
2274 .bEndpointAddress = USB_DIR_IN | 5,
2275 .bmAttributes = USB_ENDPOINT_XFER_INT,
2276 .reg_udccs = &UDCCS5,
2277 .reg_uddr = &UDDR5,
2278 },
2279
2280 /* second group of endpoints */
2281 .ep[6] = {
2282 .ep = {
2283 .name = "ep6in-bulk",
2284 .ops = &pxa2xx_ep_ops,
2285 .maxpacket = BULK_FIFO_SIZE,
2286 },
2287 .dev = &memory,
2288 .fifo_size = BULK_FIFO_SIZE,
2289 .bEndpointAddress = USB_DIR_IN | 6,
2290 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2291 .reg_udccs = &UDCCS6,
2292 .reg_uddr = &UDDR6,
2293 drcmr (30)
2294 },
2295 .ep[7] = {
2296 .ep = {
2297 .name = "ep7out-bulk",
2298 .ops = &pxa2xx_ep_ops,
2299 .maxpacket = BULK_FIFO_SIZE,
2300 },
2301 .dev = &memory,
2302 .fifo_size = BULK_FIFO_SIZE,
2303 .bEndpointAddress = 7,
2304 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2305 .reg_udccs = &UDCCS7,
2306 .reg_ubcr = &UBCR7,
2307 .reg_uddr = &UDDR7,
2308 drcmr (31)
2309 },
2310 .ep[8] = {
2311 .ep = {
2312 .name = "ep8in-iso",
2313 .ops = &pxa2xx_ep_ops,
2314 .maxpacket = ISO_FIFO_SIZE,
2315 },
2316 .dev = &memory,
2317 .fifo_size = ISO_FIFO_SIZE,
2318 .bEndpointAddress = USB_DIR_IN | 8,
2319 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2320 .reg_udccs = &UDCCS8,
2321 .reg_uddr = &UDDR8,
2322 drcmr (32)
2323 },
2324 .ep[9] = {
2325 .ep = {
2326 .name = "ep9out-iso",
2327 .ops = &pxa2xx_ep_ops,
2328 .maxpacket = ISO_FIFO_SIZE,
2329 },
2330 .dev = &memory,
2331 .fifo_size = ISO_FIFO_SIZE,
2332 .bEndpointAddress = 9,
2333 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2334 .reg_udccs = &UDCCS9,
2335 .reg_ubcr = &UBCR9,
2336 .reg_uddr = &UDDR9,
2337 drcmr (33)
2338 },
2339 .ep[10] = {
2340 .ep = {
2341 .name = "ep10in-int",
2342 .ops = &pxa2xx_ep_ops,
2343 .maxpacket = INT_FIFO_SIZE,
2344 },
2345 .dev = &memory,
2346 .fifo_size = INT_FIFO_SIZE,
2347 .bEndpointAddress = USB_DIR_IN | 10,
2348 .bmAttributes = USB_ENDPOINT_XFER_INT,
2349 .reg_udccs = &UDCCS10,
2350 .reg_uddr = &UDDR10,
2351 },
2352
2353 /* third group of endpoints */
2354 .ep[11] = {
2355 .ep = {
2356 .name = "ep11in-bulk",
2357 .ops = &pxa2xx_ep_ops,
2358 .maxpacket = BULK_FIFO_SIZE,
2359 },
2360 .dev = &memory,
2361 .fifo_size = BULK_FIFO_SIZE,
2362 .bEndpointAddress = USB_DIR_IN | 11,
2363 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2364 .reg_udccs = &UDCCS11,
2365 .reg_uddr = &UDDR11,
2366 drcmr (35)
2367 },
2368 .ep[12] = {
2369 .ep = {
2370 .name = "ep12out-bulk",
2371 .ops = &pxa2xx_ep_ops,
2372 .maxpacket = BULK_FIFO_SIZE,
2373 },
2374 .dev = &memory,
2375 .fifo_size = BULK_FIFO_SIZE,
2376 .bEndpointAddress = 12,
2377 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2378 .reg_udccs = &UDCCS12,
2379 .reg_ubcr = &UBCR12,
2380 .reg_uddr = &UDDR12,
2381 drcmr (36)
2382 },
2383 .ep[13] = {
2384 .ep = {
2385 .name = "ep13in-iso",
2386 .ops = &pxa2xx_ep_ops,
2387 .maxpacket = ISO_FIFO_SIZE,
2388 },
2389 .dev = &memory,
2390 .fifo_size = ISO_FIFO_SIZE,
2391 .bEndpointAddress = USB_DIR_IN | 13,
2392 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2393 .reg_udccs = &UDCCS13,
2394 .reg_uddr = &UDDR13,
2395 drcmr (37)
2396 },
2397 .ep[14] = {
2398 .ep = {
2399 .name = "ep14out-iso",
2400 .ops = &pxa2xx_ep_ops,
2401 .maxpacket = ISO_FIFO_SIZE,
2402 },
2403 .dev = &memory,
2404 .fifo_size = ISO_FIFO_SIZE,
2405 .bEndpointAddress = 14,
2406 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2407 .reg_udccs = &UDCCS14,
2408 .reg_ubcr = &UBCR14,
2409 .reg_uddr = &UDDR14,
2410 drcmr (38)
2411 },
2412 .ep[15] = {
2413 .ep = {
2414 .name = "ep15in-int",
2415 .ops = &pxa2xx_ep_ops,
2416 .maxpacket = INT_FIFO_SIZE,
2417 },
2418 .dev = &memory,
2419 .fifo_size = INT_FIFO_SIZE,
2420 .bEndpointAddress = USB_DIR_IN | 15,
2421 .bmAttributes = USB_ENDPOINT_XFER_INT,
2422 .reg_udccs = &UDCCS15,
2423 .reg_uddr = &UDDR15,
2424 },
2425 #endif /* !CONFIG_USB_PXA2XX_SMALL */
2426 };
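
/* Sketch (not part of this driver): how a gadget driver typically claims one
 * of the fixed-name endpoints declared in the table above, by walking
 * gadget->ep_list in its bind() callback.  The find_pxa_ep() name and the
 * "ep1in-bulk" example are illustrative only.
 */
#if 0
static struct usb_ep *find_pxa_ep(struct usb_gadget *gadget, const char *name)
{
	struct usb_ep	*ep;

	list_for_each_entry(ep, &gadget->ep_list, ep_list)
		if (strcmp(ep->name, name) == 0)
			return ep;
	return NULL;	/* e.g. find_pxa_ep(gadget, "ep1in-bulk") */
}
#endif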
2427
2428 #define CP15R0_VENDOR_MASK 0xffffe000
2429
2430 #if defined(CONFIG_ARCH_PXA)
2431 #define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */
2432
2433 #elif defined(CONFIG_ARCH_IXP4XX)
2434 #define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp4xx */
2435
2436 #endif
2437
2438 #define CP15R0_PROD_MASK 0x000003f0
2439 #define PXA25x 0x00000100 /* and PXA26x */
2440 #define PXA210 0x00000120
2441
2442 #define CP15R0_REV_MASK 0x0000000f
2443
2444 #define CP15R0_PRODREV_MASK (CP15R0_PROD_MASK | CP15R0_REV_MASK)
2445
2446 #define PXA255_A0 0x00000106 /* or PXA260_B1 */
2447 #define PXA250_C0 0x00000105 /* or PXA26x_B0 */
2448 #define PXA250_B2 0x00000104
2449 #define PXA250_B1 0x00000103 /* or PXA260_A0 */
2450 #define PXA250_B0 0x00000102
2451 #define PXA250_A1 0x00000101
2452 #define PXA250_A0 0x00000100
2453
2454 #define PXA210_C0 0x00000125
2455 #define PXA210_B2 0x00000124
2456 #define PXA210_B1 0x00000123
2457 #define PXA210_B0 0x00000122
2458 #define IXP425_A0 0x000001c1
2459 #define IXP425_B0 0x000001f1
2460 #define IXP465_AD 0x00000200
2461
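/* Illustrative decode (not driver code): the masks above split the CP15 ID
 * register into a vendor field and a product/revision field.  An ID value
 * such as 0x69052d06 matches CP15R0_XSCALE_VALUE under CP15R0_VENDOR_MASK and
 * decodes to PXA255_A0 under CP15R0_PRODREV_MASK.  The helper name is made up
 * for this example.
 */
#if 0
static int looks_like_pxa255_a0(u32 chiprev)
{
	return (chiprev & CP15R0_VENDOR_MASK) == CP15R0_XSCALE_VALUE
		&& (chiprev & CP15R0_PRODREV_MASK) == PXA255_A0;
}
#endif
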
2462 /*
2463 * probe - binds to the platform device
2464 */
2465 static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2466 {
2467 struct pxa2xx_udc *dev = &memory;
2468 int retval, out_dma = 1, vbus_irq, irq;
2469 u32 chiprev;
2470
2471 /* insist on Intel/ARM/XScale */
2472 asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
2473 if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
2474 printk(KERN_ERR "%s: not XScale!\n", driver_name);
2475 return -ENODEV;
2476 }
2477
2478 /* trigger chiprev-specific logic */
2479 switch (chiprev & CP15R0_PRODREV_MASK) {
2480 #if defined(CONFIG_ARCH_PXA)
2481 case PXA255_A0:
2482 dev->has_cfr = 1;
2483 break;
2484 case PXA250_A0:
2485 case PXA250_A1:
2486 /* A0/A1 "not released"; ep 13, 15 unusable */
2487 /* fall through */
2488 case PXA250_B2: case PXA210_B2:
2489 case PXA250_B1: case PXA210_B1:
2490 case PXA250_B0: case PXA210_B0:
2491 out_dma = 0;
2492 /* fall through */
2493 case PXA250_C0: case PXA210_C0:
2494 break;
2495 #elif defined(CONFIG_ARCH_IXP4XX)
2496 case IXP425_A0:
2497 case IXP425_B0:
2498 case IXP465_AD:
2499 dev->has_cfr = 1;
2500 out_dma = 0;
2501 break;
2502 #endif
2503 default:
2504 out_dma = 0;
2505 printk(KERN_ERR "%s: unrecognized processor: %08x\n",
2506 driver_name, chiprev);
2507 /* iop3xx, ixp4xx, ... */
2508 return -ENODEV;
2509 }
2510
2511 irq = platform_get_irq(pdev, 0);
2512 if (irq < 0)
2513 return -ENODEV;
2514
2515 pr_debug("%s: IRQ %d%s%s%s\n", driver_name, irq,
2516 dev->has_cfr ? "" : " (!cfr)",
2517 out_dma ? "" : " (broken dma-out)",
2518 SIZE_STR DMASTR
2519 );
2520
2521 #ifdef USE_DMA
2522 #ifndef USE_OUT_DMA
2523 out_dma = 0;
2524 #endif
2525 /* pxa 250 erratum 130 prevents using OUT dma (fixed C0) */
2526 if (!out_dma) {
2527 DMSG("disabled OUT dma\n");
2528 dev->ep[ 2].reg_drcmr = dev->ep[ 4].reg_drcmr = 0;
2529 dev->ep[ 7].reg_drcmr = dev->ep[ 9].reg_drcmr = 0;
2530 dev->ep[12].reg_drcmr = dev->ep[14].reg_drcmr = 0;
2531 }
2532 #endif
2533
2534 /* other non-static parts of init */
2535 dev->dev = &pdev->dev;
2536 dev->mach = pdev->dev.platform_data;
2537
2538 if (dev->mach->gpio_vbus) {
2539 if ((retval = gpio_request(dev->mach->gpio_vbus,
2540 "pxa2xx_udc GPIO VBUS"))) {
2541 dev_dbg(&pdev->dev,
2542 "can't get vbus gpio %d, err: %d\n",
2543 dev->mach->gpio_vbus, retval);
2544 return -EBUSY;
2545 }
2546 gpio_direction_input(dev->mach->gpio_vbus);
2547 vbus_irq = gpio_to_irq(dev->mach->gpio_vbus);
2548 set_irq_type(vbus_irq, IRQT_BOTHEDGE);
2549 } else
2550 vbus_irq = 0;
2551
2552 if (dev->mach->gpio_pullup) {
2553 if ((retval = gpio_request(dev->mach->gpio_pullup,
2554 "pca2xx_udc GPIO PULLUP"))) {
2555 dev_dbg(&pdev->dev,
2556 "can't get pullup gpio %d, err: %d\n",
2557 dev->mach->gpio_pullup, retval);
2558 if (dev->mach->gpio_vbus)
2559 gpio_free(dev->mach->gpio_vbus);
2560 return -EBUSY;
2561 }
2562 gpio_direction_output(dev->mach->gpio_pullup, 0);
2563 }
2564
2565 init_timer(&dev->timer);
2566 dev->timer.function = udc_watchdog;
2567 dev->timer.data = (unsigned long) dev;
2568
2569 device_initialize(&dev->gadget.dev);
2570 dev->gadget.dev.parent = &pdev->dev;
2571 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
2572
2573 the_controller = dev;
2574 platform_set_drvdata(pdev, dev);
2575
2576 udc_disable(dev);
2577 udc_reinit(dev);
2578
2579 dev->vbus = is_vbus_present();
2580
2581 /* irq setup after old hardware state is cleaned up */
2582 retval = request_irq(irq, pxa2xx_udc_irq,
2583 IRQF_DISABLED, driver_name, dev);
2584 if (retval != 0) {
2585 printk(KERN_ERR "%s: can't get irq %d, err %d\n",
2586 driver_name, irq, retval);
2587 if (dev->mach->gpio_pullup)
2588 gpio_free(dev->mach->gpio_pullup);
2589 if (dev->mach->gpio_vbus)
2590 gpio_free(dev->mach->gpio_vbus);
2591 return -EBUSY;
2592 }
2593 dev->got_irq = 1;
2594
2595 #ifdef CONFIG_ARCH_LUBBOCK
2596 if (machine_is_lubbock()) {
2597 retval = request_irq(LUBBOCK_USB_DISC_IRQ,
2598 lubbock_vbus_irq,
2599 IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
2600 driver_name, dev);
2601 if (retval != 0) {
2602 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2603 driver_name, LUBBOCK_USB_DISC_IRQ, retval);
2604 lubbock_fail0:
2605 free_irq(irq, dev);
2606 if (dev->mach->gpio_pullup)
2607 gpio_free(dev->mach->gpio_pullup);
2608 if (dev->mach->gpio_vbus)
2609 gpio_free(dev->mach->gpio_vbus);
2610 return -EBUSY;
2611 }
2612 retval = request_irq(LUBBOCK_USB_IRQ,
2613 lubbock_vbus_irq,
2614 IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
2615 driver_name, dev);
2616 if (retval != 0) {
2617 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2618 driver_name, LUBBOCK_USB_IRQ, retval);
2619 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2620 goto lubbock_fail0;
2621 }
2622 #ifdef DEBUG
2623 /* with U-Boot (but not BLOB), hex is off by default */
2624 HEX_DISPLAY(dev->stats.irqs);
2625 LUB_DISC_BLNK_LED &= 0xff;
2626 #endif
2627 } else
2628 #endif
2629 if (vbus_irq) {
2630 retval = request_irq(vbus_irq, udc_vbus_irq,
2631 IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
2632 driver_name, dev);
2633 if (retval != 0) {
2634 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2635 driver_name, vbus_irq, retval);
2636 free_irq(irq, dev);
2637 if (dev->mach->gpio_pullup)
2638 gpio_free(dev->mach->gpio_pullup);
2639 if (dev->mach->gpio_vbus)
2640 gpio_free(dev->mach->gpio_vbus);
2641 return -EBUSY;
2642 }
2643 }
2644 create_proc_files();
2645
2646 return 0;
2647 }
2648
2649 static void pxa2xx_udc_shutdown(struct platform_device *_dev)
2650 {
2651 pullup_off();
2652 }
2653
2654 static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
2655 {
2656 struct pxa2xx_udc *dev = platform_get_drvdata(pdev);
2657
2658 if (dev->driver)
2659 return -EBUSY;
2660
2661 udc_disable(dev);
2662 remove_proc_files();
2663
2664 if (dev->got_irq) {
2665 free_irq(platform_get_irq(pdev, 0), dev);
2666 dev->got_irq = 0;
2667 }
2668 #ifdef CONFIG_ARCH_LUBBOCK
2669 if (machine_is_lubbock()) {
2670 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2671 free_irq(LUBBOCK_USB_IRQ, dev);
2672 }
2673 #endif
2674 if (dev->mach->gpio_vbus) {
2675 free_irq(gpio_to_irq(dev->mach->gpio_vbus), dev);
2676 gpio_free(dev->mach->gpio_vbus);
2677 }
2678 if (dev->mach->gpio_pullup)
2679 gpio_free(dev->mach->gpio_pullup);
2680
2681 platform_set_drvdata(pdev, NULL);
2682 the_controller = NULL;
2683 return 0;
2684 }
2685
2686 /*-------------------------------------------------------------------------*/
2687
2688 #ifdef CONFIG_PM
2689
2690 /* USB suspend (controlled by the host) and system suspend (controlled
2691 * by the PXA) don't necessarily work well together. If USB is active,
2692 * the 48 MHz clock is required; so the system can't enter 33 MHz idle
2693 * mode, or any deeper PM saving state.
2694 *
2695 * For now, we punt and forcibly disconnect from the USB host when PXA
2696 * enters any suspend state. While we're disconnected, we always disable
2697 * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
2698 * Boards without software pullup control shouldn't use those states.
2699 * VBUS IRQs should probably be ignored so that the PXA device just acts
2700 * "dead" to USB hosts until system resume.
2701 */
2702 static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state)
2703 {
2704 struct pxa2xx_udc *udc = platform_get_drvdata(dev);
2705
2706 if (!udc->mach->udc_command)
2707 WARN("USB host won't detect disconnect!\n");
2708 pullup(udc, 0);
2709
2710 return 0;
2711 }
2712
2713 static int pxa2xx_udc_resume(struct platform_device *dev)
2714 {
2715 struct pxa2xx_udc *udc = platform_get_drvdata(dev);
2716
2717 pullup(udc, 1);
2718
2719 return 0;
2720 }
2721
2722 #else
2723 #define pxa2xx_udc_suspend NULL
2724 #define pxa2xx_udc_resume NULL
2725 #endif
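
/* Sketch (not part of this driver): the kind of board support the comment
 * above pxa2xx_udc_suspend() assumes.  A machine file supplies
 * pxa2xx_udc_mach_info with a udc_command() hook (or a pullup GPIO) so that
 * pullup() can really disconnect during suspend.  The GPIO number and the
 * example_* names are made up for this example.
 */
#if 0
static void example_udc_command(int cmd)
{
	/* drive a hypothetical D+ pullup enable line */
	gpio_set_value(42, cmd == PXA2XX_UDC_CMD_CONNECT);
}

static struct pxa2xx_udc_mach_info example_udc_info = {
	.udc_command	= example_udc_command,
};
#endif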
2726
2727 /*-------------------------------------------------------------------------*/
2728
2729 static struct platform_driver udc_driver = {
2730 .shutdown = pxa2xx_udc_shutdown,
2731 .remove = __exit_p(pxa2xx_udc_remove),
2732 .suspend = pxa2xx_udc_suspend,
2733 .resume = pxa2xx_udc_resume,
2734 .driver = {
2735 .owner = THIS_MODULE,
2736 .name = "pxa2xx-udc",
2737 },
2738 };
2739
2740 static int __init udc_init(void)
2741 {
2742 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
2743 return platform_driver_probe(&udc_driver, pxa2xx_udc_probe);
2744 }
2745 module_init(udc_init);
2746
2747 static void __exit udc_exit(void)
2748 {
2749 platform_driver_unregister(&udc_driver);
2750 }
2751 module_exit(udc_exit);
2752
2753 MODULE_DESCRIPTION(DRIVER_DESC);
2754 MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
2755 MODULE_LICENSE("GPL");
2756