/* source: drivers/usb/gadget/pxa2xx_udc.c (gitweb page-header residue removed) */
1 /*
2 * linux/drivers/usb/gadget/pxa2xx_udc.c
3 * Intel PXA25x and IXP4xx on-chip full speed USB device controllers
4 *
5 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
6 * Copyright (C) 2003 Robert Schwebel, Pengutronix
7 * Copyright (C) 2003 Benedikt Spranger, Pengutronix
8 * Copyright (C) 2003 David Brownell
9 * Copyright (C) 2003 Joshua Wise
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 */
26
27 #undef DEBUG
28 // #define VERBOSE DBG_VERBOSE
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/ioport.h>
33 #include <linux/types.h>
34 #include <linux/errno.h>
35 #include <linux/delay.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/init.h>
39 #include <linux/timer.h>
40 #include <linux/list.h>
41 #include <linux/interrupt.h>
42 #include <linux/proc_fs.h>
43 #include <linux/mm.h>
44 #include <linux/platform_device.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/irq.h>
47
48 #include <asm/byteorder.h>
49 #include <asm/dma.h>
50 #include <asm/io.h>
51 #include <asm/system.h>
52 #include <asm/mach-types.h>
53 #include <asm/unaligned.h>
54 #include <asm/hardware.h>
55 #ifdef CONFIG_ARCH_PXA
56 #include <asm/arch/pxa-regs.h>
57 #endif
58
59 #include <linux/usb/ch9.h>
60 #include <linux/usb_gadget.h>
61
62 #include <asm/arch/udc.h>
63
64
65 /*
66 * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
67 * series processors. The UDC for the IXP 4xx series is very similar.
68 * There are fifteen endpoints, in addition to ep0.
69 *
70 * Such controller drivers work with a gadget driver. The gadget driver
71 * returns descriptors, implements configuration and data protocols used
72 * by the host to interact with this device, and allocates endpoints to
73 * the different protocol interfaces. The controller driver virtualizes
74 * usb hardware so that the gadget drivers will be more portable.
75 *
76 * This UDC hardware wants to implement a bit too much USB protocol, so
77 * it constrains the sorts of USB configuration change events that work.
78 * The errata for these chips are misleading; some "fixed" bugs from
79 * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
80 */
81
82 #define DRIVER_VERSION "4-May-2005"
83 #define DRIVER_DESC "PXA 25x USB Device Controller driver"
84
85
86 static const char driver_name [] = "pxa2xx_udc";
87
88 static const char ep0name [] = "ep0";
89
90
91 // #define USE_DMA
92 // #define USE_OUT_DMA
93 // #define DISABLE_TEST_MODE
94
95 #ifdef CONFIG_ARCH_IXP4XX
96 #undef USE_DMA
97
98 /* cpu-specific register addresses are compiled in to this code */
99 #ifdef CONFIG_ARCH_PXA
100 #error "Can't configure both IXP and PXA"
101 #endif
102
103 #endif
104
105 #include "pxa2xx_udc.h"
106
107
108 #ifdef USE_DMA
109 static int use_dma = 1;
110 module_param(use_dma, bool, 0);
111 MODULE_PARM_DESC (use_dma, "true to use dma");
112
113 static void dma_nodesc_handler (int dmach, void *_ep);
114 static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req);
115
116 #ifdef USE_OUT_DMA
117 #define DMASTR " (dma support)"
118 #else
119 #define DMASTR " (dma in)"
120 #endif
121
122 #else /* !USE_DMA */
123 #define DMASTR " (pio only)"
124 #undef USE_OUT_DMA
125 #endif
126
127 #ifdef CONFIG_USB_PXA2XX_SMALL
128 #define SIZE_STR " (small)"
129 #else
130 #define SIZE_STR ""
131 #endif
132
133 #ifdef DISABLE_TEST_MODE
134 /* (mode == 0) == no undocumented chip tweaks
135 * (mode & 1) == double buffer bulk IN
136 * (mode & 2) == double buffer bulk OUT
137 * ... so mode = 3 (or 7, 15, etc) does it for both
138 */
139 static ushort fifo_mode = 0;
140 module_param(fifo_mode, ushort, 0);
141 MODULE_PARM_DESC (fifo_mode, "pxa2xx udc fifo mode");
142 #endif
143
144 /* ---------------------------------------------------------------------------
145 * endpoint related parts of the api to the usb controller hardware,
146 * used by gadget driver; and the inner talker-to-hardware core.
147 * ---------------------------------------------------------------------------
148 */
149
150 static void pxa2xx_ep_fifo_flush (struct usb_ep *ep);
151 static void nuke (struct pxa2xx_ep *, int status);
152
153 /* one GPIO should be used to detect VBUS from the host */
154 static int is_vbus_present(void)
155 {
156 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
157
158 if (mach->gpio_vbus)
159 return pxa_gpio_get(mach->gpio_vbus);
160 if (mach->udc_is_connected)
161 return mach->udc_is_connected();
162 return 1;
163 }
164
165 /* one GPIO should control a D+ pullup, so host sees this device (or not) */
166 static void pullup_off(void)
167 {
168 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
169
170 if (mach->gpio_pullup)
171 pxa_gpio_set(mach->gpio_pullup, 0);
172 else if (mach->udc_command)
173 mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
174 }
175
176 static void pullup_on(void)
177 {
178 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
179
180 if (mach->gpio_pullup)
181 pxa_gpio_set(mach->gpio_pullup, 1);
182 else if (mach->udc_command)
183 mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
184 }
185
186 static void pio_irq_enable(int bEndpointAddress)
187 {
188 bEndpointAddress &= 0xf;
189 if (bEndpointAddress < 8)
190 UICR0 &= ~(1 << bEndpointAddress);
191 else {
192 bEndpointAddress -= 8;
193 UICR1 &= ~(1 << bEndpointAddress);
194 }
195 }
196
197 static void pio_irq_disable(int bEndpointAddress)
198 {
199 bEndpointAddress &= 0xf;
200 if (bEndpointAddress < 8)
201 UICR0 |= 1 << bEndpointAddress;
202 else {
203 bEndpointAddress -= 8;
204 UICR1 |= 1 << bEndpointAddress;
205 }
206 }
207
208 /* The UDCCR reg contains mask and interrupt status bits,
209 * so using '|=' isn't safe as it may ack an interrupt.
210 */
211 #define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
212
/* set mask bits in UDCCR; only UDCCR_MASK_BITS are written back, so
 * no pending interrupt status bit gets acked by accident.
 */
static inline void udc_set_mask_UDCCR(int mask)
{
	UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
}
217
/* clear mask bits in UDCCR; only UDCCR_MASK_BITS are written back, so
 * no pending interrupt status bit gets acked by accident.
 */
static inline void udc_clear_mask_UDCCR(int mask)
{
	UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
}
222
/* acknowledge interrupt bits in UDCCR: write back only the requested
 * non-mask (status) bits, while preserving the current mask bits.
 */
static inline void udc_ack_int_UDCCR(int mask)
{
	/* udccr keeps the mask bits we don't want to change */
	__u32 udccr = UDCCR & UDCCR_MASK_BITS;

	UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
}
230
231 /*
232 * endpoint enable/disable
233 *
234 * we need to verify the descriptors used to enable endpoints. since pxa2xx
235 * endpoint configurations are fixed, and are pretty much always enabled,
236 * there's not a lot to manage here.
237 *
238 * because pxa2xx can't selectively initialize bulk (or interrupt) endpoints,
239 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
240 * for a single interface (with only the default altsetting) and for gadget
241 * drivers that don't halt endpoints (not reset by set_interface). that also
242 * means that if you use ISO, you must violate the USB spec rule that all
243 * iso endpoints must be in non-default altsettings.
244 */
/*
 * pxa2xx_ep_enable - activate an endpoint for a validated descriptor.
 * Returns 0 on success; -EINVAL on descriptor/endpoint mismatch,
 * -ERANGE for a maxpacket this driver won't handle, -ESHUTDOWN when
 * no gadget driver is bound or the bus speed is unknown.
 */
static int pxa2xx_ep_enable (struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct pxa2xx_ep *ep;
	struct pxa2xx_udc *dev;

	ep = container_of (_ep, struct pxa2xx_ep, ep);
	/* endpoint must be idle (not yet enabled) and the descriptor must
	 * match this endpoint's fixed hardware address and fifo size */
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| ep->bEndpointAddress != desc->bEndpointAddress
			|| ep->fifo_size < le16_to_cpu
						(desc->wMaxPacketSize)) {
		DMSG("%s, bad ep or descriptor\n", __FUNCTION__);
		return -EINVAL;
	}

	/* xfer types must match, except that interrupt ~= bulk */
	if (ep->bmAttributes != desc->bmAttributes
			&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
			&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
		DMSG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
		return -EINVAL;
	}

	/* hardware _could_ do smaller, but driver doesn't */
	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
				&& le16_to_cpu (desc->wMaxPacketSize)
						!= BULK_FIFO_SIZE)
			|| !desc->wMaxPacketSize) {
		DMSG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
		return -ERANGE;
	}

	dev = ep->dev;
	/* refuse while no gadget driver is bound */
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
		DMSG("%s, bogus device state\n", __FUNCTION__);
		return -ESHUTDOWN;
	}

	ep->desc = desc;
	ep->dma = -1;		/* PIO unless a DMA channel binds below */
	ep->stopped = 0;
	ep->pio_irqs = ep->dma_irqs = 0;
	ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);

	/* flush fifo (mostly for OUT buffers) */
	pxa2xx_ep_fifo_flush (_ep);

	/* ... reset halt state too, if we could ... */

#ifdef USE_DMA
	/* for (some) bulk and ISO endpoints, try to get a DMA channel and
	 * bind it to the endpoint. otherwise use PIO.
	 */
	switch (ep->bmAttributes) {
	case USB_ENDPOINT_XFER_ISOC:
		/* odd-size iso packets (not a multiple of 32) stay on PIO */
		if (le16_to_cpu(desc->wMaxPacketSize) % 32)
			break;
		// fall through
	case USB_ENDPOINT_XFER_BULK:
		if (!use_dma || !ep->reg_drcmr)
			break;
		ep->dma = pxa_request_dma ((char *)_ep->name,
				(le16_to_cpu (desc->wMaxPacketSize) > 64)
					? DMA_PRIO_MEDIUM /* some iso */
					: DMA_PRIO_LOW,
				dma_nodesc_handler, ep);
		if (ep->dma >= 0) {
			/* route this endpoint's DMA requests to the channel */
			*ep->reg_drcmr = DRCMR_MAPVLD | ep->dma;
			DMSG("%s using dma%d\n", _ep->name, ep->dma);
		}
	}
#endif

	DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
	return 0;
}
322
/*
 * pxa2xx_ep_disable - shut an endpoint down: abort all queued requests,
 * release any bound DMA channel, flush the fifo, and mark the endpoint
 * unconfigured.  Returns 0, or -EINVAL if the endpoint wasn't enabled.
 */
static int pxa2xx_ep_disable (struct usb_ep *_ep)
{
	struct pxa2xx_ep *ep;
	unsigned long flags;

	ep = container_of (_ep, struct pxa2xx_ep, ep);
	if (!_ep || !ep->desc) {
		DMSG("%s, %s not enabled\n", __FUNCTION__,
			_ep ? ep->ep.name : NULL);
		return -EINVAL;
	}
	local_irq_save(flags);

	/* retire every queued request with -ESHUTDOWN */
	nuke (ep, -ESHUTDOWN);

#ifdef USE_DMA
	if (ep->dma >= 0) {
		*ep->reg_drcmr = 0;	/* unmap the DMA request line */
		pxa_free_dma (ep->dma);
		ep->dma = -1;
	}
#endif

	/* flush fifo (mostly for IN buffers) */
	pxa2xx_ep_fifo_flush (_ep);

	ep->desc = NULL;
	ep->stopped = 1;

	local_irq_restore(flags);
	DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
	return 0;
}
356
357 /*-------------------------------------------------------------------------*/
358
359 /* for the pxa2xx, these can just wrap kmalloc/kfree. gadget drivers
360 * must still pass correctly initialized endpoints, since other controller
361 * drivers may care about how it's currently set up (dma issues etc).
362 */
363
364 /*
365 * pxa2xx_ep_alloc_request - allocate a request data structure
366 */
367 static struct usb_request *
368 pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
369 {
370 struct pxa2xx_request *req;
371
372 req = kzalloc(sizeof(*req), gfp_flags);
373 if (!req)
374 return NULL;
375
376 INIT_LIST_HEAD (&req->queue);
377 return &req->req;
378 }
379
380
381 /*
382 * pxa2xx_ep_free_request - deallocate a request data structure
383 */
384 static void
385 pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
386 {
387 struct pxa2xx_request *req;
388
389 req = container_of (_req, struct pxa2xx_request, req);
390 WARN_ON (!list_empty (&req->queue));
391 kfree(req);
392 }
393
394
/* PXA cache needs flushing with DMA I/O (it's dma-incoherent), but there's
 * no device-affinity and the heap works perfectly well for i/o buffers.
 * It wastes much less memory than dma_alloc_coherent() would, and even
 * prevents cacheline (32 bytes wide) sharing problems.
 */
static void *
pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
	dma_addr_t *dma, gfp_t gfp_flags)
{
	char *retval;

	retval = kmalloc (bytes, gfp_flags & ~(__GFP_DMA|__GFP_HIGHMEM));
	/* note: exactly one of the two assignments below survives the
	 * preprocessor, and it is the sole statement guarded by this if */
	if (retval)
#ifdef USE_DMA
		*dma = virt_to_bus (retval);
#else
		*dma = (dma_addr_t)~0;	/* sentinel: no DMA mapping */
#endif
	return retval;
}
415
416 static void
417 pxa2xx_ep_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma,
418 unsigned bytes)
419 {
420 kfree (buf);
421 }
422
423 /*-------------------------------------------------------------------------*/
424
/*
 * done - retire a request; caller blocked irqs
 * Unlinks the request, settles its status, and invokes its completion
 * callback with the endpoint temporarily marked stopped so the callback
 * cannot re-enter queue processing underneath us.
 */
static void done(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int status)
{
	unsigned stopped = ep->stopped;

	list_del_init(&req->queue);

	/* keep a previously recorded error (e.g. -EOVERFLOW) if any */
	if (likely (req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	if (status && status != -ESHUTDOWN)
		DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	req->req.complete(&ep->ep, &req->req);
	ep->stopped = stopped;
}
449
450
/* return the ep0 state machine to idle, awaiting the next setup packet */
static inline void ep0_idle (struct pxa2xx_udc *dev)
{
	dev->ep0state = EP0_IDLE;
}
455
456 static int
457 write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
458 {
459 u8 *buf;
460 unsigned length, count;
461
462 buf = req->req.buf + req->req.actual;
463 prefetch(buf);
464
465 /* how big will this packet be? */
466 length = min(req->req.length - req->req.actual, max);
467 req->req.actual += length;
468
469 count = length;
470 while (likely(count--))
471 *uddr = *buf++;
472
473 return length;
474 }
475
/*
 * write to an IN endpoint fifo, as many packets as possible.
 * irqs will use this to write the rest later.
 * caller guarantees at least one packet buffer is ready (or a zlp).
 * returns nonzero when the request completed here.
 */
static int
write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	unsigned max;

	max = le16_to_cpu(ep->desc->wMaxPacketSize);
	do {
		unsigned count;
		int is_last, is_short;

		count = write_packet(ep->reg_uddr, req, max);

		/* last packet is usually short (or a zlp) */
		if (unlikely (count != max))
			is_last = is_short = 1;
		else {
			if (likely(req->req.length != req->req.actual)
					|| req->req.zero)
				is_last = 0;
			else
				is_last = 1;
			/* interrupt/iso maxpacket may not fill the fifo */
			is_short = unlikely (max < ep->fifo_size);
		}

		DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
			ep->ep.name, count,
			is_last ? "/L" : "", is_short ? "/S" : "",
			req->req.length - req->req.actual, req);

		/* let loose that packet.  maybe try writing another one,
		 * double buffering might work.  TSP, TPC, and TFS
		 * bit values are the same for all normal IN endpoints.
		 */
		*ep->reg_udccs = UDCCS_BI_TPC;
		if (is_short)
			*ep->reg_udccs = UDCCS_BI_TSP;

		/* requests complete when all IN data is in the FIFO */
		if (is_last) {
			done (ep, req, 0);
			if (list_empty(&ep->queue) || unlikely(ep->dma >= 0)) {
				pio_irq_disable (ep->bEndpointAddress);
#ifdef USE_DMA
				/* unaligned data and zlps couldn't use dma */
				if (unlikely(!list_empty(&ep->queue))) {
					req = list_entry(ep->queue.next,
						struct pxa2xx_request, queue);
					kick_dma(ep,req);
					return 0;
				}
#endif
			}
			return 1;
		}

		// TODO experiment: how robust can fifo mode tweaking be?
		// double buffering is off in the default fifo mode, which
		// prevents TFS from being set here.

		/* keep going while the fifo reports transmit space */
	} while (*ep->reg_udccs & UDCCS_BI_TFS);
	return 0;
}
544
/* caller asserts req->pending (ep0 irq status nyet cleared); starts
 * ep0 data stage.  these chips want very simple state transitions.
 * Writes SA|OPR (plus the caller's flags) to UDCCS0 and acks the
 * ep0 interrupt bit in USIR0.
 */
static inline
void ep0start(struct pxa2xx_udc *dev, u32 flags, const char *tag)
{
	UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
	USIR0 = USIR0_IR0;
	dev->req_pending = 0;
	DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
		__FUNCTION__, tag, UDCCS0, flags);
}
557
/*
 * write_ep0_fifo - load one ep0 IN packet.  On the final (short)
 * packet the request is completed and the state machine returns to
 * idle.  Returns nonzero when this was the last packet.
 */
static int
write_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	unsigned count;
	int is_short;

	count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
	ep->dev->stats.write.bytes += count;

	/* last packet "must be" short (or a zlp) */
	is_short = (count != EP0_FIFO_SIZE);

	DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
		req->req.length - req->req.actual, req);

	if (unlikely (is_short)) {
		if (ep->dev->req_pending)
			ep0start(ep->dev, UDCCS0_IPR, "short IN");
		else
			UDCCS0 = UDCCS0_IPR;

		count = req->req.length;
		done (ep, req, 0);
		ep0_idle(ep->dev);
#ifndef CONFIG_ARCH_IXP4XX
#if 1
		/* This seems to get rid of lost status irqs in some cases:
		 * host responds quickly, or next request involves config
		 * change automagic, or should have been hidden, or ...
		 *
		 * FIXME get rid of all udelays possible...
		 */
		if (count >= EP0_FIFO_SIZE) {
			/* poll up to ~100us for the host's status OUT */
			count = 100;
			do {
				if ((UDCCS0 & UDCCS0_OPR) != 0) {
					/* clear OPR, generate ack */
					UDCCS0 = UDCCS0_OPR;
					break;
				}
				count--;
				udelay(1);
			} while (count);
		}
#endif
#endif
	} else if (ep->dev->req_pending)
		ep0start(ep->dev, 0, "IN");
	return is_short;
}
608
609
/*
 * read_fifo - unload packet(s) from the fifo we use for usb OUT
 * transfers and put them into the request.  caller should have made
 * sure there's at least one packet ready.
 *
 * returns true if the request completed because of short packet or the
 * request buffer having filled (and maybe overran till end-of-packet).
 */
static int
read_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	for (;;) {
		u32 udccs;
		u8 *buf;
		unsigned bufferspace, count, is_short;

		/* make sure there's a packet in the FIFO.
		 * UDCCS_{BO,IO}_RPC are all the same bit value.
		 * UDCCS_{BO,IO}_RNE are all the same bit value.
		 */
		udccs = *ep->reg_udccs;
		if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
			break;
		buf = req->req.buf + req->req.actual;
		prefetchw(buf);
		bufferspace = req->req.length - req->req.actual;

		/* read all bytes from this packet */
		if (likely (udccs & UDCCS_BO_RNE)) {
			/* byte count register holds (count - 1) */
			count = 1 + (0x0ff & *ep->reg_ubcr);
			req->req.actual += min (count, bufferspace);
		} else /* zlp */
			count = 0;
		is_short = (count < ep->ep.maxpacket);
		DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
			ep->ep.name, udccs, count,
			is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
		while (likely (count-- != 0)) {
			u8 byte = (u8) *ep->reg_uddr;

			if (unlikely (bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data.
				 */
				if (req->req.status != -EOVERFLOW)
					DMSG("%s overflow %d\n",
						ep->ep.name, count);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}
		/* ack RPC for this packet */
		*ep->reg_udccs = UDCCS_BO_RPC;
		/* RPC/RSP/RNE could now reflect the other packet buffer */

		/* iso is one request per packet */
		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
			if (udccs & UDCCS_IO_ROF)
				req->req.status = -EHOSTUNREACH;
			/* more like "is_done" */
			is_short = 1;
		}

		/* completion */
		if (is_short || req->req.actual == req->req.length) {
			done (ep, req, 0);
			if (list_empty(&ep->queue))
				pio_irq_disable (ep->bEndpointAddress);
			return 1;
		}

		/* finished that packet.  the next one may be waiting... */
	}
	return 0;
}
688
/*
 * special ep0 version of the above.  no UBCR0 or double buffering; status
 * handshaking is magic.  most device protocols don't need control-OUT.
 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
 * protocols do use them.
 * returns nonzero once the request buffer has been filled.
 */
static int
read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	u8 *buf, byte;
	unsigned bufferspace;

	buf = req->req.buf + req->req.actual;
	bufferspace = req->req.length - req->req.actual;

	/* drain every byte the fifo currently holds */
	while (UDCCS0 & UDCCS0_RNE) {
		byte = (u8) UDDR0;

		if (unlikely (bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				DMSG("%s overflow\n", ep->ep.name);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			req->req.actual++;
			bufferspace--;
		}
	}

	UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;

	/* completion */
	if (req->req.actual >= req->req.length)
		return 1;

	/* finished that packet.  the next one may be waiting... */
	return 0;
}
731
732 #ifdef USE_DMA
733
734 #define MAX_IN_DMA ((DCMD_LENGTH + 1) - BULK_FIFO_SIZE)
735
/*
 * start_dma_nodesc - program one no-descriptor DMA transfer between
 * the request's mapped buffer and the endpoint fifo register.
 */
static void
start_dma_nodesc(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int is_in)
{
	u32 dcmd = req->req.length;
	u32 buf = req->req.dma;
	u32 fifo = io_v2p ((u32)ep->reg_uddr);

	/* caller guarantees there's a packet or more remaining
	 *  - IN may end with a short packet (TSP set separately),
	 *  - OUT is always full length
	 */
	buf += req->req.actual;		/* skip data already transferred */
	dcmd -= req->req.actual;
	ep->dma_fixup = 0;

	/* no-descriptor mode can be simple for bulk-in, iso-in, iso-out */
	DCSR(ep->dma) = DCSR_NODESC;
	if (is_in) {
		DSADR(ep->dma) = buf;
		DTADR(ep->dma) = fifo;
		if (dcmd > MAX_IN_DMA)
			dcmd = MAX_IN_DMA;
		else
			/* mark short final transfers; the dma address
			 * registers under-report these (see handler) */
			ep->dma_fixup = (dcmd % ep->ep.maxpacket) != 0;
		dcmd |= DCMD_BURST32 | DCMD_WIDTH1
			| DCMD_FLOWTRG | DCMD_INCSRCADDR;
	} else {
#ifdef USE_OUT_DMA
		DSADR(ep->dma) = fifo;
		DTADR(ep->dma) = buf;
		if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
			dcmd = ep->ep.maxpacket;
		dcmd |= DCMD_BURST32 | DCMD_WIDTH1
			| DCMD_FLOWSRC | DCMD_INCTRGADDR;
#endif
	}
	DCMD(ep->dma) = dcmd;
	DCSR(ep->dma) = DCSR_RUN | DCSR_NODESC
		| (unlikely(is_in)
			? DCSR_STOPIRQEN	/* use dma_nodesc_handler() */
			: 0);			/* use handle_ep() */
}
778
/*
 * kick_dma - start i/o for a request on a dma-capable endpoint,
 * falling back to pio for the cases dma can't handle (unaligned or
 * zero-length IN transfers, short OUT reads).
 */
static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	int is_in = ep->bEndpointAddress & USB_DIR_IN;

	if (is_in) {
		/* unaligned tx buffers and zlps only work with PIO */
		if ((req->req.dma & 0x0f) != 0
				|| unlikely((req->req.length - req->req.actual)
						== 0)) {
			pio_irq_enable(ep->bEndpointAddress);
			if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0)
				(void) write_fifo(ep, req);
		} else {
			start_dma_nodesc(ep, req, USB_DIR_IN);
		}
	} else {
		/* dma reads are programmed in full packets */
		if ((req->req.length - req->req.actual) < ep->ep.maxpacket) {
			DMSG("%s short dma read...\n", ep->ep.name);
			/* we're always set up for pio out */
			read_fifo (ep, req);
		} else {
			*ep->reg_udccs = UDCCS_BO_DME
				| (*ep->reg_udccs & UDCCS_BO_FST);
			start_dma_nodesc(ep, req, USB_DIR_OUT);
		}
	}
}
806
/*
 * cancel_dma - stop any running dma transfer on this endpoint and
 * record how much data was moved into req->req.actual.
 */
static void cancel_dma(struct pxa2xx_ep *ep)
{
	struct pxa2xx_request *req;
	u32 tmp;

	if (DCSR(ep->dma) == 0 || list_empty(&ep->queue))
		return;

	/* request a stop, then spin until the channel reports stopped */
	DCSR(ep->dma) = 0;
	while ((DCSR(ep->dma) & DCSR_STOPSTATE) == 0)
		cpu_relax();

	req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
	/* remaining byte count is what DCMD still holds */
	tmp = DCMD(ep->dma) & DCMD_LENGTH;
	req->req.actual = req->req.length - (tmp & DCMD_LENGTH);

	/* the last tx packet may be incomplete, so flush the fifo.
	 * FIXME correct req.actual if we can
	 */
	if (ep->bEndpointAddress & USB_DIR_IN)
		*ep->reg_udccs = UDCCS_BI_FTF;
}
829
/* dma channel stopped ... normal tx end (IN), or on error (IN/OUT).
 * completes the request when the transfer finished (with short-packet
 * or zlp fixups for IN), then restarts dma for the next queued request.
 */
static void dma_nodesc_handler(int dmach, void *_ep)
{
	struct pxa2xx_ep *ep = _ep;
	struct pxa2xx_request *req;
	u32 tmp, completed;

	local_irq_disable();

	req = list_entry(ep->queue.next, struct pxa2xx_request, queue);

	ep->dma_irqs++;
	ep->dev->stats.irqs++;
	HEX_DISPLAY(ep->dev->stats.irqs);

	/* ack/clear */
	tmp = DCSR(ep->dma);
	DCSR(ep->dma) = tmp;
	/* ignore irqs from a channel that hasn't actually stopped */
	if ((tmp & DCSR_STOPSTATE) == 0
			|| (DDADR(ep->dma) & DDADR_STOP) != 0) {
		DBG(DBG_VERBOSE, "%s, dcsr %08x ddadr %08x\n",
			ep->ep.name, DCSR(ep->dma), DDADR(ep->dma));
		goto done;
	}
	DCSR(ep->dma) = 0;	/* clear DCSR_STOPSTATE */

	/* update transfer status from the dma address registers */
	completed = tmp & DCSR_BUSERR;
	if (ep->bEndpointAddress & USB_DIR_IN)
		tmp = DSADR(ep->dma);
	else
		tmp = DTADR(ep->dma);
	req->req.actual = tmp - req->req.dma;

	/* FIXME seems we sometimes see partial transfers... */

	if (unlikely(completed != 0))
		req->req.status = -EIO;
	else if (req->req.actual) {
		/* these registers have zeroes in low bits; they miscount
		 * some (end-of-transfer) short packets: tx 14 as tx 12
		 */
		if (ep->dma_fixup)
			req->req.actual = min(req->req.actual + 3,
					       req->req.length);

		tmp = (req->req.length - req->req.actual);
		completed = (tmp == 0);
		if (completed && (ep->bEndpointAddress & USB_DIR_IN)) {

			/* maybe validate final short packet ... */
			if ((req->req.actual % ep->ep.maxpacket) != 0)
				*ep->reg_udccs = UDCCS_BI_TSP/*|UDCCS_BI_TPC*/;

			/* ... or zlp, using pio fallback */
			else if (ep->bmAttributes == USB_ENDPOINT_XFER_BULK
					&& req->req.zero) {
				DMSG("%s zlp terminate ...\n", ep->ep.name);
				completed = 0;
			}
		}
	}

	if (likely(completed)) {
		done(ep, req, 0);

		/* maybe re-activate after completion */
		if (ep->stopped || list_empty(&ep->queue))
			goto done;
		req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
	}
	kick_dma(ep, req);
done:
	local_irq_enable();
}
905
906 #endif
907
908 /*-------------------------------------------------------------------------*/
909
/*
 * pxa2xx_ep_queue - submit a request to an endpoint.
 * When the endpoint's queue is empty and hardware is ready, the
 * transfer starts immediately (ep0 state machine, DMA kick, or PIO
 * fifo fill); anything not finished here is queued for the irq
 * handlers.  Returns 0 on success or a negative errno.
 */
static int
pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct pxa2xx_request *req;
	struct pxa2xx_ep *ep;
	struct pxa2xx_udc *dev;
	unsigned long flags;

	req = container_of(_req, struct pxa2xx_request, req);
	/* the request must be complete-able and not already queued */
	if (unlikely (!_req || !_req->complete || !_req->buf
			|| !list_empty(&req->queue))) {
		DMSG("%s, bad params\n", __FUNCTION__);
		return -EINVAL;
	}

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
		DMSG("%s, bad ep\n", __FUNCTION__);
		return -EINVAL;
	}

	dev = ep->dev;
	if (unlikely (!dev->driver
			|| dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		DMSG("%s, bogus device state\n", __FUNCTION__);
		return -ESHUTDOWN;
	}

	/* iso is always one packet per request, that's the only way
	 * we can report per-packet status.  that also helps with dma.
	 */
	if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
			&& req->req.length > le16_to_cpu
						(ep->desc->wMaxPacketSize)))
		return -EMSGSIZE;

#ifdef USE_DMA
	// FIXME caller may already have done the dma mapping
	if (ep->dma >= 0) {
		_req->dma = dma_map_single(dev->dev,
			_req->buf, _req->length,
			((ep->bEndpointAddress & USB_DIR_IN) != 0)
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
#endif

	DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
	     _ep->name, _req, _req->length, _req->buf);

	local_irq_save(flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		if (ep->desc == 0 /* ep0 */) {
			unsigned length = _req->length;

			switch (dev->ep0state) {
			case EP0_IN_DATA_PHASE:
				dev->stats.write.ops++;
				if (write_ep0_fifo(ep, req))
					req = NULL;	/* finished here */
				break;

			case EP0_OUT_DATA_PHASE:
				dev->stats.read.ops++;
				/* messy ... */
				if (dev->req_config) {
					DBG(DBG_VERBOSE, "ep0 config ack%s\n",
						dev->has_cfr ?  "" : " raced");
					if (dev->has_cfr)
						UDCCFR = UDCCFR_AREN|UDCCFR_ACM
							|UDCCFR_MB1;
					done(ep, req, 0);
					dev->ep0state = EP0_END_XFER;
					local_irq_restore (flags);
					return 0;
				}
				if (dev->req_pending)
					ep0start(dev, UDCCS0_IPR, "OUT");
				if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
						&& read_ep0_fifo(ep, req))) {
					ep0_idle(dev);
					done(ep, req, 0);
					req = NULL;
				}
				break;

			default:
				DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
				local_irq_restore (flags);
				return -EL2HLT;
			}
#ifdef USE_DMA
		/* either start dma or prime pio pump */
		} else if (ep->dma >= 0) {
			kick_dma(ep, req);
#endif
		/* can the FIFO can satisfy the request immediately? */
		} else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
			if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0
					&& write_fifo(ep, req))
				req = NULL;
		} else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
				&& read_fifo(ep, req)) {
			req = NULL;
		}

		/* anything left over finishes via pio interrupts */
		if (likely (req && ep->desc) && ep->dma < 0)
			pio_irq_enable(ep->bEndpointAddress);
	}

	/* pio or dma irq handler advances the queue. */
	if (likely (req != 0))
		list_add_tail(&req->queue, &ep->queue);
	local_irq_restore(flags);

	return 0;
}
1032
1033
/*
 * nuke - dequeue ALL requests
 * Retires every queued request with the given status (canceling any
 * active dma first) and masks the endpoint's pio interrupt.
 */
static void nuke(struct pxa2xx_ep *ep, int status)
{
	struct pxa2xx_request *req;

	/* called with irqs blocked */
#ifdef USE_DMA
	if (ep->dma >= 0 && !ep->stopped)
		cancel_dma(ep);
#endif
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct pxa2xx_request,
				queue);
		done(ep, req, status);
	}
	if (ep->desc)
		pio_irq_disable (ep->bEndpointAddress);
}
1055
1056
/* dequeue JUST ONE request; it completes with -ECONNRESET.
 * returns -EINVAL if the request isn't queued on this endpoint.
 */
static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct pxa2xx_ep *ep;
	struct pxa2xx_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (!_ep || ep->ep.name == ep0name)
		return -EINVAL;

	local_irq_save(flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		local_irq_restore(flags);
		return -EINVAL;
	}

#ifdef USE_DMA
	/* an in-flight dma transfer (head of queue) must be torn down
	 * before completion, then dma restarts for the next request */
	if (ep->dma >= 0 && ep->queue.next == &req->queue && !ep->stopped) {
		cancel_dma(ep);
		done(ep, req, -ECONNRESET);
		/* restart i/o */
		if (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
				struct pxa2xx_request, queue);
			kick_dma(ep, req);
		}
	} else
#endif
		done(ep, req, -ECONNRESET);

	local_irq_restore(flags);
	return 0;
}
1097
1098 /*-------------------------------------------------------------------------*/
1099
/*
 * pxa2xx_ep_set_halt - stall an endpoint (set only; this UDC can't
 * clear a halt from software, only the host can).
 * Returns 0 on success, -EROFS for clear attempts, -EAGAIN when an
 * IN endpoint still has data queued or in its fifo, -EINVAL for a
 * bad or iso endpoint.
 */
static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct pxa2xx_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (unlikely (!_ep
			|| (!ep->desc && ep->ep.name != ep0name))
			|| ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		DMSG("%s, bad ep\n", __FUNCTION__);
		return -EINVAL;
	}
	if (value == 0) {
		/* this path (reset toggle+halt) is needed to implement
		 * SET_INTERFACE on normal hardware.  but it can't be
		 * done from software on the PXA UDC, and the hardware
		 * forgets to do it as part of SET_INTERFACE automagic.
		 */
		DMSG("only host can clear %s halt\n", _ep->name);
		return -EROFS;
	}

	local_irq_save(flags);

	/* refuse to stall an IN endpoint with unsent data pending */
	if ((ep->bEndpointAddress & USB_DIR_IN) != 0
			&& ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
			   || !list_empty(&ep->queue))) {
		local_irq_restore(flags);
		return -EAGAIN;
	}

	/* FST bit is the same for control, bulk in, bulk out, interrupt in */
	*ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;

	/* ep0 needs special care */
	if (!ep->desc) {
		start_watchdog(ep->dev);
		ep->dev->req_pending = 0;
		ep->dev->ep0state = EP0_STALL;

	/* and bulk/intr endpoints like dropping stalls too */
	} else {
		unsigned i;
		/* wait up to ~1ms for the hardware to report the stall */
		for (i = 0; i < 1000; i += 20) {
			if (*ep->reg_udccs & UDCCS_BI_SST)
				break;
			udelay(20);
		}
	}
	local_irq_restore(flags);

	DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
	return 0;
}
1154
/* report bytes available to read from an OUT fifo; IN fifos are not
 * readable on this hardware, and a disconnected or fifo-empty endpoint
 * reports zero.
 */
static int pxa2xx_ep_fifo_status(struct usb_ep *_ep)
{
	struct pxa2xx_ep        *ep;

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (!_ep) {
		DMSG("%s, bad ep\n", __FUNCTION__);
		return -ENODEV;
	}
	/* pxa can't report unclaimed bytes from IN fifos */
	if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
		return -EOPNOTSUPP;
	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
			|| (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
		return 0;
	else
		/* byte-count register holds (count - 1) */
		return (*ep->reg_ubcr & 0xfff) + 1;
}
1173
1174 static void pxa2xx_ep_fifo_flush(struct usb_ep *_ep)
1175 {
1176 struct pxa2xx_ep *ep;
1177
1178 ep = container_of(_ep, struct pxa2xx_ep, ep);
1179 if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
1180 DMSG("%s, bad ep\n", __FUNCTION__);
1181 return;
1182 }
1183
1184 /* toggle and halt bits stay unchanged */
1185
1186 /* for OUT, just read and discard the FIFO contents. */
1187 if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
1188 while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
1189 (void) *ep->reg_uddr;
1190 return;
1191 }
1192
1193 /* most IN status is the same, but ISO can't stall */
1194 *ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
1195 | (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
1196 ? 0 : UDCCS_BI_SST;
1197 }
1198
1199
/* endpoint operations handed to the gadget layer for every endpoint,
 * including ep0
 */
static struct usb_ep_ops pxa2xx_ep_ops = {
	.enable		= pxa2xx_ep_enable,
	.disable	= pxa2xx_ep_disable,

	.alloc_request	= pxa2xx_ep_alloc_request,
	.free_request	= pxa2xx_ep_free_request,

	.alloc_buffer	= pxa2xx_ep_alloc_buffer,
	.free_buffer	= pxa2xx_ep_free_buffer,

	.queue		= pxa2xx_ep_queue,
	.dequeue	= pxa2xx_ep_dequeue,

	.set_halt	= pxa2xx_ep_set_halt,
	.fifo_status	= pxa2xx_ep_fifo_status,
	.fifo_flush	= pxa2xx_ep_fifo_flush,
};
1217
1218
1219 /* ---------------------------------------------------------------------------
1220 * device-scoped parts of the api to the usb controller hardware
1221 * ---------------------------------------------------------------------------
1222 */
1223
static int pxa2xx_udc_get_frame(struct usb_gadget *_gadget)
{
	/* 11-bit frame number: high 3 bits in UFNRH, low 8 in UFNRL */
	return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
}
1228
/* issue a remote-wakeup (resume) request toward the host */
static int pxa2xx_udc_wakeup(struct usb_gadget *_gadget)
{
	/* host may not have enabled remote wakeup */
	if ((UDCCS0 & UDCCS0_DRWF) == 0)
		return -EHOSTUNREACH;
	udc_set_mask_UDCCR(UDCCR_RSM);
	return 0;
}
1237
/* forward declarations: pullup() below needs all three */
static void stop_activity(struct pxa2xx_udc *, struct usb_gadget_driver *);
static void udc_enable (struct pxa2xx_udc *);
static void udc_disable(struct pxa2xx_udc *);
1241
1242 /* We disable the UDC -- and its 48 MHz clock -- whenever it's not
1243 * in active use.
1244 */
1245 static int pullup(struct pxa2xx_udc *udc, int is_active)
1246 {
1247 is_active = is_active && udc->vbus && udc->pullup;
1248 DMSG("%s\n", is_active ? "active" : "inactive");
1249 if (is_active)
1250 udc_enable(udc);
1251 else {
1252 if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
1253 DMSG("disconnect %s\n", udc->driver
1254 ? udc->driver->driver.name
1255 : "(no driver)");
1256 stop_activity(udc, udc->driver);
1257 }
1258 udc_disable(udc);
1259 }
1260 return 0;
1261 }
1262
1263 /* VBUS reporting logically comes from a transceiver */
1264 static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1265 {
1266 struct pxa2xx_udc *udc;
1267
1268 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
1269 udc->vbus = is_active = (is_active != 0);
1270 DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
1271 pullup(udc, is_active);
1272 return 0;
1273 }
1274
1275 /* drivers may have software control over D+ pullup */
1276 static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active)
1277 {
1278 struct pxa2xx_udc *udc;
1279
1280 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
1281
1282 /* not all boards support pullup control */
1283 if (!udc->mach->udc_command)
1284 return -EOPNOTSUPP;
1285
1286 is_active = (is_active != 0);
1287 udc->pullup = is_active;
1288 pullup(udc, is_active);
1289 return 0;
1290 }
1291
/* device-level operations exported to the gadget layer */
static const struct usb_gadget_ops pxa2xx_udc_ops = {
	.get_frame	 = pxa2xx_udc_get_frame,
	.wakeup		 = pxa2xx_udc_wakeup,
	.vbus_session	 = pxa2xx_udc_vbus_session,
	.pullup		 = pxa2xx_udc_pullup,

	// .vbus_draw ... boards may consume current from VBUS, up to
	// 100-500mA based on config.  the 500uA suspend ceiling means
	// that exclusively vbus-powered PXA designs violate USB specs.
};
1302
1303 /*-------------------------------------------------------------------------*/
1304
1305 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1306
static const char proc_node_name [] = "driver/udc";

/* /proc/driver/udc read handler: dumps controller state, ep0 stats,
 * and every endpoint's register state and request queue in one shot
 * (off != 0 continuation reads return nothing).
 */
static int
udc_proc_read(char *page, char **start, off_t off, int count,
		int *eof, void *_dev)
{
	char			*buf = page;
	struct pxa2xx_udc	*dev = _dev;
	char			*next = buf;
	unsigned		size = count;
	unsigned long		flags;
	int			i, t;
	u32			tmp;

	if (off != 0)
		return 0;

	/* keep register snapshots and queue walks consistent */
	local_irq_save(flags);

	/* basic device status */
	t = scnprintf(next, size, DRIVER_DESC "\n"
		"%s version: %s\nGadget driver: %s\nHost %s\n\n",
		driver_name, DRIVER_VERSION SIZE_STR DMASTR,
		dev->driver ? dev->driver->driver.name : "(none)",
		is_vbus_present() ? "full speed" : "disconnected");
	size -= t;
	next += t;

	/* registers for device and ep0 */
	t = scnprintf(next, size,
		"uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
		UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
	size -= t;
	next += t;

	tmp = UDCCR;
	t = scnprintf(next, size,
		"udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
		(tmp & UDCCR_REM) ? " rem" : "",
		(tmp & UDCCR_RSTIR) ? " rstir" : "",
		(tmp & UDCCR_SRM) ? " srm" : "",
		(tmp & UDCCR_SUSIR) ? " susir" : "",
		(tmp & UDCCR_RESIR) ? " resir" : "",
		(tmp & UDCCR_RSM) ? " rsm" : "",
		(tmp & UDCCR_UDA) ? " uda" : "",
		(tmp & UDCCR_UDE) ? " ude" : "");
	size -= t;
	next += t;

	tmp = UDCCS0;
	t = scnprintf(next, size,
		"udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
		(tmp & UDCCS0_SA) ? " sa" : "",
		(tmp & UDCCS0_RNE) ? " rne" : "",
		(tmp & UDCCS0_FST) ? " fst" : "",
		(tmp & UDCCS0_SST) ? " sst" : "",
		(tmp & UDCCS0_DRWF) ? " dwrf" : "",
		(tmp & UDCCS0_FTF) ? " ftf" : "",
		(tmp & UDCCS0_IPR) ? " ipr" : "",
		(tmp & UDCCS0_OPR) ? " opr" : "");
	size -= t;
	next += t;

	if (dev->has_cfr) {
		tmp = UDCCFR;
		t = scnprintf(next, size,
			"udccfr %02X =%s%s\n", tmp,
			(tmp & UDCCFR_AREN) ? " aren" : "",
			(tmp & UDCCFR_ACM) ? " acm" : "");
		size -= t;
		next += t;
	}

	if (!is_vbus_present() || !dev->driver)
		goto done;

	t = scnprintf(next, size, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
		dev->stats.write.bytes, dev->stats.write.ops,
		dev->stats.read.bytes, dev->stats.read.ops,
		dev->stats.irqs);
	size -= t;
	next += t;

	/* dump endpoint queues */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa2xx_ep *ep = &dev->ep [i];
		struct pxa2xx_request *req;
		int t;	/* NOTE: shadows the outer 't' */

		if (i != 0) {
			const struct usb_endpoint_descriptor *d;

			/* skip endpoints that aren't configured */
			d = ep->desc;
			if (!d)
				continue;
			tmp = *dev->ep [i].reg_udccs;
			t = scnprintf(next, size,
				"%s max %d %s udccs %02x irqs %lu/%lu\n",
				ep->ep.name, le16_to_cpu (d->wMaxPacketSize),
				(ep->dma >= 0) ? "dma" : "pio", tmp,
				ep->pio_irqs, ep->dma_irqs);
			/* TODO translate all five groups of udccs bits! */

		} else /* ep0 should only have one transfer queued */
			t = scnprintf(next, size, "ep0 max 16 pio irqs %lu\n",
				ep->pio_irqs);
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
#ifdef	USE_DMA
			if (ep->dma >= 0 && req->queue.prev == &ep->queue)
				t = scnprintf(next, size,
					"\treq %p len %d/%d "
					"buf %p (dma%d dcmd %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					ep->dma, DCMD(ep->dma)
					// low 13 bits == bytes-to-go
					);
			else
#endif
				t = scnprintf(next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
		}
	}

done:
	local_irq_restore(flags);
	*eof = 1;
	return count - size;
}
1455
/* probe()/remove() use these to (un)publish the /proc node */
#define create_proc_files() \
	create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
#define remove_proc_files() \
	remove_proc_entry(proc_node_name, NULL)

#else	/* !CONFIG_USB_GADGET_DEBUG_FILES */

/* no-ops when the debug files are configured out */
#define create_proc_files() do {} while (0)
#define remove_proc_files() do {} while (0)

#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */
1467
1468 /* "function" sysfs attribute */
1469 static ssize_t
1470 show_function (struct device *_dev, struct device_attribute *attr, char *buf)
1471 {
1472 struct pxa2xx_udc *dev = dev_get_drvdata (_dev);
1473
1474 if (!dev->driver
1475 || !dev->driver->function
1476 || strlen (dev->driver->function) > PAGE_SIZE)
1477 return 0;
1478 return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
1479 }
1480 static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
1481
1482 /*-------------------------------------------------------------------------*/
1483
1484 /*
1485 * udc_disable - disable USB device controller
1486 */
1487 static void udc_disable(struct pxa2xx_udc *dev)
1488 {
1489 /* block all irqs */
1490 udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
1491 UICR0 = UICR1 = 0xff;
1492 UFNRH = UFNRH_SIM;
1493
1494 /* if hardware supports it, disconnect from usb */
1495 pullup_off();
1496
1497 udc_clear_mask_UDCCR(UDCCR_UDE);
1498
1499 #ifdef CONFIG_ARCH_PXA
1500 /* Disable clock for USB device */
1501 pxa_set_cken(CKEN11_USB, 0);
1502 #endif
1503
1504 ep0_idle (dev);
1505 dev->gadget.speed = USB_SPEED_UNKNOWN;
1506 LED_CONNECTED_OFF;
1507 }
1508
1509
1510 /*
1511 * udc_reinit - initialize software state
1512 */
1513 static void udc_reinit(struct pxa2xx_udc *dev)
1514 {
1515 u32 i;
1516
1517 /* device/ep0 records init */
1518 INIT_LIST_HEAD (&dev->gadget.ep_list);
1519 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
1520 dev->ep0state = EP0_IDLE;
1521
1522 /* basic endpoint records init */
1523 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1524 struct pxa2xx_ep *ep = &dev->ep[i];
1525
1526 if (i != 0)
1527 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
1528
1529 ep->desc = NULL;
1530 ep->stopped = 0;
1531 INIT_LIST_HEAD (&ep->queue);
1532 ep->pio_irqs = ep->dma_irqs = 0;
1533 }
1534
1535 /* the rest was statically initialized, and is read-only */
1536 }
1537
/* until it's enabled, this UDC should be completely invisible
 * to any USB host.
 */
static void udc_enable (struct pxa2xx_udc *dev)
{
	udc_clear_mask_UDCCR(UDCCR_UDE);

#ifdef	CONFIG_ARCH_PXA
	/* Enable clock for USB device */
	pxa_set_cken(CKEN11_USB, 1);
	udelay(5);
#endif

	/* try to clear these bits before we enable the udc */
	udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);

	ep0_idle(dev);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->stats.irqs = 0;

	/*
	 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
	 * - enable UDC
	 * - if RESET is already in progress, ack interrupt
	 * - unmask reset interrupt
	 */
	udc_set_mask_UDCCR(UDCCR_UDE);
	if (!(UDCCR & UDCCR_UDA))
		udc_ack_int_UDCCR(UDCCR_RSTIR);

	if (dev->has_cfr /* UDC_RES2 is defined */) {
		/* pxa255 (a0+) can avoid a set_config race that could
		 * prevent gadget drivers from configuring correctly
		 */
		UDCCFR = UDCCFR_ACM | UDCCFR_MB1;
	} else {
		/* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
		 * which could result in missing packets and interrupts.
		 * supposedly one bit per endpoint, controlling whether it
		 * double buffers or not; ACM/AREN bits fit into the holes.
		 * zero bits (like USIR0_IRx) disable double buffering.
		 */
		UDC_RES1 = 0x00;
		UDC_RES2 = 0x00;
	}

#ifdef	DISABLE_TEST_MODE
	/* "test mode" seems to have become the default in later chip
	 * revs, preventing double buffering (and invalidating docs).
	 * this EXPERIMENT enables it for bulk endpoints by tweaking
	 * undefined/reserved register bits (that other drivers clear).
	 * Belcarra code comments noted this usage.
	 */
	if (fifo_mode & 1) {	/* IN endpoints */
		UDC_RES1 |= USIR0_IR1|USIR0_IR6;
		UDC_RES2 |= USIR1_IR11;
	}
	if (fifo_mode & 2) {	/* OUT endpoints */
		UDC_RES1 |= USIR0_IR2|USIR0_IR7;
		UDC_RES2 |= USIR1_IR12;
	}
#endif

	/* enable suspend/resume and reset irqs */
	udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);

	/* enable ep0 irqs */
	UICR0 &= ~UICR0_IM0;

	/* if hardware supports it, pullup D+ and wait for reset */
	pullup_on();
}
1610
1611
1612 /* when a driver is successfully registered, it will receive
1613 * control requests including set_configuration(), which enables
1614 * non-control requests. then usb traffic follows until a
1615 * disconnect is reported. then a host may connect again, or
1616 * the driver might get unbound.
1617 */
1618 int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1619 {
1620 struct pxa2xx_udc *dev = the_controller;
1621 int retval;
1622
1623 if (!driver
1624 || driver->speed < USB_SPEED_FULL
1625 || !driver->bind
1626 || !driver->disconnect
1627 || !driver->setup)
1628 return -EINVAL;
1629 if (!dev)
1630 return -ENODEV;
1631 if (dev->driver)
1632 return -EBUSY;
1633
1634 /* first hook up the driver ... */
1635 dev->driver = driver;
1636 dev->gadget.dev.driver = &driver->driver;
1637 dev->pullup = 1;
1638
1639 device_add (&dev->gadget.dev);
1640 retval = driver->bind(&dev->gadget);
1641 if (retval) {
1642 DMSG("bind to driver %s --> error %d\n",
1643 driver->driver.name, retval);
1644 device_del (&dev->gadget.dev);
1645
1646 dev->driver = NULL;
1647 dev->gadget.dev.driver = NULL;
1648 return retval;
1649 }
1650 device_create_file(dev->dev, &dev_attr_function);
1651
1652 /* ... then enable host detection and ep0; and we're ready
1653 * for set_configuration as well as eventual disconnect.
1654 */
1655 DMSG("registered gadget driver '%s'\n", driver->driver.name);
1656 pullup(dev, 1);
1657 dump_state(dev);
1658 return 0;
1659 }
1660 EXPORT_SYMBOL(usb_gadget_register_driver);
1661
/* tear down all i/o in progress and tell the (quiesced) gadget driver
 * it has been disconnected; called with irqs blocked
 */
static void
stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect drivers more than once */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* prevent new request submissions, kill any outstanding requests  */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa2xx_ep *ep = &dev->ep[i];

		ep->stopped = 1;
		nuke(ep, -ESHUTDOWN);
	}
	/* cancel the ep0 stall watchdog */
	del_timer_sync(&dev->timer);

	/* report disconnect; the driver is already quiesced */
	LED_CONNECTED_OFF;
	if (driver)
		driver->disconnect(&dev->gadget);

	/* re-init driver-visible data structures */
	udc_reinit(dev);
}
1689
/* unbind the currently registered gadget driver and drop off the bus */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct pxa2xx_udc	*dev = the_controller;

	if (!dev)
		return -ENODEV;
	if (!driver || driver != dev->driver || !driver->unbind)
		return -EINVAL;

	/* drop D+ and quiesce all i/o with irqs blocked, so the driver
	 * sees a clean disconnect before unbind
	 */
	local_irq_disable();
	pullup(dev, 0);
	stop_activity(dev, driver);
	local_irq_enable();

	driver->unbind(&dev->gadget);
	dev->driver = NULL;

	device_del (&dev->gadget.dev);
	device_remove_file(dev->dev, &dev_attr_function);

	DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
	dump_state(dev);
	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
1715
1716
1717 /*-------------------------------------------------------------------------*/
1718
1719 #ifdef CONFIG_ARCH_LUBBOCK
1720
1721 /* Lubbock has separate connect and disconnect irqs. More typical designs
1722 * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
1723 */
1724
/* Lubbock board VBUS irq: connect and disconnect arrive on two distinct
 * irq lines, so each handler run flips which of the pair is enabled.
 */
static irqreturn_t
lubbock_vbus_irq(int irq, void *_dev)
{
	struct pxa2xx_udc	*dev = _dev;
	int			vbus;

	dev->stats.irqs++;
	HEX_DISPLAY(dev->stats.irqs);
	switch (irq) {
	case LUBBOCK_USB_IRQ:
		LED_CONNECTED_ON;
		vbus = 1;
		disable_irq(LUBBOCK_USB_IRQ);
		enable_irq(LUBBOCK_USB_DISC_IRQ);
		break;
	case LUBBOCK_USB_DISC_IRQ:
		LED_CONNECTED_OFF;
		vbus = 0;
		disable_irq(LUBBOCK_USB_DISC_IRQ);
		enable_irq(LUBBOCK_USB_IRQ);
		break;
	default:
		/* shared-irq neighbors, not ours */
		return IRQ_NONE;
	}

	pxa2xx_udc_vbus_session(&dev->gadget, vbus);
	return IRQ_HANDLED;
}
1753
1754 #endif
1755
1756 static irqreturn_t udc_vbus_irq(int irq, void *_dev)
1757 {
1758 struct pxa2xx_udc *dev = _dev;
1759 int vbus = pxa_gpio_get(dev->mach->gpio_vbus);
1760
1761 pxa2xx_udc_vbus_session(&dev->gadget, vbus);
1762 return IRQ_HANDLED;
1763 }
1764
1765
1766 /*-------------------------------------------------------------------------*/
1767
1768 static inline void clear_ep_state (struct pxa2xx_udc *dev)
1769 {
1770 unsigned i;
1771
1772 /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
1773 * fifos, and pending transactions mustn't be continued in any case.
1774 */
1775 for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
1776 nuke(&dev->ep[i], -ECONNABORTED);
1777 }
1778
/* timer callback: while ep0 is supposed to be stalled, re-assert FST
 * whenever the hardware has wrongly dropped it (see set_halt/stall paths)
 */
static void udc_watchdog(unsigned long _dev)
{
	struct pxa2xx_udc	*dev = (void *)_dev;

	local_irq_disable();
	if (dev->ep0state == EP0_STALL
			&& (UDCCS0 & UDCCS0_FST) == 0
			&& (UDCCS0 & UDCCS0_SST) == 0) {
		UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
		DBG(DBG_VERBOSE, "ep0 re-stall\n");
		/* re-arm; keeps firing until the state leaves EP0_STALL */
		start_watchdog(dev);
	}
	local_irq_enable();
}
1793
/* ep0 interrupt service: runs the control-transfer state machine.
 * reads SETUP packets out of the fifo, dispatches them to the gadget
 * driver, and advances IN/OUT data and status stages.  called with
 * irqs blocked, from the UDC irq handler.
 */
static void handle_ep0 (struct pxa2xx_udc *dev)
{
	u32			udccs0 = UDCCS0;
	struct pxa2xx_ep	*ep = &dev->ep [0];
	struct pxa2xx_request	*req;
	union {
		struct usb_ctrlrequest	r;
		u8			raw [8];
		u32			word [2];
	} u;

	if (list_empty(&ep->queue))
		req = NULL;
	else
		req = list_entry(ep->queue.next, struct pxa2xx_request, queue);

	/* clear stall status */
	if (udccs0 & UDCCS0_SST) {
		nuke(ep, -EPIPE);
		UDCCS0 = UDCCS0_SST;
		del_timer(&dev->timer);
		ep0_idle(dev);
	}

	/* previous request unfinished?  non-error iff back-to-back ... */
	if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
		nuke(ep, 0);
		del_timer(&dev->timer);
		ep0_idle(dev);
	}

	switch (dev->ep0state) {
	case EP0_IDLE:
		/* late-breaking status? */
		udccs0 = UDCCS0;

		/* start control request? */
		if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
				== (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
			int i;

			nuke (ep, -EPROTO);

			/* read SETUP packet */
			for (i = 0; i < 8; i++) {
				if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
bad_setup:
					DMSG("SETUP %d!\n", i);
					goto stall;
				}
				u.raw [i] = (u8) UDDR0;
			}
			/* fifo must be exactly empty after 8 bytes */
			if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
				goto bad_setup;

got_setup:
			DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
				u.r.bRequestType, u.r.bRequest,
				le16_to_cpu(u.r.wValue),
				le16_to_cpu(u.r.wIndex),
				le16_to_cpu(u.r.wLength));

			/* cope with automagic for some standard requests. */
			dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
						== USB_TYPE_STANDARD;
			dev->req_config = 0;
			dev->req_pending = 1;
			switch (u.r.bRequest) {
			/* hardware restricts gadget drivers here! */
			case USB_REQ_SET_CONFIGURATION:
				if (u.r.bRequestType == USB_RECIP_DEVICE) {
					/* reflect hardware's automagic
					 * up to the gadget driver.
					 */
config_change:
					dev->req_config = 1;
					clear_ep_state(dev);
					/* if !has_cfr, there's no synch
					 * else use AREN (later) not SA|OPR
					 * USIR0_IR0 acts edge sensitive
					 */
				}
				break;
			/* ... and here, even more ... */
			case USB_REQ_SET_INTERFACE:
				if (u.r.bRequestType == USB_RECIP_INTERFACE) {
					/* udc hardware is broken by design:
					 *  - altsetting may only be zero;
					 *  - hw resets all interfaces' eps;
					 *  - ep reset doesn't include halt(?).
					 */
					DMSG("broken set_interface (%d/%d)\n",
						le16_to_cpu(u.r.wIndex),
						le16_to_cpu(u.r.wValue));
					goto config_change;
				}
				break;
			/* hardware was supposed to hide this */
			case USB_REQ_SET_ADDRESS:
				if (u.r.bRequestType == USB_RECIP_DEVICE) {
					ep0start(dev, 0, "address");
					return;
				}
				break;
			}

			if (u.r.bRequestType & USB_DIR_IN)
				dev->ep0state = EP0_IN_DATA_PHASE;
			else
				dev->ep0state = EP0_OUT_DATA_PHASE;

			i = dev->driver->setup(&dev->gadget, &u.r);
			if (i < 0) {
				/* hardware automagic preventing STALL... */
				if (dev->req_config) {
					/* hardware sometimes neglects to tell
					 * tell us about config change events,
					 * so later ones may fail...
					 */
					WARN("config change %02x fail %d?\n",
						u.r.bRequest, i);
					return;
					/* TODO experiment:  if has_cfr,
					 * hardware didn't ACK; maybe we
					 * could actually STALL!
					 */
				}
				DBG(DBG_VERBOSE, "protocol STALL, "
					"%02x err %d\n", UDCCS0, i);
stall:
				/* the watchdog timer helps deal with cases
				 * where udc seems to clear FST wrongly, and
				 * then NAKs instead of STALLing.
				 */
				ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
				start_watchdog(dev);
				dev->ep0state = EP0_STALL;

			/* deferred i/o == no response yet */
			} else if (dev->req_pending) {
				if (likely(dev->ep0state == EP0_IN_DATA_PHASE
						|| dev->req_std || u.r.wLength))
					ep0start(dev, 0, "defer");
				else
					ep0start(dev, UDCCS0_IPR, "defer/IPR");
			}

			/* expect at least one data or status stage irq */
			return;

		} else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
				== (UDCCS0_OPR|UDCCS0_SA))) {
			unsigned i;

			/* pxa210/250 erratum 131 for B0/B1 says RNE lies.
			 * still observed on a pxa255 a0.
			 */
			DBG(DBG_VERBOSE, "e131\n");
			nuke(ep, -EPROTO);

			/* read SETUP data, but don't trust it too much */
			for (i = 0; i < 8; i++)
				u.raw [i] = (u8) UDDR0;
			if ((u.r.bRequestType & USB_RECIP_MASK)
					> USB_RECIP_OTHER)
				goto stall;
			if (u.word [0] == 0 && u.word [1] == 0)
				goto stall;
			goto got_setup;
		} else {
			/* some random early IRQ:
			 * - we acked FST
			 * - IPR cleared
			 * - OPR got set, without SA (likely status stage)
			 */
			UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
		}
		break;
	case EP0_IN_DATA_PHASE:			/* GET_DESCRIPTOR etc */
		if (udccs0 & UDCCS0_OPR) {
			UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
			DBG(DBG_VERBOSE, "ep0in premature status\n");
			if (req)
				done(ep, req, 0);
			ep0_idle(dev);
		} else /* irq was IPR clearing */ {
			if (req) {
				/* this IN packet might finish the request */
				(void) write_ep0_fifo(ep, req);
			} /* else IN token before response was written */
		}
		break;
	case EP0_OUT_DATA_PHASE:		/* SET_DESCRIPTOR etc */
		if (udccs0 & UDCCS0_OPR) {
			if (req) {
				/* this OUT packet might finish the request */
				if (read_ep0_fifo(ep, req))
					done(ep, req, 0);
				/* else more OUT packets expected */
			} /* else OUT token before read was issued */
		} else /* irq was IPR clearing */ {
			DBG(DBG_VERBOSE, "ep0out premature status\n");
			if (req)
				done(ep, req, 0);
			ep0_idle(dev);
		}
		break;
	case EP0_END_XFER:
		if (req)
			done(ep, req, 0);
		/* ack control-IN status (maybe in-zlp was skipped)
		 * also appears after some config change events.
		 */
		if (udccs0 & UDCCS0_OPR)
			UDCCS0 = UDCCS0_OPR;
		ep0_idle(dev);
		break;
	case EP0_STALL:
		UDCCS0 = UDCCS0_FST;
		break;
	}
	/* ack the ep0 interrupt */
	USIR0 = USIR0_IR0;
}
2017
/* non-control endpoint interrupt service: ack status bits and move
 * data for the request at the queue head, looping while whole requests
 * keep completing.  called with irqs blocked.
 */
static void handle_ep(struct pxa2xx_ep *ep)
{
	struct pxa2xx_request	*req;
	int			is_in = ep->bEndpointAddress & USB_DIR_IN;
	int			completed;
	u32			udccs, tmp;

	do {
		completed = 0;
		if (likely (!list_empty(&ep->queue)))
			req = list_entry(ep->queue.next,
					struct pxa2xx_request, queue);
		else
			req = NULL;

		// TODO check FST handling

		udccs = *ep->reg_udccs;
		if (unlikely(is_in)) {	/* irq from TPC, SST, or (ISO) TUR */
			/* write-1-to-clear only the bits we saw set */
			tmp = UDCCS_BI_TUR;
			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
				tmp |= UDCCS_BI_SST;
			tmp &= udccs;
			if (likely (tmp))
				*ep->reg_udccs = tmp;
			/* refill the fifo if there's room (TFS) */
			if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
				completed = write_fifo(ep, req);

		} else {	/* irq from RPC (or for ISO, ROF) */
			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
				tmp = UDCCS_BO_SST | UDCCS_BO_DME;
			else
				tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
			tmp &= udccs;
			if (likely(tmp))
				*ep->reg_udccs = tmp;

			/* fifos can hold packets, ready for reading... */
			if (likely(req)) {
#ifdef USE_OUT_DMA
// TODO didn't yet debug out-dma.  this approach assumes
// the worst about short packets and RPC; it might be better.

				if (likely(ep->dma >= 0)) {
					if (!(udccs & UDCCS_BO_RSP)) {
						*ep->reg_udccs = UDCCS_BO_RPC;
						ep->dma_irqs++;
						return;
					}
				}
#endif
				completed = read_fifo(ep, req);
			} else
				/* nothing to read into; mask until queued */
				pio_irq_disable (ep->bEndpointAddress);
		}
		ep->pio_irqs++;
	} while (completed);
}
2076
2077 /*
2078 * pxa2xx_udc_irq - interrupt handler
2079 *
2080 * avoid delays in ep0 processing. the control handshaking isn't always
2081 * under software control (pxa250c0 and the pxa255 are better), and delays
2082 * could cause usb protocol errors.
2083 */
2084 static irqreturn_t
2085 pxa2xx_udc_irq(int irq, void *_dev)
2086 {
2087 struct pxa2xx_udc *dev = _dev;
2088 int handled;
2089
2090 dev->stats.irqs++;
2091 HEX_DISPLAY(dev->stats.irqs);
2092 do {
2093 u32 udccr = UDCCR;
2094
2095 handled = 0;
2096
2097 /* SUSpend Interrupt Request */
2098 if (unlikely(udccr & UDCCR_SUSIR)) {
2099 udc_ack_int_UDCCR(UDCCR_SUSIR);
2100 handled = 1;
2101 DBG(DBG_VERBOSE, "USB suspend%s\n", is_vbus_present()
2102 ? "" : "+disconnect");
2103
2104 if (!is_vbus_present())
2105 stop_activity(dev, dev->driver);
2106 else if (dev->gadget.speed != USB_SPEED_UNKNOWN
2107 && dev->driver
2108 && dev->driver->suspend)
2109 dev->driver->suspend(&dev->gadget);
2110 ep0_idle (dev);
2111 }
2112
2113 /* RESume Interrupt Request */
2114 if (unlikely(udccr & UDCCR_RESIR)) {
2115 udc_ack_int_UDCCR(UDCCR_RESIR);
2116 handled = 1;
2117 DBG(DBG_VERBOSE, "USB resume\n");
2118
2119 if (dev->gadget.speed != USB_SPEED_UNKNOWN
2120 && dev->driver
2121 && dev->driver->resume
2122 && is_vbus_present())
2123 dev->driver->resume(&dev->gadget);
2124 }
2125
2126 /* ReSeT Interrupt Request - USB reset */
2127 if (unlikely(udccr & UDCCR_RSTIR)) {
2128 udc_ack_int_UDCCR(UDCCR_RSTIR);
2129 handled = 1;
2130
2131 if ((UDCCR & UDCCR_UDA) == 0) {
2132 DBG(DBG_VERBOSE, "USB reset start\n");
2133
2134 /* reset driver and endpoints,
2135 * in case that's not yet done
2136 */
2137 stop_activity (dev, dev->driver);
2138
2139 } else {
2140 DBG(DBG_VERBOSE, "USB reset end\n");
2141 dev->gadget.speed = USB_SPEED_FULL;
2142 LED_CONNECTED_ON;
2143 memset(&dev->stats, 0, sizeof dev->stats);
2144 /* driver and endpoints are still reset */
2145 }
2146
2147 } else {
2148 u32 usir0 = USIR0 & ~UICR0;
2149 u32 usir1 = USIR1 & ~UICR1;
2150 int i;
2151
2152 if (unlikely (!usir0 && !usir1))
2153 continue;
2154
2155 DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);
2156
2157 /* control traffic */
2158 if (usir0 & USIR0_IR0) {
2159 dev->ep[0].pio_irqs++;
2160 handle_ep0(dev);
2161 handled = 1;
2162 }
2163
2164 /* endpoint data transfers */
2165 for (i = 0; i < 8; i++) {
2166 u32 tmp = 1 << i;
2167
2168 if (i && (usir0 & tmp)) {
2169 handle_ep(&dev->ep[i]);
2170 USIR0 |= tmp;
2171 handled = 1;
2172 }
2173 if (usir1 & tmp) {
2174 handle_ep(&dev->ep[i+8]);
2175 USIR1 |= tmp;
2176 handled = 1;
2177 }
2178 }
2179 }
2180
2181 /* we could also ask for 1 msec SOF (SIR) interrupts */
2182
2183 } while (handled);
2184 return IRQ_HANDLED;
2185 }
2186
2187 /*-------------------------------------------------------------------------*/
2188
/* the gadget device is statically allocated, so its release callback
 * has nothing to free -- it only quiets the driver core's warning
 */
static void nop_release (struct device *dev)
{
	DMSG("%s %s\n", __FUNCTION__, dev->bus_id);
}
2193
2194 /* this uses load-time allocation and initialization (instead of
2195 * doing it at run-time) to save code, eliminate fault paths, and
2196 * be more obviously correct.
2197 */
static struct pxa2xx_udc memory = {
	.gadget = {
		.ops		= &pxa2xx_udc_ops,
		.ep0		= &memory.ep[0].ep,
		.name		= driver_name,
		.dev = {
			.bus_id		= "gadget",
			.release	= nop_release,
		},
	},

	/* control endpoint */
	.ep[0] = {
		.ep = {
			.name		= ep0name,
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= EP0_FIFO_SIZE,
		},
		.dev		= &memory,
		.reg_udccs	= &UDCCS0,
		.reg_uddr	= &UDDR0,
	},

	/* first group of endpoints */
	/* NOTE: the fifteen data endpoints are fixed-function on this
	 * silicon, in three identical groups of five (bulk-in, bulk-out,
	 * iso-in, iso-out, interrupt-in).  Only OUT endpoints have a
	 * byte count register (reg_ubcr); the drcmr() macro fills in
	 * the DMA request channel mapping only when USE_DMA is enabled.
	 */
	.ep[1] = {
		.ep = {
			.name		= "ep1in-bulk",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= BULK_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= BULK_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 1,
		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
		.reg_udccs	= &UDCCS1,
		.reg_uddr	= &UDDR1,
	drcmr (25)
	},
	.ep[2] = {
		.ep = {
			.name		= "ep2out-bulk",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= BULK_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= BULK_FIFO_SIZE,
		.bEndpointAddress = 2,
		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
		.reg_udccs	= &UDCCS2,
		.reg_ubcr	= &UBCR2,
		.reg_uddr	= &UDDR2,
	drcmr (26)
	},
#ifndef CONFIG_USB_PXA2XX_SMALL
	.ep[3] = {
		.ep = {
			.name		= "ep3in-iso",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= ISO_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= ISO_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 3,
		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
		.reg_udccs	= &UDCCS3,
		.reg_uddr	= &UDDR3,
	drcmr (27)
	},
	.ep[4] = {
		.ep = {
			.name		= "ep4out-iso",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= ISO_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= ISO_FIFO_SIZE,
		.bEndpointAddress = 4,
		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
		.reg_udccs	= &UDCCS4,
		.reg_ubcr	= &UBCR4,
		.reg_uddr	= &UDDR4,
	drcmr (28)
	},
	.ep[5] = {
		.ep = {
			.name		= "ep5in-int",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= INT_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= INT_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 5,
		.bmAttributes	= USB_ENDPOINT_XFER_INT,
		.reg_udccs	= &UDCCS5,
		.reg_uddr	= &UDDR5,
	},

	/* second group of endpoints */
	.ep[6] = {
		.ep = {
			.name		= "ep6in-bulk",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= BULK_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= BULK_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 6,
		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
		.reg_udccs	= &UDCCS6,
		.reg_uddr	= &UDDR6,
	drcmr (30)
	},
	.ep[7] = {
		.ep = {
			.name		= "ep7out-bulk",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= BULK_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= BULK_FIFO_SIZE,
		.bEndpointAddress = 7,
		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
		.reg_udccs	= &UDCCS7,
		.reg_ubcr	= &UBCR7,
		.reg_uddr	= &UDDR7,
	drcmr (31)
	},
	.ep[8] = {
		.ep = {
			.name		= "ep8in-iso",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= ISO_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= ISO_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 8,
		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
		.reg_udccs	= &UDCCS8,
		.reg_uddr	= &UDDR8,
	drcmr (32)
	},
	.ep[9] = {
		.ep = {
			.name		= "ep9out-iso",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= ISO_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= ISO_FIFO_SIZE,
		.bEndpointAddress = 9,
		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
		.reg_udccs	= &UDCCS9,
		.reg_ubcr	= &UBCR9,
		.reg_uddr	= &UDDR9,
	drcmr (33)
	},
	.ep[10] = {
		.ep = {
			.name		= "ep10in-int",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= INT_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= INT_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 10,
		.bmAttributes	= USB_ENDPOINT_XFER_INT,
		.reg_udccs	= &UDCCS10,
		.reg_uddr	= &UDDR10,
	},

	/* third group of endpoints */
	.ep[11] = {
		.ep = {
			.name		= "ep11in-bulk",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= BULK_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= BULK_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 11,
		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
		.reg_udccs	= &UDCCS11,
		.reg_uddr	= &UDDR11,
	drcmr (35)
	},
	.ep[12] = {
		.ep = {
			.name		= "ep12out-bulk",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= BULK_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= BULK_FIFO_SIZE,
		.bEndpointAddress = 12,
		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
		.reg_udccs	= &UDCCS12,
		.reg_ubcr	= &UBCR12,
		.reg_uddr	= &UDDR12,
	drcmr (36)
	},
	.ep[13] = {
		.ep = {
			.name		= "ep13in-iso",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= ISO_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= ISO_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 13,
		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
		.reg_udccs	= &UDCCS13,
		.reg_uddr	= &UDDR13,
	drcmr (37)
	},
	.ep[14] = {
		.ep = {
			.name		= "ep14out-iso",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= ISO_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= ISO_FIFO_SIZE,
		.bEndpointAddress = 14,
		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
		.reg_udccs	= &UDCCS14,
		.reg_ubcr	= &UBCR14,
		.reg_uddr	= &UDDR14,
	drcmr (38)
	},
	.ep[15] = {
		.ep = {
			.name		= "ep15in-int",
			.ops		= &pxa2xx_ep_ops,
			.maxpacket	= INT_FIFO_SIZE,
		},
		.dev		= &memory,
		.fifo_size	= INT_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 15,
		.bmAttributes	= USB_ENDPOINT_XFER_INT,
		.reg_udccs	= &UDCCS15,
		.reg_uddr	= &UDDR15,
	},
#endif /* !CONFIG_USB_PXA2XX_SMALL */
};
2442
/* Field masks for the ARM CP15 register 0 (main ID register), read in
 * probe() to identify the exact silicon: vendor (Intel/XScale), product
 * family (PXA25x/PXA210/IXP4xx) and revision/stepping, so chip-specific
 * errata and features (CFR, broken OUT DMA) can be handled.
 */
#define CP15R0_VENDOR_MASK	0xffffe000

#if	defined(CONFIG_ARCH_PXA)
#define CP15R0_XSCALE_VALUE	0x69052000	/* intel/arm/xscale */

#elif	defined(CONFIG_ARCH_IXP4XX)
#define CP15R0_XSCALE_VALUE	0x69054000	/* intel/arm/ixp4xx */

#endif

#define CP15R0_PROD_MASK	0x000003f0
#define PXA25x			0x00000100	/* and PXA26x */
#define PXA210			0x00000120

#define CP15R0_REV_MASK		0x0000000f

#define CP15R0_PRODREV_MASK	(CP15R0_PROD_MASK | CP15R0_REV_MASK)

/* known product/revision IDs, most significant first */
#define PXA255_A0		0x00000106	/* or PXA260_B1 */
#define PXA250_C0		0x00000105	/* or PXA26x_B0 */
#define PXA250_B2		0x00000104
#define PXA250_B1		0x00000103	/* or PXA260_A0 */
#define PXA250_B0		0x00000102
#define PXA250_A1		0x00000101
#define PXA250_A0		0x00000100

#define PXA210_C0		0x00000125
#define PXA210_B2		0x00000124
#define PXA210_B1		0x00000123
#define PXA210_B0		0x00000122
#define IXP425_A0		0x000001c1
#define IXP425_B0		0x000001f1
#define IXP465_AD		0x00000200
2476
2477 /*
2478 * probe - binds to the platform device
2479 */
2480 static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2481 {
2482 struct pxa2xx_udc *dev = &memory;
2483 int retval, out_dma = 1, vbus_irq;
2484 u32 chiprev;
2485
2486 /* insist on Intel/ARM/XScale */
2487 asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
2488 if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
2489 printk(KERN_ERR "%s: not XScale!\n", driver_name);
2490 return -ENODEV;
2491 }
2492
2493 /* trigger chiprev-specific logic */
2494 switch (chiprev & CP15R0_PRODREV_MASK) {
2495 #if defined(CONFIG_ARCH_PXA)
2496 case PXA255_A0:
2497 dev->has_cfr = 1;
2498 break;
2499 case PXA250_A0:
2500 case PXA250_A1:
2501 /* A0/A1 "not released"; ep 13, 15 unusable */
2502 /* fall through */
2503 case PXA250_B2: case PXA210_B2:
2504 case PXA250_B1: case PXA210_B1:
2505 case PXA250_B0: case PXA210_B0:
2506 out_dma = 0;
2507 /* fall through */
2508 case PXA250_C0: case PXA210_C0:
2509 break;
2510 #elif defined(CONFIG_ARCH_IXP4XX)
2511 case IXP425_A0:
2512 case IXP425_B0:
2513 case IXP465_AD:
2514 dev->has_cfr = 1;
2515 out_dma = 0;
2516 break;
2517 #endif
2518 default:
2519 out_dma = 0;
2520 printk(KERN_ERR "%s: unrecognized processor: %08x\n",
2521 driver_name, chiprev);
2522 /* iop3xx, ixp4xx, ... */
2523 return -ENODEV;
2524 }
2525
2526 pr_debug("%s: IRQ %d%s%s%s\n", driver_name, IRQ_USB,
2527 dev->has_cfr ? "" : " (!cfr)",
2528 out_dma ? "" : " (broken dma-out)",
2529 SIZE_STR DMASTR
2530 );
2531
2532 #ifdef USE_DMA
2533 #ifndef USE_OUT_DMA
2534 out_dma = 0;
2535 #endif
2536 /* pxa 250 erratum 130 prevents using OUT dma (fixed C0) */
2537 if (!out_dma) {
2538 DMSG("disabled OUT dma\n");
2539 dev->ep[ 2].reg_drcmr = dev->ep[ 4].reg_drcmr = 0;
2540 dev->ep[ 7].reg_drcmr = dev->ep[ 9].reg_drcmr = 0;
2541 dev->ep[12].reg_drcmr = dev->ep[14].reg_drcmr = 0;
2542 }
2543 #endif
2544
2545 /* other non-static parts of init */
2546 dev->dev = &pdev->dev;
2547 dev->mach = pdev->dev.platform_data;
2548 if (dev->mach->gpio_vbus) {
2549 vbus_irq = IRQ_GPIO(dev->mach->gpio_vbus & GPIO_MD_MASK_NR);
2550 pxa_gpio_mode((dev->mach->gpio_vbus & GPIO_MD_MASK_NR)
2551 | GPIO_IN);
2552 set_irq_type(vbus_irq, IRQT_BOTHEDGE);
2553 } else
2554 vbus_irq = 0;
2555 if (dev->mach->gpio_pullup)
2556 pxa_gpio_mode((dev->mach->gpio_pullup & GPIO_MD_MASK_NR)
2557 | GPIO_OUT | GPIO_DFLT_LOW);
2558
2559 init_timer(&dev->timer);
2560 dev->timer.function = udc_watchdog;
2561 dev->timer.data = (unsigned long) dev;
2562
2563 device_initialize(&dev->gadget.dev);
2564 dev->gadget.dev.parent = &pdev->dev;
2565 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
2566
2567 the_controller = dev;
2568 platform_set_drvdata(pdev, dev);
2569
2570 udc_disable(dev);
2571 udc_reinit(dev);
2572
2573 dev->vbus = is_vbus_present();
2574
2575 /* irq setup after old hardware state is cleaned up */
2576 retval = request_irq(IRQ_USB, pxa2xx_udc_irq,
2577 IRQF_DISABLED, driver_name, dev);
2578 if (retval != 0) {
2579 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2580 driver_name, IRQ_USB, retval);
2581 return -EBUSY;
2582 }
2583 dev->got_irq = 1;
2584
2585 #ifdef CONFIG_ARCH_LUBBOCK
2586 if (machine_is_lubbock()) {
2587 retval = request_irq(LUBBOCK_USB_DISC_IRQ,
2588 lubbock_vbus_irq,
2589 IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
2590 driver_name, dev);
2591 if (retval != 0) {
2592 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2593 driver_name, LUBBOCK_USB_DISC_IRQ, retval);
2594 lubbock_fail0:
2595 free_irq(IRQ_USB, dev);
2596 return -EBUSY;
2597 }
2598 retval = request_irq(LUBBOCK_USB_IRQ,
2599 lubbock_vbus_irq,
2600 IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
2601 driver_name, dev);
2602 if (retval != 0) {
2603 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2604 driver_name, LUBBOCK_USB_IRQ, retval);
2605 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2606 goto lubbock_fail0;
2607 }
2608 #ifdef DEBUG
2609 /* with U-Boot (but not BLOB), hex is off by default */
2610 HEX_DISPLAY(dev->stats.irqs);
2611 LUB_DISC_BLNK_LED &= 0xff;
2612 #endif
2613 } else
2614 #endif
2615 if (vbus_irq) {
2616 retval = request_irq(vbus_irq, udc_vbus_irq,
2617 SA_INTERRUPT | SA_SAMPLE_RANDOM,
2618 driver_name, dev);
2619 if (retval != 0) {
2620 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2621 driver_name, vbus_irq, retval);
2622 free_irq(IRQ_USB, dev);
2623 return -EBUSY;
2624 }
2625 }
2626 create_proc_files();
2627
2628 return 0;
2629 }
2630
/* On system shutdown/reboot, drop the D+ pullup so the USB host sees
 * the device disconnect instead of a hung peripheral.
 */
static void pxa2xx_udc_shutdown(struct platform_device *_dev)
{
	pullup_off();
}
2635
2636 static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
2637 {
2638 struct pxa2xx_udc *dev = platform_get_drvdata(pdev);
2639
2640 if (dev->driver)
2641 return -EBUSY;
2642
2643 udc_disable(dev);
2644 remove_proc_files();
2645
2646 if (dev->got_irq) {
2647 free_irq(IRQ_USB, dev);
2648 dev->got_irq = 0;
2649 }
2650 #ifdef CONFIG_ARCH_LUBBOCK
2651 if (machine_is_lubbock()) {
2652 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2653 free_irq(LUBBOCK_USB_IRQ, dev);
2654 }
2655 #endif
2656 if (dev->mach->gpio_vbus)
2657 free_irq(IRQ_GPIO(dev->mach->gpio_vbus), dev);
2658 platform_set_drvdata(pdev, NULL);
2659 the_controller = NULL;
2660 return 0;
2661 }
2662
2663 /*-------------------------------------------------------------------------*/
2664
2665 #ifdef CONFIG_PM
2666
2667 /* USB suspend (controlled by the host) and system suspend (controlled
2668 * by the PXA) don't necessarily work well together. If USB is active,
2669 * the 48 MHz clock is required; so the system can't enter 33 MHz idle
2670 * mode, or any deeper PM saving state.
2671 *
2672 * For now, we punt and forcibly disconnect from the USB host when PXA
2673 * enters any suspend state. While we're disconnected, we always disable
2674 * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
2675 * Boards without software pullup control shouldn't use those states.
2676 * VBUS IRQs should probably be ignored so that the PXA device just acts
2677 * "dead" to USB hosts until system resume.
2678 */
/* system suspend: forcibly disconnect from the host (see the design
 * note above); warn when the board has no software pullup control,
 * since the host then has no way to notice the disconnect.
 */
static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct pxa2xx_udc *udc = platform_get_drvdata(dev);

	if (!udc->mach->udc_command)
		WARN("USB host won't detect disconnect!\n");
	pullup(udc, 0);

	return 0;
}
2689
/* system resume: reconnect the D+ pullup so the host re-enumerates us */
static int pxa2xx_udc_resume(struct platform_device *dev)
{
	struct pxa2xx_udc *udc = platform_get_drvdata(dev);

	pullup(udc, 1);

	return 0;
}
2698
2699 #else
2700 #define pxa2xx_udc_suspend NULL
2701 #define pxa2xx_udc_resume NULL
2702 #endif
2703
2704 /*-------------------------------------------------------------------------*/
2705
/* platform bus glue; remove() is wrapped with __exit_p() so it is
 * compiled out when the driver is built in (non-modular)
 */
static struct platform_driver udc_driver = {
	.probe		= pxa2xx_udc_probe,
	.shutdown	= pxa2xx_udc_shutdown,
	.remove		= __exit_p(pxa2xx_udc_remove),
	.suspend	= pxa2xx_udc_suspend,
	.resume		= pxa2xx_udc_resume,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "pxa2xx-udc",
	},
};
2717
2718 static int __init udc_init(void)
2719 {
2720 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
2721 return platform_driver_register(&udc_driver);
2722 }
2723 module_init(udc_init);
2724
/* module exit point: unregister from the platform bus */
static void __exit udc_exit(void)
{
	platform_driver_unregister(&udc_driver);
}
2729 module_exit(udc_exit);
2730
/* module metadata, shown by modinfo */
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
MODULE_LICENSE("GPL");
2734