mirror_ubuntu-zesty-kernel.git: drivers/usb/gadget/udc/fsl_qe_udc.c
1 /*
2 * drivers/usb/gadget/udc/fsl_qe_udc.c
3 *
4 * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
5 *
6 * Xie Xiaobo <X.Xie@freescale.com>
7 * Li Yang <leoli@freescale.com>
8 * Based on bareboard code from Shlomi Gridish.
9 *
10 * Description:
11 * Freescale QE/CPM USB Peripheral Controller Driver
12 * The controller can be found on the MPC8360, MPC8272, etc.
13 * MPC8360 Rev 1.1 may need a QE microcode update
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 */
20
21 #undef USB_TRACE
22
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/ioport.h>
26 #include <linux/types.h>
27 #include <linux/errno.h>
28 #include <linux/err.h>
29 #include <linux/slab.h>
30 #include <linux/list.h>
31 #include <linux/interrupt.h>
32 #include <linux/io.h>
33 #include <linux/moduleparam.h>
34 #include <linux/of_address.h>
35 #include <linux/of_irq.h>
36 #include <linux/of_platform.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/usb/ch9.h>
39 #include <linux/usb/gadget.h>
40 #include <linux/usb/otg.h>
41 #include <soc/fsl/qe/qe.h>
42 #include <asm/cpm.h>
43 #include <asm/dma.h>
44 #include <asm/reg.h>
45 #include "fsl_qe_udc.h"
46
47 #define DRIVER_DESC "Freescale QE/CPM USB Device Controller driver"
48 #define DRIVER_AUTHOR "Xie XiaoBo"
49 #define DRIVER_VERSION "1.0"
50
51 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
52
53 static const char driver_name[] = "fsl_qe_udc";
54 static const char driver_desc[] = DRIVER_DESC;
55
56 /* ep names matter to the gadget layer; they must follow the naming convention expected by ep_match() */
57 static const char *const ep_name[] = {
58 "ep0-control", /* everyone has ep0 */
59 /* 3 configurable endpoints */
60 "ep1",
61 "ep2",
62 "ep3",
63 };
64
65 static struct usb_endpoint_descriptor qe_ep0_desc = {
66 .bLength = USB_DT_ENDPOINT_SIZE,
67 .bDescriptorType = USB_DT_ENDPOINT,
68
69 .bEndpointAddress = 0,
70 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
71 .wMaxPacketSize = USB_MAX_CTRL_PAYLOAD,
72 };
73
74 /********************************************************************
75 * Internal Used Function Start
76 ********************************************************************/
77 /*-----------------------------------------------------------------
78 * done() - retire a request; caller blocked irqs
79 *--------------------------------------------------------------*/
80 static void done(struct qe_ep *ep, struct qe_req *req, int status)
81 {
82 struct qe_udc *udc = ep->udc;
83 unsigned char stopped = ep->stopped;
84
85 /* req->queue is the list head used by ep_queue(), which adds the
86 * request to the tail of ep->queue, so drop the request from
87 * that list here
88 */
89 list_del_init(&req->queue);
90
91 /* req.status should be set as -EINPROGRESS in ep_queue() */
92 if (req->req.status == -EINPROGRESS)
93 req->req.status = status;
94 else
95 status = req->req.status;
96
97 if (req->mapped) {
98 dma_unmap_single(udc->gadget.dev.parent,
99 req->req.dma, req->req.length,
100 ep_is_in(ep)
101 ? DMA_TO_DEVICE
102 : DMA_FROM_DEVICE);
103 req->req.dma = DMA_ADDR_INVALID;
104 req->mapped = 0;
105 } else
106 dma_sync_single_for_cpu(udc->gadget.dev.parent,
107 req->req.dma, req->req.length,
108 ep_is_in(ep)
109 ? DMA_TO_DEVICE
110 : DMA_FROM_DEVICE);
111
112 if (status && (status != -ESHUTDOWN))
113 dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n",
114 ep->ep.name, &req->req, status,
115 req->req.actual, req->req.length);
116
117 /* don't modify queue heads during completion callback */
118 ep->stopped = 1;
119 spin_unlock(&udc->lock);
120
121 usb_gadget_giveback_request(&ep->ep, &req->req);
122
123 spin_lock(&udc->lock);
124
125 ep->stopped = stopped;
126 }
127
128 /*-----------------------------------------------------------------
129 * nuke(): delete all requests related to this ep
130 *--------------------------------------------------------------*/
131 static void nuke(struct qe_ep *ep, int status)
132 {
133 /* retire any requests still linked to this ep */
134 while (!list_empty(&ep->queue)) {
135 struct qe_req *req = NULL;
136 req = list_entry(ep->queue.next, struct qe_req, queue);
137
138 done(ep, req, status);
139 }
140 }
141
142 /*---------------------------------------------------------------------------*
143 * USB and endpoint manipulation routines, covering parameters and registers *
144 *---------------------------------------------------------------------------*/
145 /* @value: 1--set stall 0--clear stall */
146 static int qe_eprx_stall_change(struct qe_ep *ep, int value)
147 {
148 u16 tem_usep;
149 u8 epnum = ep->epnum;
150 struct qe_udc *udc = ep->udc;
151
152 tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
153 tem_usep = tem_usep & ~USB_RHS_MASK;
154 if (value == 1)
155 tem_usep |= USB_RHS_STALL;
156 else if (ep->dir == USB_DIR_IN)
157 tem_usep |= USB_RHS_IGNORE_OUT;
158
159 out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
160 return 0;
161 }
162
163 static int qe_eptx_stall_change(struct qe_ep *ep, int value)
164 {
165 u16 tem_usep;
166 u8 epnum = ep->epnum;
167 struct qe_udc *udc = ep->udc;
168
169 tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
170 tem_usep = tem_usep & ~USB_THS_MASK;
171 if (value == 1)
172 tem_usep |= USB_THS_STALL;
173 else if (ep->dir == USB_DIR_OUT)
174 tem_usep |= USB_THS_IGNORE_IN;
175
176 out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
177
178 return 0;
179 }
180
181 static int qe_ep0_stall(struct qe_udc *udc)
182 {
183 qe_eptx_stall_change(&udc->eps[0], 1);
184 qe_eprx_stall_change(&udc->eps[0], 1);
185 udc->ep0_state = WAIT_FOR_SETUP;
186 udc->ep0_dir = 0;
187 return 0;
188 }
189
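/*
 * Tell the controller to NAK further OUT tokens on this endpoint and
 * mask the Rx/Busy interrupts; qe_eprx_normal() below undoes this,
 * typically once a request has been queued again.
 */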
190 static int qe_eprx_nack(struct qe_ep *ep)
191 {
192 u8 epnum = ep->epnum;
193 struct qe_udc *udc = ep->udc;
194
195 if (ep->state == EP_STATE_IDLE) {
196 /* Set the ep's nack */
197 clrsetbits_be16(&udc->usb_regs->usb_usep[epnum],
198 USB_RHS_MASK, USB_RHS_NACK);
199
200 /* Mask Rx and Busy interrupts */
201 clrbits16(&udc->usb_regs->usb_usbmr,
202 (USB_E_RXB_MASK | USB_E_BSY_MASK));
203
204 ep->state = EP_STATE_NACK;
205 }
206 return 0;
207 }
208
209 static int qe_eprx_normal(struct qe_ep *ep)
210 {
211 struct qe_udc *udc = ep->udc;
212
213 if (ep->state == EP_STATE_NACK) {
214 clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum],
215 USB_RTHS_MASK, USB_THS_IGNORE_IN);
216
217 /* Clear pending Rx/Busy events and unmask their interrupts */
218 out_be16(&udc->usb_regs->usb_usber,
219 USB_E_BSY_MASK | USB_E_RXB_MASK);
220 setbits16(&udc->usb_regs->usb_usbmr,
221 (USB_E_RXB_MASK | USB_E_BSY_MASK));
222
223 ep->state = EP_STATE_IDLE;
224 ep->has_data = 0;
225 }
226
227 return 0;
228 }
229
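/*
 * Issue the STOP TX / RESTART TX command for this endpoint through
 * either the CPM or the QE command interface, depending on the SoC
 * this controller sits in.
 */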
230 static int qe_ep_cmd_stoptx(struct qe_ep *ep)
231 {
232 if (ep->udc->soc_type == PORT_CPM)
233 cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT),
234 CPM_USB_STOP_TX_OPCODE);
235 else
236 qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB,
237 ep->epnum, 0);
238
239 return 0;
240 }
241
242 static int qe_ep_cmd_restarttx(struct qe_ep *ep)
243 {
244 if (ep->udc->soc_type == PORT_CPM)
245 cpm_command(CPM_USB_RESTART_TX | (ep->epnum <<
246 CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE);
247 else
248 qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB,
249 ep->epnum, 0);
250
251 return 0;
252 }
253
254 static int qe_ep_flushtxfifo(struct qe_ep *ep)
255 {
256 struct qe_udc *udc = ep->udc;
257 int i;
258
259 i = (int)ep->epnum;
260
261 qe_ep_cmd_stoptx(ep);
262 out_8(&udc->usb_regs->usb_uscom,
263 USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
264 out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase));
265 out_be32(&udc->ep_param[i]->tstate, 0);
266 out_be16(&udc->ep_param[i]->tbcnt, 0);
267
268 ep->c_txbd = ep->txbase;
269 ep->n_txbd = ep->txbase;
270 qe_ep_cmd_restarttx(ep);
271 return 0;
272 }
273
274 static int qe_ep_filltxfifo(struct qe_ep *ep)
275 {
276 struct qe_udc *udc = ep->udc;
277
278 out_8(&udc->usb_regs->usb_uscom,
279 USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
280 return 0;
281 }
282
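/*
 * Reinitialize the Rx and Tx BD rings of one endpoint: Rx BDs are
 * handed back to the controller (empty, interrupt on completion),
 * Tx BDs are cleared, and the wrap bit is set on the last BD of
 * each ring.
 */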
283 static int qe_epbds_reset(struct qe_udc *udc, int pipe_num)
284 {
285 struct qe_ep *ep;
286 u32 bdring_len;
287 struct qe_bd __iomem *bd;
288 int i;
289
290 ep = &udc->eps[pipe_num];
291
292 if (ep->dir == USB_DIR_OUT)
293 bdring_len = USB_BDRING_LEN_RX;
294 else
295 bdring_len = USB_BDRING_LEN;
296
297 bd = ep->rxbase;
298 for (i = 0; i < (bdring_len - 1); i++) {
299 out_be32((u32 __iomem *)bd, R_E | R_I);
300 bd++;
301 }
302 out_be32((u32 __iomem *)bd, R_E | R_I | R_W);
303
304 bd = ep->txbase;
305 for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
306 out_be32(&bd->buf, 0);
307 out_be32((u32 __iomem *)bd, 0);
308 bd++;
309 }
310 out_be32((u32 __iomem *)bd, T_W);
311
312 return 0;
313 }
314
315 static int qe_ep_reset(struct qe_udc *udc, int pipe_num)
316 {
317 struct qe_ep *ep;
318 u16 tmpusep;
319
320 ep = &udc->eps[pipe_num];
321 tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]);
322 tmpusep &= ~USB_RTHS_MASK;
323
324 switch (ep->dir) {
325 case USB_DIR_BOTH:
326 qe_ep_flushtxfifo(ep);
327 break;
328 case USB_DIR_OUT:
329 tmpusep |= USB_THS_IGNORE_IN;
330 break;
331 case USB_DIR_IN:
332 qe_ep_flushtxfifo(ep);
333 tmpusep |= USB_RHS_IGNORE_OUT;
334 break;
335 default:
336 break;
337 }
338 out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep);
339
340 qe_epbds_reset(udc, pipe_num);
341
342 return 0;
343 }
344
345 static int qe_ep_toggledata01(struct qe_ep *ep)
346 {
347 ep->data01 ^= 0x1;
348 return 0;
349 }
350
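/*
 * Allocate MURAM for the endpoint's Rx and Tx BD rings, point the
 * parameter RAM (rbase/tbase, rbptr/tbptr) at them and clear every
 * BD.  Rx buffers are attached later by qe_ep_rxbd_update(); Tx
 * buffers are filled in at transmit time.
 */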
351 static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
352 {
353 struct qe_ep *ep = &udc->eps[pipe_num];
354 unsigned long tmp_addr = 0;
355 struct usb_ep_para __iomem *epparam;
356 int i;
357 struct qe_bd __iomem *bd;
358 int bdring_len;
359
360 if (ep->dir == USB_DIR_OUT)
361 bdring_len = USB_BDRING_LEN_RX;
362 else
363 bdring_len = USB_BDRING_LEN;
364
365 epparam = udc->ep_param[pipe_num];
366 /* allocate MURAM for the BD rings and set the ep parameters */
367 tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
368 USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
369 if (IS_ERR_VALUE(tmp_addr))
370 return -ENOMEM;
371
372 out_be16(&epparam->rbase, (u16)tmp_addr);
373 out_be16(&epparam->tbase, (u16)(tmp_addr +
374 (sizeof(struct qe_bd) * bdring_len)));
375
376 out_be16(&epparam->rbptr, in_be16(&epparam->rbase));
377 out_be16(&epparam->tbptr, in_be16(&epparam->tbase));
378
379 ep->rxbase = cpm_muram_addr(tmp_addr);
380 ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd)
381 * bdring_len));
382 ep->n_rxbd = ep->rxbase;
383 ep->e_rxbd = ep->rxbase;
384 ep->n_txbd = ep->txbase;
385 ep->c_txbd = ep->txbase;
386 ep->data01 = 0; /* data0 */
387
388 /* Init TX and RX bds */
389 bd = ep->rxbase;
390 for (i = 0; i < bdring_len - 1; i++) {
391 out_be32(&bd->buf, 0);
392 out_be32((u32 __iomem *)bd, 0);
393 bd++;
394 }
395 out_be32(&bd->buf, 0);
396 out_be32((u32 __iomem *)bd, R_W);
397
398 bd = ep->txbase;
399 for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
400 out_be32(&bd->buf, 0);
401 out_be32((u32 __iomem *)bd, 0);
402 bd++;
403 }
404 out_be32(&bd->buf, 0);
405 out_be32((u32 __iomem *)bd, T_W);
406
407 return 0;
408 }
409
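/*
 * Allocate the software rx frame and one contiguous rx buffer,
 * DMA-map (or sync) that buffer, and attach a maxpacket-sized slice
 * of it to each BD in the Rx ring.
 */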
410 static int qe_ep_rxbd_update(struct qe_ep *ep)
411 {
412 unsigned int size;
413 int i;
414 unsigned int tmp;
415 struct qe_bd __iomem *bd;
416 unsigned int bdring_len;
417
418 if (ep->rxbase == NULL)
419 return -EINVAL;
420
421 bd = ep->rxbase;
422
423 ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
424 if (ep->rxframe == NULL) {
425 dev_err(ep->udc->dev, "malloc rxframe failed\n");
426 return -ENOMEM;
427 }
428
429 qe_frame_init(ep->rxframe);
430
431 if (ep->dir == USB_DIR_OUT)
432 bdring_len = USB_BDRING_LEN_RX;
433 else
434 bdring_len = USB_BDRING_LEN;
435
436 size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
437 ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
438 if (ep->rxbuffer == NULL) {
439 dev_err(ep->udc->dev, "malloc rxbuffer failed,size=%d\n",
440 size);
441 kfree(ep->rxframe);
442 return -ENOMEM;
443 }
444
445 ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer);
446 if (ep->rxbuf_d == DMA_ADDR_INVALID) {
447 ep->rxbuf_d = dma_map_single(ep->udc->gadget.dev.parent,
448 ep->rxbuffer,
449 size,
450 DMA_FROM_DEVICE);
451 ep->rxbufmap = 1;
452 } else {
453 dma_sync_single_for_device(ep->udc->gadget.dev.parent,
454 ep->rxbuf_d, size,
455 DMA_FROM_DEVICE);
456 ep->rxbufmap = 0;
457 }
458
459 size = ep->ep.maxpacket + USB_CRC_SIZE + 2;
460 tmp = ep->rxbuf_d;
461 tmp = (u32)(((tmp >> 2) << 2) + 4);
462
463 for (i = 0; i < bdring_len - 1; i++) {
464 out_be32(&bd->buf, tmp);
465 out_be32((u32 __iomem *)bd, (R_E | R_I));
466 tmp = tmp + size;
467 bd++;
468 }
469 out_be32(&bd->buf, tmp);
470 out_be32((u32 __iomem *)bd, (R_E | R_I | R_W));
471
472 return 0;
473 }
474
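/*
 * Program the USEP register for this pipe (logical endpoint number,
 * transfer type, handshake policy for the unused direction) and the
 * endpoint parameter RAM: rbmr/tbmr and MRBLR, which is rounded to a
 * multiple of 4.
 */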
475 static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num)
476 {
477 struct qe_ep *ep = &udc->eps[pipe_num];
478 struct usb_ep_para __iomem *epparam;
479 u16 usep, logepnum;
480 u16 tmp;
481 u8 rtfcr = 0;
482
483 epparam = udc->ep_param[pipe_num];
484
485 usep = 0;
486 logepnum = (ep->ep.desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
487 usep |= (logepnum << USB_EPNUM_SHIFT);
488
489 switch (ep->ep.desc->bmAttributes & 0x03) {
490 case USB_ENDPOINT_XFER_BULK:
491 usep |= USB_TRANS_BULK;
492 break;
493 case USB_ENDPOINT_XFER_ISOC:
494 usep |= USB_TRANS_ISO;
495 break;
496 case USB_ENDPOINT_XFER_INT:
497 usep |= USB_TRANS_INT;
498 break;
499 default:
500 usep |= USB_TRANS_CTR;
501 break;
502 }
503
504 switch (ep->dir) {
505 case USB_DIR_OUT:
506 usep |= USB_THS_IGNORE_IN;
507 break;
508 case USB_DIR_IN:
509 usep |= USB_RHS_IGNORE_OUT;
510 break;
511 default:
512 break;
513 }
514 out_be16(&udc->usb_regs->usb_usep[pipe_num], usep);
515
516 rtfcr = 0x30;
517 out_8(&epparam->rbmr, rtfcr);
518 out_8(&epparam->tbmr, rtfcr);
519
520 tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE);
521 /* MRBLR must be divisible by 4 */
522 tmp = (u16)(((tmp >> 2) << 2) + 4);
523 out_be16(&epparam->mrblr, tmp);
524
525 return 0;
526 }
527
528 static int qe_ep_init(struct qe_udc *udc,
529 unsigned char pipe_num,
530 const struct usb_endpoint_descriptor *desc)
531 {
532 struct qe_ep *ep = &udc->eps[pipe_num];
533 unsigned long flags;
534 int reval = 0;
535 u16 max = 0;
536
537 max = usb_endpoint_maxp(desc);
538
539 /* check that the max packet size is valid for this endpoint */
540 /* Refer to USB 2.0 spec, table 9-13
541 */
542 if (pipe_num != 0) {
543 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
544 case USB_ENDPOINT_XFER_BULK:
545 if (strstr(ep->ep.name, "-iso")
546 || strstr(ep->ep.name, "-int"))
547 goto en_done;
548 switch (udc->gadget.speed) {
549 case USB_SPEED_HIGH:
550 if ((max == 128) || (max == 256) || (max == 512))
551 break;
552 default:
553 switch (max) {
554 case 4:
555 case 8:
556 case 16:
557 case 32:
558 case 64:
559 break;
560 default:
561 case USB_SPEED_LOW:
562 goto en_done;
563 }
564 }
565 break;
566 case USB_ENDPOINT_XFER_INT:
567 if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
568 goto en_done;
569 switch (udc->gadget.speed) {
570 case USB_SPEED_HIGH:
571 if (max <= 1024)
572 break;
573 case USB_SPEED_FULL:
574 if (max <= 64)
575 break;
576 default:
577 if (max <= 8)
578 break;
579 goto en_done;
580 }
581 break;
582 case USB_ENDPOINT_XFER_ISOC:
583 if (strstr(ep->ep.name, "-bulk")
584 || strstr(ep->ep.name, "-int"))
585 goto en_done;
586 switch (udc->gadget.speed) {
587 case USB_SPEED_HIGH:
588 if (max <= 1024)
589 break;
590 case USB_SPEED_FULL:
591 if (max <= 1023)
592 break;
593 default:
594 goto en_done;
595 }
596 break;
597 case USB_ENDPOINT_XFER_CONTROL:
598 if (strstr(ep->ep.name, "-iso")
599 || strstr(ep->ep.name, "-int"))
600 goto en_done;
601 switch (udc->gadget.speed) {
602 case USB_SPEED_HIGH:
603 case USB_SPEED_FULL:
604 switch (max) {
605 case 1:
606 case 2:
607 case 4:
608 case 8:
609 case 16:
610 case 32:
611 case 64:
612 break;
613 default:
614 goto en_done;
615 }
616 case USB_SPEED_LOW:
617 switch (max) {
618 case 1:
619 case 2:
620 case 4:
621 case 8:
622 break;
623 default:
624 goto en_done;
625 }
626 default:
627 goto en_done;
628 }
629 break;
630
631 default:
632 goto en_done;
633 }
634 } /* if ep0*/
635
636 spin_lock_irqsave(&udc->lock, flags);
637
638 /* initialize ep structure */
639 ep->ep.maxpacket = max;
640 ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
641 ep->ep.desc = desc;
642 ep->stopped = 0;
643 ep->init = 1;
644
645 if (pipe_num == 0) {
646 ep->dir = USB_DIR_BOTH;
647 udc->ep0_dir = USB_DIR_OUT;
648 udc->ep0_state = WAIT_FOR_SETUP;
649 } else {
650 switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
651 case USB_DIR_OUT:
652 ep->dir = USB_DIR_OUT;
653 break;
654 case USB_DIR_IN:
655 ep->dir = USB_DIR_IN;
656 default:
657 break;
658 }
659 }
660
661 /* hardware special operation */
662 qe_ep_bd_init(udc, pipe_num);
663 if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) {
664 reval = qe_ep_rxbd_update(ep);
665 if (reval)
666 goto en_done1;
667 }
668
669 if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
670 ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
671 if (ep->txframe == NULL) {
672 dev_err(udc->dev, "malloc txframe failed\n");
673 goto en_done2;
674 }
675 qe_frame_init(ep->txframe);
676 }
677
678 qe_ep_register_init(udc, pipe_num);
679
680 /* Now HW will be NAKing transfers to that EP,
681 * until a buffer is queued to it. */
682 spin_unlock_irqrestore(&udc->lock, flags);
683
684 return 0;
685 en_done2:
686 kfree(ep->rxbuffer);
687 kfree(ep->rxframe);
688 en_done1:
689 spin_unlock_irqrestore(&udc->lock, flags);
690 en_done:
691 dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
692 return -ENODEV;
693 }
694
695 static inline void qe_usb_enable(struct qe_udc *udc)
696 {
697 setbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
698 }
699
700 static inline void qe_usb_disable(struct qe_udc *udc)
701 {
702 clrbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
703 }
704
705 /*----------------------------------------------------------------------------*
706 * USB and EP basic manipulate function end *
707 *----------------------------------------------------------------------------*/
708
709
710 /******************************************************************************
711 UDC transmit and receive process
712 ******************************************************************************/
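/*
 * Hand a single Rx BD back to the controller: mark it empty again,
 * preserving only the wrap bit, and advance the e_rxbd cursor.
 */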
713 static void recycle_one_rxbd(struct qe_ep *ep)
714 {
715 u32 bdstatus;
716
717 bdstatus = in_be32((u32 __iomem *)ep->e_rxbd);
718 bdstatus = R_I | R_E | (bdstatus & R_W);
719 out_be32((u32 __iomem *)ep->e_rxbd, bdstatus);
720
721 if (bdstatus & R_W)
722 ep->e_rxbd = ep->rxbase;
723 else
724 ep->e_rxbd++;
725 }
726
727 static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext)
728 {
729 u32 bdstatus;
730 struct qe_bd __iomem *bd, *nextbd;
731 unsigned char stop = 0;
732
733 nextbd = ep->n_rxbd;
734 bd = ep->e_rxbd;
735 bdstatus = in_be32((u32 __iomem *)bd);
736
737 while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) {
738 bdstatus = R_E | R_I | (bdstatus & R_W);
739 out_be32((u32 __iomem *)bd, bdstatus);
740
741 if (bdstatus & R_W)
742 bd = ep->rxbase;
743 else
744 bd++;
745
746 bdstatus = in_be32((u32 __iomem *)bd);
747 if (stopatnext && (bd == nextbd))
748 stop = 1;
749 }
750
751 ep->e_rxbd = bd;
752 }
753
754 static void ep_recycle_rxbds(struct qe_ep *ep)
755 {
756 struct qe_bd __iomem *bd = ep->n_rxbd;
757 u32 bdstatus;
758 u8 epnum = ep->epnum;
759 struct qe_udc *udc = ep->udc;
760
761 bdstatus = in_be32((u32 __iomem *)bd);
762 if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) {
763 bd = ep->rxbase +
764 ((in_be16(&udc->ep_param[epnum]->rbptr) -
765 in_be16(&udc->ep_param[epnum]->rbase))
766 >> 3);
767 bdstatus = in_be32((u32 __iomem *)bd);
768
769 if (bdstatus & R_W)
770 bd = ep->rxbase;
771 else
772 bd++;
773
774 ep->e_rxbd = bd;
775 recycle_rxbds(ep, 0);
776 ep->e_rxbd = ep->n_rxbd;
777 } else
778 recycle_rxbds(ep, 1);
779
780 if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK)
781 out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK);
782
783 if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
784 qe_eprx_normal(ep);
785
786 ep->localnack = 0;
787 }
788
789 static void setup_received_handle(struct qe_udc *udc,
790 struct usb_ctrlrequest *setup);
791 static int qe_ep_rxframe_handle(struct qe_ep *ep);
792 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
793 /* when the BD PID is SETUP, handle the packet */
794 static int ep0_setup_handle(struct qe_udc *udc)
795 {
796 struct qe_ep *ep = &udc->eps[0];
797 struct qe_frame *pframe;
798 unsigned int fsize;
799 u8 *cp;
800
801 pframe = ep->rxframe;
802 if ((frame_get_info(pframe) & PID_SETUP)
803 && (udc->ep0_state == WAIT_FOR_SETUP)) {
804 fsize = frame_get_length(pframe);
805 if (unlikely(fsize != 8))
806 return -EINVAL;
807 cp = (u8 *)&udc->local_setup_buff;
808 memcpy(cp, pframe->data, fsize);
809 ep->data01 = 1;
810
811 /* handle the usb command based on the usb_ctrlrequest */
812 setup_received_handle(udc, &udc->local_setup_buff);
813 return 0;
814 }
815 return -EINVAL;
816 }
817
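/*
 * Walk the ep0 Rx BD ring: a frame of exactly USB_CRC_SIZE bytes is a
 * status-phase ZLP, a SETUP frame goes to ep0_setup_handle(), and
 * anything else is handled as OUT data for the current request.
 */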
818 static int qe_ep0_rx(struct qe_udc *udc)
819 {
820 struct qe_ep *ep = &udc->eps[0];
821 struct qe_frame *pframe;
822 struct qe_bd __iomem *bd;
823 u32 bdstatus, length;
824 u32 vaddr;
825
826 pframe = ep->rxframe;
827
828 if (ep->dir == USB_DIR_IN) {
829 dev_err(udc->dev, "ep0 not a control endpoint\n");
830 return -EINVAL;
831 }
832
833 bd = ep->n_rxbd;
834 bdstatus = in_be32((u32 __iomem *)bd);
835 length = bdstatus & BD_LENGTH_MASK;
836
837 while (!(bdstatus & R_E) && length) {
838 if ((bdstatus & R_F) && (bdstatus & R_L)
839 && !(bdstatus & R_ERROR)) {
840 if (length == USB_CRC_SIZE) {
841 udc->ep0_state = WAIT_FOR_SETUP;
842 dev_vdbg(udc->dev,
843 "receive a ZLP in status phase\n");
844 } else {
845 qe_frame_clean(pframe);
846 vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
847 frame_set_data(pframe, (u8 *)vaddr);
848 frame_set_length(pframe,
849 (length - USB_CRC_SIZE));
850 frame_set_status(pframe, FRAME_OK);
851 switch (bdstatus & R_PID) {
852 case R_PID_SETUP:
853 frame_set_info(pframe, PID_SETUP);
854 break;
855 case R_PID_DATA1:
856 frame_set_info(pframe, PID_DATA1);
857 break;
858 default:
859 frame_set_info(pframe, PID_DATA0);
860 break;
861 }
862
863 if ((bdstatus & R_PID) == R_PID_SETUP)
864 ep0_setup_handle(udc);
865 else
866 qe_ep_rxframe_handle(ep);
867 }
868 } else {
869 dev_err(udc->dev, "The receive frame with error!\n");
870 }
871
872 /* note: don't clear the rxbd's buffer address */
873 recycle_one_rxbd(ep);
874
875 /* Get next BD */
876 if (bdstatus & R_W)
877 bd = ep->rxbase;
878 else
879 bd++;
880
881 bdstatus = in_be32((u32 __iomem *)bd);
882 length = bdstatus & BD_LENGTH_MASK;
883
884 }
885
886 ep->n_rxbd = bd;
887
888 return 0;
889 }
890
891 static int qe_ep_rxframe_handle(struct qe_ep *ep)
892 {
893 struct qe_frame *pframe;
894 u8 framepid = 0;
895 unsigned int fsize;
896 u8 *cp;
897 struct qe_req *req;
898
899 pframe = ep->rxframe;
900
901 if (frame_get_info(pframe) & PID_DATA1)
902 framepid = 0x1;
903
904 if (framepid != ep->data01) {
905 dev_err(ep->udc->dev, "the data01 error!\n");
906 return -EIO;
907 }
908
909 fsize = frame_get_length(pframe);
910 if (list_empty(&ep->queue)) {
911 dev_err(ep->udc->dev, "the %s have no requeue!\n", ep->name);
912 } else {
913 req = list_entry(ep->queue.next, struct qe_req, queue);
914
915 cp = (u8 *)(req->req.buf) + req->req.actual;
916 if (cp) {
917 memcpy(cp, pframe->data, fsize);
918 req->req.actual += fsize;
919 if ((fsize < ep->ep.maxpacket) ||
920 (req->req.actual >= req->req.length)) {
921 if (ep->epnum == 0)
922 ep0_req_complete(ep->udc, req);
923 else
924 done(ep, req, 0);
925 if (list_empty(&ep->queue) && ep->epnum != 0)
926 qe_eprx_nack(ep);
927 }
928 }
929 }
930
931 qe_ep_toggledata01(ep);
932
933 return 0;
934 }
935
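/*
 * Bottom half for non-ep0 OUT traffic: drain the Rx BD ring of every
 * endpoint that scheduled the tasklet, copy each good frame into the
 * current request and recycle the BDs afterwards.
 */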
936 static void ep_rx_tasklet(unsigned long data)
937 {
938 struct qe_udc *udc = (struct qe_udc *)data;
939 struct qe_ep *ep;
940 struct qe_frame *pframe;
941 struct qe_bd __iomem *bd;
942 unsigned long flags;
943 u32 bdstatus, length;
944 u32 vaddr, i;
945
946 spin_lock_irqsave(&udc->lock, flags);
947
948 for (i = 1; i < USB_MAX_ENDPOINTS; i++) {
949 ep = &udc->eps[i];
950
951 if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) {
952 dev_dbg(udc->dev,
953 "This is a transmit ep or disable tasklet!\n");
954 continue;
955 }
956
957 pframe = ep->rxframe;
958 bd = ep->n_rxbd;
959 bdstatus = in_be32((u32 __iomem *)bd);
960 length = bdstatus & BD_LENGTH_MASK;
961
962 while (!(bdstatus & R_E) && length) {
963 if (list_empty(&ep->queue)) {
964 qe_eprx_nack(ep);
965 dev_dbg(udc->dev,
966 "The rxep have noreq %d\n",
967 ep->has_data);
968 break;
969 }
970
971 if ((bdstatus & R_F) && (bdstatus & R_L)
972 && !(bdstatus & R_ERROR)) {
973 qe_frame_clean(pframe);
974 vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
975 frame_set_data(pframe, (u8 *)vaddr);
976 frame_set_length(pframe,
977 (length - USB_CRC_SIZE));
978 frame_set_status(pframe, FRAME_OK);
979 switch (bdstatus & R_PID) {
980 case R_PID_DATA1:
981 frame_set_info(pframe, PID_DATA1);
982 break;
983 case R_PID_SETUP:
984 frame_set_info(pframe, PID_SETUP);
985 break;
986 default:
987 frame_set_info(pframe, PID_DATA0);
988 break;
989 }
990 /* handle the rx frame */
991 qe_ep_rxframe_handle(ep);
992 } else {
993 dev_err(udc->dev,
994 "error in received frame\n");
995 }
996 /* note: don't clear the rxbd's buffer address */
997 /* clear the length */
998 out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK);
999 ep->has_data--;
1000 if (!(ep->localnack))
1001 recycle_one_rxbd(ep);
1002
1003 /* Get next BD */
1004 if (bdstatus & R_W)
1005 bd = ep->rxbase;
1006 else
1007 bd++;
1008
1009 bdstatus = in_be32((u32 __iomem *)bd);
1010 length = bdstatus & BD_LENGTH_MASK;
1011 }
1012
1013 ep->n_rxbd = bd;
1014
1015 if (ep->localnack)
1016 ep_recycle_rxbds(ep);
1017
1018 ep->enable_tasklet = 0;
1019 } /* for i=1 */
1020
1021 spin_unlock_irqrestore(&udc->lock, flags);
1022 }
1023
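/*
 * Interrupt-time part of OUT handling for non-ep0 endpoints: work out
 * how many BDs the controller has filled, NAK the endpoint if the
 * ring is nearly full or nothing is queued, and defer the copying to
 * ep_rx_tasklet().
 */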
1024 static int qe_ep_rx(struct qe_ep *ep)
1025 {
1026 struct qe_udc *udc;
1027 struct qe_frame *pframe;
1028 struct qe_bd __iomem *bd;
1029 u16 swoffs, ucoffs, emptybds;
1030
1031 udc = ep->udc;
1032 pframe = ep->rxframe;
1033
1034 if (ep->dir == USB_DIR_IN) {
1035 dev_err(udc->dev, "transmit ep in rx function\n");
1036 return -EINVAL;
1037 }
1038
1039 bd = ep->n_rxbd;
1040
1041 swoffs = (u16)(bd - ep->rxbase);
1042 ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) -
1043 in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3);
1044 if (swoffs < ucoffs)
1045 emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs;
1046 else
1047 emptybds = swoffs - ucoffs;
1048
1049 if (emptybds < MIN_EMPTY_BDS) {
1050 qe_eprx_nack(ep);
1051 ep->localnack = 1;
1052 dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds);
1053 }
1054 ep->has_data = USB_BDRING_LEN_RX - emptybds;
1055
1056 if (list_empty(&ep->queue)) {
1057 qe_eprx_nack(ep);
1058 dev_vdbg(udc->dev, "The rxep have no req queued with %d BDs\n",
1059 ep->has_data);
1060 return 0;
1061 }
1062
1063 tasklet_schedule(&udc->rx_tasklet);
1064 ep->enable_tasklet = 1;
1065
1066 return 0;
1067 }
1068
1069 /* send the data in a frame, regardless of which tx_req it belongs to */
1070 static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
1071 {
1072 struct qe_udc *udc = ep->udc;
1073 struct qe_bd __iomem *bd;
1074 u16 saveusbmr;
1075 u32 bdstatus, pidmask;
1076 u32 paddr;
1077
1078 if (ep->dir == USB_DIR_OUT) {
1079 dev_err(udc->dev, "receive ep passed to tx function\n");
1080 return -EINVAL;
1081 }
1082
1083 /* Disable the Tx interrupt */
1084 saveusbmr = in_be16(&udc->usb_regs->usb_usbmr);
1085 out_be16(&udc->usb_regs->usb_usbmr,
1086 saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK));
1087
1088 bd = ep->n_txbd;
1089 bdstatus = in_be32((u32 __iomem *)bd);
1090
1091 if (!(bdstatus & (T_R | BD_LENGTH_MASK))) {
1092 if (frame_get_length(frame) == 0) {
1093 frame_set_data(frame, udc->nullbuf);
1094 frame_set_length(frame, 2);
1095 frame->info |= (ZLP | NO_CRC);
1096 dev_vdbg(udc->dev, "the frame size = 0\n");
1097 }
1098 paddr = virt_to_phys((void *)frame->data);
1099 out_be32(&bd->buf, paddr);
1100 bdstatus = (bdstatus&T_W);
1101 if (!(frame_get_info(frame) & NO_CRC))
1102 bdstatus |= T_R | T_I | T_L | T_TC
1103 | frame_get_length(frame);
1104 else
1105 bdstatus |= T_R | T_I | T_L | frame_get_length(frame);
1106
1107 /* if the packet is a ZLP in status phase */
1108 if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP))
1109 ep->data01 = 0x1;
1110
1111 if (ep->data01) {
1112 pidmask = T_PID_DATA1;
1113 frame->info |= PID_DATA1;
1114 } else {
1115 pidmask = T_PID_DATA0;
1116 frame->info |= PID_DATA0;
1117 }
1118 bdstatus |= T_CNF;
1119 bdstatus |= pidmask;
1120 out_be32((u32 __iomem *)bd, bdstatus);
1121 qe_ep_filltxfifo(ep);
1122
1123 /* enable the TX interrupt */
1124 out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1125
1126 qe_ep_toggledata01(ep);
1127 if (bdstatus & T_W)
1128 ep->n_txbd = ep->txbase;
1129 else
1130 ep->n_txbd++;
1131
1132 return 0;
1133 } else {
1134 out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1135 dev_vdbg(udc->dev, "The tx bd is not ready!\n");
1136 return -EBUSY;
1137 }
1138 }
1139
1140 /* called when a bd has been transmitted; handles the tx_req
1141 * for all endpoints except ep0 */
1142 static int txcomplete(struct qe_ep *ep, unsigned char restart)
1143 {
1144 if (ep->tx_req != NULL) {
1145 struct qe_req *req = ep->tx_req;
1146 unsigned zlp = 0, last_len = 0;
1147
1148 last_len = min_t(unsigned, req->req.length - ep->sent,
1149 ep->ep.maxpacket);
1150
1151 if (!restart) {
1152 int asent = ep->last;
1153 ep->sent += asent;
1154 ep->last -= asent;
1155 } else {
1156 ep->last = 0;
1157 }
1158
1159 /* a zlp is needed when req->req.zero is set */
1160 if (req->req.zero) {
1161 if (last_len == 0 ||
1162 (req->req.length % ep->ep.maxpacket) != 0)
1163 zlp = 0;
1164 else
1165 zlp = 1;
1166 } else
1167 zlp = 0;
1168
1169 /* the request has been transmitted completely */
1170 if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
1171 done(ep, ep->tx_req, 0);
1172 ep->tx_req = NULL;
1173 ep->last = 0;
1174 ep->sent = 0;
1175 }
1176 }
1177
1178 /* we should get a new tx_req for this endpoint */
1179 if (ep->tx_req == NULL) {
1180 if (!list_empty(&ep->queue)) {
1181 ep->tx_req = list_entry(ep->queue.next, struct qe_req,
1182 queue);
1183 ep->last = 0;
1184 ep->sent = 0;
1185 }
1186 }
1187
1188 return 0;
1189 }
1190
1191 /* given a frame and a tx_req, send some data */
1192 static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
1193 {
1194 unsigned int size;
1195 u8 *buf;
1196
1197 qe_frame_clean(frame);
1198 size = min_t(u32, (ep->tx_req->req.length - ep->sent),
1199 ep->ep.maxpacket);
1200 buf = (u8 *)ep->tx_req->req.buf + ep->sent;
1201 if (buf && size) {
1202 ep->last = size;
1203 ep->tx_req->req.actual += size;
1204 frame_set_data(frame, buf);
1205 frame_set_length(frame, size);
1206 frame_set_status(frame, FRAME_OK);
1207 frame_set_info(frame, 0);
1208 return qe_ep_tx(ep, frame);
1209 }
1210 return -EIO;
1211 }
1212
1213 /* given a frame struct, send a ZLP */
1214 static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor)
1215 {
1216 struct qe_udc *udc = ep->udc;
1217
1218 if (frame == NULL)
1219 return -ENODEV;
1220
1221 qe_frame_clean(frame);
1222 frame_set_data(frame, (u8 *)udc->nullbuf);
1223 frame_set_length(frame, 2);
1224 frame_set_status(frame, FRAME_OK);
1225 frame_set_info(frame, (ZLP | NO_CRC | infor));
1226
1227 return qe_ep_tx(ep, frame);
1228 }
1229
1230 static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame)
1231 {
1232 struct qe_req *req = ep->tx_req;
1233 int reval;
1234
1235 if (req == NULL)
1236 return -ENODEV;
1237
1238 if ((req->req.length - ep->sent) > 0)
1239 reval = qe_usb_senddata(ep, frame);
1240 else
1241 reval = sendnulldata(ep, frame, 0);
1242
1243 return reval;
1244 }
1245
1246 /* if direction is DIR_IN, the status transaction is Device->Host;
1247 * if direction is DIR_OUT, the status transaction is Device<-Host.
1248 * In the status phase, the udc creates a request and collects the status */
1249 static int ep0_prime_status(struct qe_udc *udc, int direction)
1250 {
1251
1252 struct qe_ep *ep = &udc->eps[0];
1253
1254 if (direction == USB_DIR_IN) {
1255 udc->ep0_state = DATA_STATE_NEED_ZLP;
1256 udc->ep0_dir = USB_DIR_IN;
1257 sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1258 } else {
1259 udc->ep0_dir = USB_DIR_OUT;
1260 udc->ep0_state = WAIT_FOR_OUT_STATUS;
1261 }
1262
1263 return 0;
1264 }
1265
1266 /* a request completed on ep0, whether a gadget request or a udc request */
1267 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
1268 {
1269 struct qe_ep *ep = &udc->eps[0];
1270 /* the usb and ep status have already been set in ch9setaddress() */
1271
1272 switch (udc->ep0_state) {
1273 case DATA_STATE_XMIT:
1274 done(ep, req, 0);
1275 /* receive status phase */
1276 if (ep0_prime_status(udc, USB_DIR_OUT))
1277 qe_ep0_stall(udc);
1278 break;
1279
1280 case DATA_STATE_NEED_ZLP:
1281 done(ep, req, 0);
1282 udc->ep0_state = WAIT_FOR_SETUP;
1283 break;
1284
1285 case DATA_STATE_RECV:
1286 done(ep, req, 0);
1287 /* send status phase */
1288 if (ep0_prime_status(udc, USB_DIR_IN))
1289 qe_ep0_stall(udc);
1290 break;
1291
1292 case WAIT_FOR_OUT_STATUS:
1293 done(ep, req, 0);
1294 udc->ep0_state = WAIT_FOR_SETUP;
1295 break;
1296
1297 case WAIT_FOR_SETUP:
1298 dev_vdbg(udc->dev, "Unexpected interrupt\n");
1299 break;
1300
1301 default:
1302 qe_ep0_stall(udc);
1303 break;
1304 }
1305 }
1306
1307 static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart)
1308 {
1309 struct qe_req *tx_req = NULL;
1310 struct qe_frame *frame = ep->txframe;
1311
1312 if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) {
1313 if (!restart)
1314 ep->udc->ep0_state = WAIT_FOR_SETUP;
1315 else
1316 sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1317 return 0;
1318 }
1319
1320 tx_req = ep->tx_req;
1321 if (tx_req != NULL) {
1322 if (!restart) {
1323 int asent = ep->last;
1324 ep->sent += asent;
1325 ep->last -= asent;
1326 } else {
1327 ep->last = 0;
1328 }
1329
1330 /* the request has been transmitted completely */
1331 if ((ep->tx_req->req.length - ep->sent) <= 0) {
1332 ep->tx_req->req.actual = (unsigned int)ep->sent;
1333 ep0_req_complete(ep->udc, ep->tx_req);
1334 ep->tx_req = NULL;
1335 ep->last = 0;
1336 ep->sent = 0;
1337 }
1338 } else {
1339 dev_vdbg(ep->udc->dev, "the ep0_controller have no req\n");
1340 }
1341
1342 return 0;
1343 }
1344
1345 static int ep0_txframe_handle(struct qe_ep *ep)
1346 {
1347 /* on error, transmit again */
1348 if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1349 qe_ep_flushtxfifo(ep);
1350 dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n");
1351 if (frame_get_info(ep->txframe) & PID_DATA0)
1352 ep->data01 = 0;
1353 else
1354 ep->data01 = 1;
1355
1356 ep0_txcomplete(ep, 1);
1357 } else
1358 ep0_txcomplete(ep, 0);
1359
1360 frame_create_tx(ep, ep->txframe);
1361 return 0;
1362 }
1363
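/*
 * Confirm transmitted ep0 BDs: recycle each confirmed BD and, once the
 * confirm pointer catches up with the next-to-send pointer, record any
 * Tx error in the frame and let ep0_txframe_handle() decide whether to
 * retransmit or advance the control transfer.
 */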
1364 static int qe_ep0_txconf(struct qe_ep *ep)
1365 {
1366 struct qe_bd __iomem *bd;
1367 struct qe_frame *pframe;
1368 u32 bdstatus;
1369
1370 bd = ep->c_txbd;
1371 bdstatus = in_be32((u32 __iomem *)bd);
1372 while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1373 pframe = ep->txframe;
1374
1375 /* clear and recycle the BD */
1376 out_be32((u32 __iomem *)bd, bdstatus & T_W);
1377 out_be32(&bd->buf, 0);
1378 if (bdstatus & T_W)
1379 ep->c_txbd = ep->txbase;
1380 else
1381 ep->c_txbd++;
1382
1383 if (ep->c_txbd == ep->n_txbd) {
1384 if (bdstatus & DEVICE_T_ERROR) {
1385 frame_set_status(pframe, FRAME_ERROR);
1386 if (bdstatus & T_TO)
1387 pframe->status |= TX_ER_TIMEOUT;
1388 if (bdstatus & T_UN)
1389 pframe->status |= TX_ER_UNDERUN;
1390 }
1391 ep0_txframe_handle(ep);
1392 }
1393
1394 bd = ep->c_txbd;
1395 bdstatus = in_be32((u32 __iomem *)bd);
1396 }
1397
1398 return 0;
1399 }
1400
1401 static int ep_txframe_handle(struct qe_ep *ep)
1402 {
1403 if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1404 qe_ep_flushtxfifo(ep);
1405 dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n");
1406 if (frame_get_info(ep->txframe) & PID_DATA0)
1407 ep->data01 = 0;
1408 else
1409 ep->data01 = 1;
1410
1411 txcomplete(ep, 1);
1412 } else
1413 txcomplete(ep, 0);
1414
1415 frame_create_tx(ep, ep->txframe); /* send the data */
1416 return 0;
1417 }
1418
1419 /* confirm the already transmitted bds */
1420 static int qe_ep_txconf(struct qe_ep *ep)
1421 {
1422 struct qe_bd __iomem *bd;
1423 struct qe_frame *pframe = NULL;
1424 u32 bdstatus;
1425 unsigned char breakonrxinterrupt = 0;
1426
1427 bd = ep->c_txbd;
1428 bdstatus = in_be32((u32 __iomem *)bd);
1429 while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1430 pframe = ep->txframe;
1431 if (bdstatus & DEVICE_T_ERROR) {
1432 frame_set_status(pframe, FRAME_ERROR);
1433 if (bdstatus & T_TO)
1434 pframe->status |= TX_ER_TIMEOUT;
1435 if (bdstatus & T_UN)
1436 pframe->status |= TX_ER_UNDERUN;
1437 }
1438
1439 /* clear and recycle the BD */
1440 out_be32((u32 __iomem *)bd, bdstatus & T_W);
1441 out_be32(&bd->buf, 0);
1442 if (bdstatus & T_W)
1443 ep->c_txbd = ep->txbase;
1444 else
1445 ep->c_txbd++;
1446
1447 /* handle the tx frame */
1448 ep_txframe_handle(ep);
1449 bd = ep->c_txbd;
1450 bdstatus = in_be32((u32 __iomem *)bd);
1451 }
1452 if (breakonrxinterrupt)
1453 return -EIO;
1454 else
1455 return 0;
1456 }
1457
1458 /* Add a request in queue, and try to transmit a packet */
1459 static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
1460 {
1461 int reval = 0;
1462
1463 if (ep->tx_req == NULL) {
1464 ep->sent = 0;
1465 ep->last = 0;
1466 txcomplete(ep, 0); /* can gain a new tx_req */
1467 reval = frame_create_tx(ep, ep->txframe);
1468 }
1469 return reval;
1470 }
1471
1472 /* Maybe this is a good idea */
1473 static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
1474 {
1475 struct qe_udc *udc = ep->udc;
1476 struct qe_frame *pframe = NULL;
1477 struct qe_bd __iomem *bd;
1478 u32 bdstatus, length;
1479 u32 vaddr, fsize;
1480 u8 *cp;
1481 u8 finish_req = 0;
1482 u8 framepid;
1483
1484 if (list_empty(&ep->queue)) {
1485 dev_vdbg(udc->dev, "the req already finish!\n");
1486 return 0;
1487 }
1488 pframe = ep->rxframe;
1489
1490 bd = ep->n_rxbd;
1491 bdstatus = in_be32((u32 __iomem *)bd);
1492 length = bdstatus & BD_LENGTH_MASK;
1493
1494 while (!(bdstatus & R_E) && length) {
1495 if (finish_req)
1496 break;
1497 if ((bdstatus & R_F) && (bdstatus & R_L)
1498 && !(bdstatus & R_ERROR)) {
1499 qe_frame_clean(pframe);
1500 vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
1501 frame_set_data(pframe, (u8 *)vaddr);
1502 frame_set_length(pframe, (length - USB_CRC_SIZE));
1503 frame_set_status(pframe, FRAME_OK);
1504 switch (bdstatus & R_PID) {
1505 case R_PID_DATA1:
1506 frame_set_info(pframe, PID_DATA1); break;
1507 default:
1508 frame_set_info(pframe, PID_DATA0); break;
1509 }
1510 /* handle the rx frame */
1511
1512 if (frame_get_info(pframe) & PID_DATA1)
1513 framepid = 0x1;
1514 else
1515 framepid = 0;
1516
1517 if (framepid != ep->data01) {
1518 dev_vdbg(udc->dev, "the data01 error!\n");
1519 } else {
1520 fsize = frame_get_length(pframe);
1521
1522 cp = (u8 *)(req->req.buf) + req->req.actual;
1523 if (cp) {
1524 memcpy(cp, pframe->data, fsize);
1525 req->req.actual += fsize;
1526 if ((fsize < ep->ep.maxpacket)
1527 || (req->req.actual >=
1528 req->req.length)) {
1529 finish_req = 1;
1530 done(ep, req, 0);
1531 if (list_empty(&ep->queue))
1532 qe_eprx_nack(ep);
1533 }
1534 }
1535 qe_ep_toggledata01(ep);
1536 }
1537 } else {
1538 dev_err(udc->dev, "The receive frame with error!\n");
1539 }
1540
1541 /* note: don't clear the rxbd's buffer address *
1542 * only clear the length */
1543 out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK));
1544 ep->has_data--;
1545
1546 /* Get next BD */
1547 if (bdstatus & R_W)
1548 bd = ep->rxbase;
1549 else
1550 bd++;
1551
1552 bdstatus = in_be32((u32 __iomem *)bd);
1553 length = bdstatus & BD_LENGTH_MASK;
1554 }
1555
1556 ep->n_rxbd = bd;
1557 ep_recycle_rxbds(ep);
1558
1559 return 0;
1560 }
1561
1562 /* only add the request to the queue */
1563 static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
1564 {
1565 if (ep->state == EP_STATE_NACK) {
1566 if (ep->has_data <= 0) {
1567 /* Enable rx and unmask rx interrupt */
1568 qe_eprx_normal(ep);
1569 } else {
1570 /* Copy the existing BD data */
1571 ep_req_rx(ep, req);
1572 }
1573 }
1574
1575 return 0;
1576 }
1577
1578 /********************************************************************
1579 Internal Used Function End
1580 ********************************************************************/
1581
1582 /*-----------------------------------------------------------------------
1583 Endpoint Management Functions For Gadget
1584 -----------------------------------------------------------------------*/
1585 static int qe_ep_enable(struct usb_ep *_ep,
1586 const struct usb_endpoint_descriptor *desc)
1587 {
1588 struct qe_udc *udc;
1589 struct qe_ep *ep;
1590 int retval = 0;
1591 unsigned char epnum;
1592
1593 ep = container_of(_ep, struct qe_ep, ep);
1594
1595 /* catch various bogus parameters */
1596 if (!_ep || !desc || _ep->name == ep_name[0] ||
1597 (desc->bDescriptorType != USB_DT_ENDPOINT))
1598 return -EINVAL;
1599
1600 udc = ep->udc;
1601 if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
1602 return -ESHUTDOWN;
1603
1604 epnum = (u8)desc->bEndpointAddress & 0xF;
1605
1606 retval = qe_ep_init(udc, epnum, desc);
1607 if (retval != 0) {
1608 cpm_muram_free(cpm_muram_offset(ep->rxbase));
1609 dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum);
1610 return -EINVAL;
1611 }
1612 dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum);
1613 return 0;
1614 }
1615
1616 static int qe_ep_disable(struct usb_ep *_ep)
1617 {
1618 struct qe_udc *udc;
1619 struct qe_ep *ep;
1620 unsigned long flags;
1621 unsigned int size;
1622
1623 ep = container_of(_ep, struct qe_ep, ep);
1624 udc = ep->udc;
1625
1626 if (!_ep || !ep->ep.desc) {
1627 dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL);
1628 return -EINVAL;
1629 }
1630
1631 spin_lock_irqsave(&udc->lock, flags);
1632 /* Nuke all pending requests (does flush) */
1633 nuke(ep, -ESHUTDOWN);
1634 ep->ep.desc = NULL;
1635 ep->stopped = 1;
1636 ep->tx_req = NULL;
1637 qe_ep_reset(udc, ep->epnum);
1638 spin_unlock_irqrestore(&udc->lock, flags);
1639
1640 cpm_muram_free(cpm_muram_offset(ep->rxbase));
1641
1642 if (ep->dir == USB_DIR_OUT)
1643 size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1644 (USB_BDRING_LEN_RX + 1);
1645 else
1646 size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1647 (USB_BDRING_LEN + 1);
1648
1649 if (ep->dir != USB_DIR_IN) {
1650 kfree(ep->rxframe);
1651 if (ep->rxbufmap) {
1652 dma_unmap_single(udc->gadget.dev.parent,
1653 ep->rxbuf_d, size,
1654 DMA_FROM_DEVICE);
1655 ep->rxbuf_d = DMA_ADDR_INVALID;
1656 } else {
1657 dma_sync_single_for_cpu(
1658 udc->gadget.dev.parent,
1659 ep->rxbuf_d, size,
1660 DMA_FROM_DEVICE);
1661 }
1662 kfree(ep->rxbuffer);
1663 }
1664
1665 if (ep->dir != USB_DIR_OUT)
1666 kfree(ep->txframe);
1667
1668 dev_dbg(udc->dev, "disabled %s OK\n", _ep->name);
1669 return 0;
1670 }
1671
1672 static struct usb_request *qe_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
1673 {
1674 struct qe_req *req;
1675
1676 req = kzalloc(sizeof(*req), gfp_flags);
1677 if (!req)
1678 return NULL;
1679
1680 req->req.dma = DMA_ADDR_INVALID;
1681
1682 INIT_LIST_HEAD(&req->queue);
1683
1684 return &req->req;
1685 }
1686
1687 static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
1688 {
1689 struct qe_req *req;
1690
1691 req = container_of(_req, struct qe_req, req);
1692
1693 if (_req)
1694 kfree(req);
1695 }
1696
1697 static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
1698 {
1699 struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1700 struct qe_req *req = container_of(_req, struct qe_req, req);
1701 struct qe_udc *udc;
1702 int reval;
1703
1704 udc = ep->udc;
1705 /* catch various bogus parameters */
1706 if (!_req || !req->req.complete || !req->req.buf
1707 || !list_empty(&req->queue)) {
1708 dev_dbg(udc->dev, "bad params\n");
1709 return -EINVAL;
1710 }
1711 if (!_ep || (!ep->ep.desc && ep_index(ep))) {
1712 dev_dbg(udc->dev, "bad ep\n");
1713 return -EINVAL;
1714 }
1715
1716 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
1717 return -ESHUTDOWN;
1718
1719 req->ep = ep;
1720
1721 /* map virtual address to hardware */
1722 if (req->req.dma == DMA_ADDR_INVALID) {
1723 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1724 req->req.buf,
1725 req->req.length,
1726 ep_is_in(ep)
1727 ? DMA_TO_DEVICE :
1728 DMA_FROM_DEVICE);
1729 req->mapped = 1;
1730 } else {
1731 dma_sync_single_for_device(ep->udc->gadget.dev.parent,
1732 req->req.dma, req->req.length,
1733 ep_is_in(ep)
1734 ? DMA_TO_DEVICE :
1735 DMA_FROM_DEVICE);
1736 req->mapped = 0;
1737 }
1738
1739 req->req.status = -EINPROGRESS;
1740 req->req.actual = 0;
1741
1742 list_add_tail(&req->queue, &ep->queue);
1743 dev_vdbg(udc->dev, "gadget have request in %s! %d\n",
1744 ep->name, req->req.length);
1745
1746 /* push the request to device */
1747 if (ep_is_in(ep))
1748 reval = ep_req_send(ep, req);
1749
1750 /* EP0 */
1751 if (ep_index(ep) == 0 && req->req.length > 0) {
1752 if (ep_is_in(ep))
1753 udc->ep0_state = DATA_STATE_XMIT;
1754 else
1755 udc->ep0_state = DATA_STATE_RECV;
1756 }
1757
1758 if (ep->dir == USB_DIR_OUT)
1759 reval = ep_req_receive(ep, req);
1760
1761 return 0;
1762 }
1763
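/*
 * Gadget drivers do not call this directly; requests arrive through
 * the usb_ep_* wrappers.  A rough sketch of the usual gadget-side
 * sequence (buffer, length and completion names are illustrative
 * only):
 *
 *	usb_ep_enable(ep);			(ep->desc already set)
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = my_buf;
 *	req->length = my_len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);	(lands here)
 */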
1764 /* queues (submits) an I/O request to an endpoint */
1765 static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1766 gfp_t gfp_flags)
1767 {
1768 struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1769 struct qe_udc *udc = ep->udc;
1770 unsigned long flags;
1771 int ret;
1772
1773 spin_lock_irqsave(&udc->lock, flags);
1774 ret = __qe_ep_queue(_ep, _req);
1775 spin_unlock_irqrestore(&udc->lock, flags);
1776 return ret;
1777 }
1778
1779 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
1780 static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1781 {
1782 struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1783 struct qe_req *req;
1784 unsigned long flags;
1785
1786 if (!_ep || !_req)
1787 return -EINVAL;
1788
1789 spin_lock_irqsave(&ep->udc->lock, flags);
1790
1791 /* make sure it's actually queued on this endpoint */
1792 list_for_each_entry(req, &ep->queue, queue) {
1793 if (&req->req == _req)
1794 break;
1795 }
1796
1797 if (&req->req != _req) {
1798 spin_unlock_irqrestore(&ep->udc->lock, flags);
1799 return -EINVAL;
1800 }
1801
1802 done(ep, req, -ECONNRESET);
1803
1804 spin_unlock_irqrestore(&ep->udc->lock, flags);
1805 return 0;
1806 }
1807
1808 /*-----------------------------------------------------------------
1809 * modify the endpoint halt feature
1810 * @ep: the non-isochronous endpoint being stalled
1811 * @value: 1--set halt 0--clear halt
1812 * Returns zero, or a negative error code.
1813 *----------------------------------------------------------------*/
1814 static int qe_ep_set_halt(struct usb_ep *_ep, int value)
1815 {
1816 struct qe_ep *ep;
1817 unsigned long flags;
1818 int status = -EOPNOTSUPP;
1819 struct qe_udc *udc;
1820
1821 ep = container_of(_ep, struct qe_ep, ep);
1822 if (!_ep || !ep->ep.desc) {
1823 status = -EINVAL;
1824 goto out;
1825 }
1826
1827 udc = ep->udc;
1828 /* An attempt to halt an IN ep will fail if any transfer requests
1829 * are still queued */
1830 if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
1831 status = -EAGAIN;
1832 goto out;
1833 }
1834
1835 status = 0;
1836 spin_lock_irqsave(&ep->udc->lock, flags);
1837 qe_eptx_stall_change(ep, value);
1838 qe_eprx_stall_change(ep, value);
1839 spin_unlock_irqrestore(&ep->udc->lock, flags);
1840
1841 if (ep->epnum == 0) {
1842 udc->ep0_state = WAIT_FOR_SETUP;
1843 udc->ep0_dir = 0;
1844 }
1845
1846 /* set data toggle to DATA0 on clear halt */
1847 if (value == 0)
1848 ep->data01 = 0;
1849 out:
1850 dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name,
1851 value ? "set" : "clear", status);
1852
1853 return status;
1854 }
1855
1856 static struct usb_ep_ops qe_ep_ops = {
1857 .enable = qe_ep_enable,
1858 .disable = qe_ep_disable,
1859
1860 .alloc_request = qe_alloc_request,
1861 .free_request = qe_free_request,
1862
1863 .queue = qe_ep_queue,
1864 .dequeue = qe_ep_dequeue,
1865
1866 .set_halt = qe_ep_set_halt,
1867 };
1868
1869 /*------------------------------------------------------------------------
1870 Gadget Driver Layer Operations
1871 ------------------------------------------------------------------------*/
1872
1873 /* Get the current frame number */
1874 static int qe_get_frame(struct usb_gadget *gadget)
1875 {
1876 struct qe_udc *udc = container_of(gadget, struct qe_udc, gadget);
1877 u16 tmp;
1878
1879 tmp = in_be16(&udc->usb_param->frame_n);
1880 if (tmp & 0x8000)
1881 tmp = tmp & 0x07ff;
1882 else
1883 tmp = -EINVAL;
1884
1885 return (int)tmp;
1886 }
1887
1888 static int fsl_qe_start(struct usb_gadget *gadget,
1889 struct usb_gadget_driver *driver);
1890 static int fsl_qe_stop(struct usb_gadget *gadget);
1891
1892 /* defined in usb_gadget.h */
1893 static const struct usb_gadget_ops qe_gadget_ops = {
1894 .get_frame = qe_get_frame,
1895 .udc_start = fsl_qe_start,
1896 .udc_stop = fsl_qe_stop,
1897 };
1898
1899 /*-------------------------------------------------------------------------
1900 USB ep0 Setup process in BUS Enumeration
1901 -------------------------------------------------------------------------*/
1902 static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe)
1903 {
1904 struct qe_ep *ep = &udc->eps[pipe];
1905
1906 nuke(ep, -ECONNRESET);
1907 ep->tx_req = NULL;
1908 return 0;
1909 }
1910
1911 static int reset_queues(struct qe_udc *udc)
1912 {
1913 u8 pipe;
1914
1915 for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++)
1916 udc_reset_ep_queue(udc, pipe);
1917
1918 /* report disconnect; the driver is already quiesced */
1919 spin_unlock(&udc->lock);
1920 usb_gadget_udc_reset(&udc->gadget, udc->driver);
1921 spin_lock(&udc->lock);
1922
1923 return 0;
1924 }
1925
1926 static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index,
1927 u16 length)
1928 {
1929 /* Save the new address to device struct */
1930 udc->device_address = (u8) value;
1931 /* Update usb state */
1932 udc->usb_state = USB_STATE_ADDRESS;
1933
1934 /* Status phase: send a ZLP */
1935 if (ep0_prime_status(udc, USB_DIR_IN))
1936 qe_ep0_stall(udc);
1937 }
1938
1939 static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
1940 {
1941 struct qe_req *req = container_of(_req, struct qe_req, req);
1942
1943 req->req.buf = NULL;
1944 kfree(req);
1945 }
1946
1947 static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
1948 u16 index, u16 length)
1949 {
1950 u16 usb_status = 0;
1951 struct qe_req *req;
1952 struct qe_ep *ep;
1953 int status = 0;
1954
1955 ep = &udc->eps[0];
1956 if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1957 /* Get device status */
1958 usb_status = 1 << USB_DEVICE_SELF_POWERED;
1959 } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
1960 /* Get interface status */
1961 /* We don't have interface information in udc driver */
1962 usb_status = 0;
1963 } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
1964 /* Get endpoint status */
1965 int pipe = index & USB_ENDPOINT_NUMBER_MASK;
1966 struct qe_ep *target_ep = &udc->eps[pipe];
1967 u16 usep;
1968
1969 /* stall if endpoint doesn't exist */
1970 if (!target_ep->ep.desc)
1971 goto stall;
1972
1973 usep = in_be16(&udc->usb_regs->usb_usep[pipe]);
1974 if (index & USB_DIR_IN) {
1975 if (target_ep->dir != USB_DIR_IN)
1976 goto stall;
1977 if ((usep & USB_THS_MASK) == USB_THS_STALL)
1978 usb_status = 1 << USB_ENDPOINT_HALT;
1979 } else {
1980 if (target_ep->dir != USB_DIR_OUT)
1981 goto stall;
1982 if ((usep & USB_RHS_MASK) == USB_RHS_STALL)
1983 usb_status = 1 << USB_ENDPOINT_HALT;
1984 }
1985 }
1986
1987 req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL),
1988 struct qe_req, req);
1989 req->req.length = 2;
1990 req->req.buf = udc->statusbuf;
1991 *(u16 *)req->req.buf = cpu_to_le16(usb_status);
1992 req->req.status = -EINPROGRESS;
1993 req->req.actual = 0;
1994 req->req.complete = ownercomplete;
1995
1996 udc->ep0_dir = USB_DIR_IN;
1997
1998 /* data phase */
1999 status = __qe_ep_queue(&ep->ep, &req->req);
2000
2001 if (status == 0)
2002 return;
2003 stall:
2004 dev_err(udc->dev, "Can't respond to getstatus request \n");
2005 qe_ep0_stall(udc);
2006 }
2007
2008 /* handle only the setup request, assuming the device is in a normal state */
2009 static void setup_received_handle(struct qe_udc *udc,
2010 struct usb_ctrlrequest *setup)
2011 {
2012 /* Fix endianness (udc->local_setup_buff is in CPU byte order now) */
2013 u16 wValue = le16_to_cpu(setup->wValue);
2014 u16 wIndex = le16_to_cpu(setup->wIndex);
2015 u16 wLength = le16_to_cpu(setup->wLength);
2016
2017 /* clear the previous request in the ep0 */
2018 udc_reset_ep_queue(udc, 0);
2019
2020 if (setup->bRequestType & USB_DIR_IN)
2021 udc->ep0_dir = USB_DIR_IN;
2022 else
2023 udc->ep0_dir = USB_DIR_OUT;
2024
2025 switch (setup->bRequest) {
2026 case USB_REQ_GET_STATUS:
2027 /* Data+Status phase from udc */
2028 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
2029 != (USB_DIR_IN | USB_TYPE_STANDARD))
2030 break;
2031 ch9getstatus(udc, setup->bRequestType, wValue, wIndex,
2032 wLength);
2033 return;
2034
2035 case USB_REQ_SET_ADDRESS:
2036 /* Status phase from udc */
2037 if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
2038 USB_RECIP_DEVICE))
2039 break;
2040 ch9setaddress(udc, wValue, wIndex, wLength);
2041 return;
2042
2043 case USB_REQ_CLEAR_FEATURE:
2044 case USB_REQ_SET_FEATURE:
2045 /* Requests with no data phase, status phase from udc */
2046 if ((setup->bRequestType & USB_TYPE_MASK)
2047 != USB_TYPE_STANDARD)
2048 break;
2049
2050 if ((setup->bRequestType & USB_RECIP_MASK)
2051 == USB_RECIP_ENDPOINT) {
2052 int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK;
2053 struct qe_ep *ep;
2054
2055 if (wValue != 0 || wLength != 0
2056 || pipe > USB_MAX_ENDPOINTS)
2057 break;
2058 ep = &udc->eps[pipe];
2059
2060 spin_unlock(&udc->lock);
2061 qe_ep_set_halt(&ep->ep,
2062 (setup->bRequest == USB_REQ_SET_FEATURE)
2063 ? 1 : 0);
2064 spin_lock(&udc->lock);
2065 }
2066
2067 ep0_prime_status(udc, USB_DIR_IN);
2068
2069 return;
2070
2071 default:
2072 break;
2073 }
2074
2075 if (wLength) {
2076 /* Data phase from gadget, status phase from udc */
2077 if (setup->bRequestType & USB_DIR_IN) {
2078 udc->ep0_state = DATA_STATE_XMIT;
2079 udc->ep0_dir = USB_DIR_IN;
2080 } else {
2081 udc->ep0_state = DATA_STATE_RECV;
2082 udc->ep0_dir = USB_DIR_OUT;
2083 }
2084 spin_unlock(&udc->lock);
2085 if (udc->driver->setup(&udc->gadget,
2086 &udc->local_setup_buff) < 0)
2087 qe_ep0_stall(udc);
2088 spin_lock(&udc->lock);
2089 } else {
2090 /* No data phase, IN status from gadget */
2091 udc->ep0_dir = USB_DIR_IN;
2092 spin_unlock(&udc->lock);
2093 if (udc->driver->setup(&udc->gadget,
2094 &udc->local_setup_buff) < 0)
2095 qe_ep0_stall(udc);
2096 spin_lock(&udc->lock);
2097 udc->ep0_state = DATA_STATE_NEED_ZLP;
2098 }
2099 }
2100
2101 /*-------------------------------------------------------------------------
2102 USB Interrupt handlers
2103 -------------------------------------------------------------------------*/
2104 static void suspend_irq(struct qe_udc *udc)
2105 {
2106 udc->resume_state = udc->usb_state;
2107 udc->usb_state = USB_STATE_SUSPENDED;
2108
2109 /* report suspend to the driver; serial.c does not support this */
2110 if (udc->driver->suspend)
2111 udc->driver->suspend(&udc->gadget);
2112 }
2113
2114 static void resume_irq(struct qe_udc *udc)
2115 {
2116 udc->usb_state = udc->resume_state;
2117 udc->resume_state = 0;
2118
2119 /* report resume to the driver; serial.c does not support this */
2120 if (udc->driver->resume)
2121 udc->driver->resume(&udc->gadget);
2122 }
2123
2124 static void idle_irq(struct qe_udc *udc)
2125 {
2126 u8 usbs;
2127
2128 usbs = in_8(&udc->usb_regs->usb_usbs);
2129 if (usbs & USB_IDLE_STATUS_MASK) {
2130 if ((udc->usb_state) != USB_STATE_SUSPENDED)
2131 suspend_irq(udc);
2132 } else {
2133 if (udc->usb_state == USB_STATE_SUSPENDED)
2134 resume_irq(udc);
2135 }
2136 }
2137
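/*
 * Bus reset: disable the controller, clear the device address, reset
 * every initialized endpoint and its queues, then re-enable the
 * controller in the default, un-addressed state.  Nothing to do if
 * the device is already in the default state.
 */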
2138 static int reset_irq(struct qe_udc *udc)
2139 {
2140 unsigned char i;
2141
2142 if (udc->usb_state == USB_STATE_DEFAULT)
2143 return 0;
2144
2145 qe_usb_disable(udc);
2146 out_8(&udc->usb_regs->usb_usadr, 0);
2147
2148 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2149 if (udc->eps[i].init)
2150 qe_ep_reset(udc, i);
2151 }
2152
2153 reset_queues(udc);
2154 udc->usb_state = USB_STATE_DEFAULT;
2155 udc->ep0_state = WAIT_FOR_SETUP;
2156 udc->ep0_dir = USB_DIR_OUT;
2157 qe_usb_enable(udc);
2158 return 0;
2159 }
2160
2161 static int bsy_irq(struct qe_udc *udc)
2162 {
2163 return 0;
2164 }
2165
2166 static int txe_irq(struct qe_udc *udc)
2167 {
2168 return 0;
2169 }
2170
2171 /* ep0 tx interrupts are also handled here */
2172 static int tx_irq(struct qe_udc *udc)
2173 {
2174 struct qe_ep *ep;
2175 struct qe_bd __iomem *bd;
2176 int i, res = 0;
2177
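/* The address assigned by SET_ADDRESS only takes effect after the
 * status stage completes, so USADR is written here on the first TX
 * confirmation after entering the ADDRESS state. */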
2178 if ((udc->usb_state == USB_STATE_ADDRESS)
2179 && (in_8(&udc->usb_regs->usb_usadr) == 0))
2180 out_8(&udc->usb_regs->usb_usadr, udc->device_address);
2181
2182 for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) {
2183 ep = &udc->eps[i];
2184 if (ep && ep->init && (ep->dir != USB_DIR_OUT)) {
2185 bd = ep->c_txbd;
2186 if (!(in_be32((u32 __iomem *)bd) & T_R)
2187 && (in_be32(&bd->buf))) {
2188 /* confirm the transmitted bd */
2189 if (ep->epnum == 0)
2190 res = qe_ep0_txconf(ep);
2191 else
2192 res = qe_ep_txconf(ep);
2193 }
2194 }
2195 }
2196 return res;
2197 }
2198
2199
2200 /* setup packet rx is also handled in this function */
2201 static void rx_irq(struct qe_udc *udc)
2202 {
2203 struct qe_ep *ep;
2204 struct qe_bd __iomem *bd;
2205 int i;
2206
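/* Scan all OUT-capable endpoints: a BD with its empty bit (R_E)
 * cleared and a non-zero buffer pointer holds received data. */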
2207 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2208 ep = &udc->eps[i];
2209 if (ep && ep->init && (ep->dir != USB_DIR_IN)) {
2210 bd = ep->n_rxbd;
2211 if (!(in_be32((u32 __iomem *)bd) & R_E)
2212 && (in_be32(&bd->buf))) {
2213 if (ep->epnum == 0) {
2214 qe_ep0_rx(udc);
2215 } else {
2216 /* non-setup packet receive */
2217 qe_ep_rx(ep);
2218 }
2219 }
2220 }
2221 }
2222 }
2223
2224 static irqreturn_t qe_udc_irq(int irq, void *_udc)
2225 {
2226 struct qe_udc *udc = (struct qe_udc *)_udc;
2227 u16 irq_src;
2228 irqreturn_t status = IRQ_NONE;
2229 unsigned long flags;
2230
2231 spin_lock_irqsave(&udc->lock, flags);
2232
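/* Service only events that are both pending (USBER) and
 * enabled (USBMR); the acknowledged bits are cleared below. */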
2233 irq_src = in_be16(&udc->usb_regs->usb_usber) &
2234 in_be16(&udc->usb_regs->usb_usbmr);
2235 /* Clear notification bits */
2236 out_be16(&udc->usb_regs->usb_usber, irq_src);
2237 /* USB Interrupt */
2238 if (irq_src & USB_E_IDLE_MASK) {
2239 idle_irq(udc);
2240 irq_src &= ~USB_E_IDLE_MASK;
2241 status = IRQ_HANDLED;
2242 }
2243
2244 if (irq_src & USB_E_TXB_MASK) {
2245 tx_irq(udc);
2246 irq_src &= ~USB_E_TXB_MASK;
2247 status = IRQ_HANDLED;
2248 }
2249
2250 if (irq_src & USB_E_RXB_MASK) {
2251 rx_irq(udc);
2252 irq_src &= ~USB_E_RXB_MASK;
2253 status = IRQ_HANDLED;
2254 }
2255
2256 if (irq_src & USB_E_RESET_MASK) {
2257 reset_irq(udc);
2258 irq_src &= ~USB_E_RESET_MASK;
2259 status = IRQ_HANDLED;
2260 }
2261
2262 if (irq_src & USB_E_BSY_MASK) {
2263 bsy_irq(udc);
2264 irq_src &= ~USB_E_BSY_MASK;
2265 status = IRQ_HANDLED;
2266 }
2267
2268 if (irq_src & USB_E_TXE_MASK) {
2269 txe_irq(udc);
2270 irq_src &= ~USB_E_TXE_MASK;
2271 status = IRQ_HANDLED;
2272 }
2273
2274 spin_unlock_irqrestore(&udc->lock, flags);
2275
2276 return status;
2277 }
2278
2279 /*-------------------------------------------------------------------------
2280 Gadget driver probe and unregister.
2281 --------------------------------------------------------------------------*/
2282 static int fsl_qe_start(struct usb_gadget *gadget,
2283 struct usb_gadget_driver *driver)
2284 {
2285 struct qe_udc *udc;
2286 unsigned long flags;
2287
2288 udc = container_of(gadget, struct qe_udc, gadget);
2289 /* a lock is needed, though it is unclear whether this lock or a separate one should be used */
2290 spin_lock_irqsave(&udc->lock, flags);
2291
2292 driver->driver.bus = NULL;
2293 /* hook up the driver */
2294 udc->driver = driver;
2295 udc->gadget.speed = driver->max_speed;
2296
2297 /* enable the controller, clear pending events and unmask the default device interrupts */
2298 qe_usb_enable(udc);
2299
2300 out_be16(&udc->usb_regs->usb_usber, 0xffff);
2301 out_be16(&udc->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE);
2302 udc->usb_state = USB_STATE_ATTACHED;
2303 udc->ep0_state = WAIT_FOR_SETUP;
2304 udc->ep0_dir = USB_DIR_OUT;
2305 spin_unlock_irqrestore(&udc->lock, flags);
2306
2307 return 0;
2308 }
2309
2310 static int fsl_qe_stop(struct usb_gadget *gadget)
2311 {
2312 struct qe_udc *udc;
2313 struct qe_ep *loop_ep;
2314 unsigned long flags;
2315
2316 udc = container_of(gadget, struct qe_udc, gadget);
2317 /* stop usb controller, disable intr */
2318 qe_usb_disable(udc);
2319
2320 /* in fact, not needed */
2321 udc->usb_state = USB_STATE_ATTACHED;
2322 udc->ep0_state = WAIT_FOR_SETUP;
2323 udc->ep0_dir = 0;
2324
2325 /* standard operation */
2326 spin_lock_irqsave(&udc->lock, flags);
2327 udc->gadget.speed = USB_SPEED_UNKNOWN;
2328 nuke(&udc->eps[0], -ESHUTDOWN);
2329 list_for_each_entry(loop_ep, &udc->gadget.ep_list, ep.ep_list)
2330 nuke(loop_ep, -ESHUTDOWN);
2331 spin_unlock_irqrestore(&udc->lock, flags);
2332
2333 udc->driver = NULL;
2334
2335 return 0;
2336 }
2337
2338 /* allocate and set up the udc structure, including the ep parameter allocation */
2339 static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
2340 {
2341 struct qe_udc *udc;
2342 struct device_node *np = ofdev->dev.of_node;
2343 unsigned long tmp_addr = 0;
2344 struct usb_device_para __iomem *usbpram;
2345 unsigned int i;
2346 u64 size;
2347 u32 offset;
2348
2349 udc = kzalloc(sizeof(*udc), GFP_KERNEL);
2350 if (udc == NULL) {
2351 dev_err(&ofdev->dev, "malloc udc failed\n");
2352 goto cleanup;
2353 }
2354
2355 udc->dev = &ofdev->dev;
2356
2357 /* get default address of usb parameter in MURAM from device tree */
2358 offset = *of_get_address(np, 1, &size, NULL);
2359 udc->usb_param = cpm_muram_addr(offset);
2360 memset_io(udc->usb_param, 0, size);
2361
2362 usbpram = udc->usb_param;
2363 out_be16(&usbpram->frame_n, 0);
2364 out_be32(&usbpram->rstate, 0);
2365
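/* Allocate one parameter block per endpoint from MURAM and record
 * each block's offset in the endpoint pointer table; the 32-byte
 * stride below is assumed to match sizeof(struct usb_ep_para). */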
2366 tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
2367 sizeof(struct usb_ep_para)),
2368 USB_EP_PARA_ALIGNMENT);
2369 if (IS_ERR_VALUE(tmp_addr))
2370 goto cleanup;
2371
2372 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2373 out_be16(&usbpram->epptr[i], (u16)tmp_addr);
2374 udc->ep_param[i] = cpm_muram_addr(tmp_addr);
2375 tmp_addr += 32;
2376 }
2377
2378 memset_io(udc->ep_param[0], 0,
2379 USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para));
2380
2381 udc->resume_state = USB_STATE_NOTATTACHED;
2382 udc->usb_state = USB_STATE_POWERED;
2383 udc->ep0_dir = 0;
2384
2385 spin_lock_init(&udc->lock);
2386 return udc;
2387
2388 cleanup:
2389 kfree(udc);
2390 return NULL;
2391 }
2392
2393 /* USB Controller register init */
2394 static int qe_udc_reg_init(struct qe_udc *udc)
2395 {
2396 struct usb_ctlr __iomem *qe_usbregs;
2397 qe_usbregs = udc->usb_regs;
2398
2399 /* Spec says that we must enable the USB controller to change mode. */
2400 out_8(&qe_usbregs->usb_usmod, 0x01);
2401 /* Mode changed, now disable it, since muram isn't initialized yet. */
2402 out_8(&qe_usbregs->usb_usmod, 0x00);
2403
2404 /* Initialize the rest. */
2405 out_be16(&qe_usbregs->usb_usbmr, 0);
2406 out_8(&qe_usbregs->usb_uscom, 0);
2407 out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
2408
2409 return 0;
2410 }
2411
2412 static int qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
2413 {
2414 struct qe_ep *ep = &udc->eps[pipe_num];
2415
2416 ep->udc = udc;
2417 strcpy(ep->name, ep_name[pipe_num]);
2418 ep->ep.name = ep_name[pipe_num];
2419
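/* ep0 is control-only; the remaining endpoints advertise iso, bulk
 * and interrupt in both directions so usb_ep_autoconfig() can claim
 * them. */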
2420 if (pipe_num == 0) {
2421 ep->ep.caps.type_control = true;
2422 } else {
2423 ep->ep.caps.type_iso = true;
2424 ep->ep.caps.type_bulk = true;
2425 ep->ep.caps.type_int = true;
2426 }
2427
2428 ep->ep.caps.dir_in = true;
2429 ep->ep.caps.dir_out = true;
2430
2431 ep->ep.ops = &qe_ep_ops;
2432 ep->stopped = 1;
2433 usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
2434 ep->ep.desc = NULL;
2435 ep->dir = 0xff;
2436 ep->epnum = (u8)pipe_num;
2437 ep->sent = 0;
2438 ep->last = 0;
2439 ep->init = 0;
2440 ep->rxframe = NULL;
2441 ep->txframe = NULL;
2442 ep->tx_req = NULL;
2443 ep->state = EP_STATE_IDLE;
2444 ep->has_data = 0;
2445
2446 /* the queue lists any req for this ep */
2447 INIT_LIST_HEAD(&ep->queue);
2448
2449 /* gadget.ep_list is used by ep_autoconfig, so ep0 is excluded */
2450 if (pipe_num != 0)
2451 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2452
2453 ep->gadget = &udc->gadget;
2454
2455 return 0;
2456 }
2457
2458 /*-----------------------------------------------------------------------
2459 * UDC device Driver operation functions *
2460 *----------------------------------------------------------------------*/
2461 static void qe_udc_release(struct device *dev)
2462 {
2463 struct qe_udc *udc = container_of(dev, struct qe_udc, gadget.dev);
2464 int i;
2465
2466 complete(udc->done);
2467 cpm_muram_free(cpm_muram_offset(udc->ep_param[0]));
2468 for (i = 0; i < USB_MAX_ENDPOINTS; i++)
2469 udc->ep_param[i] = NULL;
2470
2471 kfree(udc);
2472 }
2473
2474 /* Driver probe functions */
2475 static const struct of_device_id qe_udc_match[];
2476 static int qe_udc_probe(struct platform_device *ofdev)
2477 {
2478 struct qe_udc *udc;
2479 const struct of_device_id *match;
2480 struct device_node *np = ofdev->dev.of_node;
2481 struct qe_ep *ep;
2482 int ret = 0;
2483 unsigned int i;
2484 const void *prop;
2485
2486 match = of_match_device(qe_udc_match, &ofdev->dev);
2487 if (!match)
2488 return -EINVAL;
2489
2490 prop = of_get_property(np, "mode", NULL);
2491 if (!prop || strcmp(prop, "peripheral"))
2492 return -ENODEV;
2493
2494 /* Initialize the udc structure including QH member and other member */
2495 udc = qe_udc_config(ofdev);
2496 if (!udc) {
2497 dev_err(&ofdev->dev, "failed to initialize\n");
2498 return -ENOMEM;
2499 }
2500
2501 udc->soc_type = (unsigned long)match->data;
2502 udc->usb_regs = of_iomap(np, 0);
2503 if (!udc->usb_regs) {
2504 ret = -ENOMEM;
2505 goto err1;
2506 }
2507
2508 /* initialize the usb hw registers except the per-EP registers;
2509 * leave the usb interrupt register untouched */
2510 qe_udc_reg_init(udc);
2511
2512 /* the standard probe operations follow:
2513 * set up the qe_udc->gadget.xxx fields */
2514 udc->gadget.ops = &qe_gadget_ops;
2515
2516 /* gadget.ep0 is a pointer */
2517 udc->gadget.ep0 = &udc->eps[0].ep;
2518
2519 INIT_LIST_HEAD(&udc->gadget.ep_list);
2520
2521 /* updated when a gadget driver is registered */
2522 udc->gadget.speed = USB_SPEED_UNKNOWN;
2523
2524 /* name: Identifies the controller hardware type. */
2525 udc->gadget.name = driver_name;
2526 udc->gadget.dev.parent = &ofdev->dev;
2527
2528 /* initialize qe_ep struct */
2529 for (i = 0; i < USB_MAX_ENDPOINTS ; i++) {
2530 /* the ep type isn't decided here, so
2531 * qe_ep_init() should be called from ep_enable() */
2532
2533 /* setup the qe_ep struct and link ep.ep.list
2534 * into gadget.ep_list */
2535 qe_ep_config(udc, (unsigned char)i);
2536 }
2537
2538 /* ep0 initialization in here */
2539 ret = qe_ep_init(udc, 0, &qe_ep0_desc);
2540 if (ret)
2541 goto err2;
2542
2543 /* create a buffer for sending ZLPs; it must remain zeroed */
2544 udc->nullbuf = devm_kzalloc(&ofdev->dev, 256, GFP_KERNEL);
2545 if (udc->nullbuf == NULL) {
2546 ret = -ENOMEM;
2547 goto err3;
2548 }
2549
2550 /* buffer for the data stage of GET_STATUS requests */
2551 udc->statusbuf = devm_kzalloc(&ofdev->dev, 2, GFP_KERNEL);
2552 if (udc->statusbuf == NULL) {
2553 ret = -ENOMEM;
2554 goto err3;
2555 }
2556
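/* The ZLP buffer is handed to the hardware by bus address: map it
 * for DMA if virt_to_phys() does not yield a usable address,
 * otherwise just sync the existing mapping to the device. */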
2557 udc->nullp = virt_to_phys((void *)udc->nullbuf);
2558 if (udc->nullp == DMA_ADDR_INVALID) {
2559 udc->nullp = dma_map_single(
2560 udc->gadget.dev.parent,
2561 udc->nullbuf,
2562 256,
2563 DMA_TO_DEVICE);
2564 udc->nullmap = 1;
2565 } else {
2566 dma_sync_single_for_device(udc->gadget.dev.parent,
2567 udc->nullp, 256,
2568 DMA_TO_DEVICE);
2569 }
2570
2571 tasklet_init(&udc->rx_tasklet, ep_rx_tasklet,
2572 (unsigned long)udc);
2573 /* map and request the usb irq */
2574 udc->usb_irq = irq_of_parse_and_map(np, 0);
2575 if (!udc->usb_irq) {
2576 ret = -EINVAL;
2577 goto err_noirq;
2578 }
2579
2580 ret = request_irq(udc->usb_irq, qe_udc_irq, 0,
2581 driver_name, udc);
2582 if (ret) {
2583 dev_err(udc->dev, "cannot request irq %d err %d\n",
2584 udc->usb_irq, ret);
2585 goto err4;
2586 }
2587
2588 ret = usb_add_gadget_udc_release(&ofdev->dev, &udc->gadget,
2589 qe_udc_release);
2590 if (ret)
2591 goto err5;
2592
2593 platform_set_drvdata(ofdev, udc);
2594 dev_info(udc->dev,
2595 "%s USB controller initialized as device\n",
2596 (udc->soc_type == PORT_QE) ? "QE" : "CPM");
2597 return 0;
2598
2599 err5:
2600 free_irq(udc->usb_irq, udc);
2601 err4:
2602 irq_dispose_mapping(udc->usb_irq);
2603 err_noirq:
2604 if (udc->nullmap) {
2605 dma_unmap_single(udc->gadget.dev.parent,
2606 udc->nullp, 256,
2607 DMA_TO_DEVICE);
2608 udc->nullp = DMA_ADDR_INVALID;
2609 } else {
2610 dma_sync_single_for_cpu(udc->gadget.dev.parent,
2611 udc->nullp, 256,
2612 DMA_TO_DEVICE);
2613 }
2614 err3:
2615 ep = &udc->eps[0];
2616 cpm_muram_free(cpm_muram_offset(ep->rxbase));
2617 kfree(ep->rxframe);
2618 kfree(ep->rxbuffer);
2619 kfree(ep->txframe);
2620 err2:
2621 iounmap(udc->usb_regs);
2622 err1:
2623 kfree(udc);
2624 return ret;
2625 }
2626
2627 #ifdef CONFIG_PM
2628 static int qe_udc_suspend(struct platform_device *dev, pm_message_t state)
2629 {
2630 return -ENOTSUPP;
2631 }
2632
2633 static int qe_udc_resume(struct platform_device *dev)
2634 {
2635 return -ENOTSUPP;
2636 }
2637 #endif
2638
2639 static int qe_udc_remove(struct platform_device *ofdev)
2640 {
2641 struct qe_udc *udc = platform_get_drvdata(ofdev);
2642 struct qe_ep *ep;
2643 unsigned int size;
2644 DECLARE_COMPLETION_ONSTACK(done);
2645
2646 usb_del_gadget_udc(&udc->gadget);
2647
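/* 'done' is completed by qe_udc_release() once the gadget device is
 * released; see the wait at the end of this function. */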
2648 udc->done = &done;
2649 tasklet_disable(&udc->rx_tasklet);
2650
2651 if (udc->nullmap) {
2652 dma_unmap_single(udc->gadget.dev.parent,
2653 udc->nullp, 256,
2654 DMA_TO_DEVICE);
2655 udc->nullp = DMA_ADDR_INVALID;
2656 } else {
2657 dma_sync_single_for_cpu(udc->gadget.dev.parent,
2658 udc->nullp, 256,
2659 DMA_TO_DEVICE);
2660 }
2661
2662 ep = &udc->eps[0];
2663 cpm_muram_free(cpm_muram_offset(ep->rxbase));
2664 size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1);
2665
2666 kfree(ep->rxframe);
2667 if (ep->rxbufmap) {
2668 dma_unmap_single(udc->gadget.dev.parent,
2669 ep->rxbuf_d, size,
2670 DMA_FROM_DEVICE);
2671 ep->rxbuf_d = DMA_ADDR_INVALID;
2672 } else {
2673 dma_sync_single_for_cpu(udc->gadget.dev.parent,
2674 ep->rxbuf_d, size,
2675 DMA_FROM_DEVICE);
2676 }
2677
2678 kfree(ep->rxbuffer);
2679 kfree(ep->txframe);
2680
2681 free_irq(udc->usb_irq, udc);
2682 irq_dispose_mapping(udc->usb_irq);
2683
2684 tasklet_kill(&udc->rx_tasklet);
2685
2686 iounmap(udc->usb_regs);
2687
2688 /* wait for release() of gadget.dev to free udc */
2689 wait_for_completion(&done);
2690
2691 return 0;
2692 }
2693
2694 /*-------------------------------------------------------------------------*/
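/*
 * Illustrative device-tree node for binding this driver in peripheral mode.
 * Register ranges, interrupt specifiers and the interrupt parent are
 * board/SoC specific; the values below are examples only.
 *
 *	usb@6c0 {
 *		compatible = "fsl,mpc8323-qe-usb";
 *		reg = <0x6c0 0x40 0x8b00 0x100>;
 *		interrupts = <11>;
 *		interrupt-parent = <&qeic>;
 *		mode = "peripheral";
 *	};
 */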
2695 static const struct of_device_id qe_udc_match[] = {
2696 {
2697 .compatible = "fsl,mpc8323-qe-usb",
2698 .data = (void *)PORT_QE,
2699 },
2700 {
2701 .compatible = "fsl,mpc8360-qe-usb",
2702 .data = (void *)PORT_QE,
2703 },
2704 {
2705 .compatible = "fsl,mpc8272-cpm-usb",
2706 .data = (void *)PORT_CPM,
2707 },
2708 {},
2709 };
2710
2711 MODULE_DEVICE_TABLE(of, qe_udc_match);
2712
2713 static struct platform_driver udc_driver = {
2714 .driver = {
2715 .name = driver_name,
2716 .of_match_table = qe_udc_match,
2717 },
2718 .probe = qe_udc_probe,
2719 .remove = qe_udc_remove,
2720 #ifdef CONFIG_PM
2721 .suspend = qe_udc_suspend,
2722 .resume = qe_udc_resume,
2723 #endif
2724 };
2725
2726 module_platform_driver(udc_driver);
2727
2728 MODULE_DESCRIPTION(DRIVER_DESC);
2729 MODULE_AUTHOR(DRIVER_AUTHOR);
2730 MODULE_LICENSE("GPL");