drivers/usb/dwc3/gadget.c
1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32
33 #include "core.h"
34 #include "gadget.h"
35 #include "io.h"
36
37 /**
38 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
39 * @dwc: pointer to our context structure
40 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
41 *
42 * Caller should take care of locking. This function will
43 * return 0 on success or -EINVAL if an invalid Test Selector
44 * is passed
45 */
46 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
47 {
48 u32 reg;
49
50 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
51 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
52
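/*
 * Note: the selectors handled below are the standard USB 2.0 test mode
 * values from ch9.h (TEST_J = 1 through TEST_FORCE_EN = 5); the chosen
 * selector is simply shifted into the test-control field of DCTL that
 * was cleared above.
 */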
53 switch (mode) {
54 case TEST_J:
55 case TEST_K:
56 case TEST_SE0_NAK:
57 case TEST_PACKET:
58 case TEST_FORCE_EN:
59 reg |= mode << 1;
60 break;
61 default:
62 return -EINVAL;
63 }
64
65 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
66
67 return 0;
68 }
69
70 /**
71 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
72 * @dwc: pointer to our context structure
73 * @state: the state to put link into
74 *
75 * Caller should take care of locking. This function will
76 * return 0 on success or -ETIMEDOUT.
77 */
78 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
79 {
80 int retries = 10000;
81 u32 reg;
82
83 /*
84 * Wait until device controller is ready. Only applies to 1.94a and
85 * later RTL.
86 */
87 if (dwc->revision >= DWC3_REVISION_194A) {
88 while (--retries) {
89 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
90 if (reg & DWC3_DSTS_DCNRD)
91 udelay(5);
92 else
93 break;
94 }
95
96 if (retries <= 0)
97 return -ETIMEDOUT;
98 }
99
100 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
101 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
102
103 /* set requested state */
104 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
105 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
106
107 /*
108 * The following code is racy when called from dwc3_gadget_wakeup,
109 * and is not needed, at least on newer versions
110 */
111 if (dwc->revision >= DWC3_REVISION_194A)
112 return 0;
113
114 /* wait for a change in DSTS */
115 retries = 10000;
116 while (--retries) {
117 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
118
119 if (DWC3_DSTS_USBLNKST(reg) == state)
120 return 0;
121
122 udelay(5);
123 }
124
125 dev_vdbg(dwc->dev, "link state change request timed out\n");
126
127 return -ETIMEDOUT;
128 }
129
130 /**
131 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
132 * @dwc: pointer to our context structure
133 *
134 * This function will make a best effort FIFO allocation in order
135 * to improve FIFO usage and throughput, while still allowing
136 * us to enable as many endpoints as possible.
137 *
138 * Keep in mind that this operation will be highly dependent
139 * on the configured size for RAM1 - which contains TxFifo -,
140 * the number of endpoints enabled in the coreConsultant tool, and
141 * the width of the Master Bus.
142 *
143 * In the ideal world, we would always be able to satisfy the
144 * following equation:
145 *
146 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
147 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
148 *
149 * Unfortunately, due to many variables that's not always the case.
150 */
151 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
152 {
153 int last_fifo_depth = 0;
154 int ram1_depth;
155 int fifo_size;
156 int mdwidth;
157 int num;
158
159 if (!dwc->needs_fifo_resize)
160 return 0;
161
162 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
163 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
164
165 /* MDWIDTH is represented in bits, we need it in bytes */
166 mdwidth >>= 3;
167
168 /*
169 * FIXME For now we will only allocate 1 wMaxPacketSize space
170 * for each enabled endpoint, later patches will come to
171 * improve this algorithm so that we better use the internal
172 * FIFO space
173 */
174 for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
175 struct dwc3_ep *dep = dwc->eps[num];
176 int fifo_number = dep->number >> 1;
177 int mult = 1;
178 int tmp;
179
180 if (!(dep->number & 1))
181 continue;
182
183 if (!(dep->flags & DWC3_EP_ENABLED))
184 continue;
185
186 if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
187 || usb_endpoint_xfer_isoc(dep->endpoint.desc))
188 mult = 3;
189
190 /*
191 * REVISIT: the following assumes we will always have enough
192 * space available on the FIFO RAM for all possible use cases.
193 * Make sure that's true somehow and change FIFO allocation
194 * accordingly.
195 *
196 * If we have Bulk or Isochronous endpoints, we want
197 * them to be able to be very, very fast. So we're giving
198 * those endpoints a fifo_size which is enough for 3 full
199 * packets
200 */
201 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
202 tmp += mdwidth;
203
204 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
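/*
 * Illustrative example (values assumed): with a 64-bit master bus,
 * mdwidth = 8 bytes; a bulk endpoint with maxpacket = 1024 and
 * mult = 3 gives tmp = 3 * (1024 + 8) + 8 = 3104, so
 * fifo_size = DIV_ROUND_UP(3104, 8) = 388 RAM1 words.
 */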
205
206 fifo_size |= (last_fifo_depth << 16);
207
208 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
209 dep->name, last_fifo_depth, fifo_size & 0xffff);
210
211 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
212 fifo_size);
213
214 last_fifo_depth += (fifo_size & 0xffff);
215 }
216
217 return 0;
218 }
219
220 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
221 int status)
222 {
223 struct dwc3 *dwc = dep->dwc;
224 int i;
225
226 if (req->queued) {
227 i = 0;
228 do {
229 dep->busy_slot++;
230 /*
231 * Skip LINK TRB. We can't use req->trb and check for
232 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
233 * just completed (not the LINK TRB).
234 */
235 if (((dep->busy_slot & DWC3_TRB_MASK) ==
236 DWC3_TRB_NUM - 1) &&
237 usb_endpoint_xfer_isoc(dep->endpoint.desc))
238 dep->busy_slot++;
239 } while (++i < req->request.num_mapped_sgs);
240 req->queued = false;
241 }
242 list_del(&req->list);
243 req->trb = NULL;
244
245 if (req->request.status == -EINPROGRESS)
246 req->request.status = status;
247
248 if (dwc->ep0_bounced && dep->number == 0)
249 dwc->ep0_bounced = false;
250 else
251 usb_gadget_unmap_request(&dwc->gadget, &req->request,
252 req->direction);
253
254 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
255 req, dep->name, req->request.actual,
256 req->request.length, status);
257
258 spin_unlock(&dwc->lock);
259 req->request.complete(&dep->endpoint, &req->request);
260 spin_lock(&dwc->lock);
261 }
262
263 static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
264 {
265 switch (cmd) {
266 case DWC3_DEPCMD_DEPSTARTCFG:
267 return "Start New Configuration";
268 case DWC3_DEPCMD_ENDTRANSFER:
269 return "End Transfer";
270 case DWC3_DEPCMD_UPDATETRANSFER:
271 return "Update Transfer";
272 case DWC3_DEPCMD_STARTTRANSFER:
273 return "Start Transfer";
274 case DWC3_DEPCMD_CLEARSTALL:
275 return "Clear Stall";
276 case DWC3_DEPCMD_SETSTALL:
277 return "Set Stall";
278 case DWC3_DEPCMD_GETEPSTATE:
279 return "Get Endpoint State";
280 case DWC3_DEPCMD_SETTRANSFRESOURCE:
281 return "Set Endpoint Transfer Resource";
282 case DWC3_DEPCMD_SETEPCONFIG:
283 return "Set Endpoint Configuration";
284 default:
285 return "UNKNOWN command";
286 }
287 }
288
289 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
290 {
291 u32 timeout = 500;
292 u32 reg;
293
294 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
295 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
296
297 do {
298 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
299 if (!(reg & DWC3_DGCMD_CMDACT)) {
300 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
301 DWC3_DGCMD_STATUS(reg));
302 return 0;
303 }
304
305 /*
306 * We can't sleep here, because it's also called from
307 * interrupt context.
308 */
309 timeout--;
310 if (!timeout)
311 return -ETIMEDOUT;
312 udelay(1);
313 } while (1);
314 }
315
316 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
317 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
318 {
319 struct dwc3_ep *dep = dwc->eps[ep];
320 u32 timeout = 500;
321 u32 reg;
322
323 dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
324 dep->name,
325 dwc3_gadget_ep_cmd_string(cmd), params->param0,
326 params->param1, params->param2);
327
328 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
329 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
330 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
331
332 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
333 do {
334 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
335 if (!(reg & DWC3_DEPCMD_CMDACT)) {
336 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
337 DWC3_DEPCMD_STATUS(reg));
338 return 0;
339 }
340
341 /*
342 * We can't sleep here, because it is also called from
343 * interrupt context.
344 */
345 timeout--;
346 if (!timeout)
347 return -ETIMEDOUT;
348
349 udelay(1);
350 } while (1);
351 }
352
353 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
354 struct dwc3_trb *trb)
355 {
356 u32 offset = (char *) trb - (char *) dep->trb_pool;
357
358 return dep->trb_pool_dma + offset;
359 }
360
361 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
362 {
363 struct dwc3 *dwc = dep->dwc;
364
365 if (dep->trb_pool)
366 return 0;
367
368 if (dep->number == 0 || dep->number == 1)
369 return 0;
370
371 dep->trb_pool = dma_alloc_coherent(dwc->dev,
372 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
373 &dep->trb_pool_dma, GFP_KERNEL);
374 if (!dep->trb_pool) {
375 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
376 dep->name);
377 return -ENOMEM;
378 }
379
380 return 0;
381 }
382
383 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
384 {
385 struct dwc3 *dwc = dep->dwc;
386
387 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
388 dep->trb_pool, dep->trb_pool_dma);
389
390 dep->trb_pool = NULL;
391 dep->trb_pool_dma = 0;
392 }
393
394 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
395 {
396 struct dwc3_gadget_ep_cmd_params params;
397 u32 cmd;
398
399 memset(&params, 0x00, sizeof(params));
400
401 if (dep->number != 1) {
402 cmd = DWC3_DEPCMD_DEPSTARTCFG;
403 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
404 if (dep->number > 1) {
405 if (dwc->start_config_issued)
406 return 0;
407 dwc->start_config_issued = true;
408 cmd |= DWC3_DEPCMD_PARAM(2);
409 }
410
411 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
412 }
413
414 return 0;
415 }
416
417 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
418 const struct usb_endpoint_descriptor *desc,
419 const struct usb_ss_ep_comp_descriptor *comp_desc,
420 bool ignore)
421 {
422 struct dwc3_gadget_ep_cmd_params params;
423
424 memset(&params, 0x00, sizeof(params));
425
426 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
427 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
428
429 /* Burst size is only needed in SuperSpeed mode */
430 if (dwc->gadget.speed == USB_SPEED_SUPER) {
431 u32 burst = dep->endpoint.maxburst - 1;
432
433 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
434 }
435
436 if (ignore)
437 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
438
439 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
440 | DWC3_DEPCFG_XFER_NOT_READY_EN;
441
442 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
443 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
444 | DWC3_DEPCFG_STREAM_EVENT_EN;
445 dep->stream_capable = true;
446 }
447
448 if (usb_endpoint_xfer_isoc(desc))
449 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
450
451 /*
452 * We are doing 1:1 mapping for endpoints, meaning
453 * Physical Endpoint 2 maps to Logical Endpoint 2 and
454 * so on. We consider the direction bit as part of the physical
455 * endpoint number. So USB endpoint 0x81 is 0x03.
456 */
457 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
458
459 /*
460 * We must use the lower 16 TX FIFOs even though
461 * HW might have more
462 */
463 if (dep->direction)
464 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
465
466 if (desc->bInterval) {
467 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
468 dep->interval = 1 << (desc->bInterval - 1);
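/*
 * Example (illustrative): a high-speed periodic endpoint with
 * bInterval = 4 yields interval = 1 << 3 = 8 microframes, i.e. a
 * 1ms service interval at 125us per microframe.
 */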
469 }
470
471 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
472 DWC3_DEPCMD_SETEPCONFIG, &params);
473 }
474
475 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
476 {
477 struct dwc3_gadget_ep_cmd_params params;
478
479 memset(&params, 0x00, sizeof(params));
480
481 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
482
483 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
484 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
485 }
486
487 /**
488 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
489 * @dep: endpoint to be initialized
490 * @desc: USB Endpoint Descriptor
491 *
492 * Caller should take care of locking
493 */
494 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
495 const struct usb_endpoint_descriptor *desc,
496 const struct usb_ss_ep_comp_descriptor *comp_desc,
497 bool ignore)
498 {
499 struct dwc3 *dwc = dep->dwc;
500 u32 reg;
501 int ret = -ENOMEM;
502
503 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
504
505 if (!(dep->flags & DWC3_EP_ENABLED)) {
506 ret = dwc3_gadget_start_config(dwc, dep);
507 if (ret)
508 return ret;
509 }
510
511 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
512 if (ret)
513 return ret;
514
515 if (!(dep->flags & DWC3_EP_ENABLED)) {
516 struct dwc3_trb *trb_st_hw;
517 struct dwc3_trb *trb_link;
518
519 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
520 if (ret)
521 return ret;
522
523 dep->endpoint.desc = desc;
524 dep->comp_desc = comp_desc;
525 dep->type = usb_endpoint_type(desc);
526 dep->flags |= DWC3_EP_ENABLED;
527
528 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
529 reg |= DWC3_DALEPENA_EP(dep->number);
530 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
531
532 if (!usb_endpoint_xfer_isoc(desc))
533 return 0;
534
535 /* Link TRB for ISOC. The HWO bit is never reset */
536 trb_st_hw = &dep->trb_pool[0];
537
538 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
539 memset(trb_link, 0, sizeof(*trb_link));
541
542 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
543 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
544 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
545 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
546 }
547
548 return 0;
549 }
550
551 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
552 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
553 {
554 struct dwc3_request *req;
555
556 if (!list_empty(&dep->req_queued)) {
557 dwc3_stop_active_transfer(dwc, dep->number);
558
559 /* - giveback all requests to gadget driver */
560 while (!list_empty(&dep->req_queued)) {
561 req = next_request(&dep->req_queued);
562
563 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
564 }
565 }
566
567 while (!list_empty(&dep->request_list)) {
568 req = next_request(&dep->request_list);
569
570 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
571 }
572 }
573
574 /**
575 * __dwc3_gadget_ep_disable - Disables a HW endpoint
576 * @dep: the endpoint to disable
577 *
578 * This function also removes requests which are currently processed by the
579 * hardware and those which are not yet scheduled.
580 * Caller should take care of locking.
581 */
582 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
583 {
584 struct dwc3 *dwc = dep->dwc;
585 u32 reg;
586
587 dwc3_remove_requests(dwc, dep);
588
589 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
590 reg &= ~DWC3_DALEPENA_EP(dep->number);
591 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
592
593 dep->stream_capable = false;
594 dep->endpoint.desc = NULL;
595 dep->comp_desc = NULL;
596 dep->type = 0;
597 dep->flags = 0;
598
599 return 0;
600 }
601
602 /* -------------------------------------------------------------------------- */
603
604 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
605 const struct usb_endpoint_descriptor *desc)
606 {
607 return -EINVAL;
608 }
609
610 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
611 {
612 return -EINVAL;
613 }
614
615 /* -------------------------------------------------------------------------- */
616
617 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
618 const struct usb_endpoint_descriptor *desc)
619 {
620 struct dwc3_ep *dep;
621 struct dwc3 *dwc;
622 unsigned long flags;
623 int ret;
624
625 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
626 pr_debug("dwc3: invalid parameters\n");
627 return -EINVAL;
628 }
629
630 if (!desc->wMaxPacketSize) {
631 pr_debug("dwc3: missing wMaxPacketSize\n");
632 return -EINVAL;
633 }
634
635 dep = to_dwc3_ep(ep);
636 dwc = dep->dwc;
637
638 if (dep->flags & DWC3_EP_ENABLED) {
639 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
640 dep->name);
641 return 0;
642 }
643
644 switch (usb_endpoint_type(desc)) {
645 case USB_ENDPOINT_XFER_CONTROL:
646 strlcat(dep->name, "-control", sizeof(dep->name));
647 break;
648 case USB_ENDPOINT_XFER_ISOC:
649 strlcat(dep->name, "-isoc", sizeof(dep->name));
650 break;
651 case USB_ENDPOINT_XFER_BULK:
652 strlcat(dep->name, "-bulk", sizeof(dep->name));
653 break;
654 case USB_ENDPOINT_XFER_INT:
655 strlcat(dep->name, "-int", sizeof(dep->name));
656 break;
657 default:
658 dev_err(dwc->dev, "invalid endpoint transfer type\n");
659 }
660
661 spin_lock_irqsave(&dwc->lock, flags);
662 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
663 spin_unlock_irqrestore(&dwc->lock, flags);
664
665 return ret;
666 }
667
668 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
669 {
670 struct dwc3_ep *dep;
671 struct dwc3 *dwc;
672 unsigned long flags;
673 int ret;
674
675 if (!ep) {
676 pr_debug("dwc3: invalid parameters\n");
677 return -EINVAL;
678 }
679
680 dep = to_dwc3_ep(ep);
681 dwc = dep->dwc;
682
683 if (!(dep->flags & DWC3_EP_ENABLED)) {
684 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
685 dep->name);
686 return 0;
687 }
688
689 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
690 dep->number >> 1,
691 (dep->number & 1) ? "in" : "out");
692
693 spin_lock_irqsave(&dwc->lock, flags);
694 ret = __dwc3_gadget_ep_disable(dep);
695 spin_unlock_irqrestore(&dwc->lock, flags);
696
697 return ret;
698 }
699
700 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
701 gfp_t gfp_flags)
702 {
703 struct dwc3_request *req;
704 struct dwc3_ep *dep = to_dwc3_ep(ep);
705 struct dwc3 *dwc = dep->dwc;
706
707 req = kzalloc(sizeof(*req), gfp_flags);
708 if (!req) {
709 dev_err(dwc->dev, "not enough memory\n");
710 return NULL;
711 }
712
713 req->epnum = dep->number;
714 req->dep = dep;
715
716 return &req->request;
717 }
718
719 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
720 struct usb_request *request)
721 {
722 struct dwc3_request *req = to_dwc3_request(request);
723
724 kfree(req);
725 }
726
727 /**
728 * dwc3_prepare_one_trb - setup one TRB from one request
729 * @dep: endpoint for which this request is prepared
730 * @req: dwc3_request pointer
731 */
732 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
733 struct dwc3_request *req, dma_addr_t dma,
734 unsigned length, unsigned last, unsigned chain, unsigned node)
735 {
736 struct dwc3 *dwc = dep->dwc;
737 struct dwc3_trb *trb;
738
739 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
740 dep->name, req, (unsigned long long) dma,
741 length, last ? " last" : "",
742 chain ? " chain" : "");
743
744 /* Skip the LINK-TRB on ISOC */
745 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
746 usb_endpoint_xfer_isoc(dep->endpoint.desc))
747 dep->free_slot++;
748
749 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
750
751 if (!req->trb) {
752 dwc3_gadget_move_request_queued(req);
753 req->trb = trb;
754 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
755 req->start_slot = dep->free_slot & DWC3_TRB_MASK;
756 }
757
758 dep->free_slot++;
759
760 trb->size = DWC3_TRB_SIZE_LENGTH(length);
761 trb->bpl = lower_32_bits(dma);
762 trb->bph = upper_32_bits(dma);
763
764 switch (usb_endpoint_type(dep->endpoint.desc)) {
765 case USB_ENDPOINT_XFER_CONTROL:
766 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
767 break;
768
769 case USB_ENDPOINT_XFER_ISOC:
770 if (!node)
771 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
772 else
773 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
774
775 if (!req->request.no_interrupt && !chain)
776 trb->ctrl |= DWC3_TRB_CTRL_IOC;
777 break;
778
779 case USB_ENDPOINT_XFER_BULK:
780 case USB_ENDPOINT_XFER_INT:
781 trb->ctrl = DWC3_TRBCTL_NORMAL;
782 break;
783 default:
784 /*
785 * This is only possible with faulty memory because we
786 * checked it already :)
787 */
788 BUG();
789 }
790
791 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
792 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
793 trb->ctrl |= DWC3_TRB_CTRL_CSP;
794 } else if (last) {
795 trb->ctrl |= DWC3_TRB_CTRL_LST;
796 }
797
798 if (chain)
799 trb->ctrl |= DWC3_TRB_CTRL_CHN;
800
801 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
802 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
803
804 trb->ctrl |= DWC3_TRB_CTRL_HWO;
805 }
806
807 /*
808 * dwc3_prepare_trbs - setup TRBs from requests
809 * @dep: endpoint for which requests are being prepared
810 * @starting: true if the endpoint is idle and no requests are queued.
811 *
812 * The function goes through the requests list and sets up TRBs for the
813 * transfers. The function returns once there are no more TRBs available or
814 * it runs out of requests.
815 */
816 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
817 {
818 struct dwc3_request *req, *n;
819 u32 trbs_left;
820 u32 max;
821 unsigned int last_one = 0;
822
823 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
824
825 /* the first request must not be queued */
826 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
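/*
 * Ring-space arithmetic, for illustration (assuming DWC3_TRB_NUM is 32,
 * so DWC3_TRB_MASK is 31): with busy_slot = 5 and free_slot = 9,
 * (5 - 9) & 31 = 28 TRB slots are still free before we catch up with
 * the ones the hardware is still working on.
 */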
827
828 /* Can't wrap around on a non-isoc EP since there's no link TRB */
829 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
830 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
831 if (trbs_left > max)
832 trbs_left = max;
833 }
834
835 /*
836 * If the busy and free slots are equal, the ring is either full or
837 * empty. If we are starting to process requests then we are empty.
838 * Otherwise we are full and don't do anything.
839 */
840 if (!trbs_left) {
841 if (!starting)
842 return;
843 trbs_left = DWC3_TRB_NUM;
844 /*
845 * In case we start from scratch, we queue the ISOC requests
846 * starting from slot 1. This is done because we use a ring
847 * buffer and have no LST bit to stop us. Instead, we place
848 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
849 * after the first request so we start at slot 1 and have
850 * 7 requests proceed before we hit the first IOC.
851 * Other transfer types don't use the ring buffer and are
852 * processed from the first TRB until the last one. Since we
853 * don't wrap around we have to start at the beginning.
854 */
855 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
856 dep->busy_slot = 1;
857 dep->free_slot = 1;
858 } else {
859 dep->busy_slot = 0;
860 dep->free_slot = 0;
861 }
862 }
863
864 /* The last TRB is a link TRB, not used for xfer */
865 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
866 return;
867
868 list_for_each_entry_safe(req, n, &dep->request_list, list) {
869 unsigned length;
870 dma_addr_t dma;
871 last_one = false;
872
873 if (req->request.num_mapped_sgs > 0) {
874 struct usb_request *request = &req->request;
875 struct scatterlist *sg = request->sg;
876 struct scatterlist *s;
877 int i;
878
879 for_each_sg(sg, s, request->num_mapped_sgs, i) {
880 unsigned chain = true;
881
882 length = sg_dma_len(s);
883 dma = sg_dma_address(s);
884
885 if (i == (request->num_mapped_sgs - 1) ||
886 sg_is_last(s)) {
887 if (list_is_last(&req->list,
888 &dep->request_list))
889 last_one = true;
890 chain = false;
891 }
892
893 trbs_left--;
894 if (!trbs_left)
895 last_one = true;
896
897 if (last_one)
898 chain = false;
899
900 dwc3_prepare_one_trb(dep, req, dma, length,
901 last_one, chain, i);
902
903 if (last_one)
904 break;
905 }
906 } else {
907 dma = req->request.dma;
908 length = req->request.length;
909 trbs_left--;
910
911 if (!trbs_left)
912 last_one = 1;
913
914 /* Is this the last request? */
915 if (list_is_last(&req->list, &dep->request_list))
916 last_one = 1;
917
918 dwc3_prepare_one_trb(dep, req, dma, length,
919 last_one, false, 0);
920
921 if (last_one)
922 break;
923 }
924 }
925 }
926
927 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
928 int start_new)
929 {
930 struct dwc3_gadget_ep_cmd_params params;
931 struct dwc3_request *req;
932 struct dwc3 *dwc = dep->dwc;
933 int ret;
934 u32 cmd;
935
936 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
937 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
938 return -EBUSY;
939 }
940 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
941
942 /*
943 * If we are getting here after a short-out-packet we don't enqueue any
944 * new requests as we try to set the IOC bit only on the last request.
945 */
946 if (start_new) {
947 if (list_empty(&dep->req_queued))
948 dwc3_prepare_trbs(dep, start_new);
949
950 /* req points to the first request which will be sent */
951 req = next_request(&dep->req_queued);
952 } else {
953 dwc3_prepare_trbs(dep, start_new);
954
955 /*
956 * req points to the first request where HWO changed from 0 to 1
957 */
958 req = next_request(&dep->req_queued);
959 }
960 if (!req) {
961 dep->flags |= DWC3_EP_PENDING_REQUEST;
962 return 0;
963 }
964
965 memset(&params, 0, sizeof(params));
966
967 if (start_new) {
968 params.param0 = upper_32_bits(req->trb_dma);
969 params.param1 = lower_32_bits(req->trb_dma);
970 cmd = DWC3_DEPCMD_STARTTRANSFER;
971 } else {
972 cmd = DWC3_DEPCMD_UPDATETRANSFER;
973 }
974
975 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
976 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
977 if (ret < 0) {
978 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
979
980 /*
981 * FIXME we need to iterate over the list of requests
982 * here and stop, unmap, free and del each of the linked
983 * requests instead of what we do now.
984 */
985 usb_gadget_unmap_request(&dwc->gadget, &req->request,
986 req->direction);
987 list_del(&req->list);
988 return ret;
989 }
990
991 dep->flags |= DWC3_EP_BUSY;
992
993 if (start_new) {
994 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
995 dep->number);
996 WARN_ON_ONCE(!dep->resource_index);
997 }
998
999 return 0;
1000 }
1001
1002 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1003 struct dwc3_ep *dep, u32 cur_uf)
1004 {
1005 u32 uf;
1006
1007 if (list_empty(&dep->request_list)) {
1008 dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
1009 dep->name);
1010 dep->flags |= DWC3_EP_PENDING_REQUEST;
1011 return;
1012 }
1013
1014 /* 4 micro frames in the future */
1015 uf = cur_uf + dep->interval * 4;
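/*
 * For illustration: with interval = 8 the transfer is scheduled 32
 * (micro)frames after the reported current (micro)frame, which at
 * high speed (125us microframes) is roughly 4ms in the future.
 */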
1016
1017 __dwc3_gadget_kick_transfer(dep, uf, 1);
1018 }
1019
1020 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1021 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1022 {
1023 u32 cur_uf, mask;
1024
1025 mask = ~(dep->interval - 1);
1026 cur_uf = event->parameters & mask;
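/*
 * Illustrative: with interval = 8, mask = ~7, so a reported
 * microframe of 0x123 is rounded down to 0x120, aligning the start
 * of the transfer to the endpoint's service interval.
 */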
1027
1028 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1029 }
1030
1031 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1032 {
1033 struct dwc3 *dwc = dep->dwc;
1034 int ret;
1035
1036 req->request.actual = 0;
1037 req->request.status = -EINPROGRESS;
1038 req->direction = dep->direction;
1039 req->epnum = dep->number;
1040
1041 /*
1042 * We only add to our list of requests now and
1043 * start consuming the list once we get XferNotReady
1044 * IRQ.
1045 *
1046 * That way, we avoid doing anything that we don't need
1047 * to do now and defer it until the point we receive a
1048 * particular token from the Host side.
1049 *
1050 * This will also avoid Host cancelling URBs due to too
1051 * many NAKs.
1052 */
1053 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1054 dep->direction);
1055 if (ret)
1056 return ret;
1057
1058 list_add_tail(&req->list, &dep->request_list);
1059
1060 /*
1061 * There are a few special cases:
1062 *
1063 * 1. XferNotReady with empty list of requests. We need to kick the
1064 * transfer here in that situation, otherwise we will be NAKing
1065 * forever. If we get XferNotReady before gadget driver has a
1066 * chance to queue a request, we will ACK the IRQ but won't be
1067 * able to receive the data until the next request is queued.
1068 * The following code is handling exactly that.
1069 *
1070 */
1071 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1072 /*
1073 * If xfernotready has already elapsed and it is a case
1074 * of isoc transfer, then issue END TRANSFER, so that
1075 * you can receive xfernotready again and can have
1076 * notion of current microframe.
1077 */
1078 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1079 if (list_empty(&dep->req_queued)) {
1080 dwc3_stop_active_transfer(dwc, dep->number);
1081 dep->flags = DWC3_EP_ENABLED;
1082 }
1083 return 0;
1084 }
1085
1086 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1087 if (ret && ret != -EBUSY)
1088 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1089 dep->name);
1090 return ret;
1091 }
1092
1093 /*
1094 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1095 * kick the transfer here after queuing a request, otherwise the
1096 * core may not see the modified TRB(s).
1097 */
1098 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1099 (dep->flags & DWC3_EP_BUSY) &&
1100 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1101 WARN_ON_ONCE(!dep->resource_index);
1102 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
1103 false);
1104 if (ret && ret != -EBUSY)
1105 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1106 dep->name);
1107 return ret;
1108 }
1109
1110 return 0;
1111 }
1112
1113 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1114 gfp_t gfp_flags)
1115 {
1116 struct dwc3_request *req = to_dwc3_request(request);
1117 struct dwc3_ep *dep = to_dwc3_ep(ep);
1118 struct dwc3 *dwc = dep->dwc;
1119
1120 unsigned long flags;
1121
1122 int ret;
1123
1124 if (!dep->endpoint.desc) {
1125 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1126 request, ep->name);
1127 return -ESHUTDOWN;
1128 }
1129
1130 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
1131 request, ep->name, request->length);
1132
1133 spin_lock_irqsave(&dwc->lock, flags);
1134 ret = __dwc3_gadget_ep_queue(dep, req);
1135 spin_unlock_irqrestore(&dwc->lock, flags);
1136
1137 return ret;
1138 }
1139
1140 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1141 struct usb_request *request)
1142 {
1143 struct dwc3_request *req = to_dwc3_request(request);
1144 struct dwc3_request *r = NULL;
1145
1146 struct dwc3_ep *dep = to_dwc3_ep(ep);
1147 struct dwc3 *dwc = dep->dwc;
1148
1149 unsigned long flags;
1150 int ret = 0;
1151
1152 spin_lock_irqsave(&dwc->lock, flags);
1153
1154 list_for_each_entry(r, &dep->request_list, list) {
1155 if (r == req)
1156 break;
1157 }
1158
1159 if (r != req) {
1160 list_for_each_entry(r, &dep->req_queued, list) {
1161 if (r == req)
1162 break;
1163 }
1164 if (r == req) {
1165 /* wait until it is processed */
1166 dwc3_stop_active_transfer(dwc, dep->number);
1167 goto out1;
1168 }
1169 dev_err(dwc->dev, "request %p was not queued to %s\n",
1170 request, ep->name);
1171 ret = -EINVAL;
1172 goto out0;
1173 }
1174
1175 out1:
1176 /* giveback the request */
1177 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1178
1179 out0:
1180 spin_unlock_irqrestore(&dwc->lock, flags);
1181
1182 return ret;
1183 }
1184
1185 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1186 {
1187 struct dwc3_gadget_ep_cmd_params params;
1188 struct dwc3 *dwc = dep->dwc;
1189 int ret;
1190
1191 memset(&params, 0x00, sizeof(params));
1192
1193 if (value) {
1194 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1195 DWC3_DEPCMD_SETSTALL, &params);
1196 if (ret)
1197 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1198 value ? "set" : "clear",
1199 dep->name);
1200 else
1201 dep->flags |= DWC3_EP_STALL;
1202 } else {
1203 if (dep->flags & DWC3_EP_WEDGE)
1204 return 0;
1205
1206 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1207 DWC3_DEPCMD_CLEARSTALL, &params);
1208 if (ret)
1209 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1210 value ? "set" : "clear",
1211 dep->name);
1212 else
1213 dep->flags &= ~DWC3_EP_STALL;
1214 }
1215
1216 return ret;
1217 }
1218
1219 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1220 {
1221 struct dwc3_ep *dep = to_dwc3_ep(ep);
1222 struct dwc3 *dwc = dep->dwc;
1223
1224 unsigned long flags;
1225
1226 int ret;
1227
1228 spin_lock_irqsave(&dwc->lock, flags);
1229
1230 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1231 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1232 ret = -EINVAL;
1233 goto out;
1234 }
1235
1236 ret = __dwc3_gadget_ep_set_halt(dep, value);
1237 out:
1238 spin_unlock_irqrestore(&dwc->lock, flags);
1239
1240 return ret;
1241 }
1242
1243 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1244 {
1245 struct dwc3_ep *dep = to_dwc3_ep(ep);
1246 struct dwc3 *dwc = dep->dwc;
1247 unsigned long flags;
1248
1249 spin_lock_irqsave(&dwc->lock, flags);
1250 dep->flags |= DWC3_EP_WEDGE;
1251 spin_unlock_irqrestore(&dwc->lock, flags);
1252
1253 if (dep->number == 0 || dep->number == 1)
1254 return dwc3_gadget_ep0_set_halt(ep, 1);
1255 else
1256 return dwc3_gadget_ep_set_halt(ep, 1);
1257 }
1258
1259 /* -------------------------------------------------------------------------- */
1260
1261 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1262 .bLength = USB_DT_ENDPOINT_SIZE,
1263 .bDescriptorType = USB_DT_ENDPOINT,
1264 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1265 };
1266
1267 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1268 .enable = dwc3_gadget_ep0_enable,
1269 .disable = dwc3_gadget_ep0_disable,
1270 .alloc_request = dwc3_gadget_ep_alloc_request,
1271 .free_request = dwc3_gadget_ep_free_request,
1272 .queue = dwc3_gadget_ep0_queue,
1273 .dequeue = dwc3_gadget_ep_dequeue,
1274 .set_halt = dwc3_gadget_ep0_set_halt,
1275 .set_wedge = dwc3_gadget_ep_set_wedge,
1276 };
1277
1278 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1279 .enable = dwc3_gadget_ep_enable,
1280 .disable = dwc3_gadget_ep_disable,
1281 .alloc_request = dwc3_gadget_ep_alloc_request,
1282 .free_request = dwc3_gadget_ep_free_request,
1283 .queue = dwc3_gadget_ep_queue,
1284 .dequeue = dwc3_gadget_ep_dequeue,
1285 .set_halt = dwc3_gadget_ep_set_halt,
1286 .set_wedge = dwc3_gadget_ep_set_wedge,
1287 };
1288
1289 /* -------------------------------------------------------------------------- */
1290
1291 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1292 {
1293 struct dwc3 *dwc = gadget_to_dwc(g);
1294 u32 reg;
1295
1296 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1297 return DWC3_DSTS_SOFFN(reg);
1298 }
1299
1300 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1301 {
1302 struct dwc3 *dwc = gadget_to_dwc(g);
1303
1304 unsigned long timeout;
1305 unsigned long flags;
1306
1307 u32 reg;
1308
1309 int ret = 0;
1310
1311 u8 link_state;
1312 u8 speed;
1313
1314 spin_lock_irqsave(&dwc->lock, flags);
1315
1316 /*
1317 * According to the Databook, a Remote Wakeup request should
1318 * be issued only when the device is in Early Suspend state.
1319 *
1320 * We can check that via USB Link State bits in DSTS register.
1321 */
1322 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1323
1324 speed = reg & DWC3_DSTS_CONNECTSPD;
1325 if (speed == DWC3_DSTS_SUPERSPEED) {
1326 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1327 ret = -EINVAL;
1328 goto out;
1329 }
1330
1331 link_state = DWC3_DSTS_USBLNKST(reg);
1332
1333 switch (link_state) {
1334 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1335 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1336 break;
1337 default:
1338 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1339 link_state);
1340 ret = -EINVAL;
1341 goto out;
1342 }
1343
1344 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1345 if (ret < 0) {
1346 dev_err(dwc->dev, "failed to put link in Recovery\n");
1347 goto out;
1348 }
1349
1350 /* Recent versions do this automatically */
1351 if (dwc->revision < DWC3_REVISION_194A) {
1352 /* write zeroes to Link Change Request */
1353 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1354 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1355 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1356 }
1357
1358 /* poll until Link State changes to ON */
1359 timeout = jiffies + msecs_to_jiffies(100);
1360
1361 while (!time_after(jiffies, timeout)) {
1362 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1363
1364 /* in HS, means ON */
1365 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1366 break;
1367 }
1368
1369 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1370 dev_err(dwc->dev, "failed to send remote wakeup\n");
1371 ret = -EINVAL;
1372 }
1373
1374 out:
1375 spin_unlock_irqrestore(&dwc->lock, flags);
1376
1377 return ret;
1378 }
1379
1380 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1381 int is_selfpowered)
1382 {
1383 struct dwc3 *dwc = gadget_to_dwc(g);
1384 unsigned long flags;
1385
1386 spin_lock_irqsave(&dwc->lock, flags);
1387 dwc->is_selfpowered = !!is_selfpowered;
1388 spin_unlock_irqrestore(&dwc->lock, flags);
1389
1390 return 0;
1391 }
1392
1393 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1394 {
1395 u32 reg;
1396 u32 timeout = 500;
1397
1398 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1399 if (is_on) {
1400 if (dwc->revision <= DWC3_REVISION_187A) {
1401 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1402 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1403 }
1404
1405 if (dwc->revision >= DWC3_REVISION_194A)
1406 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1407 reg |= DWC3_DCTL_RUN_STOP;
1408 dwc->pullups_connected = true;
1409 } else {
1410 reg &= ~DWC3_DCTL_RUN_STOP;
1411 dwc->pullups_connected = false;
1412 }
1413
1414 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1415
1416 do {
1417 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1418 if (is_on) {
1419 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1420 break;
1421 } else {
1422 if (reg & DWC3_DSTS_DEVCTRLHLT)
1423 break;
1424 }
1425 timeout--;
1426 if (!timeout)
1427 return -ETIMEDOUT;
1428 udelay(1);
1429 } while (1);
1430
1431 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1432 dwc->gadget_driver
1433 ? dwc->gadget_driver->function : "no-function",
1434 is_on ? "connect" : "disconnect");
1435
1436 return 0;
1437 }
1438
1439 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1440 {
1441 struct dwc3 *dwc = gadget_to_dwc(g);
1442 unsigned long flags;
1443 int ret;
1444
1445 is_on = !!is_on;
1446
1447 spin_lock_irqsave(&dwc->lock, flags);
1448 ret = dwc3_gadget_run_stop(dwc, is_on);
1449 spin_unlock_irqrestore(&dwc->lock, flags);
1450
1451 return ret;
1452 }
1453
1454 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1455 {
1456 u32 reg;
1457
1458 /* Enable all but Start and End of Frame IRQs */
1459 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1460 DWC3_DEVTEN_EVNTOVERFLOWEN |
1461 DWC3_DEVTEN_CMDCMPLTEN |
1462 DWC3_DEVTEN_ERRTICERREN |
1463 DWC3_DEVTEN_WKUPEVTEN |
1464 DWC3_DEVTEN_ULSTCNGEN |
1465 DWC3_DEVTEN_CONNECTDONEEN |
1466 DWC3_DEVTEN_USBRSTEN |
1467 DWC3_DEVTEN_DISCONNEVTEN);
1468
1469 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1470 }
1471
1472 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1473 {
1474 /* mask all interrupts */
1475 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1476 }
1477
1478 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1479 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1480
1481 static int dwc3_gadget_start(struct usb_gadget *g,
1482 struct usb_gadget_driver *driver)
1483 {
1484 struct dwc3 *dwc = gadget_to_dwc(g);
1485 struct dwc3_ep *dep;
1486 unsigned long flags;
1487 int ret = 0;
1488 int irq;
1489 u32 reg;
1490
1491 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1492 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1493 IRQF_SHARED, "dwc3", dwc);
1494 if (ret) {
1495 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1496 irq, ret);
1497 goto err0;
1498 }
1499
1500 spin_lock_irqsave(&dwc->lock, flags);
1501
1502 if (dwc->gadget_driver) {
1503 dev_err(dwc->dev, "%s is already bound to %s\n",
1504 dwc->gadget.name,
1505 dwc->gadget_driver->driver.name);
1506 ret = -EBUSY;
1507 goto err1;
1508 }
1509
1510 dwc->gadget_driver = driver;
1511
1512 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1513 reg &= ~(DWC3_DCFG_SPEED_MASK);
1514
1515 /**
1516 * WORKAROUND: DWC3 revisions < 2.20a have an issue
1517 * which would cause a metastability state on the Run/Stop
1518 * bit if we try to force the IP to USB2-only mode.
1519 *
1520 * Because of that, we cannot configure the IP to any
1521 * speed other than SuperSpeed.
1522 *
1523 * Refers to:
1524 *
1525 * STAR#9000525659: Clock Domain Crossing on DCTL in
1526 * USB 2.0 Mode
1527 */
1528 if (dwc->revision < DWC3_REVISION_220A) {
1529 reg |= DWC3_DCFG_SUPERSPEED;
1530 } else {
1531 switch (dwc->maximum_speed) {
1532 case USB_SPEED_LOW:
1533 reg |= DWC3_DSTS_LOWSPEED;
1534 break;
1535 case USB_SPEED_FULL:
1536 reg |= DWC3_DSTS_FULLSPEED1;
1537 break;
1538 case USB_SPEED_HIGH:
1539 reg |= DWC3_DSTS_HIGHSPEED;
1540 break;
1541 case USB_SPEED_SUPER: /* FALLTHROUGH */
1542 case USB_SPEED_UNKNOWN: /* FALLTHROUGH */
1543 default:
1544 reg |= DWC3_DSTS_SUPERSPEED;
1545 }
1546 }
1547 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1548
1549 dwc->start_config_issued = false;
1550
1551 /* Start with SuperSpeed Default */
1552 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1553
1554 dep = dwc->eps[0];
1555 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
1556 if (ret) {
1557 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1558 goto err2;
1559 }
1560
1561 dep = dwc->eps[1];
1562 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
1563 if (ret) {
1564 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1565 goto err3;
1566 }
1567
1568 /* begin to receive SETUP packets */
1569 dwc->ep0state = EP0_SETUP_PHASE;
1570 dwc3_ep0_out_start(dwc);
1571
1572 dwc3_gadget_enable_irq(dwc);
1573
1574 spin_unlock_irqrestore(&dwc->lock, flags);
1575
1576 return 0;
1577
1578 err3:
1579 __dwc3_gadget_ep_disable(dwc->eps[0]);
1580
1581 err2:
1582 dwc->gadget_driver = NULL;
1583
1584 err1:
1585 spin_unlock_irqrestore(&dwc->lock, flags);
1586
1587 free_irq(irq, dwc);
1588
1589 err0:
1590 return ret;
1591 }
1592
1593 static int dwc3_gadget_stop(struct usb_gadget *g,
1594 struct usb_gadget_driver *driver)
1595 {
1596 struct dwc3 *dwc = gadget_to_dwc(g);
1597 unsigned long flags;
1598 int irq;
1599
1600 spin_lock_irqsave(&dwc->lock, flags);
1601
1602 dwc3_gadget_disable_irq(dwc);
1603 __dwc3_gadget_ep_disable(dwc->eps[0]);
1604 __dwc3_gadget_ep_disable(dwc->eps[1]);
1605
1606 dwc->gadget_driver = NULL;
1607
1608 spin_unlock_irqrestore(&dwc->lock, flags);
1609
1610 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1611 free_irq(irq, dwc);
1612
1613 return 0;
1614 }
1615
1616 static const struct usb_gadget_ops dwc3_gadget_ops = {
1617 .get_frame = dwc3_gadget_get_frame,
1618 .wakeup = dwc3_gadget_wakeup,
1619 .set_selfpowered = dwc3_gadget_set_selfpowered,
1620 .pullup = dwc3_gadget_pullup,
1621 .udc_start = dwc3_gadget_start,
1622 .udc_stop = dwc3_gadget_stop,
1623 };
1624
1625 /* -------------------------------------------------------------------------- */
1626
1627 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1628 u8 num, u32 direction)
1629 {
1630 struct dwc3_ep *dep;
1631 u8 i;
1632
1633 for (i = 0; i < num; i++) {
1634 u8 epnum = (i << 1) | (!!direction);
1635
1636 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1637 if (!dep) {
1638 dev_err(dwc->dev, "can't allocate endpoint %d\n",
1639 epnum);
1640 return -ENOMEM;
1641 }
1642
1643 dep->dwc = dwc;
1644 dep->number = epnum;
1645 dep->direction = !!direction;
1646 dwc->eps[epnum] = dep;
1647
1648 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1649 (epnum & 1) ? "in" : "out");
1650
1651 dep->endpoint.name = dep->name;
1652
1653 dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
1654
1655 if (epnum == 0 || epnum == 1) {
1656 dep->endpoint.maxpacket = 512;
1657 dep->endpoint.maxburst = 1;
1658 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1659 if (!epnum)
1660 dwc->gadget.ep0 = &dep->endpoint;
1661 } else {
1662 int ret;
1663
1664 dep->endpoint.maxpacket = 1024;
1665 dep->endpoint.max_streams = 15;
1666 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1667 list_add_tail(&dep->endpoint.ep_list,
1668 &dwc->gadget.ep_list);
1669
1670 ret = dwc3_alloc_trb_pool(dep);
1671 if (ret)
1672 return ret;
1673 }
1674
1675 INIT_LIST_HEAD(&dep->request_list);
1676 INIT_LIST_HEAD(&dep->req_queued);
1677 }
1678
1679 return 0;
1680 }
1681
1682 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1683 {
1684 int ret;
1685
1686 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1687
1688 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1689 if (ret < 0) {
1690 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
1691 return ret;
1692 }
1693
1694 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1695 if (ret < 0) {
1696 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
1697 return ret;
1698 }
1699
1700 return 0;
1701 }
1702
1703 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1704 {
1705 struct dwc3_ep *dep;
1706 u8 epnum;
1707
1708 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1709 dep = dwc->eps[epnum];
1710 if (!dep)
1711 continue;
1712 /*
1713 * Physical endpoints 0 and 1 are special; they form the
1714 * bi-directional USB endpoint 0.
1715 *
1716 * For those two physical endpoints, we don't allocate a TRB
1717 * pool nor do we add them the endpoints list. Due to that, we
1718 * shouldn't do these two operations otherwise we would end up
1719 * with all sorts of bugs when removing dwc3.ko.
1720 */
1721 if (epnum != 0 && epnum != 1) {
1722 dwc3_free_trb_pool(dep);
1723 list_del(&dep->endpoint.ep_list);
1724 }
1725
1726 kfree(dep);
1727 }
1728 }
1729
1730 /* -------------------------------------------------------------------------- */
1731
1732 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1733 struct dwc3_request *req, struct dwc3_trb *trb,
1734 const struct dwc3_event_depevt *event, int status)
1735 {
1736 unsigned int count;
1737 unsigned int s_pkt = 0;
1738 unsigned int trb_status;
1739
1740 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1741 /*
1742 * We continue despite the error. There is not much we
1743 * can do. If we don't clean it up we loop forever. If
1744 * we skip the TRB then it gets overwritten after a
1745 * while since we use them in a ring buffer. A BUG()
1746 * would help. Let's hope that if this occurs, someone
1747 * fixes the root cause instead of looking away :)
1748 */
1749 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1750 dep->name, trb);
1751 count = trb->size & DWC3_TRB_SIZE_MASK;
1752
1753 if (dep->direction) {
1754 if (count) {
1755 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1756 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1757 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1758 dep->name);
1759 /*
1760 * If missed isoc occurred and there is
1761 * no request queued then issue END
1762 * TRANSFER, so that core generates
1763 * next xfernotready and we will issue
1764 * a fresh START TRANSFER.
1765 * If there are still queued request
1766 * then wait, do not issue either END
1767 * or UPDATE TRANSFER, just attach next
1768 * request in request_list during
1769 * giveback. If any future queued request
1770 * is successfully transferred then we
1771 * will issue UPDATE TRANSFER for all
1772 * request in the request_list.
1773 */
1774 dep->flags |= DWC3_EP_MISSED_ISOC;
1775 } else {
1776 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1777 dep->name);
1778 status = -ECONNRESET;
1779 }
1780 } else {
1781 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1782 }
1783 } else {
1784 if (count && (event->status & DEPEVT_STATUS_SHORT))
1785 s_pkt = 1;
1786 }
1787
1788 /*
1789 * We assume here we will always receive the entire data block
1790 * which we should receive. Meaning, if we program RX to
1791 * receive 4K but we receive only 2K, we assume that's all we
1792 * should receive and we simply bounce the request back to the
1793 * gadget driver for further processing.
1794 */
1795 req->request.actual += req->request.length - count;
1796 if (s_pkt)
1797 return 1;
1798 if ((event->status & DEPEVT_STATUS_LST) &&
1799 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1800 DWC3_TRB_CTRL_HWO)))
1801 return 1;
1802 if ((event->status & DEPEVT_STATUS_IOC) &&
1803 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1804 return 1;
1805 return 0;
1806 }
1807
1808 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1809 const struct dwc3_event_depevt *event, int status)
1810 {
1811 struct dwc3_request *req;
1812 struct dwc3_trb *trb;
1813 unsigned int slot;
1814 unsigned int i;
1815 int ret;
1816
1817 do {
1818 req = next_request(&dep->req_queued);
1819 if (!req) {
1820 WARN_ON_ONCE(1);
1821 return 1;
1822 }
1823 i = 0;
1824 do {
1825 slot = req->start_slot + i;
1826 if ((slot == DWC3_TRB_NUM - 1) &&
1827 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1828 slot++;
1829 slot %= DWC3_TRB_NUM;
1830 trb = &dep->trb_pool[slot];
1831
1832 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1833 event, status);
1834 if (ret)
1835 break;
1836 } while (++i < req->request.num_mapped_sgs);
1837
1838 dwc3_gadget_giveback(dep, req, status);
1839
1840 if (ret)
1841 break;
1842 } while (1);
1843
1844 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1845 list_empty(&dep->req_queued)) {
1846 if (list_empty(&dep->request_list)) {
1847 /*
1848 * If there is no entry in request list then do
1849 * not issue END TRANSFER now. Just set PENDING
1850 * flag, so that END TRANSFER is issued when an
1851 * entry is added into request list.
1852 */
1853 dep->flags = DWC3_EP_PENDING_REQUEST;
1854 } else {
1855 dwc3_stop_active_transfer(dwc, dep->number);
1856 dep->flags = DWC3_EP_ENABLED;
1857 }
1858 return 1;
1859 }
1860
1861 if ((event->status & DEPEVT_STATUS_IOC) &&
1862 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1863 return 0;
1864 return 1;
1865 }
1866
1867 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1868 struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1869 int start_new)
1870 {
1871 unsigned status = 0;
1872 int clean_busy;
1873
1874 if (event->status & DEPEVT_STATUS_BUSERR)
1875 status = -ECONNRESET;
1876
1877 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1878 if (clean_busy)
1879 dep->flags &= ~DWC3_EP_BUSY;
1880
1881 /*
1882 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1883 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1884 */
1885 if (dwc->revision < DWC3_REVISION_183A) {
1886 u32 reg;
1887 int i;
1888
1889 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1890 dep = dwc->eps[i];
1891
1892 if (!(dep->flags & DWC3_EP_ENABLED))
1893 continue;
1894
1895 if (!list_empty(&dep->req_queued))
1896 return;
1897 }
1898
1899 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1900 reg |= dwc->u1u2;
1901 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1902
1903 dwc->u1u2 = 0;
1904 }
1905 }
1906
1907 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1908 const struct dwc3_event_depevt *event)
1909 {
1910 struct dwc3_ep *dep;
1911 u8 epnum = event->endpoint_number;
1912
1913 dep = dwc->eps[epnum];
1914
1915 if (!(dep->flags & DWC3_EP_ENABLED))
1916 return;
1917
1918 dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1919 dwc3_ep_event_string(event->endpoint_event));
1920
1921 if (epnum == 0 || epnum == 1) {
1922 dwc3_ep0_interrupt(dwc, event);
1923 return;
1924 }
1925
1926 switch (event->endpoint_event) {
1927 case DWC3_DEPEVT_XFERCOMPLETE:
1928 dep->resource_index = 0;
1929
1930 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1931 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1932 dep->name);
1933 return;
1934 }
1935
1936 dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1937 break;
1938 case DWC3_DEPEVT_XFERINPROGRESS:
1939 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1940 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1941 dep->name);
1942 return;
1943 }
1944
1945 dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1946 break;
1947 case DWC3_DEPEVT_XFERNOTREADY:
1948 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1949 dwc3_gadget_start_isoc(dwc, dep, event);
1950 } else {
1951 int ret;
1952
1953 dev_vdbg(dwc->dev, "%s: reason %s\n",
1954 dep->name, event->status &
1955 DEPEVT_STATUS_TRANSFER_ACTIVE
1956 ? "Transfer Active"
1957 : "Transfer Not Active");
1958
1959 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1960 if (!ret || ret == -EBUSY)
1961 return;
1962
1963 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1964 dep->name);
1965 }
1966
1967 break;
1968 case DWC3_DEPEVT_STREAMEVT:
1969 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
1970 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1971 dep->name);
1972 return;
1973 }
1974
1975 switch (event->status) {
1976 case DEPEVT_STREAMEVT_FOUND:
1977 dev_vdbg(dwc->dev, "Stream %d found and started\n",
1978 event->parameters);
1979
1980 break;
1981 case DEPEVT_STREAMEVT_NOTFOUND:
1982 /* FALLTHROUGH */
1983 default:
1984 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1985 }
1986 break;
1987 case DWC3_DEPEVT_RXTXFIFOEVT:
1988 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1989 break;
1990 case DWC3_DEPEVT_EPCMDCMPLT:
1991 dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
1992 break;
1993 }
1994 }
1995
1996 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1997 {
1998 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1999 spin_unlock(&dwc->lock);
2000 dwc->gadget_driver->disconnect(&dwc->gadget);
2001 spin_lock(&dwc->lock);
2002 }
2003 }
2004
2005 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
2006 {
2007 struct dwc3_ep *dep;
2008 struct dwc3_gadget_ep_cmd_params params;
2009 u32 cmd;
2010 int ret;
2011
2012 dep = dwc->eps[epnum];
2013
2014 if (!dep->resource_index)
2015 return;
2016
2017 /*
2018 * NOTICE: We are violating what the Databook says about the
2019 * EndTransfer command. Ideally we would _always_ wait for the
2020 * EndTransfer Command Completion IRQ, but that's causing too
2021 * much trouble synchronizing between us and gadget driver.
2022 *
2023 * We have discussed this with the IP Provider and it was
2024 * suggested to giveback all requests here, but give HW some
2025 * extra time to synchronize with the interconnect. We're using
2026 * an arbitrary 100us delay for that.
2027 *
2028 * Note also that a similar handling was tested by Synopsys
2029 * (thanks a lot Paul) and nothing bad has come out of it.
2030 * In short, what we're doing is:
2031 *
2032 * - Issue EndTransfer WITH CMDIOC bit set
2033 * - Wait 100us
2034 */
2035
2036 cmd = DWC3_DEPCMD_ENDTRANSFER;
2037 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
2038 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2039 memset(&params, 0, sizeof(params));
2040 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2041 WARN_ON_ONCE(ret);
2042 dep->resource_index = 0;
2043 dep->flags &= ~DWC3_EP_BUSY;
2044 udelay(100);
2045 }
2046
2047 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2048 {
2049 u32 epnum;
2050
2051 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2052 struct dwc3_ep *dep;
2053
2054 dep = dwc->eps[epnum];
2055 if (!dep)
2056 continue;
2057
2058 if (!(dep->flags & DWC3_EP_ENABLED))
2059 continue;
2060
2061 dwc3_remove_requests(dwc, dep);
2062 }
2063 }
2064
2065 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2066 {
2067 u32 epnum;
2068
2069 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2070 struct dwc3_ep *dep;
2071 struct dwc3_gadget_ep_cmd_params params;
2072 int ret;
2073
2074 dep = dwc->eps[epnum];
2075 if (!dep)
2076 continue;
2077
2078 if (!(dep->flags & DWC3_EP_STALL))
2079 continue;
2080
2081 dep->flags &= ~DWC3_EP_STALL;
2082
2083 memset(&params, 0, sizeof(params));
2084 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2085 DWC3_DEPCMD_CLEARSTALL, &params);
2086 WARN_ON_ONCE(ret);
2087 }
2088 }
2089
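/*
 * Handle the Disconnect device event: stop initiating U1/U2 link
 * transitions, notify the gadget driver and mark the connection speed
 * as unknown.
 */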
2090 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2091 {
2092 u32 reg;
2093
2094 dev_vdbg(dwc->dev, "%s\n", __func__);
2095
2096 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2097 reg &= ~DWC3_DCTL_INITU1ENA;
2098 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2099
2100 reg &= ~DWC3_DCTL_INITU2ENA;
2101 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2102
2103 dwc3_disconnect_gadget(dwc);
2104 dwc->start_config_issued = false;
2105
2106 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2107 dwc->setup_packet_pending = false;
2108 }
2109
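/*
 * Handle the USB Reset device event: apply the pre-1.88a pending-setup
 * workaround, move the gadget to the Default state, leave any USB2 test
 * mode, stop active transfers, clear stalled endpoints and reset the
 * device address to zero.
 */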
2110 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2111 {
2112 u32 reg;
2113
2114 dev_vdbg(dwc->dev, "%s\n", __func__);
2115
2116 /*
2117 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2118 * would cause a missing Disconnect Event if there's a
2119 * pending Setup Packet in the FIFO.
2120 *
2121 * There's no suggested workaround on the official Bug
2122 * report, which states that "unless the driver/application
2123 * is doing any special handling of a disconnect event,
2124 * there is no functional issue".
2125 *
2126 * Unfortunately, it turns out that we _do_ some special
2127 * handling of a disconnect event, namely complete all
2128 * pending transfers, notify gadget driver of the
2129 * disconnection, and so on.
2130 *
2131 * Our suggested workaround is to follow the Disconnect
2132 * Event steps here, instead, based on a setup_packet_pending
2133 * flag. That flag gets set whenever we have an XferNotReady
2134 * event on EP0 and gets cleared on XferComplete for the
2135 * same endpoint.
2136 *
2137 * Refers to:
2138 *
2139 * STAR#9000466709: RTL: Device : Disconnect event not
2140 * generated if setup packet pending in FIFO
2141 */
2142 if (dwc->revision < DWC3_REVISION_188A) {
2143 if (dwc->setup_packet_pending)
2144 dwc3_gadget_disconnect_interrupt(dwc);
2145 }
2146
2147 /* after reset -> Default State */
2148 usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
2149
2150 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
2151 dwc3_disconnect_gadget(dwc);
2152
2153 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2154 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2155 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2156 dwc->test_mode = false;
2157
2158 dwc3_stop_active_transfers(dwc);
2159 dwc3_clear_stall_all_ep(dwc);
2160 dwc->start_config_issued = false;
2161
2162 /* Reset device address to zero */
2163 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2164 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2165 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2166 }
2167
2168 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2169 {
2170 u32 reg;
2171 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2172
2173 /*
2174 * We only change the clock at SuperSpeed, although it is not clear
2175 * why that is needed; it may become part of the power saving plan.
2176 */
2177
2178 if (speed != DWC3_DSTS_SUPERSPEED)
2179 return;
2180
2181 /*
2182 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2183 * each time on Connect Done.
2184 */
2185 if (!usb30_clock)
2186 return;
2187
2188 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2189 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2190 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2191 }
2192
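/*
 * Handle the Connection Done device event: read the negotiated speed
 * from DSTS, program ep0's maximum packet size accordingly, enable the
 * USB2 LPM capability where applicable and re-enable both directions of
 * the control endpoint.
 */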
2193 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2194 {
2195 struct dwc3_ep *dep;
2196 int ret;
2197 u32 reg;
2198 u8 speed;
2199
2200 dev_vdbg(dwc->dev, "%s\n", __func__);
2201
2202 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2203 speed = reg & DWC3_DSTS_CONNECTSPD;
2204 dwc->speed = speed;
2205
2206 dwc3_update_ram_clk_sel(dwc, speed);
2207
2208 switch (speed) {
2209 case DWC3_DCFG_SUPERSPEED:
2210 /*
2211 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2212 * would cause a missing USB3 Reset event.
2213 *
2214 * In such situations, we should force a USB3 Reset
2215 * event by calling our dwc3_gadget_reset_interrupt()
2216 * routine.
2217 *
2218 * Refers to:
2219 *
2220 * STAR#9000483510: RTL: SS : USB3 reset event may
2221 * not be generated always when the link enters poll
2222 */
2223 if (dwc->revision < DWC3_REVISION_190A)
2224 dwc3_gadget_reset_interrupt(dwc);
2225
2226 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2227 dwc->gadget.ep0->maxpacket = 512;
2228 dwc->gadget.speed = USB_SPEED_SUPER;
2229 break;
2230 case DWC3_DCFG_HIGHSPEED:
2231 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2232 dwc->gadget.ep0->maxpacket = 64;
2233 dwc->gadget.speed = USB_SPEED_HIGH;
2234 break;
2235 case DWC3_DCFG_FULLSPEED2:
2236 case DWC3_DCFG_FULLSPEED1:
2237 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2238 dwc->gadget.ep0->maxpacket = 64;
2239 dwc->gadget.speed = USB_SPEED_FULL;
2240 break;
2241 case DWC3_DCFG_LOWSPEED:
2242 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2243 dwc->gadget.ep0->maxpacket = 8;
2244 dwc->gadget.speed = USB_SPEED_LOW;
2245 break;
2246 }
2247
2248 /* Enable USB2 LPM Capability */
2249
2250 if ((dwc->revision > DWC3_REVISION_194A)
2251 && (speed != DWC3_DCFG_SUPERSPEED)) {
2252 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2253 reg |= DWC3_DCFG_LPM_CAP;
2254 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2255
2256 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2257 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2258
2259 /*
2260 * TODO: This should be configurable. For now using
2261 * maximum allowed HIRD threshold value of 0b1100
2262 */
2263 reg |= DWC3_DCTL_HIRD_THRES(12);
2264
2265 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2266 }
2267
2268 dep = dwc->eps[0];
2269 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
2270 if (ret) {
2271 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2272 return;
2273 }
2274
2275 dep = dwc->eps[1];
2276 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
2277 if (ret) {
2278 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2279 return;
2280 }
2281
2282 /*
2283 * Configure PHY via GUSB3PIPECTLn if required.
2284 *
2285 * Update GTXFIFOSIZn
2286 *
2287 * In both cases reset values should be sufficient.
2288 */
2289 }
2290
2291 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2292 {
2293 dev_vdbg(dwc->dev, "%s\n", __func__);
2294
2295 /*
2296 * TODO take core out of low power mode when that's
2297 * implemented.
2298 */
2299
2300 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
	spin_unlock(&dwc->lock);
	dwc->gadget_driver->resume(&dwc->gadget);
	spin_lock(&dwc->lock);
}
2301 }
2302
2303 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2304 unsigned int evtinfo)
2305 {
2306 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2307 unsigned int pwropt;
2308
2309 /*
2310 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2311 * Hibernation mode enabled which would show up when device detects
2312 * host-initiated U3 exit.
2313 *
2314 * In that case, device will generate a Link State Change Interrupt
2315 * from U3 to RESUME which is only necessary if Hibernation is
2316 * configured in.
2317 *
2318 * There are no functional changes due to such spurious event and we
2319 * just need to ignore it.
2320 *
2321 * Refers to:
2322 *
2323 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2324 * operational mode
2325 */
2326 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2327 if ((dwc->revision < DWC3_REVISION_250A) &&
2328 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2329 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2330 (next == DWC3_LINK_STATE_RESUME)) {
2331 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2332 return;
2333 }
2334 }
2335
2336 /*
2337 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2338 * on the link partner, the USB session might do multiple entries/exits
2339 * of low power states before a transfer takes place.
2340 *
2341 * Due to this problem, we might experience lower throughput. The
2342 * suggested workaround is to disable DCTL[12:9] bits if we're
2343 * transitioning from U1/U2 to U0 and enable those bits again
2344 * after a transfer completes and there are no pending transfers
2345 * on any of the enabled endpoints.
2346 *
2347 * This is the first half of that workaround.
2348 *
2349 * Refers to:
2350 *
2351 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2352 * core send LGO_Ux entering U0
2353 */
2354 if (dwc->revision < DWC3_REVISION_183A) {
2355 if (next == DWC3_LINK_STATE_U0) {
2356 u32 u1u2;
2357 u32 reg;
2358
2359 switch (dwc->link_state) {
2360 case DWC3_LINK_STATE_U1:
2361 case DWC3_LINK_STATE_U2:
2362 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2363 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2364 | DWC3_DCTL_ACCEPTU2ENA
2365 | DWC3_DCTL_INITU1ENA
2366 | DWC3_DCTL_ACCEPTU1ENA);
2367
2368 if (!dwc->u1u2)
2369 dwc->u1u2 = reg & u1u2;
2370
2371 reg &= ~u1u2;
2372
2373 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2374 break;
2375 default:
2376 /* do nothing */
2377 break;
2378 }
2379 }
2380 }
2381
2382 dwc->link_state = next;
2383
2384 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
2385 }
2386
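/* Dispatch device-specific (DEVT) events to their handlers. */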
2387 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2388 const struct dwc3_event_devt *event)
2389 {
2390 switch (event->type) {
2391 case DWC3_DEVICE_EVENT_DISCONNECT:
2392 dwc3_gadget_disconnect_interrupt(dwc);
2393 break;
2394 case DWC3_DEVICE_EVENT_RESET:
2395 dwc3_gadget_reset_interrupt(dwc);
2396 break;
2397 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2398 dwc3_gadget_conndone_interrupt(dwc);
2399 break;
2400 case DWC3_DEVICE_EVENT_WAKEUP:
2401 dwc3_gadget_wakeup_interrupt(dwc);
2402 break;
2403 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2404 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2405 break;
2406 case DWC3_DEVICE_EVENT_EOPF:
2407 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2408 break;
2409 case DWC3_DEVICE_EVENT_SOF:
2410 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2411 break;
2412 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2413 dev_vdbg(dwc->dev, "Erratic Error\n");
2414 break;
2415 case DWC3_DEVICE_EVENT_CMD_CMPL:
2416 dev_vdbg(dwc->dev, "Command Complete\n");
2417 break;
2418 case DWC3_DEVICE_EVENT_OVERFLOW:
2419 dev_vdbg(dwc->dev, "Overflow\n");
2420 break;
2421 default:
2422 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2423 }
2424 }
2425
2426 static void dwc3_process_event_entry(struct dwc3 *dwc,
2427 const union dwc3_event *event)
2428 {
2429 /* Endpoint IRQ, handle it and return early */
2430 if (event->type.is_devspec == 0) {
2431 /* depevt */
2432 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2433 }
2434
2435 switch (event->type.type) {
2436 case DWC3_EVENT_TYPE_DEV:
2437 dwc3_gadget_interrupt(dwc, &event->devt);
2438 break;
2439 /* REVISIT what to do with Carkit and I2C events? */
2440 default:
2441 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2442 }
2443 }
2444
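/*
 * Drain one event buffer: process each 4-byte entry, acknowledging it by
 * writing 4 to GEVNTCOUNT, then clear the PENDING flag and unmask the
 * event interrupt via GEVNTSIZ.
 */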
2445 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2446 {
2447 struct dwc3_event_buffer *evt;
2448 irqreturn_t ret = IRQ_NONE;
2449 int left;
2450 u32 reg;
2451
2452 evt = dwc->ev_buffs[buf];
2453 left = evt->count;
2454
2455 if (!(evt->flags & DWC3_EVENT_PENDING))
2456 return IRQ_NONE;
2457
2458 while (left > 0) {
2459 union dwc3_event event;
2460
2461 event.raw = *(u32 *) (evt->buf + evt->lpos);
2462
2463 dwc3_process_event_entry(dwc, &event);
2464
2465 /*
2466 * FIXME: we wrap around correctly to the next entry as
2467 * almost all entries are 4 bytes in size. There is one
2468 * entry which is 12 bytes: a regular entry followed by
2469 * 8 bytes of data. It is not yet clear how such an entry
2470 * is laid out when it crosses the buffer boundary, so
2471 * that case will be dealt with once we actually need to
2472 * handle it.
2473 */
2474 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2475 left -= 4;
2476
2477 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2478 }
2479
2480 evt->count = 0;
2481 evt->flags &= ~DWC3_EVENT_PENDING;
2482 ret = IRQ_HANDLED;
2483
2484 /* Unmask interrupt */
2485 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2486 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2487 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2488
2489 return ret;
2490 }
2491
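/*
 * Threaded interrupt handler: processes every event buffer with
 * dwc->lock held and local interrupts disabled.
 */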
2492 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2493 {
2494 struct dwc3 *dwc = _dwc;
2495 unsigned long flags;
2496 irqreturn_t ret = IRQ_NONE;
2497 int i;
2498
2499 spin_lock_irqsave(&dwc->lock, flags);
2500
2501 for (i = 0; i < dwc->num_event_buffers; i++)
2502 ret |= dwc3_process_event_buf(dwc, i);
2503
2504 spin_unlock_irqrestore(&dwc->lock, flags);
2505
2506 return ret;
2507 }
2508
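/*
 * Top-half check of one event buffer: if GEVNTCOUNT reports pending
 * events, record the count, mark the buffer as pending, mask the event
 * interrupt via GEVNTSIZ and ask for the threaded handler to run.
 */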
2509 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
2510 {
2511 struct dwc3_event_buffer *evt;
2512 u32 count;
2513 u32 reg;
2514
2515 evt = dwc->ev_buffs[buf];
2516
2517 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2518 count &= DWC3_GEVNTCOUNT_MASK;
2519 if (!count)
2520 return IRQ_NONE;
2521
2522 evt->count = count;
2523 evt->flags |= DWC3_EVENT_PENDING;
2524
2525 /* Mask interrupt */
2526 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2527 reg |= DWC3_GEVNTSIZ_INTMASK;
2528 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2529
2530 return IRQ_WAKE_THREAD;
2531 }
2532
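/*
 * Primary interrupt handler: returns IRQ_WAKE_THREAD if any event buffer
 * has pending events, IRQ_NONE otherwise.
 */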
2533 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2534 {
2535 struct dwc3 *dwc = _dwc;
2536 int i;
2537 irqreturn_t ret = IRQ_NONE;
2538
2539 spin_lock(&dwc->lock);
2540
2541 for (i = 0; i < dwc->num_event_buffers; i++) {
2542 irqreturn_t status;
2543
2544 status = dwc3_check_event_buf(dwc, i);
2545 if (status == IRQ_WAKE_THREAD)
2546 ret = status;
2547 }
2548
2549 spin_unlock(&dwc->lock);
2550
2551 return ret;
2552 }
2553
2554 /**
2555 * dwc3_gadget_init - initializes the gadget side of the controller
2556 * @dwc: pointer to our controller context structure
2557 *
2558 * Returns 0 on success otherwise negative errno.
2559 */
2560 int dwc3_gadget_init(struct dwc3 *dwc)
2561 {
2562 int ret;
2563
2564 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2565 &dwc->ctrl_req_addr, GFP_KERNEL);
2566 if (!dwc->ctrl_req) {
2567 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2568 ret = -ENOMEM;
2569 goto err0;
2570 }
2571
2572 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2573 &dwc->ep0_trb_addr, GFP_KERNEL);
2574 if (!dwc->ep0_trb) {
2575 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2576 ret = -ENOMEM;
2577 goto err1;
2578 }
2579
2580 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2581 if (!dwc->setup_buf) {
2582 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2583 ret = -ENOMEM;
2584 goto err2;
2585 }
2586
2587 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2588 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2589 GFP_KERNEL);
2590 if (!dwc->ep0_bounce) {
2591 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2592 ret = -ENOMEM;
2593 goto err3;
2594 }
2595
2596 dwc->gadget.ops = &dwc3_gadget_ops;
2597 dwc->gadget.max_speed = USB_SPEED_SUPER;
2598 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2599 dwc->gadget.sg_supported = true;
2600 dwc->gadget.name = "dwc3-gadget";
2601
2602 /*
2603 * REVISIT: Here we should clear all pending IRQs to be
2604 * sure we're starting from a well known location.
2605 */
2606
2607 ret = dwc3_gadget_init_endpoints(dwc);
2608 if (ret)
2609 goto err4;
2610
2611 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2612 if (ret) {
2613 dev_err(dwc->dev, "failed to register udc\n");
2614 goto err4;
2615 }
2616
2617 return 0;
2618
2619 err4:
2620 dwc3_gadget_free_endpoints(dwc);
2621 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2622 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2623
2624 err3:
2625 kfree(dwc->setup_buf);
2626
2627 err2:
2628 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2629 dwc->ep0_trb, dwc->ep0_trb_addr);
2630
2631 err1:
2632 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2633 dwc->ctrl_req, dwc->ctrl_req_addr);
2634
2635 err0:
2636 return ret;
2637 }
2638
2639 /* -------------------------------------------------------------------------- */
2640
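/*
 * dwc3_gadget_exit - reverses dwc3_gadget_init: unregisters the UDC,
 * frees the endpoints and releases the DMA-coherent buffers in reverse
 * order of allocation.
 */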
2641 void dwc3_gadget_exit(struct dwc3 *dwc)
2642 {
2643 usb_del_gadget_udc(&dwc->gadget);
2644
2645 dwc3_gadget_free_endpoints(dwc);
2646
2647 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2648 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2649
2650 kfree(dwc->setup_buf);
2651
2652 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2653 dwc->ep0_trb, dwc->ep0_trb_addr);
2654
2655 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2656 dwc->ctrl_req, dwc->ctrl_req_addr);
2657 }
2658
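/* Suspend preparation: disable gadget IRQs while the pullups are connected. */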
2659 int dwc3_gadget_prepare(struct dwc3 *dwc)
2660 {
2661 if (dwc->pullups_connected)
2662 dwc3_gadget_disable_irq(dwc);
2663
2664 return 0;
2665 }
2666
2667 void dwc3_gadget_complete(struct dwc3 *dwc)
2668 {
2669 if (dwc->pullups_connected) {
2670 dwc3_gadget_enable_irq(dwc);
2671 dwc3_gadget_run_stop(dwc, true);
2672 }
2673 }
2674
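/*
 * Disable both directions of the control endpoint and save DCFG so that
 * dwc3_gadget_resume() can restore it.
 */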
2675 int dwc3_gadget_suspend(struct dwc3 *dwc)
2676 {
2677 __dwc3_gadget_ep_disable(dwc->eps[0]);
2678 __dwc3_gadget_ep_disable(dwc->eps[1]);
2679
2680 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2681
2682 return 0;
2683 }
2684
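/*
 * Re-enable both directions of the control endpoint (defaulting ep0 to
 * the SuperSpeed maximum packet size), restart SETUP packet reception and
 * restore the DCFG value saved by dwc3_gadget_suspend().
 */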
2685 int dwc3_gadget_resume(struct dwc3 *dwc)
2686 {
2687 struct dwc3_ep *dep;
2688 int ret;
2689
2690 /* Start with SuperSpeed Default */
2691 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2692
2693 dep = dwc->eps[0];
2694 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2695 if (ret)
2696 goto err0;
2697
2698 dep = dwc->eps[1];
2699 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2700 if (ret)
2701 goto err1;
2702
2703 /* begin to receive SETUP packets */
2704 dwc->ep0state = EP0_SETUP_PHASE;
2705 dwc3_ep0_out_start(dwc);
2706
2707 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2708
2709 return 0;
2710
2711 err1:
2712 __dwc3_gadget_ep_disable(dwc->eps[0]);
2713
2714 err0:
2715 return ret;
2716 }