1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39#include <linux/kernel.h>
40#include <linux/delay.h>
41#include <linux/slab.h>
42#include <linux/spinlock.h>
43#include <linux/platform_device.h>
44#include <linux/pm_runtime.h>
45#include <linux/interrupt.h>
46#include <linux/io.h>
47#include <linux/list.h>
48#include <linux/dma-mapping.h>
49
50#include <linux/usb/ch9.h>
51#include <linux/usb/gadget.h>
52
53#include "core.h"
54#include "gadget.h"
55#include "io.h"
56
57/**
58 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
59 * @dwc: pointer to our context structure
 60 * @mode: the mode to set (J, K, SE0 NAK, Test Packet, Force Enable)
61 *
62 * Caller should take care of locking. This function will
 63 * return 0 on success or -EINVAL if an invalid Test Selector
 64 * is passed.
65 */
66int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
67{
68 u32 reg;
69
70 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
71 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
72
73 switch (mode) {
74 case TEST_J:
75 case TEST_K:
76 case TEST_SE0_NAK:
77 case TEST_PACKET:
78 case TEST_FORCE_EN:
79 reg |= mode << 1;
80 break;
81 default:
82 return -EINVAL;
83 }
84
85 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
86
87 return 0;
88}
89
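/*
 * Illustrative sketch, not part of the driver: a caller that already holds
 * dwc->lock could request the USB2 "Test Packet" mode with the TEST_*
 * selectors from <linux/usb/ch9.h> handled in the switch above, e.g.:
 *
 *	ret = dwc3_gadget_set_test_mode(dwc, TEST_PACKET);
 *	if (ret == -EINVAL)
 *		dev_err(dwc->dev, "invalid test selector\n");
 */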
90/**
91 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
92 * @dwc: pointer to our context structure
93 * @state: the state to put link into
94 *
95 * Caller should take care of locking. This function will
 96 * return 0 on success or -ETIMEDOUT.
97 */
98int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
99{
 100 int retries = 10000;
101 u32 reg;
102
103 /*
104 * Wait until device controller is ready. Only applies to 1.94a and
105 * later RTL.
106 */
107 if (dwc->revision >= DWC3_REVISION_194A) {
108 while (--retries) {
109 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
110 if (reg & DWC3_DSTS_DCNRD)
111 udelay(5);
112 else
113 break;
114 }
115
116 if (retries <= 0)
117 return -ETIMEDOUT;
118 }
119
120 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
121 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
122
123 /* set requested state */
124 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
125 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
126
127 /*
128 * The following code is racy when called from dwc3_gadget_wakeup,
129 * and is not needed, at least on newer versions
130 */
131 if (dwc->revision >= DWC3_REVISION_194A)
132 return 0;
133
 134 /* wait for a change in DSTS */
 135 retries = 10000;
136 while (--retries) {
137 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
138
139 if (DWC3_DSTS_USBLNKST(reg) == state)
140 return 0;
141
 142 udelay(5);
143 }
144
145 dev_vdbg(dwc->dev, "link state change request timed out\n");
146
147 return -ETIMEDOUT;
148}
149
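/*
 * Illustrative sketch, not part of the driver: dwc3_gadget_wakeup() below is
 * the in-tree user of this helper; a minimal caller, again under dwc->lock,
 * might look like:
 *
 *	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
 *	if (ret == -ETIMEDOUT)
 *		dev_err(dwc->dev, "failed to put link in Recovery\n");
 */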
150/**
151 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
152 * @dwc: pointer to our context structure
153 *
 154 * This function will do a best-effort FIFO allocation in order
155 * to improve FIFO usage and throughput, while still allowing
156 * us to enable as many endpoints as possible.
157 *
158 * Keep in mind that this operation will be highly dependent
159 * on the configured size for RAM1 - which contains TxFifo -,
 160 * the number of endpoints enabled in the coreConsultant tool, and
161 * the width of the Master Bus.
162 *
163 * In the ideal world, we would always be able to satisfy the
164 * following equation:
165 *
166 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
167 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
168 *
169 * Unfortunately, due to many variables that's not always the case.
170 */
171int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
172{
173 int last_fifo_depth = 0;
174 int ram1_depth;
175 int fifo_size;
176 int mdwidth;
177 int num;
178
179 if (!dwc->needs_fifo_resize)
180 return 0;
181
182 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
183 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
184
185 /* MDWIDTH is represented in bits, we need it in bytes */
186 mdwidth >>= 3;
187
188 /*
189 * FIXME For now we will only allocate 1 wMaxPacketSize space
190 * for each enabled endpoint, later patches will come to
191 * improve this algorithm so that we better use the internal
192 * FIFO space
193 */
194 for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
195 struct dwc3_ep *dep = dwc->eps[num];
196 int fifo_number = dep->number >> 1;
 197 int mult = 1;
198 int tmp;
199
200 if (!(dep->number & 1))
201 continue;
202
203 if (!(dep->flags & DWC3_EP_ENABLED))
204 continue;
205
206 if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
207 || usb_endpoint_xfer_isoc(dep->endpoint.desc))
208 mult = 3;
209
210 /*
211 * REVISIT: the following assumes we will always have enough
212 * space available on the FIFO RAM for all possible use cases.
213 * Make sure that's true somehow and change FIFO allocation
214 * accordingly.
215 *
216 * If we have Bulk or Isochronous endpoints, we want
217 * them to be able to be very, very fast. So we're giving
218 * those endpoints a fifo_size which is enough for 3 full
219 * packets
220 */
221 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
457e84b6
FB
222 tmp += mdwidth;
223
224 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
 225
226 fifo_size |= (last_fifo_depth << 16);
227
228 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
229 dep->name, last_fifo_depth, fifo_size & 0xffff);
230
231 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
232 fifo_size);
233
234 last_fifo_depth += (fifo_size & 0xffff);
235 }
236
237 return 0;
238}
239
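/*
 * Worked example of the allocation above, with illustrative numbers only:
 * on a 64-bit master bus, mdwidth = 64 >> 3 = 8 bytes. A bulk endpoint with
 * a 1024-byte wMaxPacketSize gets mult = 3, so
 *
 *	tmp       = 3 * (1024 + 8) + 8 = 3104 bytes
 *	fifo_size = DIV_ROUND_UP(3104, 8) = 388 MDWIDTH words
 *
 * which is then packed together with the running FIFO address
 * (last_fifo_depth << 16) before being written to GTXFIFOSIZ.
 */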
240void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
241 int status)
242{
243 struct dwc3 *dwc = dep->dwc;
 244 int i;
245
246 if (req->queued) {
247 i = 0;
248 do {
 249 dep->busy_slot++;
250 /*
251 * Skip LINK TRB. We can't use req->trb and check for
252 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
253 * just completed (not the LINK TRB).
254 */
255 if (((dep->busy_slot & DWC3_TRB_MASK) ==
 256 DWC3_TRB_NUM - 1) &&
 257 usb_endpoint_xfer_isoc(dep->endpoint.desc))
258 dep->busy_slot++;
 259 } while (++i < req->request.num_mapped_sgs);
 260 req->queued = false;
261 }
262 list_del(&req->list);
 263 req->trb = NULL;
264
265 if (req->request.status == -EINPROGRESS)
266 req->request.status = status;
267
268 if (dwc->ep0_bounced && dep->number == 0)
269 dwc->ep0_bounced = false;
270 else
271 usb_gadget_unmap_request(&dwc->gadget, &req->request,
272 req->direction);
273
274 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
275 req, dep->name, req->request.actual,
276 req->request.length, status);
277
278 spin_unlock(&dwc->lock);
 279 req->request.complete(&dep->endpoint, &req->request);
280 spin_lock(&dwc->lock);
281}
282
283static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
284{
285 switch (cmd) {
286 case DWC3_DEPCMD_DEPSTARTCFG:
287 return "Start New Configuration";
288 case DWC3_DEPCMD_ENDTRANSFER:
289 return "End Transfer";
290 case DWC3_DEPCMD_UPDATETRANSFER:
291 return "Update Transfer";
292 case DWC3_DEPCMD_STARTTRANSFER:
293 return "Start Transfer";
294 case DWC3_DEPCMD_CLEARSTALL:
295 return "Clear Stall";
296 case DWC3_DEPCMD_SETSTALL:
297 return "Set Stall";
298 case DWC3_DEPCMD_GETEPSTATE:
299 return "Get Endpoint State";
300 case DWC3_DEPCMD_SETTRANSFRESOURCE:
301 return "Set Endpoint Transfer Resource";
302 case DWC3_DEPCMD_SETEPCONFIG:
303 return "Set Endpoint Configuration";
304 default:
305 return "UNKNOWN command";
306 }
307}
308
309int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
310{
311 u32 timeout = 500;
312 u32 reg;
313
314 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
315 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
316
317 do {
318 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
319 if (!(reg & DWC3_DGCMD_CMDACT)) {
320 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
321 DWC3_DGCMD_STATUS(reg));
322 return 0;
323 }
324
325 /*
326 * We can't sleep here, because it's also called from
327 * interrupt context.
328 */
329 timeout--;
330 if (!timeout)
331 return -ETIMEDOUT;
332 udelay(1);
333 } while (1);
334}
335
336int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
337 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
338{
339 struct dwc3_ep *dep = dwc->eps[ep];
 340 u32 timeout = 500;
341 u32 reg;
342
343 dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
344 dep->name,
345 dwc3_gadget_ep_cmd_string(cmd), params->param0,
346 params->param1, params->param2);
 347
348 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
349 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
350 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
351
352 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
353 do {
354 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
355 if (!(reg & DWC3_DEPCMD_CMDACT)) {
356 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
357 DWC3_DEPCMD_STATUS(reg));
358 return 0;
359 }
360
361 /*
362 * We can't sleep here, because it is also called from
363 * interrupt context.
364 */
365 timeout--;
366 if (!timeout)
367 return -ETIMEDOUT;
368
 369 udelay(1);
370 } while (1);
371}
372
373static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
 374 struct dwc3_trb *trb)
 375{
 376 u32 offset = (char *) trb - (char *) dep->trb_pool;
377
378 return dep->trb_pool_dma + offset;
379}
380
381static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
382{
383 struct dwc3 *dwc = dep->dwc;
384
385 if (dep->trb_pool)
386 return 0;
387
388 if (dep->number == 0 || dep->number == 1)
389 return 0;
390
391 dep->trb_pool = dma_alloc_coherent(dwc->dev,
392 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
393 &dep->trb_pool_dma, GFP_KERNEL);
394 if (!dep->trb_pool) {
395 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
396 dep->name);
397 return -ENOMEM;
398 }
399
400 return 0;
401}
402
403static void dwc3_free_trb_pool(struct dwc3_ep *dep)
404{
405 struct dwc3 *dwc = dep->dwc;
406
407 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
408 dep->trb_pool, dep->trb_pool_dma);
409
410 dep->trb_pool = NULL;
411 dep->trb_pool_dma = 0;
412}
413
414static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
415{
416 struct dwc3_gadget_ep_cmd_params params;
417 u32 cmd;
418
419 memset(&params, 0x00, sizeof(params));
420
421 if (dep->number != 1) {
422 cmd = DWC3_DEPCMD_DEPSTARTCFG;
423 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
424 if (dep->number > 1) {
425 if (dwc->start_config_issued)
426 return 0;
427 dwc->start_config_issued = true;
 428 cmd |= DWC3_DEPCMD_PARAM(2);
 429 }
430
431 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
432 }
433
434 return 0;
435}
436
437static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
 438 const struct usb_endpoint_descriptor *desc,
439 const struct usb_ss_ep_comp_descriptor *comp_desc,
440 bool ignore)
441{
442 struct dwc3_gadget_ep_cmd_params params;
443
444 memset(&params, 0x00, sizeof(params));
445
 446 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
447 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
448
449 /* Burst size is only needed in SuperSpeed mode */
450 if (dwc->gadget.speed == USB_SPEED_SUPER) {
451 u32 burst = dep->endpoint.maxburst - 1;
452
453 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
454 }
 455
456 if (ignore)
457 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
458
459 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
460 | DWC3_DEPCFG_XFER_NOT_READY_EN;
 461
 462 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
463 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
464 | DWC3_DEPCFG_STREAM_EVENT_EN;
465 dep->stream_capable = true;
466 }
467
 468 if (usb_endpoint_xfer_isoc(desc))
 469 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
470
471 /*
472 * We are doing 1:1 mapping for endpoints, meaning
473 * Physical Endpoints 2 maps to Logical Endpoint 2 and
474 * so on. We consider the direction bit as part of the physical
475 * endpoint number. So USB endpoint 0x81 is 0x03.
476 */
 477 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
478
479 /*
480 * We must use the lower 16 TX FIFOs even though
481 * HW might have more
482 */
483 if (dep->direction)
 484 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
485
486 if (desc->bInterval) {
 487 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
488 dep->interval = 1 << (desc->bInterval - 1);
489 }
490
491 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
492 DWC3_DEPCMD_SETEPCONFIG, &params);
493}
494
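/*
 * Example of the 1:1 endpoint mapping described above (illustrative only):
 * USB endpoint 0x81 (EP1 IN) has dep->number = (1 << 1) | 1 = 3, so this
 * command programs DWC3_DEPCFG_EP_NUMBER(3) and, because it is an IN
 * endpoint, DWC3_DEPCFG_FIFO_NUMBER(3 >> 1), i.e. TX FIFO 1.
 */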
495static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
496{
497 struct dwc3_gadget_ep_cmd_params params;
498
499 memset(&params, 0x00, sizeof(params));
500
 501 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
502
503 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
504 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
505}
506
507/**
508 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
509 * @dep: endpoint to be initialized
510 * @desc: USB Endpoint Descriptor
511 *
512 * Caller should take care of locking
513 */
514static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
 515 const struct usb_endpoint_descriptor *desc,
516 const struct usb_ss_ep_comp_descriptor *comp_desc,
517 bool ignore)
518{
519 struct dwc3 *dwc = dep->dwc;
520 u32 reg;
521 int ret = -ENOMEM;
522
523 if (!(dep->flags & DWC3_EP_ENABLED)) {
524 ret = dwc3_gadget_start_config(dwc, dep);
525 if (ret)
526 return ret;
527 }
528
 529 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
530 if (ret)
531 return ret;
532
533 if (!(dep->flags & DWC3_EP_ENABLED)) {
534 struct dwc3_trb *trb_st_hw;
535 struct dwc3_trb *trb_link;
536
537 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
538 if (ret)
539 return ret;
540
 541 dep->endpoint.desc = desc;
 542 dep->comp_desc = comp_desc;
543 dep->type = usb_endpoint_type(desc);
544 dep->flags |= DWC3_EP_ENABLED;
545
546 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
547 reg |= DWC3_DALEPENA_EP(dep->number);
548 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
549
550 if (!usb_endpoint_xfer_isoc(desc))
551 return 0;
552
553 memset(&trb_link, 0, sizeof(trb_link));
554
 555 /* Link TRB for ISOC. The HWO bit is never reset */
556 trb_st_hw = &dep->trb_pool[0];
557
 558 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
 559
560 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
561 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
562 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
563 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
564 }
565
566 return 0;
567}
568
569static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
570static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
571{
572 struct dwc3_request *req;
573
 574 if (!list_empty(&dep->req_queued)) {
575 dwc3_stop_active_transfer(dwc, dep->number);
576
 577 /* - giveback all requests to gadget driver */
578 while (!list_empty(&dep->req_queued)) {
579 req = next_request(&dep->req_queued);
580
581 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
582 }
583 }
584
585 while (!list_empty(&dep->request_list)) {
586 req = next_request(&dep->request_list);
587
 588 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
 589 }
590}
591
592/**
593 * __dwc3_gadget_ep_disable - Disables a HW endpoint
594 * @dep: the endpoint to disable
595 *
 596 * This function also removes requests which are currently processed by the
597 * hardware and those which are not yet scheduled.
598 * Caller should take care of locking.
 599 */
600static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
601{
602 struct dwc3 *dwc = dep->dwc;
603 u32 reg;
604
 605 dwc3_remove_requests(dwc, dep);
606
607 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
608 reg &= ~DWC3_DALEPENA_EP(dep->number);
609 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
610
 611 dep->stream_capable = false;
 612 dep->endpoint.desc = NULL;
 613 dep->comp_desc = NULL;
 614 dep->type = 0;
 615 dep->flags = 0;
616
617 return 0;
618}
619
620/* -------------------------------------------------------------------------- */
621
622static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
623 const struct usb_endpoint_descriptor *desc)
624{
625 return -EINVAL;
626}
627
628static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
629{
630 return -EINVAL;
631}
632
633/* -------------------------------------------------------------------------- */
634
635static int dwc3_gadget_ep_enable(struct usb_ep *ep,
636 const struct usb_endpoint_descriptor *desc)
637{
638 struct dwc3_ep *dep;
639 struct dwc3 *dwc;
640 unsigned long flags;
641 int ret;
642
643 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
644 pr_debug("dwc3: invalid parameters\n");
645 return -EINVAL;
646 }
647
648 if (!desc->wMaxPacketSize) {
649 pr_debug("dwc3: missing wMaxPacketSize\n");
650 return -EINVAL;
651 }
652
653 dep = to_dwc3_ep(ep);
654 dwc = dep->dwc;
655
656 if (dep->flags & DWC3_EP_ENABLED) {
657 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
658 dep->name);
659 return 0;
660 }
661
662 switch (usb_endpoint_type(desc)) {
663 case USB_ENDPOINT_XFER_CONTROL:
 664 strlcat(dep->name, "-control", sizeof(dep->name));
665 break;
666 case USB_ENDPOINT_XFER_ISOC:
 667 strlcat(dep->name, "-isoc", sizeof(dep->name));
668 break;
669 case USB_ENDPOINT_XFER_BULK:
 670 strlcat(dep->name, "-bulk", sizeof(dep->name));
671 break;
672 case USB_ENDPOINT_XFER_INT:
 673 strlcat(dep->name, "-int", sizeof(dep->name));
674 break;
675 default:
676 dev_err(dwc->dev, "invalid endpoint transfer type\n");
677 }
678
679 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
680
681 spin_lock_irqsave(&dwc->lock, flags);
 682 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
683 spin_unlock_irqrestore(&dwc->lock, flags);
684
685 return ret;
686}
687
688static int dwc3_gadget_ep_disable(struct usb_ep *ep)
689{
690 struct dwc3_ep *dep;
691 struct dwc3 *dwc;
692 unsigned long flags;
693 int ret;
694
695 if (!ep) {
696 pr_debug("dwc3: invalid parameters\n");
697 return -EINVAL;
698 }
699
700 dep = to_dwc3_ep(ep);
701 dwc = dep->dwc;
702
703 if (!(dep->flags & DWC3_EP_ENABLED)) {
704 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
705 dep->name);
706 return 0;
707 }
708
709 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
710 dep->number >> 1,
711 (dep->number & 1) ? "in" : "out");
712
713 spin_lock_irqsave(&dwc->lock, flags);
714 ret = __dwc3_gadget_ep_disable(dep);
715 spin_unlock_irqrestore(&dwc->lock, flags);
716
717 return ret;
718}
719
720static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
721 gfp_t gfp_flags)
722{
723 struct dwc3_request *req;
724 struct dwc3_ep *dep = to_dwc3_ep(ep);
725 struct dwc3 *dwc = dep->dwc;
726
727 req = kzalloc(sizeof(*req), gfp_flags);
728 if (!req) {
729 dev_err(dwc->dev, "not enough memory\n");
730 return NULL;
731 }
732
733 req->epnum = dep->number;
734 req->dep = dep;
735
736 return &req->request;
737}
738
739static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
740 struct usb_request *request)
741{
742 struct dwc3_request *req = to_dwc3_request(request);
743
744 kfree(req);
745}
746
747/**
748 * dwc3_prepare_one_trb - setup one TRB from one request
749 * @dep: endpoint for which this request is prepared
750 * @req: dwc3_request pointer
751 */
 752 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 753 struct dwc3_request *req, dma_addr_t dma,
 754 unsigned length, unsigned last, unsigned chain, unsigned node)
 755{
 756 struct dwc3 *dwc = dep->dwc;
 757 struct dwc3_trb *trb;
 758
759 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
760 dep->name, req, (unsigned long long) dma,
761 length, last ? " last" : "",
762 chain ? " chain" : "");
763
 764 /* Skip the LINK-TRB on ISOC */
 765 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
 766 usb_endpoint_xfer_isoc(dep->endpoint.desc))
767 dep->free_slot++;
768
769 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
 770
771 if (!req->trb) {
772 dwc3_gadget_move_request_queued(req);
773 req->trb = trb;
774 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
 775 req->start_slot = dep->free_slot & DWC3_TRB_MASK;
 776 }
 777
778 dep->free_slot++;
779
780 trb->size = DWC3_TRB_SIZE_LENGTH(length);
781 trb->bpl = lower_32_bits(dma);
782 trb->bph = upper_32_bits(dma);
 783
 784 switch (usb_endpoint_type(dep->endpoint.desc)) {
 785 case USB_ENDPOINT_XFER_CONTROL:
 786 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
787 break;
788
789 case USB_ENDPOINT_XFER_ISOC:
790 if (!node)
791 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
792 else
793 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
 794
 795 if (!req->request.no_interrupt && !chain)
 796 trb->ctrl |= DWC3_TRB_CTRL_IOC;
797 break;
798
799 case USB_ENDPOINT_XFER_BULK:
800 case USB_ENDPOINT_XFER_INT:
 801 trb->ctrl = DWC3_TRBCTL_NORMAL;
802 break;
803 default:
804 /*
805 * This is only possible with faulty memory because we
806 * checked it already :)
807 */
808 BUG();
809 }
810
 811 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
812 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
813 trb->ctrl |= DWC3_TRB_CTRL_CSP;
814 } else if (last) {
815 trb->ctrl |= DWC3_TRB_CTRL_LST;
 816 }
 817
818 if (chain)
819 trb->ctrl |= DWC3_TRB_CTRL_CHN;
820
 821 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
 822 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
 823
 824 trb->ctrl |= DWC3_TRB_CTRL_HWO;
825}
826
827/*
828 * dwc3_prepare_trbs - setup TRBs from requests
829 * @dep: endpoint for which requests are being prepared
830 * @starting: true if the endpoint is idle and no requests are queued.
831 *
832 * The function goes through the requests list and sets up TRBs for the
833 * transfers. The function returns once there are no more TRBs available or
834 * it runs out of requests.
 835 */
 836 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
 837{
 838 struct dwc3_request *req, *n;
 839 u32 trbs_left;
 840 u32 max;
 841 unsigned int last_one = 0;
842
843 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
844
845 /* the first request must not be queued */
846 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
 847
 848 /* Can't wrap around on a non-isoc EP since there's no link TRB */
 849 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
850 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
851 if (trbs_left > max)
852 trbs_left = max;
853 }
854
 855 /*
 856 * If busy and free slots are equal, then it is either full or empty. If we are
857 * starting to process requests then we are empty. Otherwise we are
858 * full and don't do anything
859 */
860 if (!trbs_left) {
861 if (!starting)
68e823e2 862 return;
863 trbs_left = DWC3_TRB_NUM;
864 /*
865 * In case we start from scratch, we queue the ISOC requests
866 * starting from slot 1. This is done because we use ring
867 * buffer and have no LST bit to stop us. Instead, we place
 868 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
869 * after the first request so we start at slot 1 and have
870 * 7 requests proceed before we hit the first IOC.
871 * Other transfer types don't use the ring buffer and are
872 * processed from the first TRB until the last one. Since we
873 * don't wrap around we have to start at the beginning.
874 */
 875 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
876 dep->busy_slot = 1;
877 dep->free_slot = 1;
878 } else {
879 dep->busy_slot = 0;
880 dep->free_slot = 0;
881 }
882 }
883
884 /* The last TRB is a link TRB, not used for xfer */
 885 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
 886 return;
887
888 list_for_each_entry_safe(req, n, &dep->request_list, list) {
889 unsigned length;
890 dma_addr_t dma;
 891 last_one = false;
 892
893 if (req->request.num_mapped_sgs > 0) {
894 struct usb_request *request = &req->request;
895 struct scatterlist *sg = request->sg;
896 struct scatterlist *s;
897 int i;
 898
899 for_each_sg(sg, s, request->num_mapped_sgs, i) {
900 unsigned chain = true;
 901
902 length = sg_dma_len(s);
903 dma = sg_dma_address(s);
 904
905 if (i == (request->num_mapped_sgs - 1) ||
906 sg_is_last(s)) {
907 if (list_is_last(&req->list,
908 &dep->request_list))
909 last_one = true;
910 chain = false;
911 }
 912
913 trbs_left--;
914 if (!trbs_left)
915 last_one = true;
 916
917 if (last_one)
918 chain = false;
 919
 920 dwc3_prepare_one_trb(dep, req, dma, length,
 921 last_one, chain, i);
 922
923 if (last_one)
924 break;
925 }
 926 } else {
927 dma = req->request.dma;
928 length = req->request.length;
929 trbs_left--;
 930
931 if (!trbs_left)
932 last_one = 1;
 933
934 /* Is this the last request? */
935 if (list_is_last(&req->list, &dep->request_list))
936 last_one = 1;
 937
 938 dwc3_prepare_one_trb(dep, req, dma, length,
 939 last_one, false, 0);
 940
941 if (last_one)
942 break;
 943 }
 944 }
945}
946
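/*
 * Illustrative example of the ring accounting above, assuming the usual
 * DWC3_TRB_NUM of 32 (DWC3_TRB_MASK of 0x1f): with busy_slot = 2 and
 * free_slot = 5, trbs_left = (2 - 5) & 0x1f = 29, so up to 29 TRBs can be
 * prepared before the hardware catches up; on ISOC endpoints the last slot
 * is additionally skipped because it holds the link TRB.
 */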
947static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
948 int start_new)
949{
950 struct dwc3_gadget_ep_cmd_params params;
951 struct dwc3_request *req;
952 struct dwc3 *dwc = dep->dwc;
953 int ret;
954 u32 cmd;
955
956 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
957 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
958 return -EBUSY;
959 }
960 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
961
962 /*
963 * If we are getting here after a short-out-packet we don't enqueue any
964 * new requests as we try to set the IOC bit only on the last request.
965 */
966 if (start_new) {
967 if (list_empty(&dep->req_queued))
968 dwc3_prepare_trbs(dep, start_new);
969
970 /* req points to the first request which will be sent */
971 req = next_request(&dep->req_queued);
972 } else {
973 dwc3_prepare_trbs(dep, start_new);
974
 975 /*
 976 * req points to the first request where HWO changed from 0 to 1
 977 */
 978 req = next_request(&dep->req_queued);
979 }
980 if (!req) {
981 dep->flags |= DWC3_EP_PENDING_REQUEST;
982 return 0;
983 }
984
985 memset(&params, 0, sizeof(params));
 986
987 if (start_new) {
988 params.param0 = upper_32_bits(req->trb_dma);
989 params.param1 = lower_32_bits(req->trb_dma);
 990 cmd = DWC3_DEPCMD_STARTTRANSFER;
 991 } else {
 992 cmd = DWC3_DEPCMD_UPDATETRANSFER;
 993 }
994
995 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
996 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
997 if (ret < 0) {
998 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
999
1000 /*
1001 * FIXME we need to iterate over the list of requests
1002 * here and stop, unmap, free and del each of the linked
 1003 * requests instead of what we do now.
 1004 */
1005 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1006 req->direction);
1007 list_del(&req->list);
1008 return ret;
1009 }
1010
1011 dep->flags |= DWC3_EP_BUSY;
 1012
 1013 if (start_new) {
 1014 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
 1015 dep->number);
 1016 WARN_ON_ONCE(!dep->resource_index);
 1017 }
 1018
1019 return 0;
1020}
1021
1022static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1023 struct dwc3_ep *dep, u32 cur_uf)
1024{
1025 u32 uf;
1026
1027 if (list_empty(&dep->request_list)) {
 1028 dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests.\n",
1029 dep->name);
 1030 dep->flags |= DWC3_EP_PENDING_REQUEST;
1031 return;
1032 }
1033
1034 /* 4 micro frames in the future */
1035 uf = cur_uf + dep->interval * 4;
1036
1037 __dwc3_gadget_kick_transfer(dep, uf, 1);
1038}
1039
1040static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1041 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1042{
1043 u32 cur_uf, mask;
1044
1045 mask = ~(dep->interval - 1);
1046 cur_uf = event->parameters & mask;
1047
1048 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1049}
1050
1051static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1052{
1053 struct dwc3 *dwc = dep->dwc;
1054 int ret;
1055
1056 req->request.actual = 0;
1057 req->request.status = -EINPROGRESS;
1058 req->direction = dep->direction;
1059 req->epnum = dep->number;
1060
1061 /*
1062 * We only add to our list of requests now and
1063 * start consuming the list once we get XferNotReady
1064 * IRQ.
1065 *
1066 * That way, we avoid doing anything that we don't need
1067 * to do now and defer it until the point we receive a
1068 * particular token from the Host side.
1069 *
1070 * This will also avoid Host cancelling URBs due to too
 1071 * many NAKs.
 1072 */
1073 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1074 dep->direction);
1075 if (ret)
1076 return ret;
1077
1078 list_add_tail(&req->list, &dep->request_list);
1079
1080 /*
 1081 * There are a few special cases:
 1082 *
1083 * 1. XferNotReady with empty list of requests. We need to kick the
1084 * transfer here in that situation, otherwise we will be NAKing
1085 * forever. If we get XferNotReady before gadget driver has a
1086 * chance to queue a request, we will ACK the IRQ but won't be
1087 * able to receive the data until the next request is queued.
1088 * The following code is handling exactly that.
 1089 *
1090 */
1091 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1092 /*
1093 * If xfernotready is already elapsed and it is a case
1094 * of isoc transfer, then issue END TRANSFER, so that
1095 * you can receive xfernotready again and can have
1096 * notion of current microframe.
1097 */
1098 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1099 if (list_empty(&dep->req_queued)) {
1100 dwc3_stop_active_transfer(dwc, dep->number);
1101 dep->flags = DWC3_EP_ENABLED;
1102 }
1103 return 0;
1104 }
1105
 1106 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
 1107 if (ret && ret != -EBUSY)
1108 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1109 dep->name);
 1110 return ret;
 1111 }
 1112
1113 /*
1114 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1115 * kick the transfer here after queuing a request, otherwise the
1116 * core may not see the modified TRB(s).
1117 */
1118 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1119 (dep->flags & DWC3_EP_BUSY) &&
1120 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1121 WARN_ON_ONCE(!dep->resource_index);
1122 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
 1123 false);
 1124 if (ret && ret != -EBUSY)
1125 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1126 dep->name);
 1127 return ret;
 1128 }
1129
1130 return 0;
1131}
1132
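/*
 * Illustrative sketch, not part of the driver: a gadget driver on top of
 * this UDC queues transfers through the standard gadget API, and the request
 * only sits on request_list here until XferNotReady arrives. my_complete is
 * a hypothetical completion handler:
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *
 *	req->buf      = buf;
 *	req->length   = len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 */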
1133static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1134 gfp_t gfp_flags)
1135{
1136 struct dwc3_request *req = to_dwc3_request(request);
1137 struct dwc3_ep *dep = to_dwc3_ep(ep);
1138 struct dwc3 *dwc = dep->dwc;
1139
1140 unsigned long flags;
1141
1142 int ret;
1143
 1144 if (!dep->endpoint.desc) {
1145 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1146 request, ep->name);
1147 return -ESHUTDOWN;
1148 }
1149
 1150 dev_vdbg(dwc->dev, "queueing request %p to %s length %d\n",
1151 request, ep->name, request->length);
1152
1153 spin_lock_irqsave(&dwc->lock, flags);
1154 ret = __dwc3_gadget_ep_queue(dep, req);
1155 spin_unlock_irqrestore(&dwc->lock, flags);
1156
1157 return ret;
1158}
1159
1160static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1161 struct usb_request *request)
1162{
1163 struct dwc3_request *req = to_dwc3_request(request);
1164 struct dwc3_request *r = NULL;
1165
1166 struct dwc3_ep *dep = to_dwc3_ep(ep);
1167 struct dwc3 *dwc = dep->dwc;
1168
1169 unsigned long flags;
1170 int ret = 0;
1171
1172 spin_lock_irqsave(&dwc->lock, flags);
1173
1174 list_for_each_entry(r, &dep->request_list, list) {
1175 if (r == req)
1176 break;
1177 }
1178
1179 if (r != req) {
1180 list_for_each_entry(r, &dep->req_queued, list) {
1181 if (r == req)
1182 break;
1183 }
1184 if (r == req) {
1185 /* wait until it is processed */
1186 dwc3_stop_active_transfer(dwc, dep->number);
 1187 goto out1;
1188 }
1189 dev_err(dwc->dev, "request %p was not queued to %s\n",
1190 request, ep->name);
1191 ret = -EINVAL;
1192 goto out0;
1193 }
1194
 1195out1:
1196 /* giveback the request */
1197 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1198
1199out0:
1200 spin_unlock_irqrestore(&dwc->lock, flags);
1201
1202 return ret;
1203}
1204
1205int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1206{
1207 struct dwc3_gadget_ep_cmd_params params;
1208 struct dwc3 *dwc = dep->dwc;
1209 int ret;
1210
1211 memset(&params, 0x00, sizeof(params));
1212
1213 if (value) {
1214 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1215 DWC3_DEPCMD_SETSTALL, &params);
1216 if (ret)
1217 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1218 value ? "set" : "clear",
1219 dep->name);
1220 else
1221 dep->flags |= DWC3_EP_STALL;
1222 } else {
1223 if (dep->flags & DWC3_EP_WEDGE)
1224 return 0;
1225
1226 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1227 DWC3_DEPCMD_CLEARSTALL, &params);
1228 if (ret)
1229 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1230 value ? "set" : "clear",
1231 dep->name);
1232 else
1233 dep->flags &= ~DWC3_EP_STALL;
1234 }
 1235
1236 return ret;
1237}
1238
1239static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1240{
1241 struct dwc3_ep *dep = to_dwc3_ep(ep);
1242 struct dwc3 *dwc = dep->dwc;
1243
1244 unsigned long flags;
1245
1246 int ret;
1247
1248 spin_lock_irqsave(&dwc->lock, flags);
1249
 1250 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1251 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1252 ret = -EINVAL;
1253 goto out;
1254 }
1255
1256 ret = __dwc3_gadget_ep_set_halt(dep, value);
1257out:
1258 spin_unlock_irqrestore(&dwc->lock, flags);
1259
1260 return ret;
1261}
1262
1263static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1264{
1265 struct dwc3_ep *dep = to_dwc3_ep(ep);
1266 struct dwc3 *dwc = dep->dwc;
1267 unsigned long flags;
 1268
 1269 spin_lock_irqsave(&dwc->lock, flags);
 1270 dep->flags |= DWC3_EP_WEDGE;
 1271 spin_unlock_irqrestore(&dwc->lock, flags);
 1272
1273 if (dep->number == 0 || dep->number == 1)
1274 return dwc3_gadget_ep0_set_halt(ep, 1);
1275 else
1276 return dwc3_gadget_ep_set_halt(ep, 1);
1277}
1278
1279/* -------------------------------------------------------------------------- */
1280
1281static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1282 .bLength = USB_DT_ENDPOINT_SIZE,
1283 .bDescriptorType = USB_DT_ENDPOINT,
1284 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1285};
1286
1287static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1288 .enable = dwc3_gadget_ep0_enable,
1289 .disable = dwc3_gadget_ep0_disable,
1290 .alloc_request = dwc3_gadget_ep_alloc_request,
1291 .free_request = dwc3_gadget_ep_free_request,
1292 .queue = dwc3_gadget_ep0_queue,
1293 .dequeue = dwc3_gadget_ep_dequeue,
 1294 .set_halt = dwc3_gadget_ep0_set_halt,
1295 .set_wedge = dwc3_gadget_ep_set_wedge,
1296};
1297
1298static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1299 .enable = dwc3_gadget_ep_enable,
1300 .disable = dwc3_gadget_ep_disable,
1301 .alloc_request = dwc3_gadget_ep_alloc_request,
1302 .free_request = dwc3_gadget_ep_free_request,
1303 .queue = dwc3_gadget_ep_queue,
1304 .dequeue = dwc3_gadget_ep_dequeue,
1305 .set_halt = dwc3_gadget_ep_set_halt,
1306 .set_wedge = dwc3_gadget_ep_set_wedge,
1307};
1308
1309/* -------------------------------------------------------------------------- */
1310
1311static int dwc3_gadget_get_frame(struct usb_gadget *g)
1312{
1313 struct dwc3 *dwc = gadget_to_dwc(g);
1314 u32 reg;
1315
1316 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1317 return DWC3_DSTS_SOFFN(reg);
1318}
1319
1320static int dwc3_gadget_wakeup(struct usb_gadget *g)
1321{
1322 struct dwc3 *dwc = gadget_to_dwc(g);
1323
1324 unsigned long timeout;
1325 unsigned long flags;
1326
1327 u32 reg;
1328
1329 int ret = 0;
1330
1331 u8 link_state;
1332 u8 speed;
1333
1334 spin_lock_irqsave(&dwc->lock, flags);
1335
1336 /*
1337 * According to the Databook Remote wakeup request should
1338 * be issued only when the device is in early suspend state.
1339 *
1340 * We can check that via USB Link State bits in DSTS register.
1341 */
1342 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1343
1344 speed = reg & DWC3_DSTS_CONNECTSPD;
1345 if (speed == DWC3_DSTS_SUPERSPEED) {
1346 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1347 ret = -EINVAL;
1348 goto out;
1349 }
1350
1351 link_state = DWC3_DSTS_USBLNKST(reg);
1352
1353 switch (link_state) {
1354 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1355 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1356 break;
1357 default:
1358 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1359 link_state);
1360 ret = -EINVAL;
1361 goto out;
1362 }
1363
1364 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1365 if (ret < 0) {
1366 dev_err(dwc->dev, "failed to put link in Recovery\n");
1367 goto out;
1368 }
 1369
1370 /* Recent versions do this automatically */
1371 if (dwc->revision < DWC3_REVISION_194A) {
1372 /* write zeroes to Link Change Request */
 1373 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1374 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1375 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1376 }
 1377
 1378 /* poll until Link State changes to ON */
1379 timeout = jiffies + msecs_to_jiffies(100);
1380
 1381 while (!time_after(jiffies, timeout)) {
1382 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1383
1384 /* in HS, means ON */
1385 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1386 break;
1387 }
1388
1389 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1390 dev_err(dwc->dev, "failed to send remote wakeup\n");
1391 ret = -EINVAL;
1392 }
1393
1394out:
1395 spin_unlock_irqrestore(&dwc->lock, flags);
1396
1397 return ret;
1398}
1399
1400static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1401 int is_selfpowered)
1402{
1403 struct dwc3 *dwc = gadget_to_dwc(g);
 1404 unsigned long flags;
 1405
 1406 spin_lock_irqsave(&dwc->lock, flags);
 1407 dwc->is_selfpowered = !!is_selfpowered;
 1408 spin_unlock_irqrestore(&dwc->lock, flags);
1409
1410 return 0;
1411}
1412
 1413 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1414{
1415 u32 reg;
 1416 u32 timeout = 500;
1417
1418 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 1419 if (is_on) {
1420 if (dwc->revision <= DWC3_REVISION_187A) {
1421 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1422 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1423 }
1424
1425 if (dwc->revision >= DWC3_REVISION_194A)
1426 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1427 reg |= DWC3_DCTL_RUN_STOP;
 1428 dwc->pullups_connected = true;
 1429 } else {
 1430 reg &= ~DWC3_DCTL_RUN_STOP;
 1431 dwc->pullups_connected = false;
 1432 }
1433
1434 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1435
1436 do {
1437 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1438 if (is_on) {
1439 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1440 break;
1441 } else {
1442 if (reg & DWC3_DSTS_DEVCTRLHLT)
1443 break;
1444 }
1445 timeout--;
1446 if (!timeout)
 1447 return -ETIMEDOUT;
 1448 udelay(1);
1449 } while (1);
1450
1451 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1452 dwc->gadget_driver
1453 ? dwc->gadget_driver->function : "no-function",
1454 is_on ? "connect" : "disconnect");
1455
1456 return 0;
1457}
1458
1459static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1460{
1461 struct dwc3 *dwc = gadget_to_dwc(g);
1462 unsigned long flags;
 1463 int ret;
1464
1465 is_on = !!is_on;
1466
1467 spin_lock_irqsave(&dwc->lock, flags);
 1468 ret = dwc3_gadget_run_stop(dwc, is_on);
1469 spin_unlock_irqrestore(&dwc->lock, flags);
1470
 1471 return ret;
1472}
1473
1474static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1475{
1476 u32 reg;
1477
1478 /* Enable all but Start and End of Frame IRQs */
1479 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1480 DWC3_DEVTEN_EVNTOVERFLOWEN |
1481 DWC3_DEVTEN_CMDCMPLTEN |
1482 DWC3_DEVTEN_ERRTICERREN |
1483 DWC3_DEVTEN_WKUPEVTEN |
1484 DWC3_DEVTEN_ULSTCNGEN |
1485 DWC3_DEVTEN_CONNECTDONEEN |
1486 DWC3_DEVTEN_USBRSTEN |
1487 DWC3_DEVTEN_DISCONNEVTEN);
1488
1489 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1490}
1491
1492static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1493{
1494 /* mask all interrupts */
1495 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1496}
1497
1498static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
 1499 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
 1500
1501static int dwc3_gadget_start(struct usb_gadget *g,
1502 struct usb_gadget_driver *driver)
1503{
1504 struct dwc3 *dwc = gadget_to_dwc(g);
1505 struct dwc3_ep *dep;
1506 unsigned long flags;
1507 int ret = 0;
 1508 int irq;
1509 u32 reg;
1510
1511 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1512 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1513 IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
1514 if (ret) {
1515 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1516 irq, ret);
1517 goto err0;
1518 }
1519
1520 spin_lock_irqsave(&dwc->lock, flags);
1521
1522 if (dwc->gadget_driver) {
1523 dev_err(dwc->dev, "%s is already bound to %s\n",
1524 dwc->gadget.name,
1525 dwc->gadget_driver->driver.name);
1526 ret = -EBUSY;
 1527 goto err1;
1528 }
1529
1530 dwc->gadget_driver = driver;
 1531
1532 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1533 reg &= ~(DWC3_DCFG_SPEED_MASK);
1534
1535 /**
1536 * WORKAROUND: DWC3 revision < 2.20a have an issue
1537 * which would cause metastability state on Run/Stop
1538 * bit if we try to force the IP to USB2-only mode.
1539 *
1540 * Because of that, we cannot configure the IP to any
1541 * speed other than the SuperSpeed
1542 *
1543 * Refers to:
1544 *
1545 * STAR#9000525659: Clock Domain Crossing on DCTL in
1546 * USB 2.0 Mode
1547 */
1548 if (dwc->revision < DWC3_REVISION_220A)
1549 reg |= DWC3_DCFG_SUPERSPEED;
1550 else
1551 reg |= dwc->maximum_speed;
1552 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1553
1554 dwc->start_config_issued = false;
1555
1556 /* Start with SuperSpeed Default */
1557 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1558
1559 dep = dwc->eps[0];
 1560 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
1561 if (ret) {
1562 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
 1563 goto err2;
1564 }
1565
1566 dep = dwc->eps[1];
 1567 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
1568 if (ret) {
1569 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
 1570 goto err3;
1571 }
1572
1573 /* begin to receive SETUP packets */
 1574 dwc->ep0state = EP0_SETUP_PHASE;
1575 dwc3_ep0_out_start(dwc);
1576
1577 dwc3_gadget_enable_irq(dwc);
1578
1579 spin_unlock_irqrestore(&dwc->lock, flags);
1580
1581 return 0;
1582
 1583err3:
1584 __dwc3_gadget_ep_disable(dwc->eps[0]);
1585
 1586err2:
 1587 dwc->gadget_driver = NULL;
1588
1589err1:
1590 spin_unlock_irqrestore(&dwc->lock, flags);
1591
1592 free_irq(irq, dwc);
1593
1594err0:
1595 return ret;
1596}
1597
1598static int dwc3_gadget_stop(struct usb_gadget *g,
1599 struct usb_gadget_driver *driver)
1600{
1601 struct dwc3 *dwc = gadget_to_dwc(g);
1602 unsigned long flags;
 1603 int irq;
1604
1605 spin_lock_irqsave(&dwc->lock, flags);
1606
 1607 dwc3_gadget_disable_irq(dwc);
1608 __dwc3_gadget_ep_disable(dwc->eps[0]);
1609 __dwc3_gadget_ep_disable(dwc->eps[1]);
1610
1611 dwc->gadget_driver = NULL;
1612
1613 spin_unlock_irqrestore(&dwc->lock, flags);
1614
1615 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1616 free_irq(irq, dwc);
1617
1618 return 0;
1619}
 1620
1621static const struct usb_gadget_ops dwc3_gadget_ops = {
1622 .get_frame = dwc3_gadget_get_frame,
1623 .wakeup = dwc3_gadget_wakeup,
1624 .set_selfpowered = dwc3_gadget_set_selfpowered,
1625 .pullup = dwc3_gadget_pullup,
1626 .udc_start = dwc3_gadget_start,
1627 .udc_stop = dwc3_gadget_stop,
1628};
1629
1630/* -------------------------------------------------------------------------- */
1631
1632static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1633 u8 num, u32 direction)
1634{
1635 struct dwc3_ep *dep;
 1636 u8 i;
 1637
1638 for (i = 0; i < num; i++) {
1639 u8 epnum = (i << 1) | (!!direction);
 1640
1641 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1642 if (!dep) {
1643 dev_err(dwc->dev, "can't allocate endpoint %d\n",
1644 epnum);
1645 return -ENOMEM;
1646 }
1647
1648 dep->dwc = dwc;
1649 dep->number = epnum;
1650 dwc->eps[epnum] = dep;
1651
1652 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1653 (epnum & 1) ? "in" : "out");
 1654
1655 dep->endpoint.name = dep->name;
1656 dep->direction = (epnum & 1);
1657
1658 if (epnum == 0 || epnum == 1) {
1659 dep->endpoint.maxpacket = 512;
 1660 dep->endpoint.maxburst = 1;
1661 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1662 if (!epnum)
1663 dwc->gadget.ep0 = &dep->endpoint;
1664 } else {
1665 int ret;
1666
1667 dep->endpoint.maxpacket = 1024;
 1668 dep->endpoint.max_streams = 15;
1669 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1670 list_add_tail(&dep->endpoint.ep_list,
1671 &dwc->gadget.ep_list);
1672
1673 ret = dwc3_alloc_trb_pool(dep);
 1674 if (ret)
 1675 return ret;
 1676 }
 1677
1678 INIT_LIST_HEAD(&dep->request_list);
1679 INIT_LIST_HEAD(&dep->req_queued);
1680 }
1681
1682 return 0;
1683}
1684
1685static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1686{
1687 int ret;
1688
1689 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1690
1691 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1692 if (ret < 0) {
1693 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
1694 return ret;
1695 }
1696
1697 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1698 if (ret < 0) {
1699 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
1700 return ret;
1701 }
1702
1703 return 0;
1704}
1705
1706static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1707{
1708 struct dwc3_ep *dep;
1709 u8 epnum;
1710
1711 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1712 dep = dwc->eps[epnum];
1713 if (!dep)
1714 continue;
1715 /*
1716 * Physical endpoints 0 and 1 are special; they form the
1717 * bi-directional USB endpoint 0.
1718 *
1719 * For those two physical endpoints, we don't allocate a TRB
 1720 * pool nor do we add them to the endpoints list. Due to that, we
1721 * shouldn't do these two operations otherwise we would end up
1722 * with all sorts of bugs when removing dwc3.ko.
1723 */
1724 if (epnum != 0 && epnum != 1) {
1725 dwc3_free_trb_pool(dep);
 1726 list_del(&dep->endpoint.ep_list);
 1727 }
1728
1729 kfree(dep);
1730 }
1731}
1732
 1733/* -------------------------------------------------------------------------- */
 1734
1735static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1736 struct dwc3_request *req, struct dwc3_trb *trb,
1737 const struct dwc3_event_depevt *event, int status)
1738{
1739 unsigned int count;
1740 unsigned int s_pkt = 0;
 1741 unsigned int trb_status;
 1742
1743 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1744 /*
1745 * We continue despite the error. There is not much we
1746 * can do. If we don't clean it up we loop forever. If
1747 * we skip the TRB then it gets overwritten after a
1748 * while since we use them in a ring buffer. A BUG()
1749 * would help. Lets hope that if this occurs, someone
1750 * fixes the root cause instead of looking away :)
1751 */
1752 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1753 dep->name, trb);
1754 count = trb->size & DWC3_TRB_SIZE_MASK;
1755
1756 if (dep->direction) {
1757 if (count) {
1758 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1759 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1760 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1761 dep->name);
1762 /*
1763 * If missed isoc occurred and there is
1764 * no request queued then issue END
1765 * TRANSFER, so that core generates
1766 * next xfernotready and we will issue
1767 * a fresh START TRANSFER.
1768 * If there are still queued request
1769 * then wait, do not issue either END
1770 * or UPDATE TRANSFER, just attach next
1771 * request in request_list during
1772 * giveback.If any future queued request
1773 * is successfully transferred then we
1774 * will issue UPDATE TRANSFER for all
1775 * request in the request_list.
1776 */
1777 dep->flags |= DWC3_EP_MISSED_ISOC;
1778 } else {
1779 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1780 dep->name);
1781 status = -ECONNRESET;
1782 }
1783 } else {
1784 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1785 }
1786 } else {
1787 if (count && (event->status & DEPEVT_STATUS_SHORT))
1788 s_pkt = 1;
1789 }
1790
1791 /*
1792 * We assume here we will always receive the entire data block
1793 * which we should receive. Meaning, if we program RX to
1794 * receive 4K but we receive only 2K, we assume that's all we
1795 * should receive and we simply bounce the request back to the
1796 * gadget driver for further processing.
1797 */
1798 req->request.actual += req->request.length - count;
1799 if (s_pkt)
1800 return 1;
1801 if ((event->status & DEPEVT_STATUS_LST) &&
1802 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1803 DWC3_TRB_CTRL_HWO)))
1804 return 1;
1805 if ((event->status & DEPEVT_STATUS_IOC) &&
1806 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1807 return 1;
1808 return 0;
1809}
1810
1811static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1812 const struct dwc3_event_depevt *event, int status)
1813{
1814 struct dwc3_request *req;
1815 struct dwc3_trb *trb;
1816 unsigned int slot;
1817 unsigned int i;
1818 int ret;
1819
1820 do {
1821 req = next_request(&dep->req_queued);
1822 if (!req) {
1823 WARN_ON_ONCE(1);
1824 return 1;
1825 }
1826 i = 0;
1827 do {
1828 slot = req->start_slot + i;
1829 if ((slot == DWC3_TRB_NUM - 1) &&
1830 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1831 slot++;
1832 slot %= DWC3_TRB_NUM;
1833 trb = &dep->trb_pool[slot];
 1834
1835 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1836 event, status);
1837 if (ret)
1838 break;
 1839 } while (++i < req->request.num_mapped_sgs);
 1840
 1841 dwc3_gadget_giveback(dep, req, status);
1842
1843 if (ret)
1844 break;
1845 } while (1);
1846
1847 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1848 list_empty(&dep->req_queued)) {
1849 if (list_empty(&dep->request_list)) {
1850 /*
1851 * If there is no entry in request list then do
1852 * not issue END TRANSFER now. Just set PENDING
1853 * flag, so that END TRANSFER is issued when an
1854 * entry is added into request list.
1855 */
1856 dep->flags = DWC3_EP_PENDING_REQUEST;
1857 } else {
1858 dwc3_stop_active_transfer(dwc, dep->number);
1859 dep->flags = DWC3_EP_ENABLED;
1860 }
1861 return 1;
1862 }
1863
1864 if ((event->status & DEPEVT_STATUS_IOC) &&
1865 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1866 return 0;
1867 return 1;
1868}
1869
1870static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1871 struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1872 int start_new)
1873{
1874 unsigned status = 0;
1875 int clean_busy;
1876
1877 if (event->status & DEPEVT_STATUS_BUSERR)
1878 status = -ECONNRESET;
1879
1d046793 1880 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
c2df85ca 1881 if (clean_busy)
72246da4 1882 dep->flags &= ~DWC3_EP_BUSY;
fae2b904
FB
1883
1884 /*
1885 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1886 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1887 */
1888 if (dwc->revision < DWC3_REVISION_183A) {
1889 u32 reg;
1890 int i;
1891
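/*
 * Only re-enable U1/U2 once no enabled endpoint has requests still
 * queued; otherwise leave the workaround in place and try again on
 * the next transfer completion.
 */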
1892 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
348e026f 1893 dep = dwc->eps[i];
fae2b904
FB
1894
1895 if (!(dep->flags & DWC3_EP_ENABLED))
1896 continue;
1897
1898 if (!list_empty(&dep->req_queued))
1899 return;
1900 }
1901
1902 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1903 reg |= dwc->u1u2;
1904 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1905
1906 dwc->u1u2 = 0;
1907 }
72246da4
FB
1908}
1909
72246da4
FB
1910static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1911 const struct dwc3_event_depevt *event)
1912{
1913 struct dwc3_ep *dep;
1914 u8 epnum = event->endpoint_number;
1915
1916 dep = dwc->eps[epnum];
1917
3336abb5
FB
1918 if (!(dep->flags & DWC3_EP_ENABLED))
1919 return;
1920
72246da4
FB
1921 dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1922 dwc3_ep_event_string(event->endpoint_event));
1923
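/* Physical endpoints 0 and 1 are ep0 OUT/IN; hand them to the ep0 handler. */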
1924 if (epnum == 0 || epnum == 1) {
1925 dwc3_ep0_interrupt(dwc, event);
1926 return;
1927 }
1928
1929 switch (event->endpoint_event) {
1930 case DWC3_DEPEVT_XFERCOMPLETE:
b4996a86 1931 dep->resource_index = 0;
c2df85ca 1932
16e78db7 1933 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1934 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1935 dep->name);
1936 return;
1937 }
1938
1939 dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1940 break;
1941 case DWC3_DEPEVT_XFERINPROGRESS:
16e78db7 1942 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1943 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1944 dep->name);
1945 return;
1946 }
1947
1948 dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1949 break;
1950 case DWC3_DEPEVT_XFERNOTREADY:
16e78db7 1951 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1952 dwc3_gadget_start_isoc(dwc, dep, event);
1953 } else {
1954 int ret;
1955
1956 dev_vdbg(dwc->dev, "%s: reason %s\n",
40aa41fb
FB
1957 dep->name, event->status &
1958 DEPEVT_STATUS_TRANSFER_ACTIVE
72246da4
FB
1959 ? "Transfer Active"
1960 : "Transfer Not Active");
1961
1962 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1963 if (!ret || ret == -EBUSY)
1964 return;
1965
1966 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1967 dep->name);
1968 }
1969
879631aa
FB
1970 break;
1971 case DWC3_DEPEVT_STREAMEVT:
16e78db7 1972 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
879631aa
FB
1973 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1974 dep->name);
1975 return;
1976 }
1977
1978 switch (event->status) {
1979 case DEPEVT_STREAMEVT_FOUND:
1980 dev_vdbg(dwc->dev, "Stream %d found and started\n",
1981 event->parameters);
1982
1983 break;
1984 case DEPEVT_STREAMEVT_NOTFOUND:
1985 /* FALLTHROUGH */
1986 default:
1987 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1988 }
72246da4
FB
1989 break;
1990 case DWC3_DEPEVT_RXTXFIFOEVT:
1991 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1992 break;
72246da4 1993 case DWC3_DEPEVT_EPCMDCMPLT:
ea53b882 1994 dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
72246da4
FB
1995 break;
1996 }
1997}
1998
1999static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2000{
2001 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2002 spin_unlock(&dwc->lock);
2003 dwc->gadget_driver->disconnect(&dwc->gadget);
2004 spin_lock(&dwc->lock);
2005 }
2006}
2007
2008static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
2009{
2010 struct dwc3_ep *dep;
2011 struct dwc3_gadget_ep_cmd_params params;
2012 u32 cmd;
2013 int ret;
2014
2015 dep = dwc->eps[epnum];
2016
b4996a86 2017 if (!dep->resource_index)
3daf74d7
PA
2018 return;
2019
57911504
PA
2020 /*
2021 * NOTICE: We are violating what the Databook says about the
2022 * EndTransfer command. Ideally we would _always_ wait for the
2023 * EndTransfer Command Completion IRQ, but that's causing too
2024 * much trouble synchronizing between us and the gadget driver.
2025 *
2026 * We have discussed this with the IP Provider and it was
2027 * suggested to giveback all requests here, but give HW some
2028 * extra time to synchronize with the interconnect. We're using
2029 * an arbitrary 100us delay for that.
2030 *
2031 * Note also that a similar handling was tested by Synopsys
2032 * (thanks a lot Paul) and nothing bad has come out of it.
2033 * In short, what we're doing is:
2034 *
2035 * - Issue EndTransfer WITH CMDIOC bit set
2036 * - Wait 100us
2037 */
2038
3daf74d7
PA
2039 cmd = DWC3_DEPCMD_ENDTRANSFER;
2040 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
b4996a86 2041 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
3daf74d7
PA
2042 memset(&params, 0, sizeof(params));
2043 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2044 WARN_ON_ONCE(ret);
b4996a86 2045 dep->resource_index = 0;
041d81f4 2046 dep->flags &= ~DWC3_EP_BUSY;
57911504 2047 udelay(100);
72246da4
FB
2048}
2049
2050static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2051{
2052 u32 epnum;
2053
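/* Physical endpoints 0 and 1 (ep0) are skipped; ep0 is handled separately. */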
2054 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2055 struct dwc3_ep *dep;
2056
2057 dep = dwc->eps[epnum];
6a1e3ef4
FB
2058 if (!dep)
2059 continue;
2060
72246da4
FB
2061 if (!(dep->flags & DWC3_EP_ENABLED))
2062 continue;
2063
624407f9 2064 dwc3_remove_requests(dwc, dep);
72246da4
FB
2065 }
2066}
2067
2068static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2069{
2070 u32 epnum;
2071
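/* Issue a Clear Stall command on every endpoint currently flagged as stalled. */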
2072 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2073 struct dwc3_ep *dep;
2074 struct dwc3_gadget_ep_cmd_params params;
2075 int ret;
2076
2077 dep = dwc->eps[epnum];
6a1e3ef4
FB
2078 if (!dep)
2079 continue;
72246da4
FB
2080
2081 if (!(dep->flags & DWC3_EP_STALL))
2082 continue;
2083
2084 dep->flags &= ~DWC3_EP_STALL;
2085
2086 memset(&params, 0, sizeof(params));
2087 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2088 DWC3_DEPCMD_CLEARSTALL, &params);
2089 WARN_ON_ONCE(ret);
2090 }
2091}
2092
2093static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2094{
c4430a26
FB
2095 int reg;
2096
72246da4 2097 dev_vdbg(dwc->dev, "%s\n", __func__);
72246da4
FB
2098
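/* Stop initiating U1/U2 transitions before notifying the gadget driver. */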
2099 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2100 reg &= ~DWC3_DCTL_INITU1ENA;
2101 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2102
2103 reg &= ~DWC3_DCTL_INITU2ENA;
2104 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
72246da4 2105
72246da4 2106 dwc3_disconnect_gadget(dwc);
b23c8439 2107 dwc->start_config_issued = false;
72246da4
FB
2108
2109 dwc->gadget.speed = USB_SPEED_UNKNOWN;
df62df56 2110 dwc->setup_packet_pending = false;
72246da4
FB
2111}
2112
d7a46a8d 2113static void dwc3_gadget_usb3_phy_suspend(struct dwc3 *dwc, int suspend)
72246da4
FB
2114{
2115 u32 reg;
2116
2117 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
2118
d7a46a8d 2119 if (suspend)
72246da4 2120 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
d7a46a8d
PZ
2121 else
2122 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
72246da4
FB
2123
2124 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
2125}
2126
d7a46a8d 2127static void dwc3_gadget_usb2_phy_suspend(struct dwc3 *dwc, int suspend)
72246da4
FB
2128{
2129 u32 reg;
2130
2131 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
2132
d7a46a8d 2133 if (suspend)
72246da4 2134 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
d7a46a8d
PZ
2135 else
2136 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
72246da4
FB
2137
2138 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
2139}
2140
2141static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2142{
2143 u32 reg;
2144
2145 dev_vdbg(dwc->dev, "%s\n", __func__);
2146
df62df56
FB
2147 /*
2148 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2149 * would cause a missing Disconnect Event if there's a
2150 * pending Setup Packet in the FIFO.
2151 *
2152 * There's no suggested workaround on the official Bug
2153 * report, which states that "unless the driver/application
2154 * is doing any special handling of a disconnect event,
2155 * there is no functional issue".
2156 *
2157 * Unfortunately, it turns out that we _do_ some special
2158 * handling of a disconnect event, namely complete all
2159 * pending transfers, notify gadget driver of the
2160 * disconnection, and so on.
2161 *
2162 * Our suggested workaround is to follow the Disconnect
2163 * Event steps here, instead, based on a setup_packet_pending
2164 * flag. That flag gets set whenever we have an XferNotReady
2165 * event on EP0 and gets cleared on XferComplete for the
2166 * same endpoint.
2167 *
2168 * Refers to:
2169 *
2170 * STAR#9000466709: RTL: Device : Disconnect event not
2171 * generated if setup packet pending in FIFO
2172 */
2173 if (dwc->revision < DWC3_REVISION_188A) {
2174 if (dwc->setup_packet_pending)
2175 dwc3_gadget_disconnect_interrupt(dwc);
2176 }
2177
961906ed 2178 /* after reset -> Default State */
14cd592f 2179 usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
961906ed 2180
802fde98
PZ
2181 /* Recent versions support automatic phy suspend and don't need this */
2182 if (dwc->revision < DWC3_REVISION_194A) {
2183 /* Resume PHYs */
2184 dwc3_gadget_usb2_phy_suspend(dwc, false);
2185 dwc3_gadget_usb3_phy_suspend(dwc, false);
2186 }
72246da4
FB
2187
2188 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
2189 dwc3_disconnect_gadget(dwc);
2190
2191 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2192 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2193 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3b637367 2194 dwc->test_mode = false;
72246da4
FB
2195
2196 dwc3_stop_active_transfers(dwc);
2197 dwc3_clear_stall_all_ep(dwc);
b23c8439 2198 dwc->start_config_issued = false;
72246da4
FB
2199
2200 /* Reset device address to zero */
2201 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2202 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2203 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
72246da4
FB
2204}
2205
2206static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2207{
2208 u32 reg;
2209 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2210
2211 /*
2212 * We change the clock only at SuperSpeed, though it is not clear why
2213 * this is needed. It may become part of the power saving plan.
2214 */
2215
2216 if (speed != DWC3_DSTS_SUPERSPEED)
2217 return;
2218
2219 /*
2220 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2221 * each time on Connect Done.
2222 */
2223 if (!usb30_clock)
2224 return;
2225
2226 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2227 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2228 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2229}
2230
d7a46a8d 2231static void dwc3_gadget_phy_suspend(struct dwc3 *dwc, u8 speed)
72246da4
FB
2232{
2233 switch (speed) {
2234 case USB_SPEED_SUPER:
d7a46a8d 2235 dwc3_gadget_usb2_phy_suspend(dwc, true);
72246da4
FB
2236 break;
2237 case USB_SPEED_HIGH:
2238 case USB_SPEED_FULL:
2239 case USB_SPEED_LOW:
d7a46a8d 2240 dwc3_gadget_usb3_phy_suspend(dwc, true);
72246da4
FB
2241 break;
2242 }
2243}
2244
2245static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2246{
72246da4
FB
2247 struct dwc3_ep *dep;
2248 int ret;
2249 u32 reg;
2250 u8 speed;
2251
2252 dev_vdbg(dwc->dev, "%s\n", __func__);
2253
72246da4
FB
2254 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2255 speed = reg & DWC3_DSTS_CONNECTSPD;
2256 dwc->speed = speed;
2257
2258 dwc3_update_ram_clk_sel(dwc, speed);
2259
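/* Program ep0's maxpacket and the gadget speed from the negotiated link speed. */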
2260 switch (speed) {
2261 case DWC3_DCFG_SUPERSPEED:
05870c5b
FB
2262 /*
2263 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2264 * would cause a missing USB3 Reset event.
2265 *
2266 * In such situations, we should force a USB3 Reset
2267 * event by calling our dwc3_gadget_reset_interrupt()
2268 * routine.
2269 *
2270 * Refers to:
2271 *
2272 * STAR#9000483510: RTL: SS : USB3 reset event may
2273 * not be generated always when the link enters poll
2274 */
2275 if (dwc->revision < DWC3_REVISION_190A)
2276 dwc3_gadget_reset_interrupt(dwc);
2277
72246da4
FB
2278 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2279 dwc->gadget.ep0->maxpacket = 512;
2280 dwc->gadget.speed = USB_SPEED_SUPER;
2281 break;
2282 case DWC3_DCFG_HIGHSPEED:
2283 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2284 dwc->gadget.ep0->maxpacket = 64;
2285 dwc->gadget.speed = USB_SPEED_HIGH;
2286 break;
2287 case DWC3_DCFG_FULLSPEED2:
2288 case DWC3_DCFG_FULLSPEED1:
2289 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2290 dwc->gadget.ep0->maxpacket = 64;
2291 dwc->gadget.speed = USB_SPEED_FULL;
2292 break;
2293 case DWC3_DCFG_LOWSPEED:
2294 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2295 dwc->gadget.ep0->maxpacket = 8;
2296 dwc->gadget.speed = USB_SPEED_LOW;
2297 break;
2298 }
2299
2b758350
PA
2300 /* Enable USB2 LPM Capability */
2301
2302 if ((dwc->revision > DWC3_REVISION_194A)
2303 && (speed != DWC3_DCFG_SUPERSPEED)) {
2304 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2305 reg |= DWC3_DCFG_LPM_CAP;
2306 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2307
2308 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2309 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2310
1a947746
FB
2311 /*
2312 * TODO: This should be configurable. For now using
2313 * maximum allowed HIRD threshold value of 0b1100
2314 */
2315 reg |= DWC3_DCTL_HIRD_THRES(12);
2b758350
PA
2316
2317 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2318 }
2319
802fde98
PZ
2320 /* Recent versions support automatic phy suspend and don't need this */
2321 if (dwc->revision < DWC3_REVISION_194A) {
2322 /* Suspend unneeded PHY */
2323 dwc3_gadget_phy_suspend(dwc, dwc->gadget.speed);
2324 }
72246da4
FB
2325
2326 dep = dwc->eps[0];
4b345c9a 2327 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
72246da4
FB
2328 if (ret) {
2329 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2330 return;
2331 }
2332
2333 dep = dwc->eps[1];
4b345c9a 2334 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
72246da4
FB
2335 if (ret) {
2336 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2337 return;
2338 }
2339
2340 /*
2341 * Configure PHY via GUSB3PIPECTLn if required.
2342 *
2343 * Update GTXFIFOSIZn
2344 *
2345 * In both cases reset values should be sufficient.
2346 */
2347}
2348
2349static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2350{
2351 dev_vdbg(dwc->dev, "%s\n", __func__);
2352
2353 /*
2354 * TODO take core out of low power mode when that's
2355 * implemented.
2356 */
2357
2358 dwc->gadget_driver->resume(&dwc->gadget);
2359}
2360
2361static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2362 unsigned int evtinfo)
2363{
fae2b904 2364 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
0b0cc1cd
FB
2365 unsigned int pwropt;
2366
2367 /*
2368 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2369 * Hibernation mode enabled which would show up when device detects
2370 * host-initiated U3 exit.
2371 *
2372 * In that case, device will generate a Link State Change Interrupt
2373 * from U3 to RESUME which is only necessary if Hibernation is
2374 * configured in.
2375 *
2376 * There are no functional changes due to such spurious event and we
2377 * just need to ignore it.
2378 *
2379 * Refers to:
2380 *
2381 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2382 * operational mode
2383 */
2384 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2385 if ((dwc->revision < DWC3_REVISION_250A) &&
2386 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2387 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2388 (next == DWC3_LINK_STATE_RESUME)) {
2389 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2390 return;
2391 }
2392 }
fae2b904
FB
2393
2394 /*
2395 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
2396 * on the link partner, the USB session might do multiple entry/exit
2397 * of low power states before a transfer takes place.
2398 *
2399 * Due to this problem, we might experience lower throughput. The
2400 * suggested workaround is to disable DCTL[12:9] bits if we're
2401 * transitioning from U1/U2 to U0 and enable those bits again
2402 * after a transfer completes and there are no pending transfers
2403 * on any of the enabled endpoints.
2404 *
2405 * This is the first half of that workaround.
2406 *
2407 * Refers to:
2408 *
2409 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2410 * core send LGO_Ux entering U0
2411 */
2412 if (dwc->revision < DWC3_REVISION_183A) {
2413 if (next == DWC3_LINK_STATE_U0) {
2414 u32 u1u2;
2415 u32 reg;
2416
2417 switch (dwc->link_state) {
2418 case DWC3_LINK_STATE_U1:
2419 case DWC3_LINK_STATE_U2:
2420 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2421 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2422 | DWC3_DCTL_ACCEPTU2ENA
2423 | DWC3_DCTL_INITU1ENA
2424 | DWC3_DCTL_ACCEPTU1ENA);
2425
2426 if (!dwc->u1u2)
2427 dwc->u1u2 = reg & u1u2;
2428
2429 reg &= ~u1u2;
2430
2431 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2432 break;
2433 default:
2434 /* do nothing */
2435 break;
2436 }
2437 }
2438 }
2439
2440 dwc->link_state = next;
019ac832
FB
2441
2442 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
72246da4
FB
2443}
2444
2445static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2446 const struct dwc3_event_devt *event)
2447{
2448 switch (event->type) {
2449 case DWC3_DEVICE_EVENT_DISCONNECT:
2450 dwc3_gadget_disconnect_interrupt(dwc);
2451 break;
2452 case DWC3_DEVICE_EVENT_RESET:
2453 dwc3_gadget_reset_interrupt(dwc);
2454 break;
2455 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2456 dwc3_gadget_conndone_interrupt(dwc);
2457 break;
2458 case DWC3_DEVICE_EVENT_WAKEUP:
2459 dwc3_gadget_wakeup_interrupt(dwc);
2460 break;
2461 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2462 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2463 break;
2464 case DWC3_DEVICE_EVENT_EOPF:
2465 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2466 break;
2467 case DWC3_DEVICE_EVENT_SOF:
2468 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2469 break;
2470 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2471 dev_vdbg(dwc->dev, "Erratic Error\n");
2472 break;
2473 case DWC3_DEVICE_EVENT_CMD_CMPL:
2474 dev_vdbg(dwc->dev, "Command Complete\n");
2475 break;
2476 case DWC3_DEVICE_EVENT_OVERFLOW:
2477 dev_vdbg(dwc->dev, "Overflow\n");
2478 break;
2479 default:
2480 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2481 }
2482}
2483
2484static void dwc3_process_event_entry(struct dwc3 *dwc,
2485 const union dwc3_event *event)
2486{
2487 /* Endpoint IRQ, handle it and return early */
2488 if (event->type.is_devspec == 0) {
2489 /* depevt */
2490 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2491 }
2492
2493 switch (event->type.type) {
2494 case DWC3_EVENT_TYPE_DEV:
2495 dwc3_gadget_interrupt(dwc, &event->devt);
2496 break;
2497 /* REVISIT what to do with Carkit and I2C events ? */
2498 default:
2499 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2500 }
2501}
2502
b15a762f
FB
2503static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2504{
2505 struct dwc3 *dwc = _dwc;
2506 unsigned long flags;
2507 irqreturn_t ret = IRQ_NONE;
2508 int i;
2509
2510 spin_lock_irqsave(&dwc->lock, flags);
2511
2512 for (i = 0; i < dwc->num_event_buffers; i++) {
2513 struct dwc3_event_buffer *evt;
2514 int left;
2515
2516 evt = dwc->ev_buffs[i];
2517 left = evt->count;
2518
2519 if (!(evt->flags & DWC3_EVENT_PENDING))
2520 continue;
2521
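/*
 * Consume events 4 bytes at a time, acknowledging each one by
 * writing its size back to GEVNTCOUNT.
 */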
2522 while (left > 0) {
2523 union dwc3_event event;
2524
2525 event.raw = *(u32 *) (evt->buf + evt->lpos);
2526
2527 dwc3_process_event_entry(dwc, &event);
2528
2529 /*
2530 * FIXME we wrap around correctly to the next entry as
2531 * almost all entries are 4 bytes in size. There is one
2532 * entry which is 12 bytes: a regular entry followed by
2533 * 8 bytes of data. At the moment it is not clear how
2534 * things are organized when we get close to that
2535 * boundary, so we will worry about it once we try to
2536 * handle that case.
2537 */
2538 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2539 left -= 4;
2540
2541 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(i), 4);
2542 }
2543
2544 evt->count = 0;
2545 evt->flags &= ~DWC3_EVENT_PENDING;
2546 ret = IRQ_HANDLED;
2547 }
2548
2549 spin_unlock_irqrestore(&dwc->lock, flags);
2550
2551 return ret;
2552}
2553
72246da4
FB
2554static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2555{
2556 struct dwc3_event_buffer *evt;
72246da4
FB
2557 u32 count;
2558
b15a762f
FB
2559 evt = dwc->ev_buffs[buf];
2560
72246da4
FB
2561 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2562 count &= DWC3_GEVNTCOUNT_MASK;
2563 if (!count)
2564 return IRQ_NONE;
2565
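/* Mark the buffer pending; the threaded handler does the actual processing. */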
b15a762f
FB
2566 evt->count = count;
2567 evt->flags |= DWC3_EVENT_PENDING;
72246da4 2568
b15a762f 2569 return IRQ_WAKE_THREAD;
72246da4
FB
2570}
2571
2572static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2573{
2574 struct dwc3 *dwc = _dwc;
2575 int i;
2576 irqreturn_t ret = IRQ_NONE;
2577
2578 spin_lock(&dwc->lock);
2579
9f622b2a 2580 for (i = 0; i < dwc->num_event_buffers; i++) {
72246da4
FB
2581 irqreturn_t status;
2582
2583 status = dwc3_process_event_buf(dwc, i);
b15a762f 2584 if (status == IRQ_WAKE_THREAD)
72246da4
FB
2585 ret = status;
2586 }
2587
2588 spin_unlock(&dwc->lock);
2589
2590 return ret;
2591}
2592
2593/**
2594 * dwc3_gadget_init - Initializes gadget related registers
1d046793 2595 * @dwc: pointer to our controller context structure
72246da4
FB
2596 *
2597 * Returns 0 on success otherwise negative errno.
2598 */
41ac7b3a 2599int dwc3_gadget_init(struct dwc3 *dwc)
72246da4
FB
2600{
2601 u32 reg;
2602 int ret;
72246da4
FB
2603
2604 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2605 &dwc->ctrl_req_addr, GFP_KERNEL);
2606 if (!dwc->ctrl_req) {
2607 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2608 ret = -ENOMEM;
2609 goto err0;
2610 }
2611
2612 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2613 &dwc->ep0_trb_addr, GFP_KERNEL);
2614 if (!dwc->ep0_trb) {
2615 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2616 ret = -ENOMEM;
2617 goto err1;
2618 }
2619
3ef35faf 2620 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
72246da4
FB
2621 if (!dwc->setup_buf) {
2622 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2623 ret = -ENOMEM;
2624 goto err2;
2625 }
2626
5812b1c2 2627 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
3ef35faf
FB
2628 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2629 GFP_KERNEL);
5812b1c2
FB
2630 if (!dwc->ep0_bounce) {
2631 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2632 ret = -ENOMEM;
2633 goto err3;
2634 }
2635
72246da4 2636 dwc->gadget.ops = &dwc3_gadget_ops;
d327ab5b 2637 dwc->gadget.max_speed = USB_SPEED_SUPER;
72246da4 2638 dwc->gadget.speed = USB_SPEED_UNKNOWN;
eeb720fb 2639 dwc->gadget.sg_supported = true;
72246da4
FB
2640 dwc->gadget.name = "dwc3-gadget";
2641
2642 /*
2643 * REVISIT: Here we should clear all pending IRQs to be
2644 * sure we're starting from a well known location.
2645 */
2646
2647 ret = dwc3_gadget_init_endpoints(dwc);
2648 if (ret)
5812b1c2 2649 goto err4;
72246da4 2650
e6a3b5e2
SAS
2651 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2652 reg |= DWC3_DCFG_LPM_CAP;
2653 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2654
8698e2ac 2655 /* Enable USB2 LPM and automatic phy suspend only on recent versions */
802fde98 2656 if (dwc->revision >= DWC3_REVISION_194A) {
dcae3573
PA
2657 dwc3_gadget_usb2_phy_suspend(dwc, false);
2658 dwc3_gadget_usb3_phy_suspend(dwc, false);
802fde98
PZ
2659 }
2660
72246da4
FB
2661 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2662 if (ret) {
2663 dev_err(dwc->dev, "failed to register udc\n");
8698e2ac 2664 goto err5;
72246da4
FB
2665 }
2666
2667 return 0;
2668
5812b1c2 2669err5:
72246da4
FB
2670 dwc3_gadget_free_endpoints(dwc);
2671
5812b1c2 2672err4:
3ef35faf
FB
2673 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2674 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2675
72246da4 2676err3:
0fc9a1be 2677 kfree(dwc->setup_buf);
72246da4
FB
2678
2679err2:
2680 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2681 dwc->ep0_trb, dwc->ep0_trb_addr);
2682
2683err1:
2684 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2685 dwc->ctrl_req, dwc->ctrl_req_addr);
2686
2687err0:
2688 return ret;
2689}
2690
7415f17c
FB
2691/* -------------------------------------------------------------------------- */
2692
72246da4
FB
2693void dwc3_gadget_exit(struct dwc3 *dwc)
2694{
72246da4 2695 usb_del_gadget_udc(&dwc->gadget);
72246da4 2696
72246da4
FB
2697 dwc3_gadget_free_endpoints(dwc);
2698
3ef35faf
FB
2699 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2700 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2701
0fc9a1be 2702 kfree(dwc->setup_buf);
72246da4
FB
2703
2704 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2705 dwc->ep0_trb, dwc->ep0_trb_addr);
2706
2707 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2708 dwc->ctrl_req, dwc->ctrl_req_addr);
72246da4 2709}
7415f17c
FB
2710
2711int dwc3_gadget_prepare(struct dwc3 *dwc)
2712{
2713 if (dwc->pullups_connected)
2714 dwc3_gadget_disable_irq(dwc);
2715
2716 return 0;
2717}
2718
2719void dwc3_gadget_complete(struct dwc3 *dwc)
2720{
2721 if (dwc->pullups_connected) {
2722 dwc3_gadget_enable_irq(dwc);
2723 dwc3_gadget_run_stop(dwc, true);
2724 }
2725}
2726
2727int dwc3_gadget_suspend(struct dwc3 *dwc)
2728{
2729 __dwc3_gadget_ep_disable(dwc->eps[0]);
2730 __dwc3_gadget_ep_disable(dwc->eps[1]);
2731
2732 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2733
2734 return 0;
2735}
2736
2737int dwc3_gadget_resume(struct dwc3 *dwc)
2738{
2739 struct dwc3_ep *dep;
2740 int ret;
2741
2742 /* Start with SuperSpeed Default */
2743 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2744
2745 dep = dwc->eps[0];
2746 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2747 if (ret)
2748 goto err0;
2749
2750 dep = dwc->eps[1];
2751 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2752 if (ret)
2753 goto err1;
2754
2755 /* begin to receive SETUP packets */
2756 dwc->ep0state = EP0_SETUP_PHASE;
2757 dwc3_ep0_out_start(dwc);
2758
2759 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2760
2761 return 0;
2762
2763err1:
2764 __dwc3_gadget_ep_disable(dwc->eps[0]);
2765
2766err0:
2767 return ret;
2768}