/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions, and the following disclaimer,
 *     without modification.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. The names of the above-listed copyright holders may not be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if a wrong Test Selector
 * is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

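	/*
	 * The Test Selector occupies the DCTL.TSTCTRL field, which starts at
	 * bit 1 (cleared via DWC3_DCTL_TSTCTRL_MASK above); that is why the
	 * selected mode is shifted left by one before being written back.
	 */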
	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

/**
 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 10000;
	u32 reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dev_vdbg(dwc->dev, "link state change request timed out\n");

	return -ETIMEDOUT;
}

/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will do a best-effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the number of endpoints enabled on coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In the ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that is not always the case.
 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int last_fifo_depth = 0;
	int ram1_depth;
	int fifo_size;
	int mdwidth;
	int num;

	if (!dwc->needs_fifo_resize)
		return 0;

	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
		struct dwc3_ep *dep = dwc->eps[num];
		int fifo_number = dep->number >> 1;
		int mult = 1;
		int tmp;

		if (!(dep->number & 1))
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast. So we're giving
		 * those endpoints a fifo_size which is enough for 3 full
		 * packets
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		fifo_size = DIV_ROUND_UP(tmp, mdwidth);
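		/*
		 * Worked example (illustrative, assuming typical values): for
		 * a bulk endpoint with a 1024-byte wMaxPacketSize on an
		 * 8-byte wide master bus, mult = 3, so
		 * tmp = 3 * (1024 + 8) + 8 = 3104 and
		 * fifo_size = DIV_ROUND_UP(3104, 8) = 388 RAM1 words.
		 */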

		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
				fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	if (req->queued) {
		if (req->request.num_mapped_sgs)
			dep->busy_slot += req->request.num_mapped_sgs;
		else
			dep->busy_slot++;

		/*
		 * Skip LINK TRB. We can't use req->trb and check for
		 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we just
		 * completed (not the LINK TRB).
		 */
		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
			dep->busy_slot++;
	}
	list_del(&req->list);
	req->trb = NULL;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (dwc->ep0_bounced && dep->number == 0)
		dwc->ep0_bounced = false;
	else
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	spin_unlock(&dwc->lock);
	req->request.complete(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
{
	switch (cmd) {
	case DWC3_DEPCMD_DEPSTARTCFG:
		return "Start New Configuration";
	case DWC3_DEPCMD_ENDTRANSFER:
		return "End Transfer";
	case DWC3_DEPCMD_UPDATETRANSFER:
		return "Update Transfer";
	case DWC3_DEPCMD_STARTTRANSFER:
		return "Start Transfer";
	case DWC3_DEPCMD_CLEARSTALL:
		return "Clear Stall";
	case DWC3_DEPCMD_SETSTALL:
		return "Set Stall";
	case DWC3_DEPCMD_GETEPSTATE:
		return "Get Endpoint State";
	case DWC3_DEPCMD_SETTRANSFRESOURCE:
		return "Set Endpoint Transfer Resource";
	case DWC3_DEPCMD_SETEPCONFIG:
		return "Set Endpoint Configuration";
	default:
		return "UNKNOWN command";
	}
}

int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
{
	u32 timeout = 500;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DGCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it's also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);
}

int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep *dep = dwc->eps[ep];
	u32 timeout = 500;
	u32 reg;

	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
			dep->name,
			dwc3_gadget_ep_cmd_string(cmd), params->param0,
			params->param1, params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32 offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	if (dep->number == 0 || dep->number == 1)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
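		/*
		 * The burst size field in DEPCFG is zero-based, so the
		 * endpoint's maxburst value is programmed minus one
		 * (e.g. a maxburst of 4 is written as 3 below).
		 */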
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (ignore)
		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

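	/*
	 * For high-speed and SuperSpeed periodic endpoints, bInterval is an
	 * exponent: the period is 2^(bInterval - 1) (micro)frames or bus
	 * intervals, which is what the shift below computes. For example,
	 * bInterval = 4 yields dep->interval = 8.
	 */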
	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

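	/*
	 * The Set Endpoint Transfer Resource command takes the number of
	 * transfer resources to assign as its parameter; this driver always
	 * requests a single one per endpoint, as programmed below.
	 */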
	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;
	int ret = -ENOMEM;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb *trb_st_hw;
		struct dwc3_trb *trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		memset(&trb_link, 0, sizeof(trb_link));

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];

		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request *req;

	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number);

		/* - giveback all requests to gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_remove_requests(dwc, dep);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
	gfp_t gfp_flags)
{
	struct dwc3_request *req;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req) {
		dev_err(dwc->dev, "not enough memory\n");
		return NULL;
	}

	req->epnum = dep->number;
	req->dep = dep;

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);

	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_trb *trb;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	/* Skip the LINK-TRB on ISOC */
	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->endpoint.desc))
		dep->free_slot++;

	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
	dep->free_slot++;

	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
	}

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

		if (!req->request.no_interrupt)
			trb->ctrl |= DWC3_TRB_CTRL_IOC;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		trb->ctrl |= DWC3_TRB_CTRL_CSP;
	} else {
		if (chain)
			trb->ctrl |= DWC3_TRB_CTRL_CHN;

		if (last)
			trb->ctrl |= DWC3_TRB_CTRL_LST;
	}

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request *req, *n;
	u32 trbs_left;
	u32 max;
	unsigned int last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
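	/*
	 * busy_slot and free_slot are free-running indices into the TRB
	 * ring, so the masked (modulo DWC3_TRB_NUM) difference above is the
	 * number of ring slots still available for new TRBs; a result of
	 * zero means "full or empty" and is disambiguated further below.
	 */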

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If busy & slot are equal then it is either full or empty. If we are
	 * starting to process requests then we are empty. Otherwise we are
	 * full and don't do anything
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned length;
		dma_addr_t dma;

		if (req->request.num_mapped_sgs > 0) {
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain);

				if (last_one)
					break;
			}
		} else {
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false);

			if (last_one)
				break;
		}
	}
}

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;
	int ret;
	u32 cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (start_new) {
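		/*
		 * For Start Transfer the two command parameters carry the
		 * 64-bit DMA address of the first TRB to fetch: param0 holds
		 * the upper 32 bits and param1 the lower 32 bits, as
		 * programmed just below.
		 */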
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER;
	}

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;

	if (start_new) {
		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
				dep->number);
		WARN_ON_ONCE(!dep->resource_index);
	}

	return 0;
}

static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, u32 cur_uf)
{
	u32 uf;

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests\n",
			dep->name);
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return;
	}

	/* 4 micro frames in the future */
	uf = cur_uf + dep->interval * 4;

	__dwc3_gadget_kick_transfer(dep, uf, 1);
}

static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 cur_uf, mask;

	mask = ~(dep->interval - 1);
	cur_uf = event->parameters & mask;

	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
}

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;
	int ret;

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->direction = dep->direction;
	req->epnum = dep->number;

	/*
	 * We only add to our list of requests now and
	 * start consuming the list once we get XferNotReady
	 * IRQ.
	 *
	 * That way, we avoid doing anything that we don't need
	 * to do now and defer it until the point we receive a
	 * particular token from the Host side.
	 *
	 * This will also avoid Host cancelling URBs due to too
	 * many NAKs.
	 */
	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
			dep->direction);
	if (ret)
		return ret;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * There are a few special cases:
	 *
	 * 1. XferNotReady with empty list of requests. We need to kick the
	 *    transfer here in that situation, otherwise we will be NAKing
	 *    forever. If we get XferNotReady before gadget driver has a
	 *    chance to queue a request, we will ACK the IRQ but won't be
	 *    able to receive the data until the next request is queued.
	 *    The following code is handling exactly that.
	 *
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		/*
		 * If xfernotready is already elapsed and it is a case
		 * of isoc transfer, then issue END TRANSFER, so that
		 * you can receive xfernotready again and can have
		 * notion of current microframe.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			if (list_empty(&dep->req_queued)) {
				dwc3_stop_active_transfer(dwc, dep->number);
				dep->flags = DWC3_EP_ENABLED;
			}
			return 0;
		}

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	/*
	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
	 *    kick the transfer here after queuing a request, otherwise the
	 *    core may not see the modified TRB(s).
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			(dep->flags & DWC3_EP_BUSY) &&
			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
		WARN_ON_ONCE(!dep->resource_index);
		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
				false);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	return 0;
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
	gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	if (!dep->endpoint.desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, ep->name);
		return -ESHUTDOWN;
	}

	dev_vdbg(dwc->dev, "queueing request %p to %s length %d\n",
			request, ep->name, request->length);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	int ret;

	memset(&params, 0x00, sizeof(params));

	if (value) {
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		if (dep->flags & DWC3_EP_WEDGE)
			return 0;

		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_CLEARSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags &= ~DWC3_EP_STALL;
	}

	return ret;
}

static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	spin_lock_irqsave(&dwc->lock, flags);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		ret = -EINVAL;
		goto out;
	}

	ret = __dwc3_gadget_ep_set_halt(dep, value);
out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dep->flags |= DWC3_EP_WEDGE;
	spin_unlock_irqrestore(&dwc->lock, flags);

	if (dep->number == 0 || dep->number == 1)
		return dwc3_gadget_ep0_set_halt(ep, 1);
	else
		return dwc3_gadget_ep_set_halt(ep, 1);
}

/* -------------------------------------------------------------------------- */

static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	unsigned long timeout;
	unsigned long flags;

	u32 reg;

	int ret = 0;

	u8 link_state;
	u8 speed;

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * According to the Databook Remote wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if (speed == DWC3_DSTS_SUPERSPEED) {
		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
		ret = -EINVAL;
		goto out;
	}

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
				link_state);
		ret = -EINVAL;
		goto out;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		goto out;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	timeout = jiffies + msecs_to_jiffies(100);

	while (!time_after(jiffies, timeout)) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
	u32 reg;
	u32 timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ? "connect" : "disconnect");

	return 0;
}

static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int ret;

	is_on = !!is_on;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_run_stop(dwc, is_on);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	struct dwc3_ep *dep;
	unsigned long flags;
	int ret = 0;
	u32 reg;

	spin_lock_irqsave(&dwc->lock, flags);

	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err0;
	}

	dwc->gadget_driver	= driver;
	dwc->gadget.dev.driver	= &driver->driver;

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);

	/**
	 * WORKAROUND: DWC3 revision < 2.20a have an issue
	 * which would cause metastability state on Run/Stop
	 * bit if we try to force the IP to USB2-only mode.
	 *
	 * Because of that, we cannot configure the IP to any
	 * speed other than the SuperSpeed
	 *
	 * Refers to:
	 *
	 * STAR#9000525659: Clock Domain Crossing on DCTL in
	 * USB 2.0 Mode
	 */
	if (dwc->revision < DWC3_REVISION_220A)
		reg |= DWC3_DCFG_SUPERSPEED;
	else
		reg |= dwc->maximum_speed;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	dwc->start_config_issued = false;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->gadget_driver	= NULL;
	dwc->gadget.dev.driver	= NULL;

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
		if (!dep) {
			dev_err(dwc->dev, "can't allocate endpoint %d\n",
					epnum);
			return -ENOMEM;
		}

		dep->dwc = dwc;
		dep->number = epnum;
		dwc->eps[epnum] = dep;

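		/*
		 * Physical endpoint numbers interleave direction: even
		 * numbers are OUT and odd numbers are IN, so e.g. physical
		 * endpoint 3 is named "ep1in" below.
		 */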
1592 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1593 (epnum & 1) ? "in" : "out");
1594 dep->endpoint.name = dep->name;
1595 dep->direction = (epnum & 1);
1596
1597 if (epnum == 0 || epnum == 1) {
1598 dep->endpoint.maxpacket = 512;
1599 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1600 if (!epnum)
1601 dwc->gadget.ep0 = &dep->endpoint;
1602 } else {
1603 int ret;
1604
1605 dep->endpoint.maxpacket = 1024;
12d36c16 1606 dep->endpoint.max_streams = 15;
72246da4
FB
1607 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1608 list_add_tail(&dep->endpoint.ep_list,
1609 &dwc->gadget.ep_list);
1610
1611 ret = dwc3_alloc_trb_pool(dep);
25b8ff68 1612 if (ret)
72246da4 1613 return ret;
72246da4 1614 }
25b8ff68 1615
72246da4
FB
1616 INIT_LIST_HEAD(&dep->request_list);
1617 INIT_LIST_HEAD(&dep->req_queued);
1618 }
1619
1620 return 0;
1621}
1622
1623static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1624{
1625 struct dwc3_ep *dep;
1626 u8 epnum;
1627
1628 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1629 dep = dwc->eps[epnum];
1630 dwc3_free_trb_pool(dep);
1631
1632 if (epnum != 0 && epnum != 1)
1633 list_del(&dep->endpoint.ep_list);
1634
1635 kfree(dep);
1636 }
1637}
1638
1639static void dwc3_gadget_release(struct device *dev)
1640{
1641 dev_dbg(dev, "%s\n", __func__);
1642}
1643
1644/* -------------------------------------------------------------------------- */
1645static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1646 const struct dwc3_event_depevt *event, int status)
1647{
1648 struct dwc3_request *req;
f6bafc6a 1649 struct dwc3_trb *trb;
72246da4
FB
1650 unsigned int count;
1651 unsigned int s_pkt = 0;
d6d6ec7b 1652 unsigned int trb_status;
72246da4
FB
1653
1654 do {
1655 req = next_request(&dep->req_queued);
d39ee7be
SAS
1656 if (!req) {
1657 WARN_ON_ONCE(1);
1658 return 1;
1659 }
72246da4 1660
f6bafc6a 1661 trb = req->trb;
72246da4 1662
f6bafc6a 1663 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
0d2f4758
SAS
1664 /*
1665 * We continue despite the error. There is not much we
1d046793
PZ
1666 * can do. If we don't clean it up we loop forever. If
1667 * we skip the TRB then it gets overwritten after a
1668 * while since we use them in a ring buffer. A BUG()
1669 * would help. Lets hope that if this occurs, someone
0d2f4758
SAS
1670 * fixes the root cause instead of looking away :)
1671 */
72246da4
FB
1672 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1673 dep->name, req->trb);
f6bafc6a 1674 count = trb->size & DWC3_TRB_SIZE_MASK;
72246da4
FB
1675
1676 if (dep->direction) {
1677 if (count) {
d6d6ec7b
PA
1678 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1679 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1680 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1681 dep->name);
7efea86c
PA
1682 /*
1683 * If missed isoc occurred and there is
1684 * no request queued then issue END
1685 * TRANSFER, so that core generates
1686 * next xfernotready and we will issue
1687 * a fresh START TRANSFER.
1688 * If there are still queued request
1689 * then wait, do not issue either END
1690 * or UPDATE TRANSFER, just attach next
1691 * request in request_list during
1692 * giveback.If any future queued request
1693 * is successfully transferred then we
1694 * will issue UPDATE TRANSFER for all
1695 * request in the request_list.
1696 */
d6d6ec7b
PA
1697 dep->flags |= DWC3_EP_MISSED_ISOC;
1698 } else {
1699 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1700 dep->name);
1701 status = -ECONNRESET;
1702 }
7efea86c
PA
1703 } else {
1704 dep->flags &= ~DWC3_EP_MISSED_ISOC;
72246da4
FB
1705 }
1706 } else {
1707 if (count && (event->status & DEPEVT_STATUS_SHORT))
1708 s_pkt = 1;
1709 }
1710
1711 /*
1712 * We assume here we will always receive the entire data block
1713 * which we should receive. Meaning, if we program RX to
1714 * receive 4K but we receive only 2K, we assume that's all we
1715 * should receive and we simply bounce the request back to the
1716 * gadget driver for further processing.
1717 */
1718 req->request.actual += req->request.length - count;
1719 dwc3_gadget_giveback(dep, req, status);
1720 if (s_pkt)
1721 break;
f6bafc6a 1722 if ((event->status & DEPEVT_STATUS_LST) &&
70b674bf
PA
1723 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1724 DWC3_TRB_CTRL_HWO)))
72246da4 1725 break;
f6bafc6a
FB
1726 if ((event->status & DEPEVT_STATUS_IOC) &&
1727 (trb->ctrl & DWC3_TRB_CTRL_IOC))
72246da4
FB
1728 break;
1729 } while (1);
1730
cdc359dd
PA
1731 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1732 list_empty(&dep->req_queued)) {
1733 if (list_empty(&dep->request_list)) {
1734 /*
1735 * If there is no entry in request list then do
1736 * not issue END TRANSFER now. Just set PENDING
1737 * flag, so that END TRANSFER is issued when an
1738 * entry is added into request list.
1739 */
1740 dep->flags = DWC3_EP_PENDING_REQUEST;
1741 } else {
1742 dwc3_stop_active_transfer(dwc, dep->number);
1743 dep->flags = DWC3_EP_ENABLED;
1744 }
7efea86c
PA
1745 return 1;
1746 }
1747
f6bafc6a
FB
1748 if ((event->status & DEPEVT_STATUS_IOC) &&
1749 (trb->ctrl & DWC3_TRB_CTRL_IOC))
72246da4
FB
1750 return 0;
1751 return 1;
1752}
1753
1754static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1755 struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1756 int start_new)
1757{
1758 unsigned status = 0;
1759 int clean_busy;
1760
1761 if (event->status & DEPEVT_STATUS_BUSERR)
1762 status = -ECONNRESET;
1763
1d046793 1764 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
c2df85ca 1765 if (clean_busy)
72246da4 1766 dep->flags &= ~DWC3_EP_BUSY;
fae2b904
FB
1767
1768 /*
1769 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1770 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1771 */
1772 if (dwc->revision < DWC3_REVISION_183A) {
1773 u32 reg;
1774 int i;
1775
1776 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
348e026f 1777 dep = dwc->eps[i];
fae2b904
FB
1778
1779 if (!(dep->flags & DWC3_EP_ENABLED))
1780 continue;
1781
1782 if (!list_empty(&dep->req_queued))
1783 return;
1784 }
1785
1786 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1787 reg |= dwc->u1u2;
1788 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1789
1790 dwc->u1u2 = 0;
1791 }
72246da4
FB
1792}
1793
72246da4
FB
1794static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1795 const struct dwc3_event_depevt *event)
1796{
1797 struct dwc3_ep *dep;
1798 u8 epnum = event->endpoint_number;
1799
1800 dep = dwc->eps[epnum];
1801
3336abb5
FB
1802 if (!(dep->flags & DWC3_EP_ENABLED))
1803 return;
1804
72246da4
FB
1805 dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1806 dwc3_ep_event_string(event->endpoint_event));
1807
1808 if (epnum == 0 || epnum == 1) {
1809 dwc3_ep0_interrupt(dwc, event);
1810 return;
1811 }
1812
1813 switch (event->endpoint_event) {
1814 case DWC3_DEPEVT_XFERCOMPLETE:
b4996a86 1815 dep->resource_index = 0;
c2df85ca 1816
16e78db7 1817 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1818 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1819 dep->name);
1820 return;
1821 }
1822
1823 dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1824 break;
1825 case DWC3_DEPEVT_XFERINPROGRESS:
16e78db7 1826 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1827 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1828 dep->name);
1829 return;
1830 }
1831
1832 dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1833 break;
1834 case DWC3_DEPEVT_XFERNOTREADY:
16e78db7 1835 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1836 dwc3_gadget_start_isoc(dwc, dep, event);
1837 } else {
1838 int ret;
1839
1840 dev_vdbg(dwc->dev, "%s: reason %s\n",
40aa41fb
FB
1841 dep->name, event->status &
1842 DEPEVT_STATUS_TRANSFER_ACTIVE
72246da4
FB
1843 ? "Transfer Active"
1844 : "Transfer Not Active");
1845
1846 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1847 if (!ret || ret == -EBUSY)
1848 return;
1849
1850 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1851 dep->name);
1852 }
1853
879631aa
FB
1854 break;
1855 case DWC3_DEPEVT_STREAMEVT:
16e78db7 1856 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
879631aa
FB
1857 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1858 dep->name);
1859 return;
1860 }
1861
1862 switch (event->status) {
1863 case DEPEVT_STREAMEVT_FOUND:
1864 dev_vdbg(dwc->dev, "Stream %d found and started\n",
1865 event->parameters);
1866
1867 break;
1868 case DEPEVT_STREAMEVT_NOTFOUND:
1869 /* FALLTHROUGH */
1870 default:
1871 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1872 }
72246da4
FB
1873 break;
1874 case DWC3_DEPEVT_RXTXFIFOEVT:
1875 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1876 break;
72246da4 1877 case DWC3_DEPEVT_EPCMDCMPLT:
ea53b882 1878 dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
72246da4
FB
1879 break;
1880 }
1881}
1882
1883static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1884{
1885 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1886 spin_unlock(&dwc->lock);
1887 dwc->gadget_driver->disconnect(&dwc->gadget);
1888 spin_lock(&dwc->lock);
1889 }
1890}
1891
1892static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1893{
1894 struct dwc3_ep *dep;
1895 struct dwc3_gadget_ep_cmd_params params;
1896 u32 cmd;
1897 int ret;
1898
1899 dep = dwc->eps[epnum];
1900
b4996a86 1901 if (!dep->resource_index)
3daf74d7
PA
1902 return;
1903
57911504
PA
1904 /*
1905 * NOTICE: We are violating what the Databook says about the
1906 * EndTransfer command. Ideally we would _always_ wait for the
1907 * EndTransfer Command Completion IRQ, but that's causing too
 1908 * much trouble synchronizing between us and the gadget driver.
1909 *
1910 * We have discussed this with the IP Provider and it was
1911 * suggested to giveback all requests here, but give HW some
1912 * extra time to synchronize with the interconnect. We're using
 1913 * an arbitrary 100us delay for that.
1914 *
1915 * Note also that a similar handling was tested by Synopsys
1916 * (thanks a lot Paul) and nothing bad has come out of it.
1917 * In short, what we're doing is:
1918 *
1919 * - Issue EndTransfer WITH CMDIOC bit set
1920 * - Wait 100us
1921 */
1922
3daf74d7
PA
1923 cmd = DWC3_DEPCMD_ENDTRANSFER;
1924 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
b4996a86 1925 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
3daf74d7
PA
1926 memset(&params, 0, sizeof(params));
1927 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1928 WARN_ON_ONCE(ret);
b4996a86 1929 dep->resource_index = 0;
041d81f4 1930 dep->flags &= ~DWC3_EP_BUSY;
57911504 1931 udelay(100);
72246da4
FB
1932}
1933
1934static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1935{
1936 u32 epnum;
1937
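/* ep0 (physical endpoints 0 and 1) is handled separately, so start at endpoint 2 */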
1938 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1939 struct dwc3_ep *dep;
1940
1941 dep = dwc->eps[epnum];
1942 if (!(dep->flags & DWC3_EP_ENABLED))
1943 continue;
1944
624407f9 1945 dwc3_remove_requests(dwc, dep);
72246da4
FB
1946 }
1947}
1948
1949static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1950{
1951 u32 epnum;
1952
1953 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1954 struct dwc3_ep *dep;
1955 struct dwc3_gadget_ep_cmd_params params;
1956 int ret;
1957
1958 dep = dwc->eps[epnum];
1959
1960 if (!(dep->flags & DWC3_EP_STALL))
1961 continue;
1962
1963 dep->flags &= ~DWC3_EP_STALL;
1964
1965 memset(&params, 0, sizeof(params));
1966 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1967 DWC3_DEPCMD_CLEARSTALL, &params);
1968 WARN_ON_ONCE(ret);
1969 }
1970}
1971
1972static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
1973{
c4430a26
FB
 1974 u32 reg;
1975
72246da4 1976 dev_vdbg(dwc->dev, "%s\n", __func__);
72246da4
FB
1977
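/* stop initiating U1/U2 link power states before notifying the gadget driver */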
1978 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1979 reg &= ~DWC3_DCTL_INITU1ENA;
1980 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1981
1982 reg &= ~DWC3_DCTL_INITU2ENA;
1983 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
72246da4 1984
72246da4 1985 dwc3_disconnect_gadget(dwc);
b23c8439 1986 dwc->start_config_issued = false;
72246da4
FB
1987
1988 dwc->gadget.speed = USB_SPEED_UNKNOWN;
df62df56 1989 dwc->setup_packet_pending = false;
72246da4
FB
1990}
1991
d7a46a8d 1992static void dwc3_gadget_usb3_phy_suspend(struct dwc3 *dwc, int suspend)
72246da4
FB
1993{
1994 u32 reg;
1995
1996 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1997
d7a46a8d 1998 if (suspend)
72246da4 1999 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
d7a46a8d
PZ
2000 else
2001 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
72246da4
FB
2002
2003 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
2004}
2005
d7a46a8d 2006static void dwc3_gadget_usb2_phy_suspend(struct dwc3 *dwc, int suspend)
72246da4
FB
2007{
2008 u32 reg;
2009
2010 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
2011
d7a46a8d 2012 if (suspend)
72246da4 2013 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
d7a46a8d
PZ
2014 else
2015 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
72246da4
FB
2016
2017 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
2018}
2019
2020static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2021{
2022 u32 reg;
2023
2024 dev_vdbg(dwc->dev, "%s\n", __func__);
2025
df62df56
FB
2026 /*
2027 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2028 * would cause a missing Disconnect Event if there's a
2029 * pending Setup Packet in the FIFO.
2030 *
2031 * There's no suggested workaround on the official Bug
2032 * report, which states that "unless the driver/application
2033 * is doing any special handling of a disconnect event,
2034 * there is no functional issue".
2035 *
2036 * Unfortunately, it turns out that we _do_ some special
2037 * handling of a disconnect event, namely complete all
2038 * pending transfers, notify gadget driver of the
2039 * disconnection, and so on.
2040 *
2041 * Our suggested workaround is to follow the Disconnect
2042 * Event steps here, instead, based on a setup_packet_pending
2043 * flag. Such flag gets set whenever we have a XferNotReady
2044 * event on EP0 and gets cleared on XferComplete for the
2045 * same endpoint.
2046 *
2047 * Refers to:
2048 *
2049 * STAR#9000466709: RTL: Device : Disconnect event not
2050 * generated if setup packet pending in FIFO
2051 */
2052 if (dwc->revision < DWC3_REVISION_188A) {
2053 if (dwc->setup_packet_pending)
2054 dwc3_gadget_disconnect_interrupt(dwc);
2055 }
2056
961906ed
FB
2057 /* after reset -> Default State */
2058 dwc->dev_state = DWC3_DEFAULT_STATE;
2059
802fde98
PZ
2060 /* Recent versions support automatic phy suspend and don't need this */
2061 if (dwc->revision < DWC3_REVISION_194A) {
2062 /* Resume PHYs */
2063 dwc3_gadget_usb2_phy_suspend(dwc, false);
2064 dwc3_gadget_usb3_phy_suspend(dwc, false);
2065 }
72246da4
FB
2066
2067 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
2068 dwc3_disconnect_gadget(dwc);
2069
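/* leave any USB2 test mode that might have been entered */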
2070 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2071 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2072 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3b637367 2073 dwc->test_mode = false;
72246da4
FB
2074
2075 dwc3_stop_active_transfers(dwc);
2076 dwc3_clear_stall_all_ep(dwc);
b23c8439 2077 dwc->start_config_issued = false;
72246da4
FB
2078
2079 /* Reset device address to zero */
2080 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2081 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2082 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
72246da4
FB
2083}
2084
2085static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2086{
2087 u32 reg;
2088 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2089
2090 /*
 2091 * We change the clock only at SuperSpeed, though it is not clear why
 2092 * we would want to do this. Maybe it becomes part of the power saving plan.
2093 */
2094
2095 if (speed != DWC3_DSTS_SUPERSPEED)
2096 return;
2097
2098 /*
2099 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2100 * each time on Connect Done.
2101 */
2102 if (!usb30_clock)
2103 return;
2104
2105 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2106 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2107 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2108}
2109
d7a46a8d 2110static void dwc3_gadget_phy_suspend(struct dwc3 *dwc, u8 speed)
72246da4
FB
2111{
2112 switch (speed) {
2113 case USB_SPEED_SUPER:
d7a46a8d 2114 dwc3_gadget_usb2_phy_suspend(dwc, true);
72246da4
FB
2115 break;
2116 case USB_SPEED_HIGH:
2117 case USB_SPEED_FULL:
2118 case USB_SPEED_LOW:
d7a46a8d 2119 dwc3_gadget_usb3_phy_suspend(dwc, true);
72246da4
FB
2120 break;
2121 }
2122}
2123
2124static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2125{
2126 struct dwc3_gadget_ep_cmd_params params;
2127 struct dwc3_ep *dep;
2128 int ret;
2129 u32 reg;
2130 u8 speed;
2131
2132 dev_vdbg(dwc->dev, "%s\n", __func__);
2133
2134 memset(&params, 0x00, sizeof(params));
2135
72246da4
FB
2136 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2137 speed = reg & DWC3_DSTS_CONNECTSPD;
2138 dwc->speed = speed;
2139
2140 dwc3_update_ram_clk_sel(dwc, speed);
2141
2142 switch (speed) {
2143 case DWC3_DCFG_SUPERSPEED:
05870c5b
FB
2144 /*
2145 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2146 * would cause a missing USB3 Reset event.
2147 *
2148 * In such situations, we should force a USB3 Reset
2149 * event by calling our dwc3_gadget_reset_interrupt()
2150 * routine.
2151 *
2152 * Refers to:
2153 *
2154 * STAR#9000483510: RTL: SS : USB3 reset event may
2155 * not be generated always when the link enters poll
2156 */
2157 if (dwc->revision < DWC3_REVISION_190A)
2158 dwc3_gadget_reset_interrupt(dwc);
2159
72246da4
FB
2160 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2161 dwc->gadget.ep0->maxpacket = 512;
2162 dwc->gadget.speed = USB_SPEED_SUPER;
2163 break;
2164 case DWC3_DCFG_HIGHSPEED:
2165 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2166 dwc->gadget.ep0->maxpacket = 64;
2167 dwc->gadget.speed = USB_SPEED_HIGH;
2168 break;
2169 case DWC3_DCFG_FULLSPEED2:
2170 case DWC3_DCFG_FULLSPEED1:
2171 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2172 dwc->gadget.ep0->maxpacket = 64;
2173 dwc->gadget.speed = USB_SPEED_FULL;
2174 break;
2175 case DWC3_DCFG_LOWSPEED:
2176 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2177 dwc->gadget.ep0->maxpacket = 8;
2178 dwc->gadget.speed = USB_SPEED_LOW;
2179 break;
2180 }
2181
2b758350
PA
2182 /* Enable USB2 LPM Capability */
2183
2184 if ((dwc->revision > DWC3_REVISION_194A)
2185 && (speed != DWC3_DCFG_SUPERSPEED)) {
2186 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2187 reg |= DWC3_DCFG_LPM_CAP;
2188 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2189
2190 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2191 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2192
2193 /* TODO: This should be configurable */
2194 reg |= DWC3_DCTL_HIRD_THRES(28);
2195
2196 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2197 }
2198
802fde98
PZ
2199 /* Recent versions support automatic phy suspend and don't need this */
2200 if (dwc->revision < DWC3_REVISION_194A) {
2201 /* Suspend unneeded PHY */
2202 dwc3_gadget_phy_suspend(dwc, dwc->gadget.speed);
2203 }
72246da4
FB
2204
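/* re-enable both halves of ep0 with the wMaxPacketSize matching the new speed */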
2205 dep = dwc->eps[0];
4b345c9a 2206 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
72246da4
FB
2207 if (ret) {
2208 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2209 return;
2210 }
2211
2212 dep = dwc->eps[1];
4b345c9a 2213 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
72246da4
FB
2214 if (ret) {
2215 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2216 return;
2217 }
2218
2219 /*
2220 * Configure PHY via GUSB3PIPECTLn if required.
2221 *
2222 * Update GTXFIFOSIZn
2223 *
2224 * In both cases reset values should be sufficient.
2225 */
2226}
2227
2228static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2229{
2230 dev_vdbg(dwc->dev, "%s\n", __func__);
2231
2232 /*
 2233 * TODO: take the core out of low power mode when that's
2234 * implemented.
2235 */
2236
2237 dwc->gadget_driver->resume(&dwc->gadget);
2238}
2239
2240static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2241 unsigned int evtinfo)
2242{
fae2b904
FB
2243 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2244
2245 /*
2246 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
2247 * on the link partner, the USB session might do multiple entry/exit
2248 * of low power states before a transfer takes place.
2249 *
2250 * Due to this problem, we might experience lower throughput. The
2251 * suggested workaround is to disable DCTL[12:9] bits if we're
2252 * transitioning from U1/U2 to U0 and enable those bits again
2253 * after a transfer completes and there are no pending transfers
2254 * on any of the enabled endpoints.
2255 *
2256 * This is the first half of that workaround.
2257 *
2258 * Refers to:
2259 *
2260 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2261 * core send LGO_Ux entering U0
2262 */
2263 if (dwc->revision < DWC3_REVISION_183A) {
2264 if (next == DWC3_LINK_STATE_U0) {
2265 u32 u1u2;
2266 u32 reg;
2267
2268 switch (dwc->link_state) {
2269 case DWC3_LINK_STATE_U1:
2270 case DWC3_LINK_STATE_U2:
2271 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2272 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2273 | DWC3_DCTL_ACCEPTU2ENA
2274 | DWC3_DCTL_INITU1ENA
2275 | DWC3_DCTL_ACCEPTU1ENA);
2276
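/*
 * Remember which U1/U2 enable bits were set so the second half of
 * this workaround (in the transfer complete path) can restore them.
 */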
2277 if (!dwc->u1u2)
2278 dwc->u1u2 = reg & u1u2;
2279
2280 reg &= ~u1u2;
2281
2282 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2283 break;
2284 default:
2285 /* do nothing */
2286 break;
2287 }
2288 }
2289 }
2290
2291 dwc->link_state = next;
019ac832
FB
2292
2293 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
72246da4
FB
2294}
2295
2296static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2297 const struct dwc3_event_devt *event)
2298{
2299 switch (event->type) {
2300 case DWC3_DEVICE_EVENT_DISCONNECT:
2301 dwc3_gadget_disconnect_interrupt(dwc);
2302 break;
2303 case DWC3_DEVICE_EVENT_RESET:
2304 dwc3_gadget_reset_interrupt(dwc);
2305 break;
2306 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2307 dwc3_gadget_conndone_interrupt(dwc);
2308 break;
2309 case DWC3_DEVICE_EVENT_WAKEUP:
2310 dwc3_gadget_wakeup_interrupt(dwc);
2311 break;
2312 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2313 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2314 break;
2315 case DWC3_DEVICE_EVENT_EOPF:
2316 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2317 break;
2318 case DWC3_DEVICE_EVENT_SOF:
2319 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2320 break;
2321 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2322 dev_vdbg(dwc->dev, "Erratic Error\n");
2323 break;
2324 case DWC3_DEVICE_EVENT_CMD_CMPL:
2325 dev_vdbg(dwc->dev, "Command Complete\n");
2326 break;
2327 case DWC3_DEVICE_EVENT_OVERFLOW:
2328 dev_vdbg(dwc->dev, "Overflow\n");
2329 break;
2330 default:
2331 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2332 }
2333}
2334
2335static void dwc3_process_event_entry(struct dwc3 *dwc,
2336 const union dwc3_event *event)
2337{
2338 /* Endpoint IRQ, handle it and return early */
2339 if (event->type.is_devspec == 0) {
2340 /* depevt */
2341 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2342 }
2343
2344 switch (event->type.type) {
2345 case DWC3_EVENT_TYPE_DEV:
2346 dwc3_gadget_interrupt(dwc, &event->devt);
2347 break;
2348 /* REVISIT what to do with Carkit and I2C events ? */
2349 default:
2350 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2351 }
2352}
2353
2354static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2355{
2356 struct dwc3_event_buffer *evt;
2357 int left;
2358 u32 count;
2359
2360 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2361 count &= DWC3_GEVNTCOUNT_MASK;
2362 if (!count)
2363 return IRQ_NONE;
2364
2365 evt = dwc->ev_buffs[buf];
2366 left = count;
2367
2368 while (left > 0) {
2369 union dwc3_event event;
2370
d70d8442
FB
2371 event.raw = *(u32 *) (evt->buf + evt->lpos);
2372
72246da4
FB
2373 dwc3_process_event_entry(dwc, &event);
2374 /*
 2375 * XXX we wrap around correctly to the next entry as almost all
 2376 * entries are 4 bytes in size. There is one entry which has 12
 2377 * bytes: a regular entry followed by 8 bytes of data. At the
 2378 * moment it is not clear how things are organized if we get next
 2379 * to a boundary, so worry about that once we try to handle it.
2380 */
2381 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2382 left -= 4;
2383
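/* acknowledge the 4 bytes just consumed so the controller can reuse that space */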
2384 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2385 }
2386
2387 return IRQ_HANDLED;
2388}
2389
2390static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2391{
2392 struct dwc3 *dwc = _dwc;
2393 int i;
2394 irqreturn_t ret = IRQ_NONE;
2395
2396 spin_lock(&dwc->lock);
2397
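/* poll every event buffer; report IRQ_HANDLED if any of them had events pending */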
9f622b2a 2398 for (i = 0; i < dwc->num_event_buffers; i++) {
72246da4
FB
2399 irqreturn_t status;
2400
2401 status = dwc3_process_event_buf(dwc, i);
2402 if (status == IRQ_HANDLED)
2403 ret = status;
2404 }
2405
2406 spin_unlock(&dwc->lock);
2407
2408 return ret;
2409}
2410
2411/**
2412 * dwc3_gadget_init - Initializes gadget related registers
1d046793 2413 * @dwc: pointer to our controller context structure
72246da4
FB
2414 *
2415 * Returns 0 on success otherwise negative errno.
2416 */
41ac7b3a 2417int dwc3_gadget_init(struct dwc3 *dwc)
72246da4
FB
2418{
2419 u32 reg;
2420 int ret;
2421 int irq;
2422
2423 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2424 &dwc->ctrl_req_addr, GFP_KERNEL);
2425 if (!dwc->ctrl_req) {
2426 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2427 ret = -ENOMEM;
2428 goto err0;
2429 }
2430
2431 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2432 &dwc->ep0_trb_addr, GFP_KERNEL);
2433 if (!dwc->ep0_trb) {
2434 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2435 ret = -ENOMEM;
2436 goto err1;
2437 }
2438
3ef35faf 2439 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
72246da4
FB
2440 if (!dwc->setup_buf) {
2441 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2442 ret = -ENOMEM;
2443 goto err2;
2444 }
2445
5812b1c2 2446 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
3ef35faf
FB
2447 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2448 GFP_KERNEL);
5812b1c2
FB
2449 if (!dwc->ep0_bounce) {
2450 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2451 ret = -ENOMEM;
2452 goto err3;
2453 }
2454
72246da4
FB
2455 dev_set_name(&dwc->gadget.dev, "gadget");
2456
2457 dwc->gadget.ops = &dwc3_gadget_ops;
d327ab5b 2458 dwc->gadget.max_speed = USB_SPEED_SUPER;
72246da4
FB
2459 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2460 dwc->gadget.dev.parent = dwc->dev;
eeb720fb 2461 dwc->gadget.sg_supported = true;
72246da4
FB
2462
2463 dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
2464
2465 dwc->gadget.dev.dma_parms = dwc->dev->dma_parms;
2466 dwc->gadget.dev.dma_mask = dwc->dev->dma_mask;
2467 dwc->gadget.dev.release = dwc3_gadget_release;
2468 dwc->gadget.name = "dwc3-gadget";
2469
2470 /*
2471 * REVISIT: Here we should clear all pending IRQs to be
2472 * sure we're starting from a well known location.
2473 */
2474
2475 ret = dwc3_gadget_init_endpoints(dwc);
2476 if (ret)
5812b1c2 2477 goto err4;
72246da4
FB
2478
2479 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2480
2481 ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
2482 "dwc3", dwc);
2483 if (ret) {
2484 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2485 irq, ret);
5812b1c2 2486 goto err5;
72246da4
FB
2487 }
2488
e6a3b5e2
SAS
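/* advertise LPM (Link Power Management) capability */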
2489 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2490 reg |= DWC3_DCFG_LPM_CAP;
2491 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2492
72246da4
FB
2493 /* Enable all but Start and End of Frame IRQs */
2494 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
2495 DWC3_DEVTEN_EVNTOVERFLOWEN |
2496 DWC3_DEVTEN_CMDCMPLTEN |
2497 DWC3_DEVTEN_ERRTICERREN |
2498 DWC3_DEVTEN_WKUPEVTEN |
2499 DWC3_DEVTEN_ULSTCNGEN |
2500 DWC3_DEVTEN_CONNECTDONEEN |
2501 DWC3_DEVTEN_USBRSTEN |
2502 DWC3_DEVTEN_DISCONNEVTEN);
2503 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2504
2b758350 2505 /* automatic phy suspend only on recent versions */
802fde98 2506 if (dwc->revision >= DWC3_REVISION_194A) {
dcae3573
PA
2507 dwc3_gadget_usb2_phy_suspend(dwc, false);
2508 dwc3_gadget_usb3_phy_suspend(dwc, false);
802fde98
PZ
2509 }
2510
72246da4
FB
2511 ret = device_register(&dwc->gadget.dev);
2512 if (ret) {
2513 dev_err(dwc->dev, "failed to register gadget device\n");
2514 put_device(&dwc->gadget.dev);
5812b1c2 2515 goto err6;
72246da4
FB
2516 }
2517
2518 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2519 if (ret) {
2520 dev_err(dwc->dev, "failed to register udc\n");
5812b1c2 2521 goto err7;
72246da4
FB
2522 }
2523
2524 return 0;
2525
5812b1c2 2526err7:
72246da4
FB
2527 device_unregister(&dwc->gadget.dev);
2528
5812b1c2 2529err6:
72246da4
FB
2530 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2531 free_irq(irq, dwc);
2532
5812b1c2 2533err5:
72246da4
FB
2534 dwc3_gadget_free_endpoints(dwc);
2535
5812b1c2 2536err4:
3ef35faf
FB
2537 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2538 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2539
72246da4 2540err3:
0fc9a1be 2541 kfree(dwc->setup_buf);
72246da4
FB
2542
2543err2:
2544 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2545 dwc->ep0_trb, dwc->ep0_trb_addr);
2546
2547err1:
2548 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2549 dwc->ctrl_req, dwc->ctrl_req_addr);
2550
2551err0:
2552 return ret;
2553}
2554
2555void dwc3_gadget_exit(struct dwc3 *dwc)
2556{
2557 int irq;
72246da4
FB
2558
2559 usb_del_gadget_udc(&dwc->gadget);
2560 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2561
2562 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2563 free_irq(irq, dwc);
2564
72246da4
FB
2565 dwc3_gadget_free_endpoints(dwc);
2566
3ef35faf
FB
2567 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2568 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2569
0fc9a1be 2570 kfree(dwc->setup_buf);
72246da4
FB
2571
2572 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2573 dwc->ep0_trb, dwc->ep0_trb_addr);
2574
2575 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2576 dwc->ctrl_req, dwc->ctrl_req_addr);
2577
2578 device_unregister(&dwc->gadget.dev);
2579}