/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if a wrong Test Selector
 * is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

/**
 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 10000;
	u32 reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dev_vdbg(dwc->dev, "link state change request timed out\n");

	return -ETIMEDOUT;
}

/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will do a best effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the amount of endpoints enabled on coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In the ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
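 *
 * As an illustration only (not taken from any particular core
 * configuration): assuming a 64-bit master bus, MDWIDTH-Bytes is 8, so a
 * SuperSpeed bulk or isoc IN endpoint below gets
 * DIV_ROUND_UP(3 * (1024 + 8) + 8, 8) = 388 FIFO entries, while the
 * control endpoint term in the equation is (512 + 2 * 8) / 8 = 66 entries.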
 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int last_fifo_depth = 0;
	int ram1_depth;
	int fifo_size;
	int mdwidth;
	int num;

	if (!dwc->needs_fifo_resize)
		return 0;

	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
		struct dwc3_ep *dep = dwc->eps[num];
		int fifo_number = dep->number >> 1;
		int mult = 1;
		int tmp;

		if (!(dep->number & 1))
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast. So we're giving
		 * those endpoints a fifo_size which is enough for 3 full
		 * packets
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		fifo_size = DIV_ROUND_UP(tmp, mdwidth);

		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
				fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;
	int i;

	if (req->queued) {
		i = 0;
		do {
			dep->busy_slot++;
			/*
			 * Skip LINK TRB. We can't use req->trb and check for
			 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
			 * just completed (not the LINK TRB).
			 */
			if (((dep->busy_slot & DWC3_TRB_MASK) ==
					DWC3_TRB_NUM - 1) &&
					usb_endpoint_xfer_isoc(dep->endpoint.desc))
				dep->busy_slot++;
		} while (++i < req->request.num_mapped_sgs);
		req->queued = false;
	}
	list_del(&req->list);
	req->trb = NULL;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (dwc->ep0_bounced && dep->number == 0)
		dwc->ep0_bounced = false;
	else
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	spin_unlock(&dwc->lock);
	req->request.complete(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
{
	switch (cmd) {
	case DWC3_DEPCMD_DEPSTARTCFG:
		return "Start New Configuration";
	case DWC3_DEPCMD_ENDTRANSFER:
		return "End Transfer";
	case DWC3_DEPCMD_UPDATETRANSFER:
		return "Update Transfer";
	case DWC3_DEPCMD_STARTTRANSFER:
		return "Start Transfer";
	case DWC3_DEPCMD_CLEARSTALL:
		return "Clear Stall";
	case DWC3_DEPCMD_SETSTALL:
		return "Set Stall";
	case DWC3_DEPCMD_GETEPSTATE:
		return "Get Endpoint State";
	case DWC3_DEPCMD_SETTRANSFRESOURCE:
		return "Set Endpoint Transfer Resource";
	case DWC3_DEPCMD_SETEPCONFIG:
		return "Set Endpoint Configuration";
	default:
		return "UNKNOWN command";
	}
}

int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
{
	u32 timeout = 500;
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DGCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it's also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);
}

int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep *dep = dwc->eps[ep];
	u32 timeout = 500;
	u32 reg;

	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
			dep->name,
			dwc3_gadget_ep_cmd_string(cmd), params->param0,
			params->param1, params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32 offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	if (dep->number == 0 || dep->number == 1)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (ignore)
		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
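	 * In other words (illustration of the rule above):
	 * physical = (usb endpoint number << 1) | direction,
	 * so 0x81 (IN endpoint 1) becomes (1 << 1) | 1 = 0x03.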
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;
	int ret = -ENOMEM;

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb *trb_st_hw;
		struct dwc3_trb *trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		memset(&trb_link, 0, sizeof(trb_link));

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];

		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request *req;

	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number);

		/* giveback all requests to gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_remove_requests(dwc, dep);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
	gfp_t gfp_flags)
{
	struct dwc3_request *req;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req) {
		dev_err(dwc->dev, "not enough memory\n");
		return NULL;
	}

	req->epnum = dep->number;
	req->dep = dep;

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);

	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain, unsigned node)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_trb *trb;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	/* Skip the LINK-TRB on ISOC */
	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->endpoint.desc))
		dep->free_slot++;

	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];

	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
	}

	dep->free_slot++;

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node)
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
		else
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;

		if (!req->request.no_interrupt && !chain)
			trb->ctrl |= DWC3_TRB_CTRL_IOC;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		trb->ctrl |= DWC3_TRB_CTRL_CSP;
	} else if (last) {
		trb->ctrl |= DWC3_TRB_CTRL_LST;
	}

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request *req, *n;
	u32 trbs_left;
	u32 max;
	unsigned int last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If busy_slot and free_slot are equal then the ring is either full
	 * or empty. If we are starting to process requests then we are
	 * empty. Otherwise we are full and don't do anything
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
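		 *
		 * As a rough illustration, assuming a pool of 32 TRBs: the
		 * last slot (31) holds the link TRB, ISOC transfers start
		 * filling at slot 1, and with an IOC roughly every TRB_NUM/4
		 * slots the first completion interrupt arrives around the
		 * 8th queued request.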
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned length;
		dma_addr_t dma;
		last_one = false;

		if (req->request.num_mapped_sgs > 0) {
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					if (list_is_last(&req->list,
							&dep->request_list))
						last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain, i);

				if (last_one)
					break;
			}
		} else {
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false, 0);

			if (last_one)
				break;
		}
	}
}

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;
	int ret;
	u32 cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (start_new) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER;
	}

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;

	if (start_new) {
		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
				dep->number);
		WARN_ON_ONCE(!dep->resource_index);
	}

	return 0;
}

static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, u32 cur_uf)
{
	u32 uf;

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
			dep->name);
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return;
	}

	/* 4 micro frames in the future */
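	/*
	 * Illustration only (assumed numbers): for a high-speed isoc
	 * endpoint with bInterval = 1, dep->interval is 1, so the transfer
	 * is scheduled at cur_uf + 4 microframes, i.e. roughly 500 us after
	 * the XferNotReady event.
	 */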
	uf = cur_uf + dep->interval * 4;

	__dwc3_gadget_kick_transfer(dep, uf, 1);
}

static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 cur_uf, mask;

	mask = ~(dep->interval - 1);
	cur_uf = event->parameters & mask;

	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
}

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;
	int ret;

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->direction = dep->direction;
	req->epnum = dep->number;

	/*
	 * We only add to our list of requests now and
	 * start consuming the list once we get XferNotReady
	 * IRQ.
	 *
	 * That way, we avoid doing anything that we don't need
	 * to do now and defer it until the point we receive a
	 * particular token from the Host side.
	 *
	 * This will also avoid Host cancelling URBs due to too
	 * many NAKs.
	 */
	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
			dep->direction);
	if (ret)
		return ret;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * There are a few special cases:
	 *
	 * 1. XferNotReady with empty list of requests. We need to kick the
	 *    transfer here in that situation, otherwise we will be NAKing
	 *    forever. If we get XferNotReady before gadget driver has a
	 *    chance to queue a request, we will ACK the IRQ but won't be
	 *    able to receive the data until the next request is queued.
	 *    The following code is handling exactly that.
	 *
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		/*
		 * If xfernotready is already elapsed and it is a case
		 * of isoc transfer, then issue END TRANSFER, so that
		 * you can receive xfernotready again and can have
		 * notion of current microframe.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			if (list_empty(&dep->req_queued)) {
				dwc3_stop_active_transfer(dwc, dep->number);
				dep->flags = DWC3_EP_ENABLED;
			}
			return 0;
		}

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	/*
	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
	 *    kick the transfer here after queuing a request, otherwise the
	 *    core may not see the modified TRB(s).
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			(dep->flags & DWC3_EP_BUSY) &&
			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
		WARN_ON_ONCE(!dep->resource_index);
		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
				false);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	return 0;
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
	gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	if (!dep->endpoint.desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, ep->name);
		return -ESHUTDOWN;
	}

	dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
			request, ep->name, request->length);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	int ret;

	memset(&params, 0x00, sizeof(params));

	if (value) {
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_CLEARSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
	}

	return ret;
}

static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	spin_lock_irqsave(&dwc->lock, flags);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		ret = -EINVAL;
		goto out;
	}

	ret = __dwc3_gadget_ep_set_halt(dep, value);
out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dep->flags |= DWC3_EP_WEDGE;
	spin_unlock_irqrestore(&dwc->lock, flags);

	if (dep->number == 0 || dep->number == 1)
		return dwc3_gadget_ep0_set_halt(ep, 1);
	else
		return dwc3_gadget_ep_set_halt(ep, 1);
}

/* -------------------------------------------------------------------------- */

static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	unsigned long timeout;
	unsigned long flags;

	u32 reg;

	int ret = 0;

	u8 link_state;
	u8 speed;

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * According to the Databook Remote wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if (speed == DWC3_DSTS_SUPERSPEED) {
		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
		ret = -EINVAL;
		goto out;
	}

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
				link_state);
		ret = -EINVAL;
		goto out;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		goto out;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	timeout = jiffies + msecs_to_jiffies(100);

	while (!time_after(jiffies, timeout)) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
	u32 reg;
	u32 timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;
		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;
		dwc->pullups_connected = false;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ? "connect" : "disconnect");

	return 0;
}

static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int ret;

	is_on = !!is_on;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_run_stop(dwc, is_on);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32 reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_ULSTCNGEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}

static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);

static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	struct dwc3_ep *dep;
	unsigned long flags;
	int ret = 0;
	int irq;
	u32 reg;

	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
			IRQF_SHARED, "dwc3", dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
				irq, ret);
		goto err0;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err1;
	}

	dwc->gadget_driver = driver;

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);

	/**
	 * WORKAROUND: DWC3 revision < 2.20a have an issue
	 * which would cause metastability state on Run/Stop
	 * bit if we try to force the IP to USB2-only mode.
	 *
	 * Because of that, we cannot configure the IP to any
	 * speed other than the SuperSpeed
	 *
	 * Refers to:
	 *
	 * STAR#9000525659: Clock Domain Crossing on DCTL in
	 * USB 2.0 Mode
	 */
	if (dwc->revision < DWC3_REVISION_220A) {
		reg |= DWC3_DCFG_SUPERSPEED;
	} else {
		switch (dwc->maximum_speed) {
		case USB_SPEED_LOW:
			reg |= DWC3_DSTS_LOWSPEED;
			break;
		case USB_SPEED_FULL:
			reg |= DWC3_DSTS_FULLSPEED1;
			break;
		case USB_SPEED_HIGH:
			reg |= DWC3_DSTS_HIGHSPEED;
			break;
		case USB_SPEED_SUPER:	/* FALLTHROUGH */
		case USB_SPEED_UNKNOWN:	/* FALLTHROUGH */
		default:
			reg |= DWC3_DSTS_SUPERSPEED;
		}
	}
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	dwc->start_config_issued = false;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err2;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err3;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	dwc3_gadget_enable_irq(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err3:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err2:
	dwc->gadget_driver = NULL;

err1:
	spin_unlock_irqrestore(&dwc->lock, flags);

	free_irq(irq, dwc);

err0:
	return ret;
}

static int dwc3_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int irq;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc3_gadget_disable_irq(dwc);
	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->gadget_driver = NULL;

	spin_unlock_irqrestore(&dwc->lock, flags);

	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
	free_irq(irq, dwc);

	return 0;
}

static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
		u8 num, u32 direction)
{
	struct dwc3_ep *dep;
	u8 i;

	for (i = 0; i < num; i++) {
		u8 epnum = (i << 1) | (!!direction);

		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
		if (!dep) {
			dev_err(dwc->dev, "can't allocate endpoint %d\n",
					epnum);
			return -ENOMEM;
		}

		dep->dwc = dwc;
		dep->number = epnum;
		dep->direction = !!direction;
		dwc->eps[epnum] = dep;

		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
				(epnum & 1) ? "in" : "out");

		dep->endpoint.name = dep->name;

		dev_vdbg(dwc->dev, "initializing %s\n", dep->name);

		if (epnum == 0 || epnum == 1) {
			usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
			dep->endpoint.maxburst = 1;
			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
			if (!epnum)
				dwc->gadget.ep0 = &dep->endpoint;
		} else {
			int ret;

			usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
			dep->endpoint.max_streams = 15;
			dep->endpoint.ops = &dwc3_gadget_ep_ops;
			list_add_tail(&dep->endpoint.ep_list,
					&dwc->gadget.ep_list);

			ret = dwc3_alloc_trb_pool(dep);
			if (ret)
				return ret;
		}

		INIT_LIST_HEAD(&dep->request_list);
		INIT_LIST_HEAD(&dep->req_queued);
	}

	return 0;
}

static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
{
	int ret;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
	if (ret < 0) {
		dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
		return ret;
	}

	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
	if (ret < 0) {
		dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
		return ret;
	}

	return 0;
}

static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		if (!dep)
			continue;
		/*
		 * Physical endpoints 0 and 1 are special; they form the
		 * bi-directional USB endpoint 0.
		 *
		 * For those two physical endpoints, we don't allocate a TRB
		 * pool nor do we add them to the endpoints list. Due to that,
		 * we shouldn't do these two operations otherwise we would end
		 * up with all sorts of bugs when removing dwc3.ko.
		 */
		if (epnum != 0 && epnum != 1) {
			dwc3_free_trb_pool(dep);
			list_del(&dep->endpoint.ep_list);
		}

		kfree(dep);
	}
}

/* -------------------------------------------------------------------------- */

static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status)
{
	unsigned int count;
	unsigned int s_pkt = 0;
	unsigned int trb_status;

	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		/*
		 * We continue despite the error. There is not much we
		 * can do. If we don't clean it up we loop forever. If
		 * we skip the TRB then it gets overwritten after a
		 * while since we use them in a ring buffer. A BUG()
		 * would help. Let's hope that if this occurs, someone
		 * fixes the root cause instead of looking away :)
		 */
		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
				dep->name, trb);
	count = trb->size & DWC3_TRB_SIZE_MASK;

	if (dep->direction) {
		if (count) {
			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
				dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				/*
				 * If missed isoc occurred and there is
				 * no request queued then issue END
				 * TRANSFER, so that core generates
				 * next xfernotready and we will issue
				 * a fresh START TRANSFER.
				 * If there are still queued requests
				 * then wait, do not issue either END
				 * or UPDATE TRANSFER, just attach next
				 * request in request_list during
				 * giveback. If any future queued request
				 * is successfully transferred then we
				 * will issue UPDATE TRANSFER for all
				 * requests in the request_list.
				 */
				dep->flags |= DWC3_EP_MISSED_ISOC;
			} else {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			dep->flags &= ~DWC3_EP_MISSED_ISOC;
		}
	} else {
		if (count && (event->status & DEPEVT_STATUS_SHORT))
			s_pkt = 1;
	}

	/*
	 * We assume here we will always receive the entire data block
	 * which we should receive. Meaning, if we program RX to
	 * receive 4K but we receive only 2K, we assume that's all we
	 * should receive and we simply bounce the request back to the
	 * gadget driver for further processing.
	 */
	req->request.actual += req->request.length - count;
	if (s_pkt)
		return 1;
	if ((event->status & DEPEVT_STATUS_LST) &&
			(trb->ctrl & (DWC3_TRB_CTRL_LST |
				DWC3_TRB_CTRL_HWO)))
		return 1;
	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 1;
	return 0;
}

static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request *req;
	struct dwc3_trb *trb;
	unsigned int slot;
	unsigned int i;
	int ret;

	do {
		req = next_request(&dep->req_queued);
		if (!req) {
			WARN_ON_ONCE(1);
			return 1;
		}
		i = 0;
		do {
			slot = req->start_slot + i;
			if ((slot == DWC3_TRB_NUM - 1) &&
					usb_endpoint_xfer_isoc(dep->endpoint.desc))
				slot++;
			slot %= DWC3_TRB_NUM;
			trb = &dep->trb_pool[slot];

			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
					event, status);
			if (ret)
				break;
		} while (++i < req->request.num_mapped_sgs);

		dwc3_gadget_giveback(dep, req, status);

		if (ret)
			break;
	} while (1);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			list_empty(&dep->req_queued)) {
		if (list_empty(&dep->request_list)) {
			/*
			 * If there is no entry in request list then do
			 * not issue END TRANSFER now. Just set PENDING
			 * flag, so that END TRANSFER is issued when an
			 * entry is added into request list.
			 */
			dep->flags = DWC3_EP_PENDING_REQUEST;
		} else {
			dwc3_stop_active_transfer(dwc, dep->number);
			dep->flags = DWC3_EP_ENABLED;
		}
		return 1;
	}

	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 0;
	return 1;
}

static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
		int start_new)
{
	unsigned status = 0;
	int clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy)
		dep->flags &= ~DWC3_EP_BUSY;

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32 reg;
		int i;

		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->req_queued))
				return;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}

static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	u8 epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	if (!(dep->flags & DWC3_EP_ENABLED))
		return;

	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
			dwc3_ep_event_string(event->endpoint_event));

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dep->resource_index = 0;

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status &
					DEPEVT_STATUS_TRANSFER_ACTIVE
					? "Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dev_vdbg(dwc->dev, "Stream %d found and started\n",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
		break;
	}
}

static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
{
	struct dwc3_ep *dep;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	dep = dwc->eps[epnum];

	if (!dep->resource_index)
		return;

	/*
	 * NOTICE: We are violating what the Databook says about the
	 * EndTransfer command. Ideally we would _always_ wait for the
2017 * EndTransfer Command Completion IRQ, but that's causing too
 2018 * much trouble synchronizing between us and the gadget driver.
2019 *
2020 * We have discussed this with the IP Provider and it was
2021 * suggested to giveback all requests here, but give HW some
2022 * extra time to synchronize with the interconnect. We're using
 2023 * an arbitrary 100us delay for that.
2024 *
2025 * Note also that a similar handling was tested by Synopsys
2026 * (thanks a lot Paul) and nothing bad has come out of it.
2027 * In short, what we're doing is:
2028 *
2029 * - Issue EndTransfer WITH CMDIOC bit set
2030 * - Wait 100us
2031 */
2032
3daf74d7
PA
2033 cmd = DWC3_DEPCMD_ENDTRANSFER;
2034 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
b4996a86 2035 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
3daf74d7
PA
2036 memset(&params, 0, sizeof(params));
2037 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2038 WARN_ON_ONCE(ret);
b4996a86 2039 dep->resource_index = 0;
041d81f4 2040 dep->flags &= ~DWC3_EP_BUSY;
57911504 2041 udelay(100);
72246da4
FB
2042}
2043
2044static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2045{
2046 u32 epnum;
2047
2048 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2049 struct dwc3_ep *dep;
2050
2051 dep = dwc->eps[epnum];
6a1e3ef4
FB
2052 if (!dep)
2053 continue;
2054
72246da4
FB
2055 if (!(dep->flags & DWC3_EP_ENABLED))
2056 continue;
2057
624407f9 2058 dwc3_remove_requests(dwc, dep);
72246da4
FB
2059 }
2060}
2061
2062static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2063{
2064 u32 epnum;
2065
2066 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2067 struct dwc3_ep *dep;
2068 struct dwc3_gadget_ep_cmd_params params;
2069 int ret;
2070
2071 dep = dwc->eps[epnum];
6a1e3ef4
FB
2072 if (!dep)
2073 continue;
72246da4
FB
2074
2075 if (!(dep->flags & DWC3_EP_STALL))
2076 continue;
2077
2078 dep->flags &= ~DWC3_EP_STALL;
2079
2080 memset(&params, 0, sizeof(params));
2081 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2082 DWC3_DEPCMD_CLEARSTALL, &params);
2083 WARN_ON_ONCE(ret);
2084 }
2085}
2086
2087static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2088{
c4430a26
FB
 2089 u32 reg;
2090
72246da4 2091 dev_vdbg(dwc->dev, "%s\n", __func__);
72246da4
FB
2092
2093 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2094 reg &= ~DWC3_DCTL_INITU1ENA;
2095 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2096
2097 reg &= ~DWC3_DCTL_INITU2ENA;
2098 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
72246da4 2099
72246da4 2100 dwc3_disconnect_gadget(dwc);
b23c8439 2101 dwc->start_config_issued = false;
72246da4
FB
2102
2103 dwc->gadget.speed = USB_SPEED_UNKNOWN;
df62df56 2104 dwc->setup_packet_pending = false;
72246da4
FB
2105}
2106
72246da4
FB
2107static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2108{
2109 u32 reg;
2110
2111 dev_vdbg(dwc->dev, "%s\n", __func__);
2112
df62df56
FB
2113 /*
2114 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2115 * would cause a missing Disconnect Event if there's a
2116 * pending Setup Packet in the FIFO.
2117 *
2118 * There's no suggested workaround on the official Bug
2119 * report, which states that "unless the driver/application
2120 * is doing any special handling of a disconnect event,
2121 * there is no functional issue".
2122 *
2123 * Unfortunately, it turns out that we _do_ some special
2124 * handling of a disconnect event, namely complete all
2125 * pending transfers, notify gadget driver of the
2126 * disconnection, and so on.
2127 *
2128 * Our suggested workaround is to follow the Disconnect
2129 * Event steps here, instead, based on a setup_packet_pending
 2130 * flag. That flag gets set whenever we have an XferNotReady
2131 * event on EP0 and gets cleared on XferComplete for the
2132 * same endpoint.
2133 *
2134 * Refers to:
2135 *
2136 * STAR#9000466709: RTL: Device : Disconnect event not
2137 * generated if setup packet pending in FIFO
2138 */
2139 if (dwc->revision < DWC3_REVISION_188A) {
2140 if (dwc->setup_packet_pending)
2141 dwc3_gadget_disconnect_interrupt(dwc);
2142 }
2143
961906ed 2144 /* after reset -> Default State */
14cd592f 2145 usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
961906ed 2146
72246da4
FB
2147 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
2148 dwc3_disconnect_gadget(dwc);
2149
2150 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2151 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2152 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3b637367 2153 dwc->test_mode = false;
72246da4
FB
2154
2155 dwc3_stop_active_transfers(dwc);
2156 dwc3_clear_stall_all_ep(dwc);
b23c8439 2157 dwc->start_config_issued = false;
72246da4
FB
2158
2159 /* Reset device address to zero */
2160 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2161 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2162 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
72246da4
FB
2163}
2164
2165static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2166{
2167 u32 reg;
2168 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2169
2170 /*
 2171 * We change the clock only at SuperSpeed, though it is not yet clear
 2172 * why. It may become part of the power saving plan.
2173 */
2174
2175 if (speed != DWC3_DSTS_SUPERSPEED)
2176 return;
2177
2178 /*
2179 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2180 * each time on Connect Done.
2181 */
2182 if (!usb30_clock)
2183 return;
2184
2185 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2186 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2187 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2188}
2189
72246da4
FB
2190static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2191{
72246da4
FB
2192 struct dwc3_ep *dep;
2193 int ret;
2194 u32 reg;
2195 u8 speed;
2196
2197 dev_vdbg(dwc->dev, "%s\n", __func__);
2198
72246da4
FB
2199 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2200 speed = reg & DWC3_DSTS_CONNECTSPD;
2201 dwc->speed = speed;
2202
2203 dwc3_update_ram_clk_sel(dwc, speed);
2204
2205 switch (speed) {
2206 case DWC3_DCFG_SUPERSPEED:
05870c5b
FB
2207 /*
2208 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2209 * would cause a missing USB3 Reset event.
2210 *
2211 * In such situations, we should force a USB3 Reset
2212 * event by calling our dwc3_gadget_reset_interrupt()
2213 * routine.
2214 *
2215 * Refers to:
2216 *
2217 * STAR#9000483510: RTL: SS : USB3 reset event may
2218 * not be generated always when the link enters poll
2219 */
2220 if (dwc->revision < DWC3_REVISION_190A)
2221 dwc3_gadget_reset_interrupt(dwc);
2222
72246da4
FB
2223 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2224 dwc->gadget.ep0->maxpacket = 512;
2225 dwc->gadget.speed = USB_SPEED_SUPER;
2226 break;
2227 case DWC3_DCFG_HIGHSPEED:
2228 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2229 dwc->gadget.ep0->maxpacket = 64;
2230 dwc->gadget.speed = USB_SPEED_HIGH;
2231 break;
2232 case DWC3_DCFG_FULLSPEED2:
2233 case DWC3_DCFG_FULLSPEED1:
2234 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2235 dwc->gadget.ep0->maxpacket = 64;
2236 dwc->gadget.speed = USB_SPEED_FULL;
2237 break;
2238 case DWC3_DCFG_LOWSPEED:
2239 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2240 dwc->gadget.ep0->maxpacket = 8;
2241 dwc->gadget.speed = USB_SPEED_LOW;
2242 break;
2243 }
2244
2b758350
PA
2245 /* Enable USB2 LPM Capability */
2246
2247 if ((dwc->revision > DWC3_REVISION_194A)
2248 && (speed != DWC3_DCFG_SUPERSPEED)) {
2249 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2250 reg |= DWC3_DCFG_LPM_CAP;
2251 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2252
2253 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2254 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2255
1a947746
FB
2256 /*
2257 * TODO: This should be configurable. For now using
2258 * maximum allowed HIRD threshold value of 0b1100
2259 */
2260 reg |= DWC3_DCTL_HIRD_THRES(12);
2b758350
PA
2261
2262 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2263 }
2264
72246da4 2265 dep = dwc->eps[0];
4b345c9a 2266 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
72246da4
FB
2267 if (ret) {
2268 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2269 return;
2270 }
2271
2272 dep = dwc->eps[1];
4b345c9a 2273 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
72246da4
FB
2274 if (ret) {
2275 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2276 return;
2277 }
2278
2279 /*
2280 * Configure PHY via GUSB3PIPECTLn if required.
2281 *
2282 * Update GTXFIFOSIZn
2283 *
2284 * In both cases reset values should be sufficient.
2285 */
2286}
2287
2288static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2289{
2290 dev_vdbg(dwc->dev, "%s\n", __func__);
2291
2292 /*
2293 * TODO take core out of low power mode when that's
2294 * implemented.
2295 */
2296
2297 dwc->gadget_driver->resume(&dwc->gadget);
2298}
2299
2300static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2301 unsigned int evtinfo)
2302{
fae2b904 2303 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
0b0cc1cd
FB
2304 unsigned int pwropt;
2305
2306 /*
2307 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2308 * Hibernation mode enabled which would show up when device detects
2309 * host-initiated U3 exit.
2310 *
2311 * In that case, device will generate a Link State Change Interrupt
2312 * from U3 to RESUME which is only necessary if Hibernation is
2313 * configured in.
2314 *
2315 * There are no functional changes due to such spurious event and we
2316 * just need to ignore it.
2317 *
2318 * Refers to:
2319 *
2320 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2321 * operational mode
2322 */
2323 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2324 if ((dwc->revision < DWC3_REVISION_250A) &&
2325 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2326 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2327 (next == DWC3_LINK_STATE_RESUME)) {
2328 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2329 return;
2330 }
2331 }
fae2b904
FB
2332
2333 /*
 2334 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
 2335 * on the link partner, the USB session might do multiple entries into
 2336 * and exits from low power states before a transfer takes place.
2337 *
2338 * Due to this problem, we might experience lower throughput. The
2339 * suggested workaround is to disable DCTL[12:9] bits if we're
2340 * transitioning from U1/U2 to U0 and enable those bits again
2341 * after a transfer completes and there are no pending transfers
2342 * on any of the enabled endpoints.
2343 *
2344 * This is the first half of that workaround.
2345 *
2346 * Refers to:
2347 *
2348 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2349 * core send LGO_Ux entering U0
2350 */
2351 if (dwc->revision < DWC3_REVISION_183A) {
2352 if (next == DWC3_LINK_STATE_U0) {
2353 u32 u1u2;
2354 u32 reg;
2355
2356 switch (dwc->link_state) {
2357 case DWC3_LINK_STATE_U1:
2358 case DWC3_LINK_STATE_U2:
2359 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2360 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2361 | DWC3_DCTL_ACCEPTU2ENA
2362 | DWC3_DCTL_INITU1ENA
2363 | DWC3_DCTL_ACCEPTU1ENA);
2364
2365 if (!dwc->u1u2)
2366 dwc->u1u2 = reg & u1u2;
2367
2368 reg &= ~u1u2;
2369
2370 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2371 break;
2372 default:
2373 /* do nothing */
2374 break;
2375 }
2376 }
2377 }
2378
2379 dwc->link_state = next;
019ac832
FB
2380
2381 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
72246da4
FB
2382}
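
/*
 * Editor's note: condensed sketch of the two-part U1/U2 -> U0 workaround
 * for revisions < 1.83a described above. The real code is split between
 * dwc3_gadget_linksts_change_interrupt() (save and clear, first half) and
 * dwc3_endpoint_transfer_complete() (restore, second half); the helper
 * names and the DWC3_SKETCH_U1U2_BITS macro below are introduced here for
 * illustration only.
 */
#define DWC3_SKETCH_U1U2_BITS	(DWC3_DCTL_INITU2ENA | DWC3_DCTL_ACCEPTU2ENA | \
				 DWC3_DCTL_INITU1ENA | DWC3_DCTL_ACCEPTU1ENA)

/* first half: on a U1/U2 -> U0 transition, remember and clear the bits */
static void dwc3_sketch_u1u2_save_and_clear(struct dwc3 *dwc)
{
	u32 reg = dwc3_readl(dwc->regs, DWC3_DCTL);

	if (!dwc->u1u2)
		dwc->u1u2 = reg & DWC3_SKETCH_U1U2_BITS;

	dwc3_writel(dwc->regs, DWC3_DCTL, reg & ~DWC3_SKETCH_U1U2_BITS);
}

/* second half: once no enabled endpoint has queued requests, restore them */
static void dwc3_sketch_u1u2_restore(struct dwc3 *dwc)
{
	u32 reg = dwc3_readl(dwc->regs, DWC3_DCTL);

	dwc3_writel(dwc->regs, DWC3_DCTL, reg | dwc->u1u2);
	dwc->u1u2 = 0;
}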
2383
2384static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2385 const struct dwc3_event_devt *event)
2386{
2387 switch (event->type) {
2388 case DWC3_DEVICE_EVENT_DISCONNECT:
2389 dwc3_gadget_disconnect_interrupt(dwc);
2390 break;
2391 case DWC3_DEVICE_EVENT_RESET:
2392 dwc3_gadget_reset_interrupt(dwc);
2393 break;
2394 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2395 dwc3_gadget_conndone_interrupt(dwc);
2396 break;
2397 case DWC3_DEVICE_EVENT_WAKEUP:
2398 dwc3_gadget_wakeup_interrupt(dwc);
2399 break;
2400 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2401 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2402 break;
2403 case DWC3_DEVICE_EVENT_EOPF:
2404 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2405 break;
2406 case DWC3_DEVICE_EVENT_SOF:
2407 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2408 break;
2409 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2410 dev_vdbg(dwc->dev, "Erratic Error\n");
2411 break;
2412 case DWC3_DEVICE_EVENT_CMD_CMPL:
2413 dev_vdbg(dwc->dev, "Command Complete\n");
2414 break;
2415 case DWC3_DEVICE_EVENT_OVERFLOW:
2416 dev_vdbg(dwc->dev, "Overflow\n");
2417 break;
2418 default:
2419 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2420 }
2421}
2422
2423static void dwc3_process_event_entry(struct dwc3 *dwc,
2424 const union dwc3_event *event)
2425{
2426 /* Endpoint IRQ, handle it and return early */
2427 if (event->type.is_devspec == 0) {
2428 /* depevt */
2429 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2430 }
2431
2432 switch (event->type.type) {
2433 case DWC3_EVENT_TYPE_DEV:
2434 dwc3_gadget_interrupt(dwc, &event->devt);
2435 break;
2436 /* REVISIT what to do with Carkit and I2C events ? */
2437 default:
2438 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2439 }
2440}
2441
f42f2447 2442static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
b15a762f 2443{
f42f2447 2444 struct dwc3_event_buffer *evt;
b15a762f 2445 irqreturn_t ret = IRQ_NONE;
f42f2447 2446 int left;
e8adfc30 2447 u32 reg;
b15a762f 2448
f42f2447
FB
2449 evt = dwc->ev_buffs[buf];
2450 left = evt->count;
b15a762f 2451
f42f2447
FB
2452 if (!(evt->flags & DWC3_EVENT_PENDING))
2453 return IRQ_NONE;
b15a762f 2454
f42f2447
FB
2455 while (left > 0) {
2456 union dwc3_event event;
b15a762f 2457
f42f2447 2458 event.raw = *(u32 *) (evt->buf + evt->lpos);
b15a762f 2459
f42f2447 2460 dwc3_process_event_entry(dwc, &event);
b15a762f 2461
f42f2447
FB
2462 /*
 2463 * FIXME we wrap around correctly to the next entry as
 2464 * almost all entries are 4 bytes in size. There is one
 2465 * entry type which is 12 bytes: a regular entry followed
 2466 * by 8 bytes of data. It is not yet clear how such an
 2467 * entry is laid out when it crosses the buffer boundary,
 2468 * so that case will be dealt with once we try to handle
 2469 * it.
2470 */
2471 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2472 left -= 4;
b15a762f 2473
f42f2447
FB
2474 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2475 }
b15a762f 2476
f42f2447
FB
2477 evt->count = 0;
2478 evt->flags &= ~DWC3_EVENT_PENDING;
2479 ret = IRQ_HANDLED;
b15a762f 2480
f42f2447
FB
2481 /* Unmask interrupt */
2482 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2483 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2484 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
b15a762f 2485
f42f2447
FB
2486 return ret;
2487}
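
/*
 * Editor's note: illustrative extraction of the per-entry consumption
 * pattern used in dwc3_process_event_buf() above. Every event entry is
 * 4 bytes; after handling one we advance the local read pointer with
 * wrap-around and write the number of consumed bytes back to GEVNTCOUNT
 * so the controller can reuse that space. This helper is a sketch, not
 * driver code.
 */
static void dwc3_sketch_consume_one_event(struct dwc3 *dwc,
		struct dwc3_event_buffer *evt, u32 buf)
{
	union dwc3_event event;

	event.raw = *(u32 *) (evt->buf + evt->lpos);
	dwc3_process_event_entry(dwc, &event);

	/* advance the software read pointer within the ring */
	evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;

	/* acknowledge the 4 consumed bytes to the controller */
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
}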
e8adfc30 2488
f42f2447
FB
2489static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2490{
2491 struct dwc3 *dwc = _dwc;
2492 unsigned long flags;
2493 irqreturn_t ret = IRQ_NONE;
2494 int i;
2495
2496 spin_lock_irqsave(&dwc->lock, flags);
2497
2498 for (i = 0; i < dwc->num_event_buffers; i++)
2499 ret |= dwc3_process_event_buf(dwc, i);
b15a762f
FB
2500
2501 spin_unlock_irqrestore(&dwc->lock, flags);
2502
2503 return ret;
2504}
2505
7f97aa98 2506static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
72246da4
FB
2507{
2508 struct dwc3_event_buffer *evt;
72246da4 2509 u32 count;
e8adfc30 2510 u32 reg;
72246da4 2511
b15a762f
FB
2512 evt = dwc->ev_buffs[buf];
2513
72246da4
FB
2514 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2515 count &= DWC3_GEVNTCOUNT_MASK;
2516 if (!count)
2517 return IRQ_NONE;
2518
b15a762f
FB
2519 evt->count = count;
2520 evt->flags |= DWC3_EVENT_PENDING;
72246da4 2521
e8adfc30
FB
2522 /* Mask interrupt */
2523 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2524 reg |= DWC3_GEVNTSIZ_INTMASK;
2525 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2526
b15a762f 2527 return IRQ_WAKE_THREAD;
72246da4
FB
2528}
2529
2530static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2531{
2532 struct dwc3 *dwc = _dwc;
2533 int i;
2534 irqreturn_t ret = IRQ_NONE;
2535
2536 spin_lock(&dwc->lock);
2537
9f622b2a 2538 for (i = 0; i < dwc->num_event_buffers; i++) {
72246da4
FB
2539 irqreturn_t status;
2540
7f97aa98 2541 status = dwc3_check_event_buf(dwc, i);
b15a762f 2542 if (status == IRQ_WAKE_THREAD)
72246da4
FB
2543 ret = status;
2544 }
2545
2546 spin_unlock(&dwc->lock);
2547
2548 return ret;
2549}
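
/*
 * Editor's note: dwc3_interrupt() acts as the hard-IRQ top half (it only
 * latches the pending event count and masks the buffer's interrupt),
 * while dwc3_thread_interrupt() drains the events with the lock held.
 * The pair would typically be wired up with request_threaded_irq(),
 * roughly as sketched below; the IRQF_SHARED flag and the "dwc3" name
 * are assumptions for illustration, not a quote of the actual
 * registration code elsewhere in this driver.
 */
static int dwc3_sketch_request_irq(struct dwc3 *dwc, int irq)
{
	return request_threaded_irq(irq, dwc3_interrupt,
			dwc3_thread_interrupt, IRQF_SHARED,
			"dwc3", dwc);
}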
2550
2551/**
2552 * dwc3_gadget_init - Initializes gadget related registers
1d046793 2553 * @dwc: pointer to our controller context structure
72246da4
FB
2554 *
2555 * Returns 0 on success otherwise negative errno.
2556 */
41ac7b3a 2557int dwc3_gadget_init(struct dwc3 *dwc)
72246da4 2558{
72246da4 2559 int ret;
72246da4
FB
2560
2561 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2562 &dwc->ctrl_req_addr, GFP_KERNEL);
2563 if (!dwc->ctrl_req) {
2564 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2565 ret = -ENOMEM;
2566 goto err0;
2567 }
2568
2569 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2570 &dwc->ep0_trb_addr, GFP_KERNEL);
2571 if (!dwc->ep0_trb) {
2572 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2573 ret = -ENOMEM;
2574 goto err1;
2575 }
2576
3ef35faf 2577 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
72246da4
FB
2578 if (!dwc->setup_buf) {
2579 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2580 ret = -ENOMEM;
2581 goto err2;
2582 }
2583
5812b1c2 2584 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
3ef35faf
FB
2585 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2586 GFP_KERNEL);
5812b1c2
FB
2587 if (!dwc->ep0_bounce) {
2588 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2589 ret = -ENOMEM;
2590 goto err3;
2591 }
2592
72246da4 2593 dwc->gadget.ops = &dwc3_gadget_ops;
d327ab5b 2594 dwc->gadget.max_speed = USB_SPEED_SUPER;
72246da4 2595 dwc->gadget.speed = USB_SPEED_UNKNOWN;
eeb720fb 2596 dwc->gadget.sg_supported = true;
72246da4
FB
2597 dwc->gadget.name = "dwc3-gadget";
2598
a4b9d94b
DC
2599 /*
2600 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2601 * on ep out.
2602 */
2603 dwc->gadget.quirk_ep_out_aligned_size = true;
2604
72246da4
FB
2605 /*
2606 * REVISIT: Here we should clear all pending IRQs to be
2607 * sure we're starting from a well known location.
2608 */
2609
2610 ret = dwc3_gadget_init_endpoints(dwc);
2611 if (ret)
5812b1c2 2612 goto err4;
72246da4 2613
72246da4
FB
2614 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2615 if (ret) {
2616 dev_err(dwc->dev, "failed to register udc\n");
e1f80467 2617 goto err4;
72246da4
FB
2618 }
2619
2620 return 0;
2621
5812b1c2 2622err4:
e1f80467 2623 dwc3_gadget_free_endpoints(dwc);
3ef35faf
FB
2624 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2625 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2626
72246da4 2627err3:
0fc9a1be 2628 kfree(dwc->setup_buf);
72246da4
FB
2629
2630err2:
2631 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2632 dwc->ep0_trb, dwc->ep0_trb_addr);
2633
2634err1:
2635 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2636 dwc->ctrl_req, dwc->ctrl_req_addr);
2637
2638err0:
2639 return ret;
2640}
2641
7415f17c
FB
2642/* -------------------------------------------------------------------------- */
2643
72246da4
FB
2644void dwc3_gadget_exit(struct dwc3 *dwc)
2645{
72246da4 2646 usb_del_gadget_udc(&dwc->gadget);
72246da4 2647
72246da4
FB
2648 dwc3_gadget_free_endpoints(dwc);
2649
3ef35faf
FB
2650 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2651 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2652
0fc9a1be 2653 kfree(dwc->setup_buf);
72246da4
FB
2654
2655 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2656 dwc->ep0_trb, dwc->ep0_trb_addr);
2657
2658 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2659 dwc->ctrl_req, dwc->ctrl_req_addr);
72246da4 2660}
7415f17c
FB
2661
2662int dwc3_gadget_prepare(struct dwc3 *dwc)
2663{
2664 if (dwc->pullups_connected)
2665 dwc3_gadget_disable_irq(dwc);
2666
2667 return 0;
2668}
2669
2670void dwc3_gadget_complete(struct dwc3 *dwc)
2671{
2672 if (dwc->pullups_connected) {
2673 dwc3_gadget_enable_irq(dwc);
2674 dwc3_gadget_run_stop(dwc, true);
2675 }
2676}
2677
2678int dwc3_gadget_suspend(struct dwc3 *dwc)
2679{
2680 __dwc3_gadget_ep_disable(dwc->eps[0]);
2681 __dwc3_gadget_ep_disable(dwc->eps[1]);
2682
2683 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2684
2685 return 0;
2686}
2687
2688int dwc3_gadget_resume(struct dwc3 *dwc)
2689{
2690 struct dwc3_ep *dep;
2691 int ret;
2692
2693 /* Start with SuperSpeed Default */
2694 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2695
2696 dep = dwc->eps[0];
2697 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2698 if (ret)
2699 goto err0;
2700
2701 dep = dwc->eps[1];
2702 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2703 if (ret)
2704 goto err1;
2705
2706 /* begin to receive SETUP packets */
2707 dwc->ep0state = EP0_SETUP_PHASE;
2708 dwc3_ep0_out_start(dwc);
2709
2710 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2711
2712 return 0;
2713
2714err1:
2715 __dwc3_gadget_ep_disable(dwc->eps[0]);
2716
2717err0:
2718 return ret;
2719}