/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if a wrong Test Selector
 * is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32		reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
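	/*
	 * The mask above clears the TstCtl bit-field of DCTL; the
	 * "mode << 1" below places the chosen test selector into that
	 * same field.
	 */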

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

/**
 * dwc3_gadget_get_link_state - Gets current state of USB Link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. This function will
 * return the link state on success (>= 0) or -ETIMEDOUT.
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
	u32		reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	return DWC3_DSTS_USBLNKST(reg);
}

/**
 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int		retries = 10000;
	u32		reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions.
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dev_vdbg(dwc->dev, "link state change request timed out\n");

	return -ETIMEDOUT;
}

/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will do a best-effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the number of endpoints enabled in the coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In the ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int		last_fifo_depth = 0;
	int		ram1_depth;
	int		fifo_size;
	int		mdwidth;
	int		num;

	if (!dwc->needs_fifo_resize)
		return 0;

	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space.
	 */
	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
		struct dwc3_ep	*dep = dwc->eps[num];
		int		fifo_number = dep->number >> 1;
		int		mult = 1;
		int		tmp;

		if (!(dep->number & 1))
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast. So we're giving
		 * those endpoints a fifo_size which is enough for 3 full
		 * packets.
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		fifo_size = DIV_ROUND_UP(tmp, mdwidth);
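		/*
		 * Example: with an 8-byte master bus (mdwidth = 8) and a
		 * SuperSpeed bulk endpoint (maxpacket = 1024, mult = 3),
		 * tmp = 3 * (1024 + 8) + 8 = 3104 bytes, so fifo_size is
		 * DIV_ROUND_UP(3104, 8) = 388 mdwidth-wide FIFO locations.
		 */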

		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
				fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3			*dwc = dep->dwc;
	int				i;

	if (req->queued) {
		i = 0;
		do {
			dep->busy_slot++;
			/*
			 * Skip LINK TRB. We can't use req->trb and check for
			 * DWC3_TRBCTL_LINK_TRB because it points to the TRB
			 * we just completed (not the LINK TRB).
			 */
			if (((dep->busy_slot & DWC3_TRB_MASK) ==
					DWC3_TRB_NUM - 1) &&
					usb_endpoint_xfer_isoc(dep->endpoint.desc))
				dep->busy_slot++;
		} while (++i < req->request.num_mapped_sgs);
		req->queued = false;
	}
	list_del(&req->list);
	req->trb = NULL;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (dwc->ep0_bounced && dep->number == 0)
		dwc->ep0_bounced = false;
	else
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	spin_unlock(&dwc->lock);
	req->request.complete(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
{
	switch (cmd) {
	case DWC3_DEPCMD_DEPSTARTCFG:
		return "Start New Configuration";
	case DWC3_DEPCMD_ENDTRANSFER:
		return "End Transfer";
	case DWC3_DEPCMD_UPDATETRANSFER:
		return "Update Transfer";
	case DWC3_DEPCMD_STARTTRANSFER:
		return "Start Transfer";
	case DWC3_DEPCMD_CLEARSTALL:
		return "Clear Stall";
	case DWC3_DEPCMD_SETSTALL:
		return "Set Stall";
	case DWC3_DEPCMD_GETEPSTATE:
		return "Get Endpoint State";
	case DWC3_DEPCMD_SETTRANSFRESOURCE:
		return "Set Endpoint Transfer Resource";
	case DWC3_DEPCMD_SETEPCONFIG:
		return "Set Endpoint Configuration";
	default:
		return "UNKNOWN command";
	}
}

int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
{
	u32		timeout = 500;
	u32		reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DGCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it's also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);
}

int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep		*dep = dwc->eps[ep];
	u32			timeout = 500;
	u32			reg;

	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
			dep->name,
			dwc3_gadget_ep_cmd_string(cmd), params->param0,
			params->param1, params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32		offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	if (dep->number == 0 || dep->number == 1)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32			cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore, bool restore)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (ignore)
		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;

	if (restore) {
		params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
		params.param2 |= dep->saved_state;
	}

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more.
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
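		/*
		 * Example: an interrupt endpoint with bInterval = 4 is
		 * programmed with a bInterval_m1 of 3 and serviced every
		 * 2^(4 - 1) = 8 (micro)frames, which is the value kept in
		 * dep->interval below.
		 */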
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore, bool restore)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;
	int			ret = -ENOMEM;

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
			restore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb	*trb_st_hw;
		struct dwc3_trb	*trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		memset(&trb_link, 0, sizeof(trb_link));

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];

		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number, true);

		/* - giveback all requests to gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	dwc3_remove_requests(dwc, dep);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req;
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req) {
		dev_err(dwc->dev, "not enough memory\n");
		return NULL;
	}

	req->epnum	= dep->number;
	req->dep	= dep;

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);

	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain, unsigned node)
{
	struct dwc3		*dwc = dep->dwc;
	struct dwc3_trb		*trb;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	/* Skip the LINK-TRB on ISOC */
	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->endpoint.desc))
		dep->free_slot++;

	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];

	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
	}

	dep->free_slot++;

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node)
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
		else
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	if (!req->request.no_interrupt && !chain)
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		trb->ctrl |= DWC3_TRB_CTRL_CSP;
	} else if (last) {
		trb->ctrl |= DWC3_TRB_CTRL_LST;
	}

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request	*req, *n;
	u32			trbs_left;
	u32			max;
	unsigned int		last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
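	/*
	 * Both indices only ever grow and are masked on use, so e.g. with
	 * DWC3_TRB_NUM == 32, busy_slot == 3 and free_slot == 7 the line
	 * above yields (3 - 7) & 31 == 28 TRBs still available.
	 */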

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If busy & slot are equal then it is either full or empty. If we are
	 * starting to process requests then we are empty. Otherwise we are
	 * full and don't do anything.
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned	length;
		dma_addr_t	dma;
		last_one = false;

		if (req->request.num_mapped_sgs > 0) {
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int		i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					if (list_is_last(&req->list,
							&dep->request_list))
						last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain, i);

				if (last_one)
					break;
			}
		} else {
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false, 0);

			if (last_one)
				break;
		}
	}
}

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	struct dwc3			*dwc = dep->dwc;
	int				ret;
	u32				cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));
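	/*
	 * Start Transfer takes the DMA address of the first TRB in the
	 * command parameters, while Update Transfer reuses the transfer
	 * resource that is already running; that is why the TRB address
	 * is only programmed in the start_new case below.
	 */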

	if (start_new) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER;
	}

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;

	if (start_new) {
		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
				dep->number);
		WARN_ON_ONCE(!dep->resource_index);
	}

	return 0;
}

static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, u32 cur_uf)
{
	u32 uf;

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
			dep->name);
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return;
	}

	/* 4 micro frames in the future */
	uf = cur_uf + dep->interval * 4;
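	/*
	 * That is, four polling intervals beyond the (micro)frame reported
	 * by the XferNotReady event, presumably to leave enough margin to
	 * program the TRBs before the chosen service interval is reached.
	 */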

	__dwc3_gadget_kick_transfer(dep, uf, 1);
}

static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 cur_uf, mask;

	mask = ~(dep->interval - 1);
	cur_uf = event->parameters & mask;

	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
}

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;
	int			ret;

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->direction		= dep->direction;
	req->epnum		= dep->number;

	/*
	 * We only add to our list of requests now and
	 * start consuming the list once we get XferNotReady
	 * IRQ.
	 *
	 * That way, we avoid doing anything that we don't need
	 * to do now and defer it until the point we receive a
	 * particular token from the Host side.
	 *
	 * This will also avoid Host cancelling URBs due to too
	 * many NAKs.
	 */
	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
			dep->direction);
	if (ret)
		return ret;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * There are a few special cases:
	 *
	 * 1. XferNotReady with empty list of requests. We need to kick the
	 *    transfer here in that situation, otherwise we will be NAKing
	 *    forever. If we get XferNotReady before gadget driver has a
	 *    chance to queue a request, we will ACK the IRQ but won't be
	 *    able to receive the data until the next request is queued.
	 *    The following code is handling exactly that.
	 *
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		/*
		 * If xfernotready is already elapsed and it is a case
		 * of isoc transfer, then issue END TRANSFER, so that
		 * you can receive xfernotready again and can have
		 * notion of current microframe.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			if (list_empty(&dep->req_queued)) {
				dwc3_stop_active_transfer(dwc, dep->number, true);
				dep->flags = DWC3_EP_ENABLED;
			}
			return 0;
		}

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	/*
	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
	 *    kick the transfer here after queuing a request, otherwise the
	 *    core may not see the modified TRB(s).
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			(dep->flags & DWC3_EP_BUSY) &&
			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
		WARN_ON_ONCE(!dep->resource_index);
		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
				false);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	return 0;
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	if (!dep->endpoint.desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, ep->name);
		return -ESHUTDOWN;
	}

	dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
			request, ep->name, request->length);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number, true);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
{
	struct dwc3_gadget_ep_cmd_params	params;
	struct dwc3				*dwc = dep->dwc;
	int					ret;

	memset(&params, 0x00, sizeof(params));

	if (value) {
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_CLEARSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
	}

	return ret;
}

static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		ret = -EINVAL;
		goto out;
	}

	ret = __dwc3_gadget_ep_set_halt(dep, value);
out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dep->flags |= DWC3_EP_WEDGE;
	spin_unlock_irqrestore(&dwc->lock, flags);

	if (dep->number == 0 || dep->number == 1)
		return dwc3_gadget_ep0_set_halt(ep, 1);
	else
		return dwc3_gadget_ep_set_halt(ep, 1);
}

/* -------------------------------------------------------------------------- */

static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	u32			reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);

	unsigned long		timeout;
	unsigned long		flags;

	u32			reg;

	int			ret = 0;

	u8			link_state;
	u8			speed;

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * According to the Databook, a remote wakeup request should
	 * be issued only when the device is in the early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if (speed == DWC3_DSTS_SUPERSPEED) {
		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
		ret = -EINVAL;
		goto out;
	}

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
				link_state);
		ret = -EINVAL;
		goto out;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		goto out;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	timeout = jiffies + msecs_to_jiffies(100);

	while (!time_after(jiffies, timeout)) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
	u32			reg;
	u32			timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation)
			reg |= DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation && !suspend)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = false;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ? "connect" : "disconnect");

	return 0;
}

static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			ret;

	is_on = !!is_on;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_run_stop(dwc, is_on, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32			reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_ULSTCNGEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}

static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);

static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	struct dwc3_ep		*dep;
	unsigned long		flags;
	int			ret = 0;
	int			irq;
	u32			reg;

	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
			IRQF_SHARED, "dwc3", dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
				irq, ret);
		goto err0;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err1;
	}

	dwc->gadget_driver	= driver;

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);

	/**
	 * WORKAROUND: DWC3 revision < 2.20a have an issue
	 * which would cause metastability state on Run/Stop
	 * bit if we try to force the IP to USB2-only mode.
	 *
	 * Because of that, we cannot configure the IP to any
	 * speed other than SuperSpeed.
	 *
	 * Refers to:
	 *
	 * STAR#9000525659: Clock Domain Crossing on DCTL in
	 * USB 2.0 Mode
	 */
	if (dwc->revision < DWC3_REVISION_220A) {
		reg |= DWC3_DCFG_SUPERSPEED;
	} else {
		switch (dwc->maximum_speed) {
		case USB_SPEED_LOW:
			reg |= DWC3_DSTS_LOWSPEED;
			break;
		case USB_SPEED_FULL:
			reg |= DWC3_DSTS_FULLSPEED1;
			break;
		case USB_SPEED_HIGH:
			reg |= DWC3_DSTS_HIGHSPEED;
			break;
		case USB_SPEED_SUPER:	/* FALLTHROUGH */
		case USB_SPEED_UNKNOWN:	/* FALLTHROUGH */
		default:
			reg |= DWC3_DSTS_SUPERSPEED;
		}
	}
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	dwc->start_config_issued = false;

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err2;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
			false);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err3;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);

	dwc3_gadget_enable_irq(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err3:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err2:
	dwc->gadget_driver = NULL;

err1:
	spin_unlock_irqrestore(&dwc->lock, flags);

	free_irq(irq, dwc);

err0:
	return ret;
}

static int dwc3_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			irq;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc3_gadget_disable_irq(dwc);
	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->gadget_driver	= NULL;

	spin_unlock_irqrestore(&dwc->lock, flags);

	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
	free_irq(irq, dwc);

	return 0;
}

static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
};
1654/* -------------------------------------------------------------------------- */
1655
6a1e3ef4
FB
1656static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1657 u8 num, u32 direction)
72246da4
FB
1658{
1659 struct dwc3_ep *dep;
6a1e3ef4 1660 u8 i;
72246da4 1661
6a1e3ef4
FB
1662 for (i = 0; i < num; i++) {
1663 u8 epnum = (i << 1) | (!!direction);
72246da4 1664
72246da4
FB
1665 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1666 if (!dep) {
1667 dev_err(dwc->dev, "can't allocate endpoint %d\n",
1668 epnum);
1669 return -ENOMEM;
1670 }
1671
1672 dep->dwc = dwc;
1673 dep->number = epnum;
9aa62ae4 1674 dep->direction = !!direction;
72246da4
FB
1675 dwc->eps[epnum] = dep;
1676
1677 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1678 (epnum & 1) ? "in" : "out");
6a1e3ef4 1679
72246da4 1680 dep->endpoint.name = dep->name;
72246da4 1681
653df35e
FB
1682 dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
1683
72246da4 1684 if (epnum == 0 || epnum == 1) {
e117e742 1685 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
6048e4c6 1686 dep->endpoint.maxburst = 1;
72246da4
FB
1687 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1688 if (!epnum)
1689 dwc->gadget.ep0 = &dep->endpoint;
1690 } else {
1691 int ret;
1692
e117e742 1693 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
12d36c16 1694 dep->endpoint.max_streams = 15;
72246da4
FB
1695 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1696 list_add_tail(&dep->endpoint.ep_list,
1697 &dwc->gadget.ep_list);
1698
1699 ret = dwc3_alloc_trb_pool(dep);
25b8ff68 1700 if (ret)
72246da4 1701 return ret;
72246da4 1702 }
25b8ff68 1703
72246da4
FB
1704 INIT_LIST_HEAD(&dep->request_list);
1705 INIT_LIST_HEAD(&dep->req_queued);
1706 }
1707
1708 return 0;
1709}
1710
6a1e3ef4
FB
1711static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1712{
1713 int ret;
1714
1715 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1716
1717 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1718 if (ret < 0) {
1719 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
1720 return ret;
1721 }
1722
1723 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1724 if (ret < 0) {
1725 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
1726 return ret;
1727 }
1728
1729 return 0;
1730}
1731
72246da4
FB
1732static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1733{
1734 struct dwc3_ep *dep;
1735 u8 epnum;
1736
1737 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1738 dep = dwc->eps[epnum];
6a1e3ef4
FB
1739 if (!dep)
1740 continue;
5bf8fae3
GC
1741 /*
1742 * Physical endpoints 0 and 1 are special; they form the
1743 * bi-directional USB endpoint 0.
1744 *
1745 * For those two physical endpoints, we don't allocate a TRB
1746 * pool nor do we add them the endpoints list. Due to that, we
1747 * shouldn't do these two operations otherwise we would end up
1748 * with all sorts of bugs when removing dwc3.ko.
1749 */
1750 if (epnum != 0 && epnum != 1) {
1751 dwc3_free_trb_pool(dep);
72246da4 1752 list_del(&dep->endpoint.ep_list);
5bf8fae3 1753 }
72246da4
FB
1754
1755 kfree(dep);
1756 }
1757}
1758
72246da4 1759/* -------------------------------------------------------------------------- */

static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status)
{
	unsigned int		count;
	unsigned int		s_pkt = 0;
	unsigned int		trb_status;

	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		/*
		 * We continue despite the error. There is not much we
		 * can do. If we don't clean it up we loop forever. If
		 * we skip the TRB then it gets overwritten after a
		 * while since we use them in a ring buffer. A BUG()
		 * would help. Let's hope that if this occurs, someone
		 * fixes the root cause instead of looking away :)
		 */
		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
				dep->name, trb);
	count = trb->size & DWC3_TRB_SIZE_MASK;

	if (dep->direction) {
		if (count) {
			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
				dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				/*
				 * If missed isoc occurred and there is
				 * no request queued then issue END
				 * TRANSFER, so that core generates
				 * next xfernotready and we will issue
				 * a fresh START TRANSFER.
				 * If there are still queued requests
				 * then wait, do not issue either END
				 * or UPDATE TRANSFER, just attach next
				 * request in request_list during
				 * giveback. If any future queued request
				 * is successfully transferred then we
				 * will issue UPDATE TRANSFER for all
				 * requests in the request_list.
				 */
				dep->flags |= DWC3_EP_MISSED_ISOC;
			} else {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			dep->flags &= ~DWC3_EP_MISSED_ISOC;
		}
	} else {
		if (count && (event->status & DEPEVT_STATUS_SHORT))
			s_pkt = 1;
	}

	/*
	 * We assume here we will always receive the entire data block
	 * which we should receive. Meaning, if we program RX to
	 * receive 4K but we receive only 2K, we assume that's all we
	 * should receive and we simply bounce the request back to the
	 * gadget driver for further processing.
	 */
	req->request.actual += req->request.length - count;
	if (s_pkt)
		return 1;
	if ((event->status & DEPEVT_STATUS_LST) &&
			(trb->ctrl & (DWC3_TRB_CTRL_LST |
				DWC3_TRB_CTRL_HWO)))
		return 1;
	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 1;
	return 0;
}

static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request	*req;
	struct dwc3_trb		*trb;
	unsigned int		slot;
	unsigned int		i;
	int			ret;

	do {
		req = next_request(&dep->req_queued);
		if (!req) {
			WARN_ON_ONCE(1);
			return 1;
		}
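		/*
		 * A request may span several TRBs (one per mapped
		 * scatterlist entry), so walk them starting from
		 * req->start_slot, skipping the link TRB at the end of the
		 * ring on isochronous endpoints, and let
		 * __dwc3_cleanup_done_trbs() decide when the request is done.
		 */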
		i = 0;
		do {
			slot = req->start_slot + i;
			if ((slot == DWC3_TRB_NUM - 1) &&
					usb_endpoint_xfer_isoc(dep->endpoint.desc))
				slot++;
			slot %= DWC3_TRB_NUM;
			trb = &dep->trb_pool[slot];

			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
					event, status);
			if (ret)
				break;
		} while (++i < req->request.num_mapped_sgs);

		dwc3_gadget_giveback(dep, req, status);

		if (ret)
			break;
	} while (1);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			list_empty(&dep->req_queued)) {
		if (list_empty(&dep->request_list)) {
			/*
			 * If there is no entry in request list then do
			 * not issue END TRANSFER now. Just set PENDING
			 * flag, so that END TRANSFER is issued when an
			 * entry is added into request list.
			 */
			dep->flags = DWC3_EP_PENDING_REQUEST;
		} else {
			dwc3_stop_active_transfer(dwc, dep->number, true);
			dep->flags = DWC3_EP_ENABLED;
		}
		return 1;
	}

	return 1;
}

static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
		int start_new)
{
	unsigned		status = 0;
	int			clean_busy;

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
	if (clean_busy)
		dep->flags &= ~DWC3_EP_BUSY;

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32		reg;
		int		i;

		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->req_queued))
				return;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}

static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep;
	u8			epnum = event->endpoint_number;

	dep = dwc->eps[epnum];

	if (!(dep->flags & DWC3_EP_ENABLED))
		return;

	dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
			dwc3_ep_event_string(event->endpoint_event));

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dep->resource_index = 0;

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
		break;
	case DWC3_DEPEVT_XFERINPROGRESS:
		if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
					dep->name);
			return;
		}

		dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dwc3_gadget_start_isoc(dwc, dep, event);
		} else {
			int ret;

			dev_vdbg(dwc->dev, "%s: reason %s\n",
					dep->name, event->status &
					DEPEVT_STATUS_TRANSFER_ACTIVE
					? "Transfer Active"
					: "Transfer Not Active");

			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
			if (!ret || ret == -EBUSY)
				return;

			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}

		break;
	case DWC3_DEPEVT_STREAMEVT:
		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
					dep->name);
			return;
		}

		switch (event->status) {
		case DEPEVT_STREAMEVT_FOUND:
			dev_vdbg(dwc->dev, "Stream %d found and started\n",
					event->parameters);

			break;
		case DEPEVT_STREAMEVT_NOTFOUND:
			/* FALLTHROUGH */
		default:
			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
		}
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
2015 break;
72246da4 2016 case DWC3_DEPEVT_EPCMDCMPLT:
ea53b882 2017 dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
72246da4
FB
2018 break;
2019 }
2020}
2021
2022static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2023{
2024 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
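		/*
		 * Drop the lock while calling out to the gadget driver;
		 * its ->disconnect() callback may call back into this
		 * driver (for example to dequeue requests).
		 */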
2025 spin_unlock(&dwc->lock);
2026 dwc->gadget_driver->disconnect(&dwc->gadget);
2027 spin_lock(&dwc->lock);
2028 }
2029}
2030
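/*
 * dwc3_stop_active_transfer - end the transfer currently active on @epnum
 * @force: when true, the End Transfer command is issued with the
 *	DWC3_DEPCMD_HIPRI_FORCERM (ForceRM) bit set, as visible in the
 *	command setup below; callers such as the isochronous cleanup path
 *	above pass true here.
 */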
b992e681 2031static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
72246da4
FB
2032{
2033 struct dwc3_ep *dep;
2034 struct dwc3_gadget_ep_cmd_params params;
2035 u32 cmd;
2036 int ret;
2037
2038 dep = dwc->eps[epnum];
2039
b4996a86 2040 if (!dep->resource_index)
3daf74d7
PA
2041 return;
2042
57911504
PA
2043 /*
2044 * NOTICE: We are violating what the Databook says about the
2045 * EndTransfer command. Ideally we would _always_ wait for the
2046 * EndTransfer Command Completion IRQ, but that's causing too
2047	 * much trouble synchronizing between us and the gadget driver.
2048 *
2049 * We have discussed this with the IP Provider and it was
2050 * suggested to giveback all requests here, but give HW some
2051 * extra time to synchronize with the interconnect. We're using
2052	 * an arbitrary 100us delay for that.
2053 *
2054 * Note also that a similar handling was tested by Synopsys
2055 * (thanks a lot Paul) and nothing bad has come out of it.
2056 * In short, what we're doing is:
2057 *
2058 * - Issue EndTransfer WITH CMDIOC bit set
2059 * - Wait 100us
2060 */
2061
3daf74d7 2062 cmd = DWC3_DEPCMD_ENDTRANSFER;
b992e681
PZ
2063 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2064 cmd |= DWC3_DEPCMD_CMDIOC;
b4996a86 2065 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
3daf74d7
PA
2066 memset(&params, 0, sizeof(params));
2067 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2068 WARN_ON_ONCE(ret);
b4996a86 2069 dep->resource_index = 0;
041d81f4 2070 dep->flags &= ~DWC3_EP_BUSY;
57911504 2071 udelay(100);
72246da4
FB
2072}
2073
2074static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2075{
2076 u32 epnum;
2077
2078 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2079 struct dwc3_ep *dep;
2080
2081 dep = dwc->eps[epnum];
6a1e3ef4
FB
2082 if (!dep)
2083 continue;
2084
72246da4
FB
2085 if (!(dep->flags & DWC3_EP_ENABLED))
2086 continue;
2087
624407f9 2088 dwc3_remove_requests(dwc, dep);
72246da4
FB
2089 }
2090}
2091
2092static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2093{
2094 u32 epnum;
2095
2096 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2097 struct dwc3_ep *dep;
2098 struct dwc3_gadget_ep_cmd_params params;
2099 int ret;
2100
2101 dep = dwc->eps[epnum];
6a1e3ef4
FB
2102 if (!dep)
2103 continue;
72246da4
FB
2104
2105 if (!(dep->flags & DWC3_EP_STALL))
2106 continue;
2107
2108 dep->flags &= ~DWC3_EP_STALL;
2109
2110 memset(&params, 0, sizeof(params));
2111 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2112 DWC3_DEPCMD_CLEARSTALL, &params);
2113 WARN_ON_ONCE(ret);
2114 }
2115}
2116
2117static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2118{
c4430a26
FB
2119 int reg;
2120
72246da4 2121 dev_vdbg(dwc->dev, "%s\n", __func__);
72246da4
FB
2122
2123 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2124 reg &= ~DWC3_DCTL_INITU1ENA;
2125 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2126
2127 reg &= ~DWC3_DCTL_INITU2ENA;
2128 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
72246da4 2129
72246da4 2130 dwc3_disconnect_gadget(dwc);
b23c8439 2131 dwc->start_config_issued = false;
72246da4
FB
2132
2133 dwc->gadget.speed = USB_SPEED_UNKNOWN;
df62df56 2134 dwc->setup_packet_pending = false;
72246da4
FB
2135}
2136
72246da4
FB
2137static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2138{
2139 u32 reg;
2140
2141 dev_vdbg(dwc->dev, "%s\n", __func__);
2142
df62df56
FB
2143 /*
2144 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2145 * would cause a missing Disconnect Event if there's a
2146 * pending Setup Packet in the FIFO.
2147 *
2148 * There's no suggested workaround on the official Bug
2149 * report, which states that "unless the driver/application
2150 * is doing any special handling of a disconnect event,
2151 * there is no functional issue".
2152 *
2153 * Unfortunately, it turns out that we _do_ some special
2154 * handling of a disconnect event, namely complete all
2155 * pending transfers, notify gadget driver of the
2156 * disconnection, and so on.
2157 *
2158 * Our suggested workaround is to follow the Disconnect
2159 * Event steps here, instead, based on a setup_packet_pending
2160	 * flag. The flag gets set whenever we have an XferNotReady
2161 * event on EP0 and gets cleared on XferComplete for the
2162 * same endpoint.
2163 *
2164 * Refers to:
2165 *
2166 * STAR#9000466709: RTL: Device : Disconnect event not
2167 * generated if setup packet pending in FIFO
2168 */
2169 if (dwc->revision < DWC3_REVISION_188A) {
2170 if (dwc->setup_packet_pending)
2171 dwc3_gadget_disconnect_interrupt(dwc);
2172 }
2173
961906ed 2174 /* after reset -> Default State */
14cd592f 2175 usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
961906ed 2176
72246da4
FB
2177 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
2178 dwc3_disconnect_gadget(dwc);
2179
2180 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2181 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2182 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3b637367 2183 dwc->test_mode = false;
72246da4
FB
2184
2185 dwc3_stop_active_transfers(dwc);
2186 dwc3_clear_stall_all_ep(dwc);
b23c8439 2187 dwc->start_config_issued = false;
72246da4
FB
2188
2189 /* Reset device address to zero */
2190 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2191 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2192 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
72246da4
FB
2193}
2194
2195static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2196{
2197 u32 reg;
2198 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2199
2200 /*
2201	 * We change the clock only at SS, though it is not clear why we would
2202	 * want to do this. Maybe it becomes part of the power saving plan.
2203 */
2204
2205 if (speed != DWC3_DSTS_SUPERSPEED)
2206 return;
2207
2208 /*
2209 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2210 * each time on Connect Done.
2211 */
2212 if (!usb30_clock)
2213 return;
2214
2215 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2216 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2217 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2218}
2219
72246da4
FB
2220static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2221{
72246da4
FB
2222 struct dwc3_ep *dep;
2223 int ret;
2224 u32 reg;
2225 u8 speed;
2226
2227 dev_vdbg(dwc->dev, "%s\n", __func__);
2228
72246da4
FB
2229 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2230 speed = reg & DWC3_DSTS_CONNECTSPD;
2231 dwc->speed = speed;
2232
2233 dwc3_update_ram_clk_sel(dwc, speed);
2234
2235 switch (speed) {
2236 case DWC3_DCFG_SUPERSPEED:
05870c5b
FB
2237 /*
2238 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2239 * would cause a missing USB3 Reset event.
2240 *
2241 * In such situations, we should force a USB3 Reset
2242 * event by calling our dwc3_gadget_reset_interrupt()
2243 * routine.
2244 *
2245 * Refers to:
2246 *
2247 * STAR#9000483510: RTL: SS : USB3 reset event may
2248 * not be generated always when the link enters poll
2249 */
2250 if (dwc->revision < DWC3_REVISION_190A)
2251 dwc3_gadget_reset_interrupt(dwc);
2252
72246da4
FB
2253 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2254 dwc->gadget.ep0->maxpacket = 512;
2255 dwc->gadget.speed = USB_SPEED_SUPER;
2256 break;
2257 case DWC3_DCFG_HIGHSPEED:
2258 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2259 dwc->gadget.ep0->maxpacket = 64;
2260 dwc->gadget.speed = USB_SPEED_HIGH;
2261 break;
2262 case DWC3_DCFG_FULLSPEED2:
2263 case DWC3_DCFG_FULLSPEED1:
2264 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2265 dwc->gadget.ep0->maxpacket = 64;
2266 dwc->gadget.speed = USB_SPEED_FULL;
2267 break;
2268 case DWC3_DCFG_LOWSPEED:
2269 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2270 dwc->gadget.ep0->maxpacket = 8;
2271 dwc->gadget.speed = USB_SPEED_LOW;
2272 break;
2273 }
2274
2b758350
PA
2275 /* Enable USB2 LPM Capability */
2276
2277 if ((dwc->revision > DWC3_REVISION_194A)
2278 && (speed != DWC3_DCFG_SUPERSPEED)) {
2279 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2280 reg |= DWC3_DCFG_LPM_CAP;
2281 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2282
2283 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2284 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2285
1a947746
FB
2286 /*
2287 * TODO: This should be configurable. For now using
2288 * maximum allowed HIRD threshold value of 0b1100
2289 */
2290 reg |= DWC3_DCTL_HIRD_THRES(12);
2b758350 2291
356363bf
FB
2292 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2293 } else {
2294 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2295 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2b758350
PA
2296 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2297 }
2298
72246da4 2299 dep = dwc->eps[0];
265b70a7
PZ
2300 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2301 false);
72246da4
FB
2302 if (ret) {
2303 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2304 return;
2305 }
2306
2307 dep = dwc->eps[1];
265b70a7
PZ
2308 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2309 false);
72246da4
FB
2310 if (ret) {
2311 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2312 return;
2313 }
2314
2315 /*
2316 * Configure PHY via GUSB3PIPECTLn if required.
2317 *
2318 * Update GTXFIFOSIZn
2319 *
2320 * In both cases reset values should be sufficient.
2321 */
2322}
2323
2324static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2325{
2326 dev_vdbg(dwc->dev, "%s\n", __func__);
2327
2328 /*
2329 * TODO take core out of low power mode when that's
2330 * implemented.
2331 */
2332
2333 dwc->gadget_driver->resume(&dwc->gadget);
2334}
2335
2336static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2337 unsigned int evtinfo)
2338{
fae2b904 2339 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
0b0cc1cd
FB
2340 unsigned int pwropt;
2341
2342 /*
2343 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2344 * Hibernation mode enabled which would show up when device detects
2345 * host-initiated U3 exit.
2346 *
2347 * In that case, device will generate a Link State Change Interrupt
2348 * from U3 to RESUME which is only necessary if Hibernation is
2349 * configured in.
2350 *
2351 * There are no functional changes due to such spurious event and we
2352 * just need to ignore it.
2353 *
2354 * Refers to:
2355 *
2356 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2357 * operational mode
2358 */
2359 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2360 if ((dwc->revision < DWC3_REVISION_250A) &&
2361 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2362 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2363 (next == DWC3_LINK_STATE_RESUME)) {
2364 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2365 return;
2366 }
2367 }
fae2b904
FB
2368
2369 /*
2370	 * WORKAROUND: DWC3 revisions <1.83a have an issue where, depending
2371	 * on the link partner, the USB session might do multiple entries
2372	 * into/exits from low power states before a transfer takes place.
2373 *
2374 * Due to this problem, we might experience lower throughput. The
2375 * suggested workaround is to disable DCTL[12:9] bits if we're
2376 * transitioning from U1/U2 to U0 and enable those bits again
2377 * after a transfer completes and there are no pending transfers
2378 * on any of the enabled endpoints.
2379 *
2380 * This is the first half of that workaround.
2381 *
2382 * Refers to:
2383 *
2384 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2385 * core send LGO_Ux entering U0
2386 */
2387 if (dwc->revision < DWC3_REVISION_183A) {
2388 if (next == DWC3_LINK_STATE_U0) {
2389 u32 u1u2;
2390 u32 reg;
2391
2392 switch (dwc->link_state) {
2393 case DWC3_LINK_STATE_U1:
2394 case DWC3_LINK_STATE_U2:
2395 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2396 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2397 | DWC3_DCTL_ACCEPTU2ENA
2398 | DWC3_DCTL_INITU1ENA
2399 | DWC3_DCTL_ACCEPTU1ENA);
2400
2401 if (!dwc->u1u2)
2402 dwc->u1u2 = reg & u1u2;
2403
2404 reg &= ~u1u2;
2405
2406 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2407 break;
2408 default:
2409 /* do nothing */
2410 break;
2411 }
2412 }
2413 }
2414
2415 dwc->link_state = next;
019ac832
FB
2416
2417 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
72246da4
FB
2418}
2419
e1dadd3b
FB
2420static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2421 unsigned int evtinfo)
2422{
2423 unsigned int is_ss = evtinfo & BIT(4);
2424
2425 /**
2426	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2427	 * has a known issue which can cause USB CV TD.9.23 to fail
2428 * randomly.
2429 *
2430 * Because of this issue, core could generate bogus hibernation
2431 * events which SW needs to ignore.
2432 *
2433 * Refers to:
2434 *
2435 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2436 * Device Fallback from SuperSpeed
2437 */
2438 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2439 return;
2440
2441 /* enter hibernation here */
2442}
2443
72246da4
FB
2444static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2445 const struct dwc3_event_devt *event)
2446{
2447 switch (event->type) {
2448 case DWC3_DEVICE_EVENT_DISCONNECT:
2449 dwc3_gadget_disconnect_interrupt(dwc);
2450 break;
2451 case DWC3_DEVICE_EVENT_RESET:
2452 dwc3_gadget_reset_interrupt(dwc);
2453 break;
2454 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2455 dwc3_gadget_conndone_interrupt(dwc);
2456 break;
2457 case DWC3_DEVICE_EVENT_WAKEUP:
2458 dwc3_gadget_wakeup_interrupt(dwc);
2459 break;
e1dadd3b
FB
2460 case DWC3_DEVICE_EVENT_HIBER_REQ:
2461 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2462 "unexpected hibernation event\n"))
2463 break;
2464
2465 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2466 break;
72246da4
FB
2467 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2468 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2469 break;
2470 case DWC3_DEVICE_EVENT_EOPF:
2471 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2472 break;
2473 case DWC3_DEVICE_EVENT_SOF:
2474 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2475 break;
2476 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2477 dev_vdbg(dwc->dev, "Erratic Error\n");
2478 break;
2479 case DWC3_DEVICE_EVENT_CMD_CMPL:
2480 dev_vdbg(dwc->dev, "Command Complete\n");
2481 break;
2482 case DWC3_DEVICE_EVENT_OVERFLOW:
2483 dev_vdbg(dwc->dev, "Overflow\n");
2484 break;
2485 default:
2486 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2487 }
2488}
2489
2490static void dwc3_process_event_entry(struct dwc3 *dwc,
2491 const union dwc3_event *event)
2492{
2493 /* Endpoint IRQ, handle it and return early */
2494 if (event->type.is_devspec == 0) {
2495 /* depevt */
2496 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2497 }
2498
2499 switch (event->type.type) {
2500 case DWC3_EVENT_TYPE_DEV:
2501 dwc3_gadget_interrupt(dwc, &event->devt);
2502 break;
2503	/* REVISIT what to do with Carkit and I2C events? */
2504 default:
2505 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2506 }
2507}
2508
f42f2447 2509static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
b15a762f 2510{
f42f2447 2511 struct dwc3_event_buffer *evt;
b15a762f 2512 irqreturn_t ret = IRQ_NONE;
f42f2447 2513 int left;
e8adfc30 2514 u32 reg;
b15a762f 2515
f42f2447
FB
2516 evt = dwc->ev_buffs[buf];
2517 left = evt->count;
b15a762f 2518
f42f2447
FB
2519 if (!(evt->flags & DWC3_EVENT_PENDING))
2520 return IRQ_NONE;
b15a762f 2521
f42f2447
FB
2522 while (left > 0) {
2523 union dwc3_event event;
b15a762f 2524
f42f2447 2525 event.raw = *(u32 *) (evt->buf + evt->lpos);
b15a762f 2526
f42f2447 2527 dwc3_process_event_entry(dwc, &event);
b15a762f 2528
f42f2447
FB
2529 /*
2530		 * FIXME: we wrap around to the next entry assuming
2531		 * entries are 4 bytes in size, which holds for almost
2532		 * all of them. There is one entry which is 12 bytes:
2533		 * a regular entry followed by 8 bytes of data. It is
2534		 * not yet clear how such an entry is laid out when it
2535		 * falls next to the buffer boundary, so that case will
2536		 * be handled once we actually hit it.
2537 */
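		/*
		 * lpos advances in 4-byte steps and wraps at
		 * DWC3_EVENT_BUFFERS_SIZE; the write of 4 to GEVNTCOUNT
		 * below acknowledges the consumed bytes so the controller
		 * can reuse that space for new events.
		 */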
2538 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2539 left -= 4;
b15a762f 2540
f42f2447
FB
2541 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2542 }
b15a762f 2543
f42f2447
FB
2544 evt->count = 0;
2545 evt->flags &= ~DWC3_EVENT_PENDING;
2546 ret = IRQ_HANDLED;
b15a762f 2547
f42f2447
FB
2548 /* Unmask interrupt */
2549 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2550 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2551 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
b15a762f 2552
f42f2447
FB
2553 return ret;
2554}
e8adfc30 2555
f42f2447
FB
2556static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2557{
2558 struct dwc3 *dwc = _dwc;
2559 unsigned long flags;
2560 irqreturn_t ret = IRQ_NONE;
2561 int i;
2562
2563 spin_lock_irqsave(&dwc->lock, flags);
2564
2565 for (i = 0; i < dwc->num_event_buffers; i++)
2566 ret |= dwc3_process_event_buf(dwc, i);
b15a762f
FB
2567
2568 spin_unlock_irqrestore(&dwc->lock, flags);
2569
2570 return ret;
2571}
2572
7f97aa98 2573static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
72246da4
FB
2574{
2575 struct dwc3_event_buffer *evt;
72246da4 2576 u32 count;
e8adfc30 2577 u32 reg;
72246da4 2578
b15a762f
FB
2579 evt = dwc->ev_buffs[buf];
2580
72246da4
FB
2581 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2582 count &= DWC3_GEVNTCOUNT_MASK;
2583 if (!count)
2584 return IRQ_NONE;
2585
b15a762f
FB
2586 evt->count = count;
2587 evt->flags |= DWC3_EVENT_PENDING;
72246da4 2588
e8adfc30
FB
2589 /* Mask interrupt */
2590 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2591 reg |= DWC3_GEVNTSIZ_INTMASK;
2592 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2593
b15a762f 2594 return IRQ_WAKE_THREAD;
72246da4
FB
2595}
2596
2597static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2598{
2599 struct dwc3 *dwc = _dwc;
2600 int i;
2601 irqreturn_t ret = IRQ_NONE;
2602
2603 spin_lock(&dwc->lock);
2604
9f622b2a 2605 for (i = 0; i < dwc->num_event_buffers; i++) {
72246da4
FB
2606 irqreturn_t status;
2607
7f97aa98 2608 status = dwc3_check_event_buf(dwc, i);
b15a762f 2609 if (status == IRQ_WAKE_THREAD)
72246da4
FB
2610 ret = status;
2611 }
2612
2613 spin_unlock(&dwc->lock);
2614
2615 return ret;
2616}
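
/*
 * Minimal wiring sketch, for illustration only: dwc3_interrupt() above is
 * the hard IRQ handler (it just masks the event buffer and marks it
 * pending) and dwc3_thread_interrupt() is the threaded handler that
 * drains it. The real registration lives elsewhere in the driver; the
 * helper name below is hypothetical.
 */
static int __maybe_unused dwc3_gadget_request_irq_sketch(struct dwc3 *dwc,
		int irq)
{
	/* top half: dwc3_interrupt, threaded bottom half: dwc3_thread_interrupt */
	return request_threaded_irq(irq, dwc3_interrupt,
			dwc3_thread_interrupt, IRQF_SHARED,
			"dwc3", dwc);
}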
2617
2618/**
2619 * dwc3_gadget_init - Initializes gadget related registers
1d046793 2620 * @dwc: pointer to our controller context structure
72246da4
FB
2621 *
2622 * Returns 0 on success otherwise negative errno.
2623 */
41ac7b3a 2624int dwc3_gadget_init(struct dwc3 *dwc)
72246da4 2625{
72246da4 2626 int ret;
72246da4
FB
2627
2628 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2629 &dwc->ctrl_req_addr, GFP_KERNEL);
2630 if (!dwc->ctrl_req) {
2631 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2632 ret = -ENOMEM;
2633 goto err0;
2634 }
2635
2636 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2637 &dwc->ep0_trb_addr, GFP_KERNEL);
2638 if (!dwc->ep0_trb) {
2639 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2640 ret = -ENOMEM;
2641 goto err1;
2642 }
2643
3ef35faf 2644 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
72246da4
FB
2645 if (!dwc->setup_buf) {
2646 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2647 ret = -ENOMEM;
2648 goto err2;
2649 }
2650
5812b1c2 2651 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
3ef35faf
FB
2652 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2653 GFP_KERNEL);
5812b1c2
FB
2654 if (!dwc->ep0_bounce) {
2655 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2656 ret = -ENOMEM;
2657 goto err3;
2658 }
2659
72246da4 2660 dwc->gadget.ops = &dwc3_gadget_ops;
d327ab5b 2661 dwc->gadget.max_speed = USB_SPEED_SUPER;
72246da4 2662 dwc->gadget.speed = USB_SPEED_UNKNOWN;
eeb720fb 2663 dwc->gadget.sg_supported = true;
72246da4
FB
2664 dwc->gadget.name = "dwc3-gadget";
2665
a4b9d94b
DC
2666 /*
2667 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2668 * on ep out.
2669 */
2670 dwc->gadget.quirk_ep_out_aligned_size = true;
2671
72246da4
FB
2672 /*
2673 * REVISIT: Here we should clear all pending IRQs to be
2674 * sure we're starting from a well known location.
2675 */
2676
2677 ret = dwc3_gadget_init_endpoints(dwc);
2678 if (ret)
5812b1c2 2679 goto err4;
72246da4 2680
72246da4
FB
2681 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2682 if (ret) {
2683 dev_err(dwc->dev, "failed to register udc\n");
e1f80467 2684 goto err4;
72246da4
FB
2685 }
2686
2687 return 0;
2688
5812b1c2 2689err4:
e1f80467 2690 dwc3_gadget_free_endpoints(dwc);
3ef35faf
FB
2691 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2692 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2693
72246da4 2694err3:
0fc9a1be 2695 kfree(dwc->setup_buf);
72246da4
FB
2696
2697err2:
2698 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2699 dwc->ep0_trb, dwc->ep0_trb_addr);
2700
2701err1:
2702 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2703 dwc->ctrl_req, dwc->ctrl_req_addr);
2704
2705err0:
2706 return ret;
2707}
2708
7415f17c
FB
2709/* -------------------------------------------------------------------------- */
2710
72246da4
FB
2711void dwc3_gadget_exit(struct dwc3 *dwc)
2712{
72246da4 2713 usb_del_gadget_udc(&dwc->gadget);
72246da4 2714
72246da4
FB
2715 dwc3_gadget_free_endpoints(dwc);
2716
3ef35faf
FB
2717 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2718 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2719
0fc9a1be 2720 kfree(dwc->setup_buf);
72246da4
FB
2721
2722 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2723 dwc->ep0_trb, dwc->ep0_trb_addr);
2724
2725 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2726 dwc->ctrl_req, dwc->ctrl_req_addr);
72246da4 2727}
7415f17c
FB
2728
2729int dwc3_gadget_prepare(struct dwc3 *dwc)
2730{
7b2a0368 2731 if (dwc->pullups_connected) {
7415f17c 2732 dwc3_gadget_disable_irq(dwc);
7b2a0368
FB
2733 dwc3_gadget_run_stop(dwc, true, true);
2734 }
7415f17c
FB
2735
2736 return 0;
2737}
2738
2739void dwc3_gadget_complete(struct dwc3 *dwc)
2740{
2741 if (dwc->pullups_connected) {
2742 dwc3_gadget_enable_irq(dwc);
7b2a0368 2743 dwc3_gadget_run_stop(dwc, true, false);
7415f17c
FB
2744 }
2745}
2746
2747int dwc3_gadget_suspend(struct dwc3 *dwc)
2748{
2749 __dwc3_gadget_ep_disable(dwc->eps[0]);
2750 __dwc3_gadget_ep_disable(dwc->eps[1]);
2751
2752 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2753
2754 return 0;
2755}
2756
2757int dwc3_gadget_resume(struct dwc3 *dwc)
2758{
2759 struct dwc3_ep *dep;
2760 int ret;
2761
2762 /* Start with SuperSpeed Default */
2763 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2764
2765 dep = dwc->eps[0];
265b70a7
PZ
2766 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2767 false);
7415f17c
FB
2768 if (ret)
2769 goto err0;
2770
2771 dep = dwc->eps[1];
265b70a7
PZ
2772 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2773 false);
7415f17c
FB
2774 if (ret)
2775 goto err1;
2776
2777 /* begin to receive SETUP packets */
2778 dwc->ep0state = EP0_SETUP_PHASE;
2779 dwc3_ep0_out_start(dwc);
2780
2781 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2782
2783 return 0;
2784
2785err1:
2786 __dwc3_gadget_ep_disable(dwc->eps[0]);
2787
2788err0:
2789 return ret;
2790}