git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/usb/dwc3/gadget.c
usb: dwc3: gadget: make sure HIRD threshold is 0 in superspeed
72246da4
FB
1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
72246da4
FB
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
5945f789
FB
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
72246da4 12 *
5945f789
FB
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
72246da4
FB
17 */
18
19#include <linux/kernel.h>
20#include <linux/delay.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23#include <linux/platform_device.h>
24#include <linux/pm_runtime.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/list.h>
28#include <linux/dma-mapping.h>
29
30#include <linux/usb/ch9.h>
31#include <linux/usb/gadget.h>
32
33#include "core.h"
34#include "gadget.h"
35#include "io.h"
36
04a9bfcd
FB
37/**
38 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
39 * @dwc: pointer to our context structure
40 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
41 *
42 * Caller should take care of locking. This function will
43 * return 0 on success or -EINVAL if wrong Test Selector
44 * is passed
45 */
46int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
47{
48 u32 reg;
49
50 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
51 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
52
53 switch (mode) {
54 case TEST_J:
55 case TEST_K:
56 case TEST_SE0_NAK:
57 case TEST_PACKET:
58 case TEST_FORCE_EN:
59 reg |= mode << 1;
60 break;
61 default:
62 return -EINVAL;
63 }
64
65 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
66
67 return 0;
68}
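/*
 * Worked example (annotation; assuming the ch9.h test-selector values
 * TEST_J = 1 ... TEST_FORCE_EN = 5): the selector written above lands in
 * the field masked by DWC3_DCTL_TSTCTRL_MASK because of the "mode << 1"
 * shift, so a call such as
 *
 *	dwc3_gadget_set_test_mode(dwc, TEST_PACKET);
 *
 * clears that field and writes 4 << 1 = 0x08 into DCTL, putting the USB2
 * PHY into Test_Packet mode.
 */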
69
911f1f88
PZ
70/**
71 * dwc3_gadget_get_link_state - Gets current state of USB Link
72 * @dwc: pointer to our context structure
73 *
74 * Caller should take care of locking. This function will
75 * return the current link state.
76 */
77int dwc3_gadget_get_link_state(struct dwc3 *dwc)
78{
79 u32 reg;
80
81 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
82
83 return DWC3_DSTS_USBLNKST(reg);
84}
85
8598bde7
FB
86/**
87 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
88 * @dwc: pointer to our context structure
89 * @state: the state to put link into
90 *
91 * Caller should take care of locking. This function will
aee63e3c 92 * return 0 on success or -ETIMEDOUT.
8598bde7
FB
93 */
94int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
95{
aee63e3c 96 int retries = 10000;
8598bde7
FB
97 u32 reg;
98
802fde98
PZ
99 /*
100 * Wait until device controller is ready. Only applies to 1.94a and
101 * later RTL.
102 */
103 if (dwc->revision >= DWC3_REVISION_194A) {
104 while (--retries) {
105 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
106 if (reg & DWC3_DSTS_DCNRD)
107 udelay(5);
108 else
109 break;
110 }
111
112 if (retries <= 0)
113 return -ETIMEDOUT;
114 }
115
8598bde7
FB
116 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
117 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
118
119 /* set requested state */
120 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
121 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
122
802fde98
PZ
123 /*
124 * The following code is racy when called from dwc3_gadget_wakeup,
125 * and is not needed, at least on newer versions
126 */
127 if (dwc->revision >= DWC3_REVISION_194A)
128 return 0;
129
8598bde7 130 /* wait for a change in DSTS */
aed430e5 131 retries = 10000;
8598bde7
FB
132 while (--retries) {
133 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
134
8598bde7
FB
135 if (DWC3_DSTS_USBLNKST(reg) == state)
136 return 0;
137
aee63e3c 138 udelay(5);
8598bde7
FB
139 }
140
141 dev_vdbg(dwc->dev, "link state change request timed out\n");
142
143 return -ETIMEDOUT;
144}
145
457e84b6
FB
146/**
147 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
148 * @dwc: pointer to our context structure
149 *
150 * This function will do a best-effort FIFO allocation in order
151 * to improve FIFO usage and throughput, while still allowing
152 * us to enable as many endpoints as possible.
153 *
154 * Keep in mind that this operation will be highly dependent
155 * on the configured size for RAM1 - which contains TxFifo -,
156 * the number of endpoints enabled in the coreConsultant tool, and
157 * the width of the Master Bus.
158 *
159 * In the ideal world, we would always be able to satisfy the
160 * following equation:
161 *
162 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
163 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
164 *
165 * Unfortunately, due to many variables that's not always the case.
166 */
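/*
 * Worked example for the equation above (assuming a 64-bit master bus,
 * i.e. MDWIDTH = 64 bits = 8 bytes): a SuperSpeed bulk IN endpoint with
 * wMaxPacketSize = 1024 gets mult = 3 in the loop below, so
 *
 *	tmp       = 3 * (1024 + 8) + 8 = 3104 bytes
 *	fifo_size = DIV_ROUND_UP(3104, 8) = 388 MDWIDTH words
 *
 * which is one "(3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)" term of the
 * ideal allocation; the ep0 term "(512 + 2 * MDWIDTH-Bytes)" would add
 * DIV_ROUND_UP(512 + 16, 8) = 66 words on top of that.
 */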
167int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
168{
169 int last_fifo_depth = 0;
170 int ram1_depth;
171 int fifo_size;
172 int mdwidth;
173 int num;
174
175 if (!dwc->needs_fifo_resize)
176 return 0;
177
178 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
179 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
180
181 /* MDWIDTH is represented in bits, we need it in bytes */
182 mdwidth >>= 3;
183
184 /*
185 * FIXME For now we will only allocate 1 wMaxPacketSize space
186 * for each enabled endpoint, later patches will come to
187 * improve this algorithm so that we better use the internal
188 * FIFO space
189 */
190 for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
191 struct dwc3_ep *dep = dwc->eps[num];
192 int fifo_number = dep->number >> 1;
2e81c36a 193 int mult = 1;
457e84b6
FB
194 int tmp;
195
196 if (!(dep->number & 1))
197 continue;
198
199 if (!(dep->flags & DWC3_EP_ENABLED))
200 continue;
201
16e78db7
IS
202 if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
203 || usb_endpoint_xfer_isoc(dep->endpoint.desc))
2e81c36a
FB
204 mult = 3;
205
206 /*
207 * REVISIT: the following assumes we will always have enough
208 * space available on the FIFO RAM for all possible use cases.
209 * Make sure that's true somehow and change FIFO allocation
210 * accordingly.
211 *
212 * If we have Bulk or Isochronous endpoints, we want
213 * them to be able to be very, very fast. So we're giving
214 * those endpoints a fifo_size which is enough for 3 full
215 * packets
216 */
217 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
457e84b6
FB
218 tmp += mdwidth;
219
220 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
2e81c36a 221
457e84b6
FB
222 fifo_size |= (last_fifo_depth << 16);
223
224 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
225 dep->name, last_fifo_depth, fifo_size & 0xffff);
226
227 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
228 fifo_size);
229
230 last_fifo_depth += (fifo_size & 0xffff);
231 }
232
233 return 0;
234}
235
72246da4
FB
236void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
237 int status)
238{
239 struct dwc3 *dwc = dep->dwc;
e5ba5ec8 240 int i;
72246da4
FB
241
242 if (req->queued) {
e5ba5ec8
PA
243 i = 0;
244 do {
eeb720fb 245 dep->busy_slot++;
e5ba5ec8
PA
246 /*
247 * Skip LINK TRB. We can't use req->trb and check for
248 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
249 * just completed (not the LINK TRB).
250 */
251 if (((dep->busy_slot & DWC3_TRB_MASK) ==
252 DWC3_TRB_NUM- 1) &&
16e78db7 253 usb_endpoint_xfer_isoc(dep->endpoint.desc))
e5ba5ec8
PA
254 dep->busy_slot++;
255 } while(++i < req->request.num_mapped_sgs);
c9fda7d6 256 req->queued = false;
72246da4
FB
257 }
258 list_del(&req->list);
eeb720fb 259 req->trb = NULL;
72246da4
FB
260
261 if (req->request.status == -EINPROGRESS)
262 req->request.status = status;
263
0416e494
PA
264 if (dwc->ep0_bounced && dep->number == 0)
265 dwc->ep0_bounced = false;
266 else
267 usb_gadget_unmap_request(&dwc->gadget, &req->request,
268 req->direction);
72246da4
FB
269
270 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
271 req, dep->name, req->request.actual,
272 req->request.length, status);
273
274 spin_unlock(&dwc->lock);
0fc9a1be 275 req->request.complete(&dep->endpoint, &req->request);
72246da4
FB
276 spin_lock(&dwc->lock);
277}
278
279static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
280{
281 switch (cmd) {
282 case DWC3_DEPCMD_DEPSTARTCFG:
283 return "Start New Configuration";
284 case DWC3_DEPCMD_ENDTRANSFER:
285 return "End Transfer";
286 case DWC3_DEPCMD_UPDATETRANSFER:
287 return "Update Transfer";
288 case DWC3_DEPCMD_STARTTRANSFER:
289 return "Start Transfer";
290 case DWC3_DEPCMD_CLEARSTALL:
291 return "Clear Stall";
292 case DWC3_DEPCMD_SETSTALL:
293 return "Set Stall";
802fde98
PZ
294 case DWC3_DEPCMD_GETEPSTATE:
295 return "Get Endpoint State";
72246da4
FB
296 case DWC3_DEPCMD_SETTRANSFRESOURCE:
297 return "Set Endpoint Transfer Resource";
298 case DWC3_DEPCMD_SETEPCONFIG:
299 return "Set Endpoint Configuration";
300 default:
301 return "UNKNOWN command";
302 }
303}
304
b09bb642
FB
305int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
306{
307 u32 timeout = 500;
308 u32 reg;
309
310 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
311 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
312
313 do {
314 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
315 if (!(reg & DWC3_DGCMD_CMDACT)) {
316 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
317 DWC3_DGCMD_STATUS(reg));
318 return 0;
319 }
320
321 /*
322 * We can't sleep here, because it's also called from
323 * interrupt context.
324 */
325 timeout--;
326 if (!timeout)
327 return -ETIMEDOUT;
328 udelay(1);
329 } while (1);
330}
331
72246da4
FB
332int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
333 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
334{
335 struct dwc3_ep *dep = dwc->eps[ep];
61d58242 336 u32 timeout = 500;
72246da4
FB
337 u32 reg;
338
339 dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
340 dep->name,
dc1c70a7
FB
341 dwc3_gadget_ep_cmd_string(cmd), params->param0,
342 params->param1, params->param2);
72246da4 343
dc1c70a7
FB
344 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
345 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
346 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
72246da4
FB
347
348 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
349 do {
350 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
351 if (!(reg & DWC3_DEPCMD_CMDACT)) {
164f6e14
FB
352 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
353 DWC3_DEPCMD_STATUS(reg));
72246da4
FB
354 return 0;
355 }
356
357 /*
72246da4
FB
358 * We can't sleep here, because it is also called from
359 * interrupt context.
360 */
361 timeout--;
362 if (!timeout)
363 return -ETIMEDOUT;
364
61d58242 365 udelay(1);
72246da4
FB
366 } while (1);
367}
368
369static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
f6bafc6a 370 struct dwc3_trb *trb)
72246da4 371{
c439ef87 372 u32 offset = (char *) trb - (char *) dep->trb_pool;
72246da4
FB
373
374 return dep->trb_pool_dma + offset;
375}
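/*
 * Arithmetic sketch (annotation; assuming struct dwc3_trb consists of the
 * four u32 fields bpl, bph, size and ctrl, i.e. 16 bytes): for the TRB at
 * index 5 of the pool,
 *
 *	offset = (char *) &dep->trb_pool[5] - (char *) dep->trb_pool = 80
 *
 * so the function returns dep->trb_pool_dma + 80, the bus address that is
 * handed to the controller for that TRB.
 */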
376
377static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
378{
379 struct dwc3 *dwc = dep->dwc;
380
381 if (dep->trb_pool)
382 return 0;
383
384 if (dep->number == 0 || dep->number == 1)
385 return 0;
386
387 dep->trb_pool = dma_alloc_coherent(dwc->dev,
388 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
389 &dep->trb_pool_dma, GFP_KERNEL);
390 if (!dep->trb_pool) {
391 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
392 dep->name);
393 return -ENOMEM;
394 }
395
396 return 0;
397}
398
399static void dwc3_free_trb_pool(struct dwc3_ep *dep)
400{
401 struct dwc3 *dwc = dep->dwc;
402
403 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
404 dep->trb_pool, dep->trb_pool_dma);
405
406 dep->trb_pool = NULL;
407 dep->trb_pool_dma = 0;
408}
409
410static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
411{
412 struct dwc3_gadget_ep_cmd_params params;
413 u32 cmd;
414
415 memset(&params, 0x00, sizeof(params));
416
417 if (dep->number != 1) {
418 cmd = DWC3_DEPCMD_DEPSTARTCFG;
419 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
b23c8439
PZ
420 if (dep->number > 1) {
421 if (dwc->start_config_issued)
422 return 0;
423 dwc->start_config_issued = true;
72246da4 424 cmd |= DWC3_DEPCMD_PARAM(2);
b23c8439 425 }
72246da4
FB
426
427 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
428 }
429
430 return 0;
431}
432
433static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
c90bfaec 434 const struct usb_endpoint_descriptor *desc,
4b345c9a 435 const struct usb_ss_ep_comp_descriptor *comp_desc,
265b70a7 436 bool ignore, bool restore)
72246da4
FB
437{
438 struct dwc3_gadget_ep_cmd_params params;
439
440 memset(&params, 0x00, sizeof(params));
441
dc1c70a7 442 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
d2e9a13a
CP
443 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
444
445 /* Burst size is only needed in SuperSpeed mode */
446 if (dwc->gadget.speed == USB_SPEED_SUPER) {
447 u32 burst = dep->endpoint.maxburst - 1;
448
449 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
450 }
72246da4 451
4b345c9a
FB
452 if (ignore)
453 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
454
265b70a7
PZ
455 if (restore) {
456 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
457 params.param2 |= dep->saved_state;
458 }
459
dc1c70a7
FB
460 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
461 | DWC3_DEPCFG_XFER_NOT_READY_EN;
72246da4 462
18b7ede5 463 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
dc1c70a7
FB
464 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
465 | DWC3_DEPCFG_STREAM_EVENT_EN;
879631aa
FB
466 dep->stream_capable = true;
467 }
468
72246da4 469 if (usb_endpoint_xfer_isoc(desc))
dc1c70a7 470 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
72246da4
FB
471
472 /*
473 * We are doing 1:1 mapping for endpoints, meaning
474 * Physical Endpoints 2 maps to Logical Endpoint 2 and
475 * so on. We consider the direction bit as part of the physical
476 * endpoint number. So USB endpoint 0x81 is 0x03.
477 */
dc1c70a7 478 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
72246da4
FB
479
480 /*
481 * We must use the lower 16 TX FIFOs even though
482 * HW might have more
483 */
484 if (dep->direction)
dc1c70a7 485 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
72246da4
FB
486
487 if (desc->bInterval) {
dc1c70a7 488 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
72246da4
FB
489 dep->interval = 1 << (desc->bInterval - 1);
490 }
491
492 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
493 DWC3_DEPCMD_SETEPCONFIG, &params);
494}
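/*
 * Packing example (annotation, not part of the driver): for USB endpoint
 * 0x81 configured as bulk at SuperSpeed (wMaxPacketSize = 1024,
 * dep->endpoint.maxburst = 1), the function above builds roughly
 *
 *	param0 = DWC3_DEPCFG_EP_TYPE(USB_ENDPOINT_XFER_BULK)
 *		| DWC3_DEPCFG_MAX_PACKET_SIZE(1024)
 *		| DWC3_DEPCFG_BURST_SIZE(0)
 *		| DWC3_DEPCFG_FIFO_NUMBER(1)	(dep->number 3 >> 1)
 *	param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
 *		| DWC3_DEPCFG_XFER_NOT_READY_EN
 *		| DWC3_DEPCFG_EP_NUMBER(3)	(USB address 0x81 -> physical 3)
 *
 * and issues DEPCMD_SETEPCONFIG on physical endpoint 3.
 */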
495
496static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
497{
498 struct dwc3_gadget_ep_cmd_params params;
499
500 memset(&params, 0x00, sizeof(params));
501
dc1c70a7 502 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
72246da4
FB
503
504 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
505 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
506}
507
508/**
509 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
510 * @dep: endpoint to be initialized
511 * @desc: USB Endpoint Descriptor
512 *
513 * Caller should take care of locking
514 */
515static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
c90bfaec 516 const struct usb_endpoint_descriptor *desc,
4b345c9a 517 const struct usb_ss_ep_comp_descriptor *comp_desc,
265b70a7 518 bool ignore, bool restore)
72246da4
FB
519{
520 struct dwc3 *dwc = dep->dwc;
521 u32 reg;
522 int ret = -ENOMEM;
523
ff62d6b6
FB
524 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
525
72246da4
FB
526 if (!(dep->flags & DWC3_EP_ENABLED)) {
527 ret = dwc3_gadget_start_config(dwc, dep);
528 if (ret)
529 return ret;
530 }
531
265b70a7
PZ
532 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
533 restore);
72246da4
FB
534 if (ret)
535 return ret;
536
537 if (!(dep->flags & DWC3_EP_ENABLED)) {
f6bafc6a
FB
538 struct dwc3_trb *trb_st_hw;
539 struct dwc3_trb *trb_link;
72246da4
FB
540
541 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
542 if (ret)
543 return ret;
544
16e78db7 545 dep->endpoint.desc = desc;
c90bfaec 546 dep->comp_desc = comp_desc;
72246da4
FB
547 dep->type = usb_endpoint_type(desc);
548 dep->flags |= DWC3_EP_ENABLED;
549
550 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
551 reg |= DWC3_DALEPENA_EP(dep->number);
552 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
553
554 if (!usb_endpoint_xfer_isoc(desc))
555 return 0;
556
557 memset(&trb_link, 0, sizeof(trb_link));
558
1d046793 559 /* Link TRB for ISOC. The HWO bit is never reset */
72246da4
FB
560 trb_st_hw = &dep->trb_pool[0];
561
f6bafc6a 562 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
72246da4 563
f6bafc6a
FB
564 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
565 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
566 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
567 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
72246da4
FB
568 }
569
570 return 0;
571}
572
624407f9
SAS
573static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
574static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
72246da4
FB
575{
576 struct dwc3_request *req;
577
ea53b882 578 if (!list_empty(&dep->req_queued)) {
624407f9
SAS
579 dwc3_stop_active_transfer(dwc, dep->number);
580
57911504 581 /* - giveback all requests to gadget driver */
1591633e
PA
582 while (!list_empty(&dep->req_queued)) {
583 req = next_request(&dep->req_queued);
584
585 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
586 }
ea53b882
FB
587 }
588
72246da4
FB
589 while (!list_empty(&dep->request_list)) {
590 req = next_request(&dep->request_list);
591
624407f9 592 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
72246da4 593 }
72246da4
FB
594}
595
596/**
597 * __dwc3_gadget_ep_disable - Disables a HW endpoint
598 * @dep: the endpoint to disable
599 *
624407f9
SAS
600 * This function also removes requests which are currently processed by the
601 * hardware and those which are not yet scheduled.
602 * Caller should take care of locking.
72246da4 603 */
72246da4
FB
604static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
605{
606 struct dwc3 *dwc = dep->dwc;
607 u32 reg;
608
624407f9 609 dwc3_remove_requests(dwc, dep);
72246da4
FB
610
611 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
612 reg &= ~DWC3_DALEPENA_EP(dep->number);
613 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
614
879631aa 615 dep->stream_capable = false;
f9c56cdd 616 dep->endpoint.desc = NULL;
c90bfaec 617 dep->comp_desc = NULL;
72246da4 618 dep->type = 0;
879631aa 619 dep->flags = 0;
72246da4
FB
620
621 return 0;
622}
623
624/* -------------------------------------------------------------------------- */
625
626static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
627 const struct usb_endpoint_descriptor *desc)
628{
629 return -EINVAL;
630}
631
632static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
633{
634 return -EINVAL;
635}
636
637/* -------------------------------------------------------------------------- */
638
639static int dwc3_gadget_ep_enable(struct usb_ep *ep,
640 const struct usb_endpoint_descriptor *desc)
641{
642 struct dwc3_ep *dep;
643 struct dwc3 *dwc;
644 unsigned long flags;
645 int ret;
646
647 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
648 pr_debug("dwc3: invalid parameters\n");
649 return -EINVAL;
650 }
651
652 if (!desc->wMaxPacketSize) {
653 pr_debug("dwc3: missing wMaxPacketSize\n");
654 return -EINVAL;
655 }
656
657 dep = to_dwc3_ep(ep);
658 dwc = dep->dwc;
659
c6f83f38
FB
660 if (dep->flags & DWC3_EP_ENABLED) {
661 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
662 dep->name);
663 return 0;
664 }
665
72246da4
FB
666 switch (usb_endpoint_type(desc)) {
667 case USB_ENDPOINT_XFER_CONTROL:
27a78d6a 668 strlcat(dep->name, "-control", sizeof(dep->name));
72246da4
FB
669 break;
670 case USB_ENDPOINT_XFER_ISOC:
27a78d6a 671 strlcat(dep->name, "-isoc", sizeof(dep->name));
72246da4
FB
672 break;
673 case USB_ENDPOINT_XFER_BULK:
27a78d6a 674 strlcat(dep->name, "-bulk", sizeof(dep->name));
72246da4
FB
675 break;
676 case USB_ENDPOINT_XFER_INT:
27a78d6a 677 strlcat(dep->name, "-int", sizeof(dep->name));
72246da4
FB
678 break;
679 default:
680 dev_err(dwc->dev, "invalid endpoint transfer type\n");
681 }
682
72246da4 683 spin_lock_irqsave(&dwc->lock, flags);
265b70a7 684 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
72246da4
FB
685 spin_unlock_irqrestore(&dwc->lock, flags);
686
687 return ret;
688}
689
690static int dwc3_gadget_ep_disable(struct usb_ep *ep)
691{
692 struct dwc3_ep *dep;
693 struct dwc3 *dwc;
694 unsigned long flags;
695 int ret;
696
697 if (!ep) {
698 pr_debug("dwc3: invalid parameters\n");
699 return -EINVAL;
700 }
701
702 dep = to_dwc3_ep(ep);
703 dwc = dep->dwc;
704
705 if (!(dep->flags & DWC3_EP_ENABLED)) {
706 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
707 dep->name);
708 return 0;
709 }
710
711 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
712 dep->number >> 1,
713 (dep->number & 1) ? "in" : "out");
714
715 spin_lock_irqsave(&dwc->lock, flags);
716 ret = __dwc3_gadget_ep_disable(dep);
717 spin_unlock_irqrestore(&dwc->lock, flags);
718
719 return ret;
720}
721
722static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
723 gfp_t gfp_flags)
724{
725 struct dwc3_request *req;
726 struct dwc3_ep *dep = to_dwc3_ep(ep);
727 struct dwc3 *dwc = dep->dwc;
728
729 req = kzalloc(sizeof(*req), gfp_flags);
730 if (!req) {
731 dev_err(dwc->dev, "not enough memory\n");
732 return NULL;
733 }
734
735 req->epnum = dep->number;
736 req->dep = dep;
72246da4
FB
737
738 return &req->request;
739}
740
741static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
742 struct usb_request *request)
743{
744 struct dwc3_request *req = to_dwc3_request(request);
745
746 kfree(req);
747}
748
c71fc37c
FB
749/**
750 * dwc3_prepare_one_trb - setup one TRB from one request
751 * @dep: endpoint for which this request is prepared
752 * @req: dwc3_request pointer
753 */
68e823e2 754static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
eeb720fb 755 struct dwc3_request *req, dma_addr_t dma,
e5ba5ec8 756 unsigned length, unsigned last, unsigned chain, unsigned node)
c71fc37c 757{
eeb720fb 758 struct dwc3 *dwc = dep->dwc;
f6bafc6a 759 struct dwc3_trb *trb;
c71fc37c 760
eeb720fb
FB
761 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
762 dep->name, req, (unsigned long long) dma,
763 length, last ? " last" : "",
764 chain ? " chain" : "");
765
c71fc37c 766 /* Skip the LINK-TRB on ISOC */
915e202a 767 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
16e78db7 768 usb_endpoint_xfer_isoc(dep->endpoint.desc))
915e202a
PA
769 dep->free_slot++;
770
771 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
c71fc37c 772
eeb720fb
FB
773 if (!req->trb) {
774 dwc3_gadget_move_request_queued(req);
f6bafc6a
FB
775 req->trb = trb;
776 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
e5ba5ec8 777 req->start_slot = dep->free_slot & DWC3_TRB_MASK;
eeb720fb 778 }
c71fc37c 779
e5ba5ec8
PA
780 dep->free_slot++;
781
f6bafc6a
FB
782 trb->size = DWC3_TRB_SIZE_LENGTH(length);
783 trb->bpl = lower_32_bits(dma);
784 trb->bph = upper_32_bits(dma);
c71fc37c 785
16e78db7 786 switch (usb_endpoint_type(dep->endpoint.desc)) {
c71fc37c 787 case USB_ENDPOINT_XFER_CONTROL:
f6bafc6a 788 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
c71fc37c
FB
789 break;
790
791 case USB_ENDPOINT_XFER_ISOC:
e5ba5ec8
PA
792 if (!node)
793 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
794 else
795 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
c71fc37c
FB
796 break;
797
798 case USB_ENDPOINT_XFER_BULK:
799 case USB_ENDPOINT_XFER_INT:
f6bafc6a 800 trb->ctrl = DWC3_TRBCTL_NORMAL;
c71fc37c
FB
801 break;
802 default:
803 /*
804 * This is only possible with faulty memory because we
805 * checked it already :)
806 */
807 BUG();
808 }
809
f3af3651
FB
810 if (!req->request.no_interrupt && !chain)
811 trb->ctrl |= DWC3_TRB_CTRL_IOC;
812
16e78db7 813 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
f6bafc6a
FB
814 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
815 trb->ctrl |= DWC3_TRB_CTRL_CSP;
e5ba5ec8
PA
816 } else if (last) {
817 trb->ctrl |= DWC3_TRB_CTRL_LST;
f6bafc6a 818 }
c71fc37c 819
e5ba5ec8
PA
820 if (chain)
821 trb->ctrl |= DWC3_TRB_CTRL_CHN;
822
16e78db7 823 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
f6bafc6a 824 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
c71fc37c 825
f6bafc6a 826 trb->ctrl |= DWC3_TRB_CTRL_HWO;
c71fc37c
FB
827}
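/*
 * Resulting TRB for a simple case (annotation): a 512-byte bulk IN request
 * with no scatter-gather, queued as the last request in the list, ends up
 * with
 *
 *	trb->bpl  = lower_32_bits(req->request.dma)
 *	trb->bph  = upper_32_bits(req->request.dma)
 *	trb->size = DWC3_TRB_SIZE_LENGTH(512)
 *	trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
 *		  | DWC3_TRB_CTRL_LST | DWC3_TRB_CTRL_HWO
 *
 * (IOC assuming req->request.no_interrupt is not set); the HWO bit hands
 * ownership of the TRB over to the controller.
 */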
828
72246da4
FB
829/*
830 * dwc3_prepare_trbs - setup TRBs from requests
831 * @dep: endpoint for which requests are being prepared
832 * @starting: true if the endpoint is idle and no requests are queued.
833 *
1d046793
PZ
834 * The function goes through the requests list and sets up TRBs for the
835 * transfers. The function returns once there are no more TRBs available or
836 * it runs out of requests.
72246da4 837 */
68e823e2 838static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
72246da4 839{
68e823e2 840 struct dwc3_request *req, *n;
72246da4 841 u32 trbs_left;
8d62cd65 842 u32 max;
c71fc37c 843 unsigned int last_one = 0;
72246da4
FB
844
845 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
846
847 /* the first request must not be queued */
848 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
c71fc37c 849
8d62cd65 850 /* Can't wrap around on a non-isoc EP since there's no link TRB */
16e78db7 851 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
8d62cd65
PZ
852 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
853 if (trbs_left > max)
854 trbs_left = max;
855 }
856
72246da4 857 /*
1d046793
PZ
858 * If busy_slot and free_slot are equal then it is either full or empty. If we are
859 * starting to process requests then we are empty. Otherwise we are
72246da4
FB
860 * full and don't do anything
861 */
862 if (!trbs_left) {
863 if (!starting)
68e823e2 864 return;
72246da4
FB
865 trbs_left = DWC3_TRB_NUM;
866 /*
867 * In case we start from scratch, we queue the ISOC requests
868 * starting from slot 1. This is done because we use ring
869 * buffer and have no LST bit to stop us. Instead, we place
1d046793 870 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
72246da4
FB
871 * after the first request so we start at slot 1 and have
872 * 7 requests proceed before we hit the first IOC.
873 * Other transfer types don't use the ring buffer and are
874 * processed from the first TRB until the last one. Since we
875 * don't wrap around we have to start at the beginning.
876 */
16e78db7 877 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
878 dep->busy_slot = 1;
879 dep->free_slot = 1;
880 } else {
881 dep->busy_slot = 0;
882 dep->free_slot = 0;
883 }
884 }
885
886 /* The last TRB is a link TRB, not used for xfer */
16e78db7 887 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
68e823e2 888 return;
72246da4
FB
889
890 list_for_each_entry_safe(req, n, &dep->request_list, list) {
eeb720fb
FB
891 unsigned length;
892 dma_addr_t dma;
e5ba5ec8 893 last_one = false;
72246da4 894
eeb720fb
FB
895 if (req->request.num_mapped_sgs > 0) {
896 struct usb_request *request = &req->request;
897 struct scatterlist *sg = request->sg;
898 struct scatterlist *s;
899 int i;
72246da4 900
eeb720fb
FB
901 for_each_sg(sg, s, request->num_mapped_sgs, i) {
902 unsigned chain = true;
72246da4 903
eeb720fb
FB
904 length = sg_dma_len(s);
905 dma = sg_dma_address(s);
72246da4 906
1d046793
PZ
907 if (i == (request->num_mapped_sgs - 1) ||
908 sg_is_last(s)) {
e5ba5ec8
PA
909 if (list_is_last(&req->list,
910 &dep->request_list))
911 last_one = true;
eeb720fb
FB
912 chain = false;
913 }
72246da4 914
eeb720fb
FB
915 trbs_left--;
916 if (!trbs_left)
917 last_one = true;
72246da4 918
eeb720fb
FB
919 if (last_one)
920 chain = false;
72246da4 921
eeb720fb 922 dwc3_prepare_one_trb(dep, req, dma, length,
e5ba5ec8 923 last_one, chain, i);
72246da4 924
eeb720fb
FB
925 if (last_one)
926 break;
927 }
72246da4 928 } else {
eeb720fb
FB
929 dma = req->request.dma;
930 length = req->request.length;
931 trbs_left--;
72246da4 932
eeb720fb
FB
933 if (!trbs_left)
934 last_one = 1;
879631aa 935
eeb720fb
FB
936 /* Is this the last request? */
937 if (list_is_last(&req->list, &dep->request_list))
938 last_one = 1;
72246da4 939
eeb720fb 940 dwc3_prepare_one_trb(dep, req, dma, length,
e5ba5ec8 941 last_one, false, 0);
72246da4 942
eeb720fb
FB
943 if (last_one)
944 break;
72246da4 945 }
72246da4 946 }
72246da4
FB
947}
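/*
 * Ring-accounting example (annotation; DWC3_TRB_NUM is assumed to be 32 as
 * defined in gadget.h, so DWC3_TRB_MASK is 0x1f): with busy_slot = 5 and
 * free_slot = 9 the computation at the top of the function gives
 *
 *	trbs_left = (5 - 9) & 0x1f = 28
 *
 * i.e. 28 more TRBs may be prepared before the free index catches up with
 * the busy index.  When both indices are equal, trbs_left is 0 and the
 * "starting" flag tells an empty ring apart from a full one.
 */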
948
949static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
950 int start_new)
951{
952 struct dwc3_gadget_ep_cmd_params params;
953 struct dwc3_request *req;
954 struct dwc3 *dwc = dep->dwc;
955 int ret;
956 u32 cmd;
957
958 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
959 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
960 return -EBUSY;
961 }
962 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
963
964 /*
965 * If we are getting here after a short-out-packet we don't enqueue any
966 * new requests as we try to set the IOC bit only on the last request.
967 */
968 if (start_new) {
969 if (list_empty(&dep->req_queued))
970 dwc3_prepare_trbs(dep, start_new);
971
972 /* req points to the first request which will be sent */
973 req = next_request(&dep->req_queued);
974 } else {
68e823e2
FB
975 dwc3_prepare_trbs(dep, start_new);
976
72246da4 977 /*
1d046793 978 * req points to the first request where HWO changed from 0 to 1
72246da4 979 */
68e823e2 980 req = next_request(&dep->req_queued);
72246da4
FB
981 }
982 if (!req) {
983 dep->flags |= DWC3_EP_PENDING_REQUEST;
984 return 0;
985 }
986
987 memset(&params, 0, sizeof(params));
72246da4 988
1877d6c9
PA
989 if (start_new) {
990 params.param0 = upper_32_bits(req->trb_dma);
991 params.param1 = lower_32_bits(req->trb_dma);
72246da4 992 cmd = DWC3_DEPCMD_STARTTRANSFER;
1877d6c9 993 } else {
72246da4 994 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1877d6c9 995 }
72246da4
FB
996
997 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
998 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
999 if (ret < 0) {
1000 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
1001
1002 /*
1003 * FIXME we need to iterate over the list of requests
1004 * here and stop, unmap, free and del each of the linked
1d046793 1005 * requests instead of what we do now.
72246da4 1006 */
0fc9a1be
FB
1007 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1008 req->direction);
72246da4
FB
1009 list_del(&req->list);
1010 return ret;
1011 }
1012
1013 dep->flags |= DWC3_EP_BUSY;
25b8ff68 1014
f898ae09 1015 if (start_new) {
b4996a86 1016 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
f898ae09 1017 dep->number);
b4996a86 1018 WARN_ON_ONCE(!dep->resource_index);
f898ae09 1019 }
25b8ff68 1020
72246da4
FB
1021 return 0;
1022}
1023
d6d6ec7b
PA
1024static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1025 struct dwc3_ep *dep, u32 cur_uf)
1026{
1027 u32 uf;
1028
1029 if (list_empty(&dep->request_list)) {
1030 dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests.\n",
1031 dep->name);
f4a53c55 1032 dep->flags |= DWC3_EP_PENDING_REQUEST;
d6d6ec7b
PA
1033 return;
1034 }
1035
1036 /* 4 micro frames in the future */
1037 uf = cur_uf + dep->interval * 4;
1038
1039 __dwc3_gadget_kick_transfer(dep, uf, 1);
1040}
1041
1042static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1043 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1044{
1045 u32 cur_uf, mask;
1046
1047 mask = ~(dep->interval - 1);
1048 cur_uf = event->parameters & mask;
1049
1050 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1051}
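/*
 * Scheduling example (annotation): for an isochronous endpoint with
 * bInterval = 4, dep->interval was set to 1 << (4 - 1) = 8 microframes
 * when the endpoint was configured.  Here mask = ~(8 - 1) aligns the
 * microframe number reported by the XferNotReady event down to a multiple
 * of 8, and the transfer is started at
 *
 *	uf = cur_uf + 8 * 4 = cur_uf + 32
 *
 * i.e. four service intervals after the reported (micro)frame.
 */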
1052
72246da4
FB
1053static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1054{
0fc9a1be
FB
1055 struct dwc3 *dwc = dep->dwc;
1056 int ret;
1057
72246da4
FB
1058 req->request.actual = 0;
1059 req->request.status = -EINPROGRESS;
1060 req->direction = dep->direction;
1061 req->epnum = dep->number;
1062
1063 /*
1064 * We only add to our list of requests now and
1065 * start consuming the list once we get XferNotReady
1066 * IRQ.
1067 *
1068 * That way, we avoid doing anything that we don't need
1069 * to do now and defer it until the point we receive a
1070 * particular token from the Host side.
1071 *
1072 * This will also avoid Host cancelling URBs due to too
1d046793 1073 * many NAKs.
72246da4 1074 */
0fc9a1be
FB
1075 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1076 dep->direction);
1077 if (ret)
1078 return ret;
1079
72246da4
FB
1080 list_add_tail(&req->list, &dep->request_list);
1081
1082 /*
b511e5e7 1083 * There are a few special cases:
72246da4 1084 *
f898ae09
PZ
1085 * 1. XferNotReady with empty list of requests. We need to kick the
1086 * transfer here in that situation, otherwise we will be NAKing
1087 * forever. If we get XferNotReady before gadget driver has a
1088 * chance to queue a request, we will ACK the IRQ but won't be
1089 * able to receive the data until the next request is queued.
1090 * The following code is handling exactly that.
72246da4 1091 *
72246da4
FB
1092 */
1093 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
f4a53c55
PA
1094 /*
1095 * If xfernotready is already elapsed and it is a case
1096 * of isoc transfer, then issue END TRANSFER, so that
1097 * you can receive xfernotready again and can have
1098 * notion of current microframe.
1099 */
1100 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
cdc359dd
PA
1101 if (list_empty(&dep->req_queued)) {
1102 dwc3_stop_active_transfer(dwc, dep->number);
1103 dep->flags = DWC3_EP_ENABLED;
1104 }
f4a53c55
PA
1105 return 0;
1106 }
1107
b511e5e7 1108 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
348e026f 1109 if (ret && ret != -EBUSY)
b511e5e7
FB
1110 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1111 dep->name);
15f86bde 1112 return ret;
b511e5e7 1113 }
72246da4 1114
b511e5e7
FB
1115 /*
1116 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1117 * kick the transfer here after queuing a request, otherwise the
1118 * core may not see the modified TRB(s).
1119 */
1120 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
79c9046e
PA
1121 (dep->flags & DWC3_EP_BUSY) &&
1122 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
b4996a86
FB
1123 WARN_ON_ONCE(!dep->resource_index);
1124 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
b511e5e7 1125 false);
348e026f 1126 if (ret && ret != -EBUSY)
72246da4
FB
1127 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1128 dep->name);
15f86bde 1129 return ret;
a0925324 1130 }
72246da4
FB
1131
1132 return 0;
1133}
1134
1135static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1136 gfp_t gfp_flags)
1137{
1138 struct dwc3_request *req = to_dwc3_request(request);
1139 struct dwc3_ep *dep = to_dwc3_ep(ep);
1140 struct dwc3 *dwc = dep->dwc;
1141
1142 unsigned long flags;
1143
1144 int ret;
1145
16e78db7 1146 if (!dep->endpoint.desc) {
72246da4
FB
1147 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1148 request, ep->name);
1149 return -ESHUTDOWN;
1150 }
1151
1152 dev_vdbg(dwc->dev, "queuing request %p to %s length %d\n",
1153 request, ep->name, request->length);
1154
1155 spin_lock_irqsave(&dwc->lock, flags);
1156 ret = __dwc3_gadget_ep_queue(dep, req);
1157 spin_unlock_irqrestore(&dwc->lock, flags);
1158
1159 return ret;
1160}
1161
1162static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1163 struct usb_request *request)
1164{
1165 struct dwc3_request *req = to_dwc3_request(request);
1166 struct dwc3_request *r = NULL;
1167
1168 struct dwc3_ep *dep = to_dwc3_ep(ep);
1169 struct dwc3 *dwc = dep->dwc;
1170
1171 unsigned long flags;
1172 int ret = 0;
1173
1174 spin_lock_irqsave(&dwc->lock, flags);
1175
1176 list_for_each_entry(r, &dep->request_list, list) {
1177 if (r == req)
1178 break;
1179 }
1180
1181 if (r != req) {
1182 list_for_each_entry(r, &dep->req_queued, list) {
1183 if (r == req)
1184 break;
1185 }
1186 if (r == req) {
1187 /* wait until it is processed */
1188 dwc3_stop_active_transfer(dwc, dep->number);
e8d4e8be 1189 goto out1;
72246da4
FB
1190 }
1191 dev_err(dwc->dev, "request %p was not queued to %s\n",
1192 request, ep->name);
1193 ret = -EINVAL;
1194 goto out0;
1195 }
1196
e8d4e8be 1197out1:
72246da4
FB
1198 /* giveback the request */
1199 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1200
1201out0:
1202 spin_unlock_irqrestore(&dwc->lock, flags);
1203
1204 return ret;
1205}
1206
1207int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1208{
1209 struct dwc3_gadget_ep_cmd_params params;
1210 struct dwc3 *dwc = dep->dwc;
1211 int ret;
1212
1213 memset(&params, 0x00, sizeof(params));
1214
1215 if (value) {
72246da4
FB
1216 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1217 DWC3_DEPCMD_SETSTALL, &params);
1218 if (ret)
1219 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1220 value ? "set" : "clear",
1221 dep->name);
1222 else
1223 dep->flags |= DWC3_EP_STALL;
1224 } else {
1225 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1226 DWC3_DEPCMD_CLEARSTALL, &params);
1227 if (ret)
1228 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1229 value ? "set" : "clear",
1230 dep->name);
1231 else
a535d81c 1232 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
72246da4 1233 }
5275455a 1234
72246da4
FB
1235 return ret;
1236}
1237
1238static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1239{
1240 struct dwc3_ep *dep = to_dwc3_ep(ep);
1241 struct dwc3 *dwc = dep->dwc;
1242
1243 unsigned long flags;
1244
1245 int ret;
1246
1247 spin_lock_irqsave(&dwc->lock, flags);
1248
16e78db7 1249 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1250 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1251 ret = -EINVAL;
1252 goto out;
1253 }
1254
1255 ret = __dwc3_gadget_ep_set_halt(dep, value);
1256out:
1257 spin_unlock_irqrestore(&dwc->lock, flags);
1258
1259 return ret;
1260}
1261
1262static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1263{
1264 struct dwc3_ep *dep = to_dwc3_ep(ep);
249a4569
PZ
1265 struct dwc3 *dwc = dep->dwc;
1266 unsigned long flags;
72246da4 1267
249a4569 1268 spin_lock_irqsave(&dwc->lock, flags);
72246da4 1269 dep->flags |= DWC3_EP_WEDGE;
249a4569 1270 spin_unlock_irqrestore(&dwc->lock, flags);
72246da4 1271
08f0d966
PA
1272 if (dep->number == 0 || dep->number == 1)
1273 return dwc3_gadget_ep0_set_halt(ep, 1);
1274 else
1275 return dwc3_gadget_ep_set_halt(ep, 1);
72246da4
FB
1276}
1277
1278/* -------------------------------------------------------------------------- */
1279
1280static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1281 .bLength = USB_DT_ENDPOINT_SIZE,
1282 .bDescriptorType = USB_DT_ENDPOINT,
1283 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1284};
1285
1286static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1287 .enable = dwc3_gadget_ep0_enable,
1288 .disable = dwc3_gadget_ep0_disable,
1289 .alloc_request = dwc3_gadget_ep_alloc_request,
1290 .free_request = dwc3_gadget_ep_free_request,
1291 .queue = dwc3_gadget_ep0_queue,
1292 .dequeue = dwc3_gadget_ep_dequeue,
08f0d966 1293 .set_halt = dwc3_gadget_ep0_set_halt,
72246da4
FB
1294 .set_wedge = dwc3_gadget_ep_set_wedge,
1295};
1296
1297static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1298 .enable = dwc3_gadget_ep_enable,
1299 .disable = dwc3_gadget_ep_disable,
1300 .alloc_request = dwc3_gadget_ep_alloc_request,
1301 .free_request = dwc3_gadget_ep_free_request,
1302 .queue = dwc3_gadget_ep_queue,
1303 .dequeue = dwc3_gadget_ep_dequeue,
1304 .set_halt = dwc3_gadget_ep_set_halt,
1305 .set_wedge = dwc3_gadget_ep_set_wedge,
1306};
1307
1308/* -------------------------------------------------------------------------- */
1309
1310static int dwc3_gadget_get_frame(struct usb_gadget *g)
1311{
1312 struct dwc3 *dwc = gadget_to_dwc(g);
1313 u32 reg;
1314
1315 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1316 return DWC3_DSTS_SOFFN(reg);
1317}
1318
1319static int dwc3_gadget_wakeup(struct usb_gadget *g)
1320{
1321 struct dwc3 *dwc = gadget_to_dwc(g);
1322
1323 unsigned long timeout;
1324 unsigned long flags;
1325
1326 u32 reg;
1327
1328 int ret = 0;
1329
1330 u8 link_state;
1331 u8 speed;
1332
1333 spin_lock_irqsave(&dwc->lock, flags);
1334
1335 /*
1336 * According to the Databook Remote wakeup request should
1337 * be issued only when the device is in early suspend state.
1338 *
1339 * We can check that via USB Link State bits in DSTS register.
1340 */
1341 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1342
1343 speed = reg & DWC3_DSTS_CONNECTSPD;
1344 if (speed == DWC3_DSTS_SUPERSPEED) {
1345 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1346 ret = -EINVAL;
1347 goto out;
1348 }
1349
1350 link_state = DWC3_DSTS_USBLNKST(reg);
1351
1352 switch (link_state) {
1353 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1354 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1355 break;
1356 default:
1357 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1358 link_state);
1359 ret = -EINVAL;
1360 goto out;
1361 }
1362
8598bde7
FB
1363 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1364 if (ret < 0) {
1365 dev_err(dwc->dev, "failed to put link in Recovery\n");
1366 goto out;
1367 }
72246da4 1368
802fde98
PZ
1369 /* Recent versions do this automatically */
1370 if (dwc->revision < DWC3_REVISION_194A) {
1371 /* write zeroes to Link Change Request */
fcc023c7 1372 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
802fde98
PZ
1373 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1374 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1375 }
72246da4 1376
1d046793 1377 /* poll until Link State changes to ON */
72246da4
FB
1378 timeout = jiffies + msecs_to_jiffies(100);
1379
1d046793 1380 while (!time_after(jiffies, timeout)) {
72246da4
FB
1381 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1382
1383 /* in HS, means ON */
1384 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1385 break;
1386 }
1387
1388 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1389 dev_err(dwc->dev, "failed to send remote wakeup\n");
1390 ret = -EINVAL;
1391 }
1392
1393out:
1394 spin_unlock_irqrestore(&dwc->lock, flags);
1395
1396 return ret;
1397}
1398
1399static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1400 int is_selfpowered)
1401{
1402 struct dwc3 *dwc = gadget_to_dwc(g);
249a4569 1403 unsigned long flags;
72246da4 1404
249a4569 1405 spin_lock_irqsave(&dwc->lock, flags);
72246da4 1406 dwc->is_selfpowered = !!is_selfpowered;
249a4569 1407 spin_unlock_irqrestore(&dwc->lock, flags);
72246da4
FB
1408
1409 return 0;
1410}
1411
7b2a0368 1412static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
72246da4
FB
1413{
1414 u32 reg;
61d58242 1415 u32 timeout = 500;
72246da4
FB
1416
1417 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
8db7ed15 1418 if (is_on) {
802fde98
PZ
1419 if (dwc->revision <= DWC3_REVISION_187A) {
1420 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1421 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1422 }
1423
1424 if (dwc->revision >= DWC3_REVISION_194A)
1425 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1426 reg |= DWC3_DCTL_RUN_STOP;
7b2a0368
FB
1427
1428 if (dwc->has_hibernation)
1429 reg |= DWC3_DCTL_KEEP_CONNECT;
1430
9fcb3bd8 1431 dwc->pullups_connected = true;
8db7ed15 1432 } else {
72246da4 1433 reg &= ~DWC3_DCTL_RUN_STOP;
7b2a0368
FB
1434
1435 if (dwc->has_hibernation && !suspend)
1436 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1437
9fcb3bd8 1438 dwc->pullups_connected = false;
8db7ed15 1439 }
72246da4
FB
1440
1441 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1442
1443 do {
1444 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1445 if (is_on) {
1446 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1447 break;
1448 } else {
1449 if (reg & DWC3_DSTS_DEVCTRLHLT)
1450 break;
1451 }
72246da4
FB
1452 timeout--;
1453 if (!timeout)
6f17f74b 1454 return -ETIMEDOUT;
61d58242 1455 udelay(1);
72246da4
FB
1456 } while (1);
1457
1458 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1459 dwc->gadget_driver
1460 ? dwc->gadget_driver->function : "no-function",
1461 is_on ? "connect" : "disconnect");
6f17f74b
PA
1462
1463 return 0;
72246da4
FB
1464}
1465
1466static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1467{
1468 struct dwc3 *dwc = gadget_to_dwc(g);
1469 unsigned long flags;
6f17f74b 1470 int ret;
72246da4
FB
1471
1472 is_on = !!is_on;
1473
1474 spin_lock_irqsave(&dwc->lock, flags);
7b2a0368 1475 ret = dwc3_gadget_run_stop(dwc, is_on, false);
72246da4
FB
1476 spin_unlock_irqrestore(&dwc->lock, flags);
1477
6f17f74b 1478 return ret;
72246da4
FB
1479}
1480
8698e2ac
FB
1481static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1482{
1483 u32 reg;
1484
1485 /* Enable all but Start and End of Frame IRQs */
1486 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1487 DWC3_DEVTEN_EVNTOVERFLOWEN |
1488 DWC3_DEVTEN_CMDCMPLTEN |
1489 DWC3_DEVTEN_ERRTICERREN |
1490 DWC3_DEVTEN_WKUPEVTEN |
1491 DWC3_DEVTEN_ULSTCNGEN |
1492 DWC3_DEVTEN_CONNECTDONEEN |
1493 DWC3_DEVTEN_USBRSTEN |
1494 DWC3_DEVTEN_DISCONNEVTEN);
1495
1496 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1497}
1498
1499static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1500{
1501 /* mask all interrupts */
1502 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1503}
1504
1505static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
b15a762f 1506static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
8698e2ac 1507
72246da4
FB
1508static int dwc3_gadget_start(struct usb_gadget *g,
1509 struct usb_gadget_driver *driver)
1510{
1511 struct dwc3 *dwc = gadget_to_dwc(g);
1512 struct dwc3_ep *dep;
1513 unsigned long flags;
1514 int ret = 0;
8698e2ac 1515 int irq;
72246da4
FB
1516 u32 reg;
1517
b0d7ffd4
FB
1518 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1519 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
e8adfc30 1520 IRQF_SHARED, "dwc3", dwc);
b0d7ffd4
FB
1521 if (ret) {
1522 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1523 irq, ret);
1524 goto err0;
1525 }
1526
72246da4
FB
1527 spin_lock_irqsave(&dwc->lock, flags);
1528
1529 if (dwc->gadget_driver) {
1530 dev_err(dwc->dev, "%s is already bound to %s\n",
1531 dwc->gadget.name,
1532 dwc->gadget_driver->driver.name);
1533 ret = -EBUSY;
b0d7ffd4 1534 goto err1;
72246da4
FB
1535 }
1536
1537 dwc->gadget_driver = driver;
72246da4 1538
72246da4
FB
1539 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1540 reg &= ~(DWC3_DCFG_SPEED_MASK);
07e7f47b
FB
1541
1542 /**
1543 * WORKAROUND: DWC3 revision < 2.20a have an issue
1544 * which would cause metastability state on Run/Stop
1545 * bit if we try to force the IP to USB2-only mode.
1546 *
1547 * Because of that, we cannot configure the IP to any
1548 * speed other than the SuperSpeed
1549 *
1550 * Refers to:
1551 *
1552 * STAR#9000525659: Clock Domain Crossing on DCTL in
1553 * USB 2.0 Mode
1554 */
f7e846f0 1555 if (dwc->revision < DWC3_REVISION_220A) {
07e7f47b 1556 reg |= DWC3_DCFG_SUPERSPEED;
f7e846f0
FB
1557 } else {
1558 switch (dwc->maximum_speed) {
1559 case USB_SPEED_LOW:
1560 reg |= DWC3_DSTS_LOWSPEED;
1561 break;
1562 case USB_SPEED_FULL:
1563 reg |= DWC3_DSTS_FULLSPEED1;
1564 break;
1565 case USB_SPEED_HIGH:
1566 reg |= DWC3_DSTS_HIGHSPEED;
1567 break;
1568 case USB_SPEED_SUPER: /* FALLTHROUGH */
1569 case USB_SPEED_UNKNOWN: /* FALLTHROUGH */
1570 default:
1571 reg |= DWC3_DSTS_SUPERSPEED;
1572 }
1573 }
72246da4
FB
1574 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1575
b23c8439
PZ
1576 dwc->start_config_issued = false;
1577
72246da4
FB
1578 /* Start with SuperSpeed Default */
1579 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1580
1581 dep = dwc->eps[0];
265b70a7
PZ
1582 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1583 false);
72246da4
FB
1584 if (ret) {
1585 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
b0d7ffd4 1586 goto err2;
72246da4
FB
1587 }
1588
1589 dep = dwc->eps[1];
265b70a7
PZ
1590 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1591 false);
72246da4
FB
1592 if (ret) {
1593 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
b0d7ffd4 1594 goto err3;
72246da4
FB
1595 }
1596
1597 /* begin to receive SETUP packets */
c7fcdeb2 1598 dwc->ep0state = EP0_SETUP_PHASE;
72246da4
FB
1599 dwc3_ep0_out_start(dwc);
1600
8698e2ac
FB
1601 dwc3_gadget_enable_irq(dwc);
1602
72246da4
FB
1603 spin_unlock_irqrestore(&dwc->lock, flags);
1604
1605 return 0;
1606
b0d7ffd4 1607err3:
72246da4
FB
1608 __dwc3_gadget_ep_disable(dwc->eps[0]);
1609
b0d7ffd4 1610err2:
cdcedd69 1611 dwc->gadget_driver = NULL;
b0d7ffd4
FB
1612
1613err1:
72246da4
FB
1614 spin_unlock_irqrestore(&dwc->lock, flags);
1615
b0d7ffd4
FB
1616 free_irq(irq, dwc);
1617
1618err0:
72246da4
FB
1619 return ret;
1620}
1621
1622static int dwc3_gadget_stop(struct usb_gadget *g,
1623 struct usb_gadget_driver *driver)
1624{
1625 struct dwc3 *dwc = gadget_to_dwc(g);
1626 unsigned long flags;
8698e2ac 1627 int irq;
72246da4
FB
1628
1629 spin_lock_irqsave(&dwc->lock, flags);
1630
8698e2ac 1631 dwc3_gadget_disable_irq(dwc);
72246da4
FB
1632 __dwc3_gadget_ep_disable(dwc->eps[0]);
1633 __dwc3_gadget_ep_disable(dwc->eps[1]);
1634
1635 dwc->gadget_driver = NULL;
72246da4
FB
1636
1637 spin_unlock_irqrestore(&dwc->lock, flags);
1638
b0d7ffd4
FB
1639 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1640 free_irq(irq, dwc);
1641
72246da4
FB
1642 return 0;
1643}
802fde98 1644
72246da4
FB
1645static const struct usb_gadget_ops dwc3_gadget_ops = {
1646 .get_frame = dwc3_gadget_get_frame,
1647 .wakeup = dwc3_gadget_wakeup,
1648 .set_selfpowered = dwc3_gadget_set_selfpowered,
1649 .pullup = dwc3_gadget_pullup,
1650 .udc_start = dwc3_gadget_start,
1651 .udc_stop = dwc3_gadget_stop,
1652};
1653
1654/* -------------------------------------------------------------------------- */
1655
6a1e3ef4
FB
1656static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1657 u8 num, u32 direction)
72246da4
FB
1658{
1659 struct dwc3_ep *dep;
6a1e3ef4 1660 u8 i;
72246da4 1661
6a1e3ef4
FB
1662 for (i = 0; i < num; i++) {
1663 u8 epnum = (i << 1) | (!!direction);
72246da4 1664
72246da4
FB
1665 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1666 if (!dep) {
1667 dev_err(dwc->dev, "can't allocate endpoint %d\n",
1668 epnum);
1669 return -ENOMEM;
1670 }
1671
1672 dep->dwc = dwc;
1673 dep->number = epnum;
9aa62ae4 1674 dep->direction = !!direction;
72246da4
FB
1675 dwc->eps[epnum] = dep;
1676
1677 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1678 (epnum & 1) ? "in" : "out");
6a1e3ef4 1679
72246da4 1680 dep->endpoint.name = dep->name;
72246da4 1681
653df35e
FB
1682 dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
1683
72246da4 1684 if (epnum == 0 || epnum == 1) {
e117e742 1685 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
6048e4c6 1686 dep->endpoint.maxburst = 1;
72246da4
FB
1687 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1688 if (!epnum)
1689 dwc->gadget.ep0 = &dep->endpoint;
1690 } else {
1691 int ret;
1692
e117e742 1693 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
12d36c16 1694 dep->endpoint.max_streams = 15;
72246da4
FB
1695 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1696 list_add_tail(&dep->endpoint.ep_list,
1697 &dwc->gadget.ep_list);
1698
1699 ret = dwc3_alloc_trb_pool(dep);
25b8ff68 1700 if (ret)
72246da4 1701 return ret;
72246da4 1702 }
25b8ff68 1703
72246da4
FB
1704 INIT_LIST_HEAD(&dep->request_list);
1705 INIT_LIST_HEAD(&dep->req_queued);
1706 }
1707
1708 return 0;
1709}
1710
6a1e3ef4
FB
1711static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1712{
1713 int ret;
1714
1715 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1716
1717 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1718 if (ret < 0) {
1719 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
1720 return ret;
1721 }
1722
1723 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1724 if (ret < 0) {
1725 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
1726 return ret;
1727 }
1728
1729 return 0;
1730}
1731
72246da4
FB
1732static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1733{
1734 struct dwc3_ep *dep;
1735 u8 epnum;
1736
1737 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1738 dep = dwc->eps[epnum];
6a1e3ef4
FB
1739 if (!dep)
1740 continue;
5bf8fae3
GC
1741 /*
1742 * Physical endpoints 0 and 1 are special; they form the
1743 * bi-directional USB endpoint 0.
1744 *
1745 * For those two physical endpoints, we don't allocate a TRB
1746 * pool nor do we add them to the endpoints list. Due to that, we
1747 * shouldn't do these two operations otherwise we would end up
1748 * with all sorts of bugs when removing dwc3.ko.
1749 */
1750 if (epnum != 0 && epnum != 1) {
1751 dwc3_free_trb_pool(dep);
72246da4 1752 list_del(&dep->endpoint.ep_list);
5bf8fae3 1753 }
72246da4
FB
1754
1755 kfree(dep);
1756 }
1757}
1758
72246da4 1759/* -------------------------------------------------------------------------- */
e5caff68 1760
e5ba5ec8
PA
1761static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1762 struct dwc3_request *req, struct dwc3_trb *trb,
72246da4
FB
1763 const struct dwc3_event_depevt *event, int status)
1764{
72246da4
FB
1765 unsigned int count;
1766 unsigned int s_pkt = 0;
d6d6ec7b 1767 unsigned int trb_status;
72246da4 1768
e5ba5ec8
PA
1769 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1770 /*
1771 * We continue despite the error. There is not much we
1772 * can do. If we don't clean it up we loop forever. If
1773 * we skip the TRB then it gets overwritten after a
1774 * while since we use them in a ring buffer. A BUG()
1775 * would help. Lets hope that if this occurs, someone
1776 * fixes the root cause instead of looking away :)
1777 */
1778 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1779 dep->name, trb);
1780 count = trb->size & DWC3_TRB_SIZE_MASK;
1781
1782 if (dep->direction) {
1783 if (count) {
1784 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1785 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1786 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1787 dep->name);
1788 /*
1789 * If missed isoc occurred and there is
1790 * no request queued then issue END
1791 * TRANSFER, so that core generates
1792 * next xfernotready and we will issue
1793 * a fresh START TRANSFER.
1794 * If there are still queued requests
1795 * then wait, do not issue either END
1796 * or UPDATE TRANSFER, just attach the next
1797 * request in request_list during
1798 * giveback. If any future queued request
1799 * is successfully transferred then we
1800 * will issue UPDATE TRANSFER for all
1801 * requests in the request_list.
1802 */
1803 dep->flags |= DWC3_EP_MISSED_ISOC;
1804 } else {
1805 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1806 dep->name);
1807 status = -ECONNRESET;
1808 }
1809 } else {
1810 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1811 }
1812 } else {
1813 if (count && (event->status & DEPEVT_STATUS_SHORT))
1814 s_pkt = 1;
1815 }
1816
1817 /*
1818 * We assume here we will always receive the entire data block
1819 * which we should receive. Meaning, if we program RX to
1820 * receive 4K but we receive only 2K, we assume that's all we
1821 * should receive and we simply bounce the request back to the
1822 * gadget driver for further processing.
1823 */
1824 req->request.actual += req->request.length - count;
1825 if (s_pkt)
1826 return 1;
1827 if ((event->status & DEPEVT_STATUS_LST) &&
1828 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1829 DWC3_TRB_CTRL_HWO)))
1830 return 1;
1831 if ((event->status & DEPEVT_STATUS_IOC) &&
1832 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1833 return 1;
1834 return 0;
1835}
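/*
 * Short-packet arithmetic (annotation): if a 4096-byte OUT request was
 * programmed into one TRB and the host only sent 2048 bytes, the
 * controller leaves the 2048 bytes still outstanding in the TRB size
 * field, so above
 *
 *	count = trb->size & DWC3_TRB_SIZE_MASK = 2048
 *	req->request.actual += 4096 - 2048	(= 2048 bytes received)
 *
 * and, because DEPEVT_STATUS_SHORT is set, the request is given back to
 * the gadget driver right away instead of waiting for more data.
 */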
1836
1837static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1838 const struct dwc3_event_depevt *event, int status)
1839{
1840 struct dwc3_request *req;
1841 struct dwc3_trb *trb;
1842 unsigned int slot;
1843 unsigned int i;
1844 int ret;
1845
72246da4
FB
1846 do {
1847 req = next_request(&dep->req_queued);
d39ee7be
SAS
1848 if (!req) {
1849 WARN_ON_ONCE(1);
1850 return 1;
1851 }
e5ba5ec8
PA
1852 i = 0;
1853 do {
1854 slot = req->start_slot + i;
1855 if ((slot == DWC3_TRB_NUM - 1) &&
1856 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1857 slot++;
1858 slot %= DWC3_TRB_NUM;
1859 trb = &dep->trb_pool[slot];
72246da4 1860
e5ba5ec8
PA
1861 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1862 event, status);
1863 if (ret)
1864 break;
1865 }while (++i < req->request.num_mapped_sgs);
72246da4 1866
72246da4 1867 dwc3_gadget_giveback(dep, req, status);
e5ba5ec8
PA
1868
1869 if (ret)
72246da4
FB
1870 break;
1871 } while (1);
1872
cdc359dd
PA
1873 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1874 list_empty(&dep->req_queued)) {
1875 if (list_empty(&dep->request_list)) {
1876 /*
1877 * If there is no entry in request list then do
1878 * not issue END TRANSFER now. Just set PENDING
1879 * flag, so that END TRANSFER is issued when an
1880 * entry is added into request list.
1881 */
1882 dep->flags = DWC3_EP_PENDING_REQUEST;
1883 } else {
1884 dwc3_stop_active_transfer(dwc, dep->number);
1885 dep->flags = DWC3_EP_ENABLED;
1886 }
7efea86c
PA
1887 return 1;
1888 }
1889
72246da4
FB
1890 return 1;
1891}
1892
1893static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1894 struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1895 int start_new)
1896{
1897 unsigned status = 0;
1898 int clean_busy;
1899
1900 if (event->status & DEPEVT_STATUS_BUSERR)
1901 status = -ECONNRESET;
1902
1d046793 1903 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
c2df85ca 1904 if (clean_busy)
72246da4 1905 dep->flags &= ~DWC3_EP_BUSY;
fae2b904
FB
1906
1907 /*
1908 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1909 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1910 */
1911 if (dwc->revision < DWC3_REVISION_183A) {
1912 u32 reg;
1913 int i;
1914
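		/*
		 * Only re-enable U1/U2 once no enabled endpoint still has
		 * requests queued; otherwise bail out and try again on the
		 * next transfer completion.
		 */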
1915 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
348e026f 1916 dep = dwc->eps[i];
fae2b904
FB
1917
1918 if (!(dep->flags & DWC3_EP_ENABLED))
1919 continue;
1920
1921 if (!list_empty(&dep->req_queued))
1922 return;
1923 }
1924
1925 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1926 reg |= dwc->u1u2;
1927 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1928
1929 dwc->u1u2 = 0;
1930 }
72246da4
FB
1931}
1932
72246da4
FB
1933static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1934 const struct dwc3_event_depevt *event)
1935{
1936 struct dwc3_ep *dep;
1937 u8 epnum = event->endpoint_number;
1938
1939 dep = dwc->eps[epnum];
1940
3336abb5
FB
1941 if (!(dep->flags & DWC3_EP_ENABLED))
1942 return;
1943
72246da4
FB
1944 dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1945 dwc3_ep_event_string(event->endpoint_event));
1946
1947 if (epnum == 0 || epnum == 1) {
1948 dwc3_ep0_interrupt(dwc, event);
1949 return;
1950 }
1951
1952 switch (event->endpoint_event) {
1953 case DWC3_DEPEVT_XFERCOMPLETE:
b4996a86 1954 dep->resource_index = 0;
c2df85ca 1955
16e78db7 1956 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1957 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1958 dep->name);
1959 return;
1960 }
1961
1962 dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1963 break;
1964 case DWC3_DEPEVT_XFERINPROGRESS:
16e78db7 1965 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1966 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1967 dep->name);
1968 return;
1969 }
1970
1971 dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1972 break;
1973 case DWC3_DEPEVT_XFERNOTREADY:
16e78db7 1974 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1975 dwc3_gadget_start_isoc(dwc, dep, event);
1976 } else {
1977 int ret;
1978
1979 dev_vdbg(dwc->dev, "%s: reason %s\n",
40aa41fb
FB
1980 dep->name, event->status &
1981 DEPEVT_STATUS_TRANSFER_ACTIVE
72246da4
FB
1982 ? "Transfer Active"
1983 : "Transfer Not Active");
1984
1985 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1986 if (!ret || ret == -EBUSY)
1987 return;
1988
1989 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1990 dep->name);
1991 }
1992
879631aa
FB
1993 break;
1994 case DWC3_DEPEVT_STREAMEVT:
16e78db7 1995 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
879631aa
FB
1996 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1997 dep->name);
1998 return;
1999 }
2000
2001 switch (event->status) {
2002 case DEPEVT_STREAMEVT_FOUND:
2003 dev_vdbg(dwc->dev, "Stream %d found and started\n",
2004 event->parameters);
2005
2006 break;
2007 case DEPEVT_STREAMEVT_NOTFOUND:
2008 /* FALLTHROUGH */
2009 default:
2010 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
2011 }
72246da4
FB
2012 break;
2013 case DWC3_DEPEVT_RXTXFIFOEVT:
2014 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
2015 break;
72246da4 2016 case DWC3_DEPEVT_EPCMDCMPLT:
ea53b882 2017 dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
72246da4
FB
2018 break;
2019 }
2020}
2021
2022static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2023{
2024 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2025 spin_unlock(&dwc->lock);
2026 dwc->gadget_driver->disconnect(&dwc->gadget);
2027 spin_lock(&dwc->lock);
2028 }
2029}
2030
2031static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
2032{
2033 struct dwc3_ep *dep;
2034 struct dwc3_gadget_ep_cmd_params params;
2035 u32 cmd;
2036 int ret;
2037
2038 dep = dwc->eps[epnum];
2039
b4996a86 2040 if (!dep->resource_index)
3daf74d7
PA
2041 return;
2042
57911504
PA
2043 /*
2044 * NOTICE: We are violating what the Databook says about the
2045 * EndTransfer command. Ideally we would _always_ wait for the
2046 * EndTransfer Command Completion IRQ, but that's causing too
2047 * much trouble synchronizing between us and gadget driver.
2048 *
2049 * We have discussed this with the IP Provider and it was
2050 * suggested to giveback all requests here, but give HW some
2051 * extra time to synchronize with the interconnect. We're using
2052 * an arbitrary 100us delay for that.
2053 *
2054 * Note also that a similar handling was tested by Synopsys
2055 * (thanks a lot Paul) and nothing bad has come out of it.
2056 * In short, what we're doing is:
2057 *
2058 * - Issue EndTransfer WITH CMDIOC bit set
2059 * - Wait 100us
2060 */
2061
3daf74d7
PA
2062 cmd = DWC3_DEPCMD_ENDTRANSFER;
2063 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
b4996a86 2064 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
3daf74d7
PA
2065 memset(&params, 0, sizeof(params));
2066 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2067 WARN_ON_ONCE(ret);
b4996a86 2068 dep->resource_index = 0;
041d81f4 2069 dep->flags &= ~DWC3_EP_BUSY;
57911504 2070 udelay(100);
72246da4
FB
2071}
2072
2073static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2074{
2075 u32 epnum;
2076
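	/*
	 * Physical endpoints 0 and 1 back ep0 and are handled separately,
	 * so start at 2 here.
	 */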
2077 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2078 struct dwc3_ep *dep;
2079
2080 dep = dwc->eps[epnum];
6a1e3ef4
FB
2081 if (!dep)
2082 continue;
2083
72246da4
FB
2084 if (!(dep->flags & DWC3_EP_ENABLED))
2085 continue;
2086
624407f9 2087 dwc3_remove_requests(dwc, dep);
72246da4
FB
2088 }
2089}
2090
2091static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2092{
2093 u32 epnum;
2094
2095 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2096 struct dwc3_ep *dep;
2097 struct dwc3_gadget_ep_cmd_params params;
2098 int ret;
2099
2100 dep = dwc->eps[epnum];
6a1e3ef4
FB
2101 if (!dep)
2102 continue;
72246da4
FB
2103
2104 if (!(dep->flags & DWC3_EP_STALL))
2105 continue;
2106
2107 dep->flags &= ~DWC3_EP_STALL;
2108
2109 memset(&params, 0, sizeof(params));
2110 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2111 DWC3_DEPCMD_CLEARSTALL, &params);
2112 WARN_ON_ONCE(ret);
2113 }
2114}
2115
2116static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2117{
c4430a26
FB
2118 int reg;
2119
72246da4 2120 dev_vdbg(dwc->dev, "%s\n", __func__);
72246da4
FB
2121
2122 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2123 reg &= ~DWC3_DCTL_INITU1ENA;
2124 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2125
2126 reg &= ~DWC3_DCTL_INITU2ENA;
2127 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
72246da4 2128
72246da4 2129 dwc3_disconnect_gadget(dwc);
b23c8439 2130 dwc->start_config_issued = false;
72246da4
FB
2131
2132 dwc->gadget.speed = USB_SPEED_UNKNOWN;
df62df56 2133 dwc->setup_packet_pending = false;
72246da4
FB
2134}
2135
72246da4
FB
2136static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2137{
2138 u32 reg;
2139
2140 dev_vdbg(dwc->dev, "%s\n", __func__);
2141
df62df56
FB
2142 /*
2143 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2144 * would cause a missing Disconnect Event if there's a
2145 * pending Setup Packet in the FIFO.
2146 *
2147 * There's no suggested workaround on the official Bug
2148 * report, which states that "unless the driver/application
2149 * is doing any special handling of a disconnect event,
2150 * there is no functional issue".
2151 *
2152 * Unfortunately, it turns out that we _do_ some special
2153 * handling of a disconnect event, namely complete all
2154 * pending transfers, notify gadget driver of the
2155 * disconnection, and so on.
2156 *
2157 * Our suggested workaround is to follow the Disconnect
2158 * Event steps here, instead, based on a setup_packet_pending
2159 * flag. Such flag gets set whenever we have a XferNotReady
2160 * event on EP0 and gets cleared on XferComplete for the
2161 * same endpoint.
2162 *
2163 * Refers to:
2164 *
2165 * STAR#9000466709: RTL: Device : Disconnect event not
2166 * generated if setup packet pending in FIFO
2167 */
2168 if (dwc->revision < DWC3_REVISION_188A) {
2169 if (dwc->setup_packet_pending)
2170 dwc3_gadget_disconnect_interrupt(dwc);
2171 }
2172
961906ed 2173 /* after reset -> Default State */
14cd592f 2174 usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
961906ed 2175
72246da4
FB
2176 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
2177 dwc3_disconnect_gadget(dwc);
2178
2179 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2180 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2181 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3b637367 2182 dwc->test_mode = false;
72246da4
FB
2183
2184 dwc3_stop_active_transfers(dwc);
2185 dwc3_clear_stall_all_ep(dwc);
b23c8439 2186 dwc->start_config_issued = false;
72246da4
FB
2187
2188 /* Reset device address to zero */
2189 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2190 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2191 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
72246da4
FB
2192}
2193
2194static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2195{
2196 u32 reg;
2197 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2198
2199 /*
2200 * We change the clock only at SuperSpeed, though it is not clear why
2201 * this would be wanted. Maybe it becomes part of the power saving plan.
2202 */
2203
2204 if (speed != DWC3_DSTS_SUPERSPEED)
2205 return;
2206
2207 /*
2208 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2209 * each time on Connect Done.
2210 */
2211 if (!usb30_clock)
2212 return;
2213
2214 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2215 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2216 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2217}
2218
72246da4
FB
2219static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2220{
72246da4
FB
2221 struct dwc3_ep *dep;
2222 int ret;
2223 u32 reg;
2224 u8 speed;
2225
2226 dev_vdbg(dwc->dev, "%s\n", __func__);
2227
72246da4
FB
2228 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2229 speed = reg & DWC3_DSTS_CONNECTSPD;
2230 dwc->speed = speed;
2231
2232 dwc3_update_ram_clk_sel(dwc, speed);
2233
2234 switch (speed) {
2235 case DWC3_DCFG_SUPERSPEED:
05870c5b
FB
2236 /*
2237 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2238 * would cause a missing USB3 Reset event.
2239 *
2240 * In such situations, we should force a USB3 Reset
2241 * event by calling our dwc3_gadget_reset_interrupt()
2242 * routine.
2243 *
2244 * Refers to:
2245 *
2246 * STAR#9000483510: RTL: SS : USB3 reset event may
2247 * not be generated always when the link enters poll
2248 */
2249 if (dwc->revision < DWC3_REVISION_190A)
2250 dwc3_gadget_reset_interrupt(dwc);
2251
72246da4
FB
2252 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2253 dwc->gadget.ep0->maxpacket = 512;
2254 dwc->gadget.speed = USB_SPEED_SUPER;
2255 break;
2256 case DWC3_DCFG_HIGHSPEED:
2257 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2258 dwc->gadget.ep0->maxpacket = 64;
2259 dwc->gadget.speed = USB_SPEED_HIGH;
2260 break;
2261 case DWC3_DCFG_FULLSPEED2:
2262 case DWC3_DCFG_FULLSPEED1:
2263 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2264 dwc->gadget.ep0->maxpacket = 64;
2265 dwc->gadget.speed = USB_SPEED_FULL;
2266 break;
2267 case DWC3_DCFG_LOWSPEED:
2268 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2269 dwc->gadget.ep0->maxpacket = 8;
2270 dwc->gadget.speed = USB_SPEED_LOW;
2271 break;
2272 }
2273
2b758350
PA
2274 /* Enable USB2 LPM Capability */
2275
2276 if ((dwc->revision > DWC3_REVISION_194A)
2277 && (speed != DWC3_DCFG_SUPERSPEED)) {
2278 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2279 reg |= DWC3_DCFG_LPM_CAP;
2280 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2281
2282 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2283 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2284
1a947746
FB
2285 /*
2286 * TODO: This should be configurable. For now using
2287 * maximum allowed HIRD threshold value of 0b1100
2288 */
2289 reg |= DWC3_DCTL_HIRD_THRES(12);
2b758350 2290
356363bf
FB
2291 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2292 } else {
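		/*
		 * Either this is a SuperSpeed connection or the core is
		 * 1.94a or older; LPM is not enabled here, so keep the
		 * HIRD threshold at 0.
		 */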
2293 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2294 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2b758350
PA
2295 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2296 }
2297
72246da4 2298 dep = dwc->eps[0];
265b70a7
PZ
2299 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2300 false);
72246da4
FB
2301 if (ret) {
2302 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2303 return;
2304 }
2305
2306 dep = dwc->eps[1];
265b70a7
PZ
2307 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2308 false);
72246da4
FB
2309 if (ret) {
2310 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2311 return;
2312 }
2313
2314 /*
2315 * Configure PHY via GUSB3PIPECTLn if required.
2316 *
2317 * Update GTXFIFOSIZn
2318 *
2319 * In both cases reset values should be sufficient.
2320 */
2321}
2322
2323static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2324{
2325 dev_vdbg(dwc->dev, "%s\n", __func__);
2326
2327 /*
2328 * TODO take core out of low power mode when that's
2329 * implemented.
2330 */
2331
2332 dwc->gadget_driver->resume(&dwc->gadget);
2333}
2334
2335static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2336 unsigned int evtinfo)
2337{
fae2b904 2338 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
0b0cc1cd
FB
2339 unsigned int pwropt;
2340
2341 /*
2342 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2343 * Hibernation mode enabled which would show up when device detects
2344 * host-initiated U3 exit.
2345 *
2346 * In that case, the device will generate a Link State Change Interrupt
2347 * from U3 to RESUME which is only necessary if Hibernation is
2348 * configured in.
2349 *
2350 * There are no functional changes due to such spurious event and we
2351 * just need to ignore it.
2352 *
2353 * Refers to:
2354 *
2355 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2356 * operational mode
2357 */
2358 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2359 if ((dwc->revision < DWC3_REVISION_250A) &&
2360 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2361 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2362 (next == DWC3_LINK_STATE_RESUME)) {
2363 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2364 return;
2365 }
2366 }
fae2b904
FB
2367
2368 /*
2369 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2370 * on the link partner, the USB session might do multiple entries/exits
2371 * of low power states before a transfer takes place.
2372 *
2373 * Due to this problem, we might experience lower throughput. The
2374 * suggested workaround is to disable DCTL[12:9] bits if we're
2375 * transitioning from U1/U2 to U0 and enable those bits again
2376 * after a transfer completes and there are no pending transfers
2377 * on any of the enabled endpoints.
2378 *
2379 * This is the first half of that workaround.
2380 *
2381 * Refers to:
2382 *
2383 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2384 * core send LGO_Ux entering U0
2385 */
2386 if (dwc->revision < DWC3_REVISION_183A) {
2387 if (next == DWC3_LINK_STATE_U0) {
2388 u32 u1u2;
2389 u32 reg;
2390
2391 switch (dwc->link_state) {
2392 case DWC3_LINK_STATE_U1:
2393 case DWC3_LINK_STATE_U2:
2394 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2395 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2396 | DWC3_DCTL_ACCEPTU2ENA
2397 | DWC3_DCTL_INITU1ENA
2398 | DWC3_DCTL_ACCEPTU1ENA);
2399
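				/*
				 * Remember which U1/U2 enable bits were set
				 * so the second half of the workaround can
				 * restore them later.
				 */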
2400 if (!dwc->u1u2)
2401 dwc->u1u2 = reg & u1u2;
2402
2403 reg &= ~u1u2;
2404
2405 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2406 break;
2407 default:
2408 /* do nothing */
2409 break;
2410 }
2411 }
2412 }
2413
2414 dwc->link_state = next;
019ac832
FB
2415
2416 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
72246da4
FB
2417}
2418
e1dadd3b
FB
2419static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2420 unsigned int evtinfo)
2421{
2422 unsigned int is_ss = evtinfo & BIT(4);
2423
2424 /**
2425 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2426 * has a known issue which can cause USB CV TD.9.23 to fail
2427 * randomly.
2428 *
2429 * Because of this issue, core could generate bogus hibernation
2430 * events which SW needs to ignore.
2431 *
2432 * Refers to:
2433 *
2434 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2435 * Device Fallback from SuperSpeed
2436 */
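	/*
	 * Ignore the event if its SuperSpeed flag does not match the
	 * speed we think we are operating at.
	 */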
2437 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2438 return;
2439
2440 /* enter hibernation here */
2441}
2442
72246da4
FB
2443static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2444 const struct dwc3_event_devt *event)
2445{
2446 switch (event->type) {
2447 case DWC3_DEVICE_EVENT_DISCONNECT:
2448 dwc3_gadget_disconnect_interrupt(dwc);
2449 break;
2450 case DWC3_DEVICE_EVENT_RESET:
2451 dwc3_gadget_reset_interrupt(dwc);
2452 break;
2453 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2454 dwc3_gadget_conndone_interrupt(dwc);
2455 break;
2456 case DWC3_DEVICE_EVENT_WAKEUP:
2457 dwc3_gadget_wakeup_interrupt(dwc);
2458 break;
e1dadd3b
FB
2459 case DWC3_DEVICE_EVENT_HIBER_REQ:
2460 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2461 "unexpected hibernation event\n"))
2462 break;
2463
2464 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2465 break;
72246da4
FB
2466 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2467 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2468 break;
2469 case DWC3_DEVICE_EVENT_EOPF:
2470 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2471 break;
2472 case DWC3_DEVICE_EVENT_SOF:
2473 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2474 break;
2475 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2476 dev_vdbg(dwc->dev, "Erratic Error\n");
2477 break;
2478 case DWC3_DEVICE_EVENT_CMD_CMPL:
2479 dev_vdbg(dwc->dev, "Command Complete\n");
2480 break;
2481 case DWC3_DEVICE_EVENT_OVERFLOW:
2482 dev_vdbg(dwc->dev, "Overflow\n");
2483 break;
2484 default:
2485 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2486 }
2487}
2488
2489static void dwc3_process_event_entry(struct dwc3 *dwc,
2490 const union dwc3_event *event)
2491{
2492 /* Endpoint IRQ, handle it and return early */
2493 if (event->type.is_devspec == 0) {
2494 /* depevt */
2495 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2496 }
2497
2498 switch (event->type.type) {
2499 case DWC3_EVENT_TYPE_DEV:
2500 dwc3_gadget_interrupt(dwc, &event->devt);
2501 break;
2502 /* REVISIT what to do with Carkit and I2C events? */
2503 default:
2504 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2505 }
2506}
2507
f42f2447 2508static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
b15a762f 2509{
f42f2447 2510 struct dwc3_event_buffer *evt;
b15a762f 2511 irqreturn_t ret = IRQ_NONE;
f42f2447 2512 int left;
e8adfc30 2513 u32 reg;
b15a762f 2514
f42f2447
FB
2515 evt = dwc->ev_buffs[buf];
2516 left = evt->count;
b15a762f 2517
f42f2447
FB
2518 if (!(evt->flags & DWC3_EVENT_PENDING))
2519 return IRQ_NONE;
b15a762f 2520
f42f2447
FB
2521 while (left > 0) {
2522 union dwc3_event event;
b15a762f 2523
f42f2447 2524 event.raw = *(u32 *) (evt->buf + evt->lpos);
b15a762f 2525
f42f2447 2526 dwc3_process_event_entry(dwc, &event);
b15a762f 2527
f42f2447
FB
2528 /*
2529 * FIXME: we wrap around correctly to the next entry as
2530 * almost all entries are 4 bytes in size. There is one
2531 * entry which has 12 bytes: a regular entry followed by
2532 * 8 bytes of data. It is not yet clear how things are
2533 * organized when such an entry lands next to the buffer
2534 * boundary, so worry about that once we try to handle
2535 * it.
2536 */
2537 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2538 left -= 4;
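		/*
		 * Write back the 4 bytes we just consumed so the controller
		 * can reuse that space in the event buffer.
		 */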
b15a762f 2539
f42f2447
FB
2540 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2541 }
b15a762f 2542
f42f2447
FB
2543 evt->count = 0;
2544 evt->flags &= ~DWC3_EVENT_PENDING;
2545 ret = IRQ_HANDLED;
b15a762f 2546
f42f2447
FB
2547 /* Unmask interrupt */
2548 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2549 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2550 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
b15a762f 2551
f42f2447
FB
2552 return ret;
2553}
e8adfc30 2554
f42f2447
FB
2555static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2556{
2557 struct dwc3 *dwc = _dwc;
2558 unsigned long flags;
2559 irqreturn_t ret = IRQ_NONE;
2560 int i;
2561
2562 spin_lock_irqsave(&dwc->lock, flags);
2563
2564 for (i = 0; i < dwc->num_event_buffers; i++)
2565 ret |= dwc3_process_event_buf(dwc, i);
b15a762f
FB
2566
2567 spin_unlock_irqrestore(&dwc->lock, flags);
2568
2569 return ret;
2570}
2571
7f97aa98 2572static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
72246da4
FB
2573{
2574 struct dwc3_event_buffer *evt;
72246da4 2575 u32 count;
e8adfc30 2576 u32 reg;
72246da4 2577
b15a762f
FB
2578 evt = dwc->ev_buffs[buf];
2579
72246da4
FB
2580 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2581 count &= DWC3_GEVNTCOUNT_MASK;
2582 if (!count)
2583 return IRQ_NONE;
2584
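	/*
	 * Stash the count and mark the buffer pending; the events are
	 * drained by the threaded handler while this IRQ stays masked.
	 */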
b15a762f
FB
2585 evt->count = count;
2586 evt->flags |= DWC3_EVENT_PENDING;
72246da4 2587
e8adfc30
FB
2588 /* Mask interrupt */
2589 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2590 reg |= DWC3_GEVNTSIZ_INTMASK;
2591 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2592
b15a762f 2593 return IRQ_WAKE_THREAD;
72246da4
FB
2594}
2595
2596static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2597{
2598 struct dwc3 *dwc = _dwc;
2599 int i;
2600 irqreturn_t ret = IRQ_NONE;
2601
2602 spin_lock(&dwc->lock);
2603
9f622b2a 2604 for (i = 0; i < dwc->num_event_buffers; i++) {
72246da4
FB
2605 irqreturn_t status;
2606
7f97aa98 2607 status = dwc3_check_event_buf(dwc, i);
b15a762f 2608 if (status == IRQ_WAKE_THREAD)
72246da4
FB
2609 ret = status;
2610 }
2611
2612 spin_unlock(&dwc->lock);
2613
2614 return ret;
2615}
2616
2617/**
2618 * dwc3_gadget_init - Initializes gadget related registers
1d046793 2619 * @dwc: pointer to our controller context structure
72246da4
FB
2620 *
2621 * Returns 0 on success otherwise negative errno.
2622 */
41ac7b3a 2623int dwc3_gadget_init(struct dwc3 *dwc)
72246da4 2624{
72246da4 2625 int ret;
72246da4
FB
2626
2627 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2628 &dwc->ctrl_req_addr, GFP_KERNEL);
2629 if (!dwc->ctrl_req) {
2630 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2631 ret = -ENOMEM;
2632 goto err0;
2633 }
2634
2635 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2636 &dwc->ep0_trb_addr, GFP_KERNEL);
2637 if (!dwc->ep0_trb) {
2638 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2639 ret = -ENOMEM;
2640 goto err1;
2641 }
2642
3ef35faf 2643 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
72246da4
FB
2644 if (!dwc->setup_buf) {
2645 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2646 ret = -ENOMEM;
2647 goto err2;
2648 }
2649
5812b1c2 2650 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
3ef35faf
FB
2651 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2652 GFP_KERNEL);
5812b1c2
FB
2653 if (!dwc->ep0_bounce) {
2654 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2655 ret = -ENOMEM;
2656 goto err3;
2657 }
2658
72246da4 2659 dwc->gadget.ops = &dwc3_gadget_ops;
d327ab5b 2660 dwc->gadget.max_speed = USB_SPEED_SUPER;
72246da4 2661 dwc->gadget.speed = USB_SPEED_UNKNOWN;
eeb720fb 2662 dwc->gadget.sg_supported = true;
72246da4
FB
2663 dwc->gadget.name = "dwc3-gadget";
2664
a4b9d94b
DC
2665 /*
2666 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2667 * on ep out.
2668 */
2669 dwc->gadget.quirk_ep_out_aligned_size = true;
2670
72246da4
FB
2671 /*
2672 * REVISIT: Here we should clear all pending IRQs to be
2673 * sure we're starting from a well known location.
2674 */
2675
2676 ret = dwc3_gadget_init_endpoints(dwc);
2677 if (ret)
5812b1c2 2678 goto err4;
72246da4 2679
72246da4
FB
2680 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2681 if (ret) {
2682 dev_err(dwc->dev, "failed to register udc\n");
e1f80467 2683 goto err4;
72246da4
FB
2684 }
2685
2686 return 0;
2687
5812b1c2 2688err4:
e1f80467 2689 dwc3_gadget_free_endpoints(dwc);
3ef35faf
FB
2690 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2691 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2692
72246da4 2693err3:
0fc9a1be 2694 kfree(dwc->setup_buf);
72246da4
FB
2695
2696err2:
2697 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2698 dwc->ep0_trb, dwc->ep0_trb_addr);
2699
2700err1:
2701 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2702 dwc->ctrl_req, dwc->ctrl_req_addr);
2703
2704err0:
2705 return ret;
2706}
2707
7415f17c
FB
2708/* -------------------------------------------------------------------------- */
2709
72246da4
FB
2710void dwc3_gadget_exit(struct dwc3 *dwc)
2711{
72246da4 2712 usb_del_gadget_udc(&dwc->gadget);
72246da4 2713
72246da4
FB
2714 dwc3_gadget_free_endpoints(dwc);
2715
3ef35faf
FB
2716 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2717 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2718
0fc9a1be 2719 kfree(dwc->setup_buf);
72246da4
FB
2720
2721 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2722 dwc->ep0_trb, dwc->ep0_trb_addr);
2723
2724 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2725 dwc->ctrl_req, dwc->ctrl_req_addr);
72246da4 2726}
7415f17c
FB
2727
2728int dwc3_gadget_prepare(struct dwc3 *dwc)
2729{
7b2a0368 2730 if (dwc->pullups_connected) {
7415f17c 2731 dwc3_gadget_disable_irq(dwc);
7b2a0368
FB
2732 dwc3_gadget_run_stop(dwc, true, true);
2733 }
7415f17c
FB
2734
2735 return 0;
2736}
2737
2738void dwc3_gadget_complete(struct dwc3 *dwc)
2739{
2740 if (dwc->pullups_connected) {
2741 dwc3_gadget_enable_irq(dwc);
7b2a0368 2742 dwc3_gadget_run_stop(dwc, true, false);
7415f17c
FB
2743 }
2744}
2745
2746int dwc3_gadget_suspend(struct dwc3 *dwc)
2747{
2748 __dwc3_gadget_ep_disable(dwc->eps[0]);
2749 __dwc3_gadget_ep_disable(dwc->eps[1]);
2750
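	/* Save DCFG so dwc3_gadget_resume() can restore it. */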
2751 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2752
2753 return 0;
2754}
2755
2756int dwc3_gadget_resume(struct dwc3 *dwc)
2757{
2758 struct dwc3_ep *dep;
2759 int ret;
2760
2761 /* Start with SuperSpeed Default */
2762 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2763
2764 dep = dwc->eps[0];
265b70a7
PZ
2765 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2766 false);
7415f17c
FB
2767 if (ret)
2768 goto err0;
2769
2770 dep = dwc->eps[1];
265b70a7
PZ
2771 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2772 false);
7415f17c
FB
2773 if (ret)
2774 goto err1;
2775
2776 /* begin to receive SETUP packets */
2777 dwc->ep0state = EP0_SETUP_PHASE;
2778 dwc3_ep0_out_start(dwc);
2779
2780 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2781
2782 return 0;
2783
2784err1:
2785 __dwc3_gadget_ep_disable(dwc->eps[0]);
2786
2787err0:
2788 return ret;
2789}