72246da4
FB
1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
72246da4
FB
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
5945f789
FB
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
72246da4 12 *
5945f789
FB
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
72246da4
FB
17 */
18
19#include <linux/kernel.h>
20#include <linux/delay.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23#include <linux/platform_device.h>
24#include <linux/pm_runtime.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/list.h>
28#include <linux/dma-mapping.h>
29
30#include <linux/usb/ch9.h>
31#include <linux/usb/gadget.h>
32
80977dc9 33#include "debug.h"
72246da4
FB
34#include "core.h"
35#include "gadget.h"
36#include "io.h"
37
04a9bfcd
FB
38/**
39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40 * @dwc: pointer to our context structure
41 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
42 *
43 * Caller should take care of locking. This function will
44 * return 0 on success or -EINVAL if an invalid Test Selector
45 * is passed.
46 */
47int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48{
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
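	/*
	 * The Test Control selector occupies DCTL bits [4:1], hence the
	 * one-bit left shift when programming the requested mode below.
	 */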
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69}
70
911f1f88
PZ
71/**
72 * dwc3_gadget_get_link_state - Gets current state of USB Link
73 * @dwc: pointer to our context structure
74 *
75 * Caller should take care of locking. This function will
76 * return the current link state.
77 */
78int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79{
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85}
86
8598bde7
FB
87/**
88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89 * @dwc: pointer to our context structure
90 * @state: the state to put link into
91 *
92 * Caller should take care of locking. This function will
aee63e3c 93 * return 0 on success or -ETIMEDOUT.
8598bde7
FB
94 */
95int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96{
aee63e3c 97 int retries = 10000;
8598bde7
FB
98 u32 reg;
99
802fde98
PZ
100 /*
101 * Wait until device controller is ready. Only applies to 1.94a and
102 * later RTL.
103 */
104 if (dwc->revision >= DWC3_REVISION_194A) {
105 while (--retries) {
106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 if (reg & DWC3_DSTS_DCNRD)
108 udelay(5);
109 else
110 break;
111 }
112
113 if (retries <= 0)
114 return -ETIMEDOUT;
115 }
116
8598bde7
FB
117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120 /* set requested state */
121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
802fde98
PZ
124 /*
125 * The following code is racy when called from dwc3_gadget_wakeup,
126 * and is not needed, at least on newer versions
127 */
128 if (dwc->revision >= DWC3_REVISION_194A)
129 return 0;
130
8598bde7 131 /* wait for a change in DSTS */
aed430e5 132 retries = 10000;
8598bde7
FB
133 while (--retries) {
134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
8598bde7
FB
136 if (DWC3_DSTS_USBLNKST(reg) == state)
137 return 0;
138
aee63e3c 139 udelay(5);
8598bde7
FB
140 }
141
142 dev_vdbg(dwc->dev, "link state change request timed out\n");
143
144 return -ETIMEDOUT;
145}
146
457e84b6
FB
147/**
148 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
149 * @dwc: pointer to our context structure
150 *
151 * This function will do a best-effort FIFO allocation in order
152 * to improve FIFO usage and throughput, while still allowing
153 * us to enable as many endpoints as possible.
154 *
155 * Keep in mind that this operation will be highly dependent
156 * on the configured size for RAM1 - which contains TxFifo -,
157 * the number of endpoints enabled in the coreConsultant tool, and
158 * the width of the Master Bus.
159 *
160 * In the ideal world, we would always be able to satisfy the
161 * following equation:
162 *
163 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
164 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
165 *
166 * Unfortunately, due to many variables that's not always the case.
167 */
168int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
169{
170 int last_fifo_depth = 0;
171 int ram1_depth;
172 int fifo_size;
173 int mdwidth;
174 int num;
175
176 if (!dwc->needs_fifo_resize)
177 return 0;
178
179 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
180 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
181
182 /* MDWIDTH is represented in bits, we need it in bytes */
183 mdwidth >>= 3;
184
185 /*
186 * FIXME For now we will only allocate 1 wMaxPacketSize space
187 * for each enabled endpoint, later patches will come to
188 * improve this algorithm so that we better use the internal
189 * FIFO space
190 */
32702e96
JP
191 for (num = 0; num < dwc->num_in_eps; num++) {
192 /* bit0 indicates direction; 1 means IN ep */
193 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
2e81c36a 194 int mult = 1;
457e84b6
FB
195 int tmp;
196
457e84b6
FB
197 if (!(dep->flags & DWC3_EP_ENABLED))
198 continue;
199
16e78db7
IS
200 if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
201 || usb_endpoint_xfer_isoc(dep->endpoint.desc))
2e81c36a
FB
202 mult = 3;
203
204 /*
205 * REVISIT: the following assumes we will always have enough
206 * space available on the FIFO RAM for all possible use cases.
207 * Make sure that's true somehow and change FIFO allocation
208 * accordingly.
209 *
210 * If we have Bulk or Isochronous endpoints, we want
211 * them to be able to be very, very fast. So we're giving
212 * those endpoints a fifo_size which is enough for 3 full
213 * packets
214 */
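		/*
		 * Worked example (values are illustrative): with a 64-bit
		 * master bus (mdwidth = 8 bytes) and a 1024-byte bulk
		 * endpoint (mult = 3), tmp = 3 * (1024 + 8) + 8 = 3104
		 * bytes, so fifo_size = DIV_ROUND_UP(3104, 8) = 388
		 * mdwidth-sized words for this endpoint.
		 */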
215 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
457e84b6
FB
216 tmp += mdwidth;
217
218 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
2e81c36a 219
457e84b6
FB
220 fifo_size |= (last_fifo_depth << 16);
221
222 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
223 dep->name, last_fifo_depth, fifo_size & 0xffff);
224
32702e96 225 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
457e84b6
FB
226
227 last_fifo_depth += (fifo_size & 0xffff);
228 }
229
230 return 0;
231}
232
72246da4
FB
233void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
234 int status)
235{
236 struct dwc3 *dwc = dep->dwc;
e5ba5ec8 237 int i;
72246da4
FB
238
239 if (req->queued) {
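		/*
		 * Each mapped SG entry consumed one TRB, so advance
		 * busy_slot once per entry to release them all.
		 */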
e5ba5ec8
PA
240 i = 0;
241 do {
eeb720fb 242 dep->busy_slot++;
e5ba5ec8
PA
243 /*
244 * Skip LINK TRB. We can't use req->trb and check for
245 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
246 * just completed (not the LINK TRB).
247 */
248 if (((dep->busy_slot & DWC3_TRB_MASK) ==
249 DWC3_TRB_NUM - 1) &&
16e78db7 250 usb_endpoint_xfer_isoc(dep->endpoint.desc))
e5ba5ec8
PA
251 dep->busy_slot++;
252 } while (++i < req->request.num_mapped_sgs);
c9fda7d6 253 req->queued = false;
72246da4
FB
254 }
255 list_del(&req->list);
eeb720fb 256 req->trb = NULL;
72246da4
FB
257
258 if (req->request.status == -EINPROGRESS)
259 req->request.status = status;
260
0416e494
PA
261 if (dwc->ep0_bounced && dep->number == 0)
262 dwc->ep0_bounced = false;
263 else
264 usb_gadget_unmap_request(&dwc->gadget, &req->request,
265 req->direction);
72246da4
FB
266
267 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
268 req, dep->name, req->request.actual,
269 req->request.length, status);
2c4cbe6e 270 trace_dwc3_gadget_giveback(req);
72246da4
FB
271
272 spin_unlock(&dwc->lock);
304f7e5e 273 usb_gadget_giveback_request(&dep->endpoint, &req->request);
72246da4
FB
274 spin_lock(&dwc->lock);
275}
276
3ece0ec4 277int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
b09bb642
FB
278{
279 u32 timeout = 500;
280 u32 reg;
281
2c4cbe6e 282 trace_dwc3_gadget_generic_cmd(cmd, param);
427c3df6 283
b09bb642
FB
284 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
285 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
286
287 do {
288 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
289 if (!(reg & DWC3_DGCMD_CMDACT)) {
290 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
291 DWC3_DGCMD_STATUS(reg));
292 return 0;
293 }
294
295 /*
296 * We can't sleep here, because it's also called from
297 * interrupt context.
298 */
299 timeout--;
300 if (!timeout)
301 return -ETIMEDOUT;
302 udelay(1);
303 } while (1);
304}
305
72246da4
FB
306int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
307 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
308{
309 struct dwc3_ep *dep = dwc->eps[ep];
61d58242 310 u32 timeout = 500;
72246da4
FB
311 u32 reg;
312
2c4cbe6e 313 trace_dwc3_gadget_ep_cmd(dep, cmd, params);
72246da4 314
dc1c70a7
FB
315 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
316 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
317 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
72246da4
FB
318
319 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
320 do {
321 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
322 if (!(reg & DWC3_DEPCMD_CMDACT)) {
164f6e14
FB
323 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
324 DWC3_DEPCMD_STATUS(reg));
72246da4
FB
325 return 0;
326 }
327
328 /*
72246da4
FB
329 * We can't sleep here, because it is also called from
330 * interrupt context.
331 */
332 timeout--;
333 if (!timeout)
334 return -ETIMEDOUT;
335
61d58242 336 udelay(1);
72246da4
FB
337 } while (1);
338}
339
340static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
f6bafc6a 341 struct dwc3_trb *trb)
72246da4 342{
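	/*
	 * Translate a TRB's CPU address into its DMA address: take the
	 * byte offset of the TRB within the pool and add it to the
	 * pool's DMA base address.
	 */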
c439ef87 343 u32 offset = (char *) trb - (char *) dep->trb_pool;
72246da4
FB
344
345 return dep->trb_pool_dma + offset;
346}
347
348static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
349{
350 struct dwc3 *dwc = dep->dwc;
351
352 if (dep->trb_pool)
353 return 0;
354
355 if (dep->number == 0 || dep->number == 1)
356 return 0;
357
358 dep->trb_pool = dma_alloc_coherent(dwc->dev,
359 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
360 &dep->trb_pool_dma, GFP_KERNEL);
361 if (!dep->trb_pool) {
362 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
363 dep->name);
364 return -ENOMEM;
365 }
366
367 return 0;
368}
369
370static void dwc3_free_trb_pool(struct dwc3_ep *dep)
371{
372 struct dwc3 *dwc = dep->dwc;
373
374 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
375 dep->trb_pool, dep->trb_pool_dma);
376
377 dep->trb_pool = NULL;
378 dep->trb_pool_dma = 0;
379}
380
381static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
382{
383 struct dwc3_gadget_ep_cmd_params params;
384 u32 cmd;
385
386 memset(&params, 0x00, sizeof(params));
387
388 if (dep->number != 1) {
389 cmd = DWC3_DEPCMD_DEPSTARTCFG;
390 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
b23c8439
PZ
391 if (dep->number > 1) {
392 if (dwc->start_config_issued)
393 return 0;
394 dwc->start_config_issued = true;
72246da4 395 cmd |= DWC3_DEPCMD_PARAM(2);
b23c8439 396 }
72246da4
FB
397
398 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
399 }
400
401 return 0;
402}
403
404static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
c90bfaec 405 const struct usb_endpoint_descriptor *desc,
4b345c9a 406 const struct usb_ss_ep_comp_descriptor *comp_desc,
265b70a7 407 bool ignore, bool restore)
72246da4
FB
408{
409 struct dwc3_gadget_ep_cmd_params params;
410
411 memset(&params, 0x00, sizeof(params));
412
dc1c70a7 413 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
d2e9a13a
CP
414 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
415
416 /* Burst size is only needed in SuperSpeed mode */
417 if (dwc->gadget.speed == USB_SPEED_SUPER) {
418 u32 burst = dep->endpoint.maxburst - 1;
419
420 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
421 }
72246da4 422
4b345c9a
FB
423 if (ignore)
424 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
425
265b70a7
PZ
426 if (restore) {
427 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
428 params.param2 |= dep->saved_state;
429 }
430
dc1c70a7
FB
431 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
432 | DWC3_DEPCFG_XFER_NOT_READY_EN;
72246da4 433
18b7ede5 434 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
dc1c70a7
FB
435 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
436 | DWC3_DEPCFG_STREAM_EVENT_EN;
879631aa
FB
437 dep->stream_capable = true;
438 }
439
0b93a4c8 440 if (!usb_endpoint_xfer_control(desc))
dc1c70a7 441 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
72246da4
FB
442
443 /*
444 * We are doing 1:1 mapping for endpoints, meaning
445 * Physical Endpoint 2 maps to Logical Endpoint 2 and
446 * so on. We consider the direction bit as part of the physical
447 * endpoint number. So USB endpoint 0x81 is 0x03.
448 */
dc1c70a7 449 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
72246da4
FB
450
451 /*
452 * We must use the lower 16 TX FIFOs even though
453 * HW might have more
454 */
455 if (dep->direction)
dc1c70a7 456 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
72246da4
FB
457
458 if (desc->bInterval) {
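		/*
		 * dep->interval caches the service interval in (micro)frames,
		 * derived from bInterval's power-of-two encoding
		 * (2^(bInterval - 1)).
		 */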
dc1c70a7 459 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
72246da4
FB
460 dep->interval = 1 << (desc->bInterval - 1);
461 }
462
463 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
464 DWC3_DEPCMD_SETEPCONFIG, &params);
465}
466
467static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
468{
469 struct dwc3_gadget_ep_cmd_params params;
470
471 memset(&params, 0x00, sizeof(params));
472
dc1c70a7 473 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
72246da4
FB
474
475 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
476 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
477}
478
479/**
480 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
481 * @dep: endpoint to be initialized
482 * @desc: USB Endpoint Descriptor
483 *
484 * Caller should take care of locking
485 */
486static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
c90bfaec 487 const struct usb_endpoint_descriptor *desc,
4b345c9a 488 const struct usb_ss_ep_comp_descriptor *comp_desc,
265b70a7 489 bool ignore, bool restore)
72246da4
FB
490{
491 struct dwc3 *dwc = dep->dwc;
492 u32 reg;
b09e99ee 493 int ret;
72246da4 494
ff62d6b6
FB
495 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
496
72246da4
FB
497 if (!(dep->flags & DWC3_EP_ENABLED)) {
498 ret = dwc3_gadget_start_config(dwc, dep);
499 if (ret)
500 return ret;
501 }
502
265b70a7
PZ
503 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
504 restore);
72246da4
FB
505 if (ret)
506 return ret;
507
508 if (!(dep->flags & DWC3_EP_ENABLED)) {
f6bafc6a
FB
509 struct dwc3_trb *trb_st_hw;
510 struct dwc3_trb *trb_link;
72246da4
FB
511
512 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
513 if (ret)
514 return ret;
515
16e78db7 516 dep->endpoint.desc = desc;
c90bfaec 517 dep->comp_desc = comp_desc;
72246da4
FB
518 dep->type = usb_endpoint_type(desc);
519 dep->flags |= DWC3_EP_ENABLED;
520
521 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
522 reg |= DWC3_DALEPENA_EP(dep->number);
523 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
524
525 if (!usb_endpoint_xfer_isoc(desc))
526 return 0;
527
1d046793 528 /* Link TRB for ISOC. The HWO bit is never reset */
72246da4
FB
529 trb_st_hw = &dep->trb_pool[0];
530
f6bafc6a 531 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
1200a82a 532 memset(trb_link, 0, sizeof(*trb_link));
72246da4 533
f6bafc6a
FB
534 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
535 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
536 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
537 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
72246da4
FB
538 }
539
540 return 0;
541}
542
b992e681 543static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
624407f9 544static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
72246da4
FB
545{
546 struct dwc3_request *req;
547
ea53b882 548 if (!list_empty(&dep->req_queued)) {
b992e681 549 dwc3_stop_active_transfer(dwc, dep->number, true);
624407f9 550
57911504 551 /* giveback all requests to gadget driver */
1591633e
PA
552 while (!list_empty(&dep->req_queued)) {
553 req = next_request(&dep->req_queued);
554
555 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
556 }
ea53b882
FB
557 }
558
72246da4
FB
559 while (!list_empty(&dep->request_list)) {
560 req = next_request(&dep->request_list);
561
624407f9 562 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
72246da4 563 }
72246da4
FB
564}
565
566/**
567 * __dwc3_gadget_ep_disable - Disables a HW endpoint
568 * @dep: the endpoint to disable
569 *
624407f9
SAS
570 * This function also removes requests which are currently being processed by the
571 * hardware and those which are not yet scheduled.
572 * Caller should take care of locking.
72246da4 573 */
72246da4
FB
574static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
575{
576 struct dwc3 *dwc = dep->dwc;
577 u32 reg;
578
624407f9 579 dwc3_remove_requests(dwc, dep);
72246da4 580
687ef981
FB
581 /* make sure HW endpoint isn't stalled */
582 if (dep->flags & DWC3_EP_STALL)
7a608559 583 __dwc3_gadget_ep_set_halt(dep, 0, false);
687ef981 584
72246da4
FB
585 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
586 reg &= ~DWC3_DALEPENA_EP(dep->number);
587 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
588
879631aa 589 dep->stream_capable = false;
f9c56cdd 590 dep->endpoint.desc = NULL;
c90bfaec 591 dep->comp_desc = NULL;
72246da4 592 dep->type = 0;
879631aa 593 dep->flags = 0;
72246da4
FB
594
595 return 0;
596}
597
598/* -------------------------------------------------------------------------- */
599
600static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
601 const struct usb_endpoint_descriptor *desc)
602{
603 return -EINVAL;
604}
605
606static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
607{
608 return -EINVAL;
609}
610
611/* -------------------------------------------------------------------------- */
612
613static int dwc3_gadget_ep_enable(struct usb_ep *ep,
614 const struct usb_endpoint_descriptor *desc)
615{
616 struct dwc3_ep *dep;
617 struct dwc3 *dwc;
618 unsigned long flags;
619 int ret;
620
621 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
622 pr_debug("dwc3: invalid parameters\n");
623 return -EINVAL;
624 }
625
626 if (!desc->wMaxPacketSize) {
627 pr_debug("dwc3: missing wMaxPacketSize\n");
628 return -EINVAL;
629 }
630
631 dep = to_dwc3_ep(ep);
632 dwc = dep->dwc;
633
c6f83f38
FB
634 if (dep->flags & DWC3_EP_ENABLED) {
635 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
636 dep->name);
637 return 0;
638 }
639
72246da4
FB
640 switch (usb_endpoint_type(desc)) {
641 case USB_ENDPOINT_XFER_CONTROL:
27a78d6a 642 strlcat(dep->name, "-control", sizeof(dep->name));
72246da4
FB
643 break;
644 case USB_ENDPOINT_XFER_ISOC:
27a78d6a 645 strlcat(dep->name, "-isoc", sizeof(dep->name));
72246da4
FB
646 break;
647 case USB_ENDPOINT_XFER_BULK:
27a78d6a 648 strlcat(dep->name, "-bulk", sizeof(dep->name));
72246da4
FB
649 break;
650 case USB_ENDPOINT_XFER_INT:
27a78d6a 651 strlcat(dep->name, "-int", sizeof(dep->name));
72246da4
FB
652 break;
653 default:
654 dev_err(dwc->dev, "invalid endpoint transfer type\n");
655 }
656
72246da4 657 spin_lock_irqsave(&dwc->lock, flags);
265b70a7 658 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
72246da4
FB
659 spin_unlock_irqrestore(&dwc->lock, flags);
660
661 return ret;
662}
663
664static int dwc3_gadget_ep_disable(struct usb_ep *ep)
665{
666 struct dwc3_ep *dep;
667 struct dwc3 *dwc;
668 unsigned long flags;
669 int ret;
670
671 if (!ep) {
672 pr_debug("dwc3: invalid parameters\n");
673 return -EINVAL;
674 }
675
676 dep = to_dwc3_ep(ep);
677 dwc = dep->dwc;
678
679 if (!(dep->flags & DWC3_EP_ENABLED)) {
680 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
681 dep->name);
682 return 0;
683 }
684
685 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
686 dep->number >> 1,
687 (dep->number & 1) ? "in" : "out");
688
689 spin_lock_irqsave(&dwc->lock, flags);
690 ret = __dwc3_gadget_ep_disable(dep);
691 spin_unlock_irqrestore(&dwc->lock, flags);
692
693 return ret;
694}
695
696static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
697 gfp_t gfp_flags)
698{
699 struct dwc3_request *req;
700 struct dwc3_ep *dep = to_dwc3_ep(ep);
72246da4
FB
701
702 req = kzalloc(sizeof(*req), gfp_flags);
734d5a53 703 if (!req)
72246da4 704 return NULL;
72246da4
FB
705
706 req->epnum = dep->number;
707 req->dep = dep;
72246da4 708
2c4cbe6e
FB
709 trace_dwc3_alloc_request(req);
710
72246da4
FB
711 return &req->request;
712}
713
714static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
715 struct usb_request *request)
716{
717 struct dwc3_request *req = to_dwc3_request(request);
718
2c4cbe6e 719 trace_dwc3_free_request(req);
72246da4
FB
720 kfree(req);
721}
722
c71fc37c
FB
723/**
724 * dwc3_prepare_one_trb - setup one TRB from one request
725 * @dep: endpoint for which this request is prepared
726 * @req: dwc3_request pointer
727 */
68e823e2 728static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
eeb720fb 729 struct dwc3_request *req, dma_addr_t dma,
e5ba5ec8 730 unsigned length, unsigned last, unsigned chain, unsigned node)
c71fc37c 731{
eeb720fb 732 struct dwc3 *dwc = dep->dwc;
f6bafc6a 733 struct dwc3_trb *trb;
c71fc37c 734
eeb720fb
FB
735 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
736 dep->name, req, (unsigned long long) dma,
737 length, last ? " last" : "",
738 chain ? " chain" : "");
739
915e202a
PA
740
741 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
c71fc37c 742
eeb720fb
FB
743 if (!req->trb) {
744 dwc3_gadget_move_request_queued(req);
f6bafc6a
FB
745 req->trb = trb;
746 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
e5ba5ec8 747 req->start_slot = dep->free_slot & DWC3_TRB_MASK;
eeb720fb 748 }
c71fc37c 749
e5ba5ec8 750 dep->free_slot++;
5cd8c48d
ZJC
751 /* Skip the LINK-TRB on ISOC */
752 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
753 usb_endpoint_xfer_isoc(dep->endpoint.desc))
754 dep->free_slot++;
e5ba5ec8 755
f6bafc6a
FB
756 trb->size = DWC3_TRB_SIZE_LENGTH(length);
757 trb->bpl = lower_32_bits(dma);
758 trb->bph = upper_32_bits(dma);
c71fc37c 759
16e78db7 760 switch (usb_endpoint_type(dep->endpoint.desc)) {
c71fc37c 761 case USB_ENDPOINT_XFER_CONTROL:
f6bafc6a 762 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
c71fc37c
FB
763 break;
764
765 case USB_ENDPOINT_XFER_ISOC:
e5ba5ec8
PA
766 if (!node)
767 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
768 else
769 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
c71fc37c
FB
770 break;
771
772 case USB_ENDPOINT_XFER_BULK:
773 case USB_ENDPOINT_XFER_INT:
f6bafc6a 774 trb->ctrl = DWC3_TRBCTL_NORMAL;
c71fc37c
FB
775 break;
776 default:
777 /*
778 * This is only possible with faulty memory because we
779 * checked it already :)
780 */
781 BUG();
782 }
783
f3af3651
FB
784 if (!req->request.no_interrupt && !chain)
785 trb->ctrl |= DWC3_TRB_CTRL_IOC;
786
16e78db7 787 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
f6bafc6a
FB
788 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
789 trb->ctrl |= DWC3_TRB_CTRL_CSP;
e5ba5ec8
PA
790 } else if (last) {
791 trb->ctrl |= DWC3_TRB_CTRL_LST;
f6bafc6a 792 }
c71fc37c 793
e5ba5ec8
PA
794 if (chain)
795 trb->ctrl |= DWC3_TRB_CTRL_CHN;
796
16e78db7 797 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
f6bafc6a 798 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
c71fc37c 799
f6bafc6a 800 trb->ctrl |= DWC3_TRB_CTRL_HWO;
2c4cbe6e
FB
801
802 trace_dwc3_prepare_trb(dep, trb);
c71fc37c
FB
803}
804
72246da4
FB
805/*
806 * dwc3_prepare_trbs - setup TRBs from requests
807 * @dep: endpoint for which requests are being prepared
808 * @starting: true if the endpoint is idle and no requests are queued.
809 *
1d046793
PZ
810 * The function goes through the requests list and sets up TRBs for the
811 * transfers. The function returns once there are no more TRBs available or
812 * it runs out of requests.
72246da4 813 */
68e823e2 814static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
72246da4 815{
68e823e2 816 struct dwc3_request *req, *n;
72246da4 817 u32 trbs_left;
8d62cd65 818 u32 max;
c71fc37c 819 unsigned int last_one = 0;
72246da4
FB
820
821 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
822
823 /* the first request must not be queued */
824 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
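	/*
	 * busy_slot and free_slot are free-running indices; masking their
	 * difference to the ring size gives the number of TRBs we may
	 * still fill. A result of zero is ambiguous (empty or full) and
	 * is resolved below using 'starting'.
	 */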
c71fc37c 825
8d62cd65 826 /* Can't wrap around on a non-isoc EP since there's no link TRB */
16e78db7 827 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
8d62cd65
PZ
828 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
829 if (trbs_left > max)
830 trbs_left = max;
831 }
832
72246da4 833 /*
1d046793
PZ
834 * If the busy and free slots are equal, then the ring is either full or empty. If we are
835 * starting to process requests then we are empty. Otherwise we are
72246da4
FB
836 * full and don't do anything
837 */
838 if (!trbs_left) {
839 if (!starting)
68e823e2 840 return;
72246da4
FB
841 trbs_left = DWC3_TRB_NUM;
842 /*
843 * In case we start from scratch, we queue the ISOC requests
844 * starting from slot 1. This is done because we use ring
845 * buffer and have no LST bit to stop us. Instead, we place
1d046793 846 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
72246da4
FB
847 * after the first request so we start at slot 1 and have
848 * 7 requests proceed before we hit the first IOC.
849 * Other transfer types don't use the ring buffer and are
850 * processed from the first TRB until the last one. Since we
851 * don't wrap around we have to start at the beginning.
852 */
16e78db7 853 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
854 dep->busy_slot = 1;
855 dep->free_slot = 1;
856 } else {
857 dep->busy_slot = 0;
858 dep->free_slot = 0;
859 }
860 }
861
862 /* The last TRB is a link TRB, not used for xfer */
16e78db7 863 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
68e823e2 864 return;
72246da4
FB
865
866 list_for_each_entry_safe(req, n, &dep->request_list, list) {
eeb720fb
FB
867 unsigned length;
868 dma_addr_t dma;
e5ba5ec8 869 last_one = false;
72246da4 870
eeb720fb
FB
871 if (req->request.num_mapped_sgs > 0) {
872 struct usb_request *request = &req->request;
873 struct scatterlist *sg = request->sg;
874 struct scatterlist *s;
875 int i;
72246da4 876
eeb720fb
FB
877 for_each_sg(sg, s, request->num_mapped_sgs, i) {
878 unsigned chain = true;
72246da4 879
eeb720fb
FB
880 length = sg_dma_len(s);
881 dma = sg_dma_address(s);
72246da4 882
1d046793
PZ
883 if (i == (request->num_mapped_sgs - 1) ||
884 sg_is_last(s)) {
ec512fb8 885 if (list_empty(&dep->request_list))
e5ba5ec8 886 last_one = true;
eeb720fb
FB
887 chain = false;
888 }
72246da4 889
eeb720fb
FB
890 trbs_left--;
891 if (!trbs_left)
892 last_one = true;
72246da4 893
eeb720fb
FB
894 if (last_one)
895 chain = false;
72246da4 896
eeb720fb 897 dwc3_prepare_one_trb(dep, req, dma, length,
e5ba5ec8 898 last_one, chain, i);
72246da4 899
eeb720fb
FB
900 if (last_one)
901 break;
902 }
72246da4 903 } else {
eeb720fb
FB
904 dma = req->request.dma;
905 length = req->request.length;
906 trbs_left--;
72246da4 907
eeb720fb
FB
908 if (!trbs_left)
909 last_one = 1;
879631aa 910
eeb720fb
FB
911 /* Is this the last request? */
912 if (list_is_last(&req->list, &dep->request_list))
913 last_one = 1;
72246da4 914
eeb720fb 915 dwc3_prepare_one_trb(dep, req, dma, length,
e5ba5ec8 916 last_one, false, 0);
72246da4 917
eeb720fb
FB
918 if (last_one)
919 break;
72246da4 920 }
72246da4 921 }
72246da4
FB
922}
923
924static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
925 int start_new)
926{
927 struct dwc3_gadget_ep_cmd_params params;
928 struct dwc3_request *req;
929 struct dwc3 *dwc = dep->dwc;
930 int ret;
931 u32 cmd;
932
933 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
934 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
935 return -EBUSY;
936 }
937 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
938
939 /*
940 * If we are getting here after a short-out-packet we don't enqueue any
941 * new requests as we try to set the IOC bit only on the last request.
942 */
943 if (start_new) {
944 if (list_empty(&dep->req_queued))
945 dwc3_prepare_trbs(dep, start_new);
946
947 /* req points to the first request which will be sent */
948 req = next_request(&dep->req_queued);
949 } else {
68e823e2
FB
950 dwc3_prepare_trbs(dep, start_new);
951
72246da4 952 /*
1d046793 953 * req points to the first request where HWO changed from 0 to 1
72246da4 954 */
68e823e2 955 req = next_request(&dep->req_queued);
72246da4
FB
956 }
957 if (!req) {
958 dep->flags |= DWC3_EP_PENDING_REQUEST;
959 return 0;
960 }
961
962 memset(&params, 0, sizeof(params));
72246da4 963
1877d6c9
PA
964 if (start_new) {
965 params.param0 = upper_32_bits(req->trb_dma);
966 params.param1 = lower_32_bits(req->trb_dma);
72246da4 967 cmd = DWC3_DEPCMD_STARTTRANSFER;
1877d6c9 968 } else {
72246da4 969 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1877d6c9 970 }
72246da4
FB
971
972 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
973 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
974 if (ret < 0) {
975 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
976
977 /*
978 * FIXME we need to iterate over the list of requests
979 * here and stop, unmap, free and del each of the linked
1d046793 980 * requests instead of what we do now.
72246da4 981 */
0fc9a1be
FB
982 usb_gadget_unmap_request(&dwc->gadget, &req->request,
983 req->direction);
72246da4
FB
984 list_del(&req->list);
985 return ret;
986 }
987
988 dep->flags |= DWC3_EP_BUSY;
25b8ff68 989
f898ae09 990 if (start_new) {
b4996a86 991 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
f898ae09 992 dep->number);
b4996a86 993 WARN_ON_ONCE(!dep->resource_index);
f898ae09 994 }
25b8ff68 995
72246da4
FB
996 return 0;
997}
998
d6d6ec7b
PA
999static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1000 struct dwc3_ep *dep, u32 cur_uf)
1001{
1002 u32 uf;
1003
1004 if (list_empty(&dep->request_list)) {
1005 dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests.\n",
1006 dep->name);
f4a53c55 1007 dep->flags |= DWC3_EP_PENDING_REQUEST;
d6d6ec7b
PA
1008 return;
1009 }
1010
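	/*
	 * The Start Transfer command for an isochronous endpoint takes a
	 * future (micro)frame number as its parameter; scheduling a few
	 * intervals ahead leaves enough margin to issue the command in
	 * time.
	 */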
1011 /* 4 micro frames in the future */
1012 uf = cur_uf + dep->interval * 4;
1013
1014 __dwc3_gadget_kick_transfer(dep, uf, 1);
1015}
1016
1017static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1018 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1019{
1020 u32 cur_uf, mask;
1021
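	/*
	 * dep->interval is a power of two, so masking with ~(interval - 1)
	 * rounds the (micro)frame reported in the event down to an
	 * interval boundary before scheduling.
	 */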
1022 mask = ~(dep->interval - 1);
1023 cur_uf = event->parameters & mask;
1024
1025 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1026}
1027
72246da4
FB
1028static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1029{
0fc9a1be
FB
1030 struct dwc3 *dwc = dep->dwc;
1031 int ret;
1032
72246da4
FB
1033 req->request.actual = 0;
1034 req->request.status = -EINPROGRESS;
1035 req->direction = dep->direction;
1036 req->epnum = dep->number;
1037
1038 /*
1039 * We only add to our list of requests now and
1040 * start consuming the list once we get XferNotReady
1041 * IRQ.
1042 *
1043 * That way, we avoid doing anything that we don't need
1044 * to do now and defer it until the point we receive a
1045 * particular token from the Host side.
1046 *
1047 * This will also avoid Host cancelling URBs due to too
1d046793 1048 * many NAKs.
72246da4 1049 */
0fc9a1be
FB
1050 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1051 dep->direction);
1052 if (ret)
1053 return ret;
1054
72246da4
FB
1055 list_add_tail(&req->list, &dep->request_list);
1056
1057 /*
b511e5e7 1058 * There are a few special cases:
72246da4 1059 *
f898ae09
PZ
1060 * 1. XferNotReady with empty list of requests. We need to kick the
1061 * transfer here in that situation, otherwise we will be NAKing
1062 * forever. If we get XferNotReady before gadget driver has a
1063 * chance to queue a request, we will ACK the IRQ but won't be
1064 * able to receive the data until the next request is queued.
1065 * The following code is handling exactly that.
72246da4 1066 *
72246da4
FB
1067 */
1068 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
f4a53c55
PA
1069 /*
1070 * If xfernotready is already elapsed and it is a case
1071 * of isoc transfer, then issue END TRANSFER, so that
1072 * you can receive xfernotready again and can have
1073 * notion of current microframe.
1074 */
1075 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
cdc359dd 1076 if (list_empty(&dep->req_queued)) {
b992e681 1077 dwc3_stop_active_transfer(dwc, dep->number, true);
cdc359dd
PA
1078 dep->flags = DWC3_EP_ENABLED;
1079 }
f4a53c55
PA
1080 return 0;
1081 }
1082
b511e5e7 1083 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
348e026f 1084 if (ret && ret != -EBUSY)
b511e5e7
FB
1085 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1086 dep->name);
15f86bde 1087 return ret;
b511e5e7 1088 }
72246da4 1089
b511e5e7
FB
1090 /*
1091 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1092 * kick the transfer here after queuing a request, otherwise the
1093 * core may not see the modified TRB(s).
1094 */
1095 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
79c9046e
PA
1096 (dep->flags & DWC3_EP_BUSY) &&
1097 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
b4996a86
FB
1098 WARN_ON_ONCE(!dep->resource_index);
1099 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
b511e5e7 1100 false);
348e026f 1101 if (ret && ret != -EBUSY)
72246da4
FB
1102 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1103 dep->name);
15f86bde 1104 return ret;
a0925324 1105 }
72246da4 1106
b997ada5
FB
1107 /*
1108 * 3. Stream Capable Bulk Endpoints. We need to start the transfer
1109 * right away, otherwise host will not know we have streams to be
1110 * handled.
1111 */
1112 if (dep->stream_capable) {
1113 int ret;
1114
1115 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1116 if (ret && ret != -EBUSY) {
1117 struct dwc3 *dwc = dep->dwc;
1118
1119 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1120 dep->name);
1121 }
1122 }
1123
72246da4
FB
1124 return 0;
1125}
1126
1127static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1128 gfp_t gfp_flags)
1129{
1130 struct dwc3_request *req = to_dwc3_request(request);
1131 struct dwc3_ep *dep = to_dwc3_ep(ep);
1132 struct dwc3 *dwc = dep->dwc;
1133
1134 unsigned long flags;
1135
1136 int ret;
1137
fdee4eba 1138 spin_lock_irqsave(&dwc->lock, flags);
16e78db7 1139 if (!dep->endpoint.desc) {
72246da4
FB
1140 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1141 request, ep->name);
73359cef
FB
1142 ret = -ESHUTDOWN;
1143 goto out;
1144 }
1145
1146 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1147 request, req->dep->name)) {
1148 ret = -EINVAL;
1149 goto out;
72246da4
FB
1150 }
1151
1152 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
1153 request, ep->name, request->length);
2c4cbe6e 1154 trace_dwc3_ep_queue(req);
72246da4 1155
72246da4 1156 ret = __dwc3_gadget_ep_queue(dep, req);
73359cef
FB
1157
1158out:
72246da4
FB
1159 spin_unlock_irqrestore(&dwc->lock, flags);
1160
1161 return ret;
1162}
1163
1164static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1165 struct usb_request *request)
1166{
1167 struct dwc3_request *req = to_dwc3_request(request);
1168 struct dwc3_request *r = NULL;
1169
1170 struct dwc3_ep *dep = to_dwc3_ep(ep);
1171 struct dwc3 *dwc = dep->dwc;
1172
1173 unsigned long flags;
1174 int ret = 0;
1175
2c4cbe6e
FB
1176 trace_dwc3_ep_dequeue(req);
1177
72246da4
FB
1178 spin_lock_irqsave(&dwc->lock, flags);
1179
1180 list_for_each_entry(r, &dep->request_list, list) {
1181 if (r == req)
1182 break;
1183 }
1184
1185 if (r != req) {
1186 list_for_each_entry(r, &dep->req_queued, list) {
1187 if (r == req)
1188 break;
1189 }
1190 if (r == req) {
1191 /* wait until it is processed */
b992e681 1192 dwc3_stop_active_transfer(dwc, dep->number, true);
e8d4e8be 1193 goto out1;
72246da4
FB
1194 }
1195 dev_err(dwc->dev, "request %p was not queued to %s\n",
1196 request, ep->name);
1197 ret = -EINVAL;
1198 goto out0;
1199 }
1200
e8d4e8be 1201out1:
72246da4
FB
1202 /* giveback the request */
1203 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1204
1205out0:
1206 spin_unlock_irqrestore(&dwc->lock, flags);
1207
1208 return ret;
1209}
1210
7a608559 1211int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
72246da4
FB
1212{
1213 struct dwc3_gadget_ep_cmd_params params;
1214 struct dwc3 *dwc = dep->dwc;
1215 int ret;
1216
5ad02fb8
FB
1217 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1218 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1219 return -EINVAL;
1220 }
1221
72246da4
FB
1222 memset(&params, 0x00, sizeof(params));
1223
1224 if (value) {
7a608559
FB
1225 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1226 (!list_empty(&dep->req_queued) ||
1227 !list_empty(&dep->request_list)))) {
1228 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
1229 dep->name);
1230 return -EAGAIN;
1231 }
1232
72246da4
FB
1233 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1234 DWC3_DEPCMD_SETSTALL, &params);
1235 if (ret)
3f89204b 1236 dev_err(dwc->dev, "failed to set STALL on %s\n",
72246da4
FB
1237 dep->name);
1238 else
1239 dep->flags |= DWC3_EP_STALL;
1240 } else {
1241 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1242 DWC3_DEPCMD_CLEARSTALL, &params);
1243 if (ret)
3f89204b 1244 dev_err(dwc->dev, "failed to clear STALL on %s\n",
72246da4
FB
1245 dep->name);
1246 else
a535d81c 1247 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
72246da4 1248 }
5275455a 1249
72246da4
FB
1250 return ret;
1251}
1252
1253static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1254{
1255 struct dwc3_ep *dep = to_dwc3_ep(ep);
1256 struct dwc3 *dwc = dep->dwc;
1257
1258 unsigned long flags;
1259
1260 int ret;
1261
1262 spin_lock_irqsave(&dwc->lock, flags);
7a608559 1263 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
72246da4
FB
1264 spin_unlock_irqrestore(&dwc->lock, flags);
1265
1266 return ret;
1267}
1268
1269static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1270{
1271 struct dwc3_ep *dep = to_dwc3_ep(ep);
249a4569
PZ
1272 struct dwc3 *dwc = dep->dwc;
1273 unsigned long flags;
95aa4e8d 1274 int ret;
72246da4 1275
249a4569 1276 spin_lock_irqsave(&dwc->lock, flags);
72246da4
FB
1277 dep->flags |= DWC3_EP_WEDGE;
1278
08f0d966 1279 if (dep->number == 0 || dep->number == 1)
95aa4e8d 1280 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
08f0d966 1281 else
7a608559 1282 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
95aa4e8d
FB
1283 spin_unlock_irqrestore(&dwc->lock, flags);
1284
1285 return ret;
72246da4
FB
1286}
1287
1288/* -------------------------------------------------------------------------- */
1289
1290static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1291 .bLength = USB_DT_ENDPOINT_SIZE,
1292 .bDescriptorType = USB_DT_ENDPOINT,
1293 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1294};
1295
1296static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1297 .enable = dwc3_gadget_ep0_enable,
1298 .disable = dwc3_gadget_ep0_disable,
1299 .alloc_request = dwc3_gadget_ep_alloc_request,
1300 .free_request = dwc3_gadget_ep_free_request,
1301 .queue = dwc3_gadget_ep0_queue,
1302 .dequeue = dwc3_gadget_ep_dequeue,
08f0d966 1303 .set_halt = dwc3_gadget_ep0_set_halt,
72246da4
FB
1304 .set_wedge = dwc3_gadget_ep_set_wedge,
1305};
1306
1307static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1308 .enable = dwc3_gadget_ep_enable,
1309 .disable = dwc3_gadget_ep_disable,
1310 .alloc_request = dwc3_gadget_ep_alloc_request,
1311 .free_request = dwc3_gadget_ep_free_request,
1312 .queue = dwc3_gadget_ep_queue,
1313 .dequeue = dwc3_gadget_ep_dequeue,
1314 .set_halt = dwc3_gadget_ep_set_halt,
1315 .set_wedge = dwc3_gadget_ep_set_wedge,
1316};
1317
1318/* -------------------------------------------------------------------------- */
1319
1320static int dwc3_gadget_get_frame(struct usb_gadget *g)
1321{
1322 struct dwc3 *dwc = gadget_to_dwc(g);
1323 u32 reg;
1324
1325 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1326 return DWC3_DSTS_SOFFN(reg);
1327}
1328
1329static int dwc3_gadget_wakeup(struct usb_gadget *g)
1330{
1331 struct dwc3 *dwc = gadget_to_dwc(g);
1332
1333 unsigned long timeout;
1334 unsigned long flags;
1335
1336 u32 reg;
1337
1338 int ret = 0;
1339
1340 u8 link_state;
1341 u8 speed;
1342
1343 spin_lock_irqsave(&dwc->lock, flags);
1344
1345 /*
1346 * According to the Databook Remote wakeup request should
1347 * be issued only when the device is in early suspend state.
1348 *
1349 * We can check that via USB Link State bits in DSTS register.
1350 */
1351 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1352
1353 speed = reg & DWC3_DSTS_CONNECTSPD;
1354 if (speed == DWC3_DSTS_SUPERSPEED) {
1355 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1356 ret = -EINVAL;
1357 goto out;
1358 }
1359
1360 link_state = DWC3_DSTS_USBLNKST(reg);
1361
1362 switch (link_state) {
1363 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1364 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1365 break;
1366 default:
1367 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1368 link_state);
1369 ret = -EINVAL;
1370 goto out;
1371 }
1372
8598bde7
FB
1373 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1374 if (ret < 0) {
1375 dev_err(dwc->dev, "failed to put link in Recovery\n");
1376 goto out;
1377 }
72246da4 1378
802fde98
PZ
1379 /* Recent versions do this automatically */
1380 if (dwc->revision < DWC3_REVISION_194A) {
1381 /* write zeroes to Link Change Request */
fcc023c7 1382 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
802fde98
PZ
1383 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1384 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1385 }
72246da4 1386
1d046793 1387 /* poll until Link State changes to ON */
72246da4
FB
1388 timeout = jiffies + msecs_to_jiffies(100);
1389
1d046793 1390 while (!time_after(jiffies, timeout)) {
72246da4
FB
1391 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1392
1393 /* in HS, means ON */
1394 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1395 break;
1396 }
1397
1398 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1399 dev_err(dwc->dev, "failed to send remote wakeup\n");
1400 ret = -EINVAL;
1401 }
1402
1403out:
1404 spin_unlock_irqrestore(&dwc->lock, flags);
1405
1406 return ret;
1407}
1408
1409static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1410 int is_selfpowered)
1411{
1412 struct dwc3 *dwc = gadget_to_dwc(g);
249a4569 1413 unsigned long flags;
72246da4 1414
249a4569 1415 spin_lock_irqsave(&dwc->lock, flags);
72246da4 1416 dwc->is_selfpowered = !!is_selfpowered;
249a4569 1417 spin_unlock_irqrestore(&dwc->lock, flags);
72246da4
FB
1418
1419 return 0;
1420}
1421
7b2a0368 1422static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
72246da4
FB
1423{
1424 u32 reg;
61d58242 1425 u32 timeout = 500;
72246da4
FB
1426
1427 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
8db7ed15 1428 if (is_on) {
802fde98
PZ
1429 if (dwc->revision <= DWC3_REVISION_187A) {
1430 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1431 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1432 }
1433
1434 if (dwc->revision >= DWC3_REVISION_194A)
1435 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1436 reg |= DWC3_DCTL_RUN_STOP;
7b2a0368
FB
1437
1438 if (dwc->has_hibernation)
1439 reg |= DWC3_DCTL_KEEP_CONNECT;
1440
9fcb3bd8 1441 dwc->pullups_connected = true;
8db7ed15 1442 } else {
72246da4 1443 reg &= ~DWC3_DCTL_RUN_STOP;
7b2a0368
FB
1444
1445 if (dwc->has_hibernation && !suspend)
1446 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1447
9fcb3bd8 1448 dwc->pullups_connected = false;
8db7ed15 1449 }
72246da4
FB
1450
1451 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1452
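	/*
	 * Poll DSTS until the core reflects the new Run/Stop state:
	 * DEVCTRLHLT is clear while the controller is running and set
	 * once it has halted.
	 */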
1453 do {
1454 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1455 if (is_on) {
1456 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1457 break;
1458 } else {
1459 if (reg & DWC3_DSTS_DEVCTRLHLT)
1460 break;
1461 }
72246da4
FB
1462 timeout--;
1463 if (!timeout)
6f17f74b 1464 return -ETIMEDOUT;
61d58242 1465 udelay(1);
72246da4
FB
1466 } while (1);
1467
1468 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1469 dwc->gadget_driver
1470 ? dwc->gadget_driver->function : "no-function",
1471 is_on ? "connect" : "disconnect");
6f17f74b
PA
1472
1473 return 0;
72246da4
FB
1474}
1475
1476static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1477{
1478 struct dwc3 *dwc = gadget_to_dwc(g);
1479 unsigned long flags;
6f17f74b 1480 int ret;
72246da4
FB
1481
1482 is_on = !!is_on;
1483
1484 spin_lock_irqsave(&dwc->lock, flags);
7b2a0368 1485 ret = dwc3_gadget_run_stop(dwc, is_on, false);
72246da4
FB
1486 spin_unlock_irqrestore(&dwc->lock, flags);
1487
6f17f74b 1488 return ret;
72246da4
FB
1489}
1490
8698e2ac
FB
1491static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1492{
1493 u32 reg;
1494
1495 /* Enable all but Start and End of Frame IRQs */
1496 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1497 DWC3_DEVTEN_EVNTOVERFLOWEN |
1498 DWC3_DEVTEN_CMDCMPLTEN |
1499 DWC3_DEVTEN_ERRTICERREN |
1500 DWC3_DEVTEN_WKUPEVTEN |
1501 DWC3_DEVTEN_ULSTCNGEN |
1502 DWC3_DEVTEN_CONNECTDONEEN |
1503 DWC3_DEVTEN_USBRSTEN |
1504 DWC3_DEVTEN_DISCONNEVTEN);
1505
1506 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1507}
1508
1509static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1510{
1511 /* mask all interrupts */
1512 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1513}
1514
1515static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
b15a762f 1516static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
8698e2ac 1517
72246da4
FB
1518static int dwc3_gadget_start(struct usb_gadget *g,
1519 struct usb_gadget_driver *driver)
1520{
1521 struct dwc3 *dwc = gadget_to_dwc(g);
1522 struct dwc3_ep *dep;
1523 unsigned long flags;
1524 int ret = 0;
8698e2ac 1525 int irq;
72246da4
FB
1526 u32 reg;
1527
b0d7ffd4
FB
1528 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1529 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
e8adfc30 1530 IRQF_SHARED, "dwc3", dwc);
b0d7ffd4
FB
1531 if (ret) {
1532 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1533 irq, ret);
1534 goto err0;
1535 }
1536
72246da4
FB
1537 spin_lock_irqsave(&dwc->lock, flags);
1538
1539 if (dwc->gadget_driver) {
1540 dev_err(dwc->dev, "%s is already bound to %s\n",
1541 dwc->gadget.name,
1542 dwc->gadget_driver->driver.name);
1543 ret = -EBUSY;
b0d7ffd4 1544 goto err1;
72246da4
FB
1545 }
1546
1547 dwc->gadget_driver = driver;
72246da4 1548
72246da4
FB
1549 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1550 reg &= ~(DWC3_DCFG_SPEED_MASK);
07e7f47b
FB
1551
1552 /**
1553 * WORKAROUND: DWC3 revision < 2.20a have an issue
1554 * which would cause metastability state on Run/Stop
1555 * bit if we try to force the IP to USB2-only mode.
1556 *
1557 * Because of that, we cannot configure the IP to any
1558 * speed other than SuperSpeed.
1559 *
1560 * Refers to:
1561 *
1562 * STAR#9000525659: Clock Domain Crossing on DCTL in
1563 * USB 2.0 Mode
1564 */
f7e846f0 1565 if (dwc->revision < DWC3_REVISION_220A) {
07e7f47b 1566 reg |= DWC3_DCFG_SUPERSPEED;
f7e846f0
FB
1567 } else {
1568 switch (dwc->maximum_speed) {
1569 case USB_SPEED_LOW:
1570 reg |= DWC3_DSTS_LOWSPEED;
1571 break;
1572 case USB_SPEED_FULL:
1573 reg |= DWC3_DSTS_FULLSPEED1;
1574 break;
1575 case USB_SPEED_HIGH:
1576 reg |= DWC3_DSTS_HIGHSPEED;
1577 break;
1578 case USB_SPEED_SUPER: /* FALLTHROUGH */
1579 case USB_SPEED_UNKNOWN: /* FALLTHROUGH */
1580 default:
1581 reg |= DWC3_DSTS_SUPERSPEED;
1582 }
1583 }
72246da4
FB
1584 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1585
b23c8439
PZ
1586 dwc->start_config_issued = false;
1587
72246da4
FB
1588 /* Start with SuperSpeed Default */
1589 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1590
1591 dep = dwc->eps[0];
265b70a7
PZ
1592 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1593 false);
72246da4
FB
1594 if (ret) {
1595 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
b0d7ffd4 1596 goto err2;
72246da4
FB
1597 }
1598
1599 dep = dwc->eps[1];
265b70a7
PZ
1600 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1601 false);
72246da4
FB
1602 if (ret) {
1603 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
b0d7ffd4 1604 goto err3;
72246da4
FB
1605 }
1606
1607 /* begin to receive SETUP packets */
c7fcdeb2 1608 dwc->ep0state = EP0_SETUP_PHASE;
72246da4
FB
1609 dwc3_ep0_out_start(dwc);
1610
8698e2ac
FB
1611 dwc3_gadget_enable_irq(dwc);
1612
72246da4
FB
1613 spin_unlock_irqrestore(&dwc->lock, flags);
1614
1615 return 0;
1616
b0d7ffd4 1617err3:
72246da4
FB
1618 __dwc3_gadget_ep_disable(dwc->eps[0]);
1619
b0d7ffd4 1620err2:
cdcedd69 1621 dwc->gadget_driver = NULL;
b0d7ffd4
FB
1622
1623err1:
72246da4
FB
1624 spin_unlock_irqrestore(&dwc->lock, flags);
1625
b0d7ffd4
FB
1626 free_irq(irq, dwc);
1627
1628err0:
72246da4
FB
1629 return ret;
1630}
1631
22835b80 1632static int dwc3_gadget_stop(struct usb_gadget *g)
72246da4
FB
1633{
1634 struct dwc3 *dwc = gadget_to_dwc(g);
1635 unsigned long flags;
8698e2ac 1636 int irq;
72246da4
FB
1637
1638 spin_lock_irqsave(&dwc->lock, flags);
1639
8698e2ac 1640 dwc3_gadget_disable_irq(dwc);
72246da4
FB
1641 __dwc3_gadget_ep_disable(dwc->eps[0]);
1642 __dwc3_gadget_ep_disable(dwc->eps[1]);
1643
1644 dwc->gadget_driver = NULL;
72246da4
FB
1645
1646 spin_unlock_irqrestore(&dwc->lock, flags);
1647
b0d7ffd4
FB
1648 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1649 free_irq(irq, dwc);
1650
72246da4
FB
1651 return 0;
1652}
802fde98 1653
72246da4
FB
1654static const struct usb_gadget_ops dwc3_gadget_ops = {
1655 .get_frame = dwc3_gadget_get_frame,
1656 .wakeup = dwc3_gadget_wakeup,
1657 .set_selfpowered = dwc3_gadget_set_selfpowered,
1658 .pullup = dwc3_gadget_pullup,
1659 .udc_start = dwc3_gadget_start,
1660 .udc_stop = dwc3_gadget_stop,
1661};
1662
1663/* -------------------------------------------------------------------------- */
1664
6a1e3ef4
FB
1665static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1666 u8 num, u32 direction)
72246da4
FB
1667{
1668 struct dwc3_ep *dep;
6a1e3ef4 1669 u8 i;
72246da4 1670
6a1e3ef4
FB
1671 for (i = 0; i < num; i++) {
1672 u8 epnum = (i << 1) | (!!direction);
72246da4 1673
72246da4 1674 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
734d5a53 1675 if (!dep)
72246da4 1676 return -ENOMEM;
72246da4
FB
1677
1678 dep->dwc = dwc;
1679 dep->number = epnum;
9aa62ae4 1680 dep->direction = !!direction;
72246da4
FB
1681 dwc->eps[epnum] = dep;
1682
1683 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1684 (epnum & 1) ? "in" : "out");
6a1e3ef4 1685
72246da4 1686 dep->endpoint.name = dep->name;
72246da4 1687
653df35e
FB
1688 dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
1689
72246da4 1690 if (epnum == 0 || epnum == 1) {
e117e742 1691 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
6048e4c6 1692 dep->endpoint.maxburst = 1;
72246da4
FB
1693 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1694 if (!epnum)
1695 dwc->gadget.ep0 = &dep->endpoint;
1696 } else {
1697 int ret;
1698
e117e742 1699 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
12d36c16 1700 dep->endpoint.max_streams = 15;
72246da4
FB
1701 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1702 list_add_tail(&dep->endpoint.ep_list,
1703 &dwc->gadget.ep_list);
1704
1705 ret = dwc3_alloc_trb_pool(dep);
25b8ff68 1706 if (ret)
72246da4 1707 return ret;
72246da4 1708 }
25b8ff68 1709
72246da4
FB
1710 INIT_LIST_HEAD(&dep->request_list);
1711 INIT_LIST_HEAD(&dep->req_queued);
1712 }
1713
1714 return 0;
1715}
1716
6a1e3ef4
FB
1717static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1718{
1719 int ret;
1720
1721 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1722
1723 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1724 if (ret < 0) {
1725 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
1726 return ret;
1727 }
1728
1729 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1730 if (ret < 0) {
1731 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
1732 return ret;
1733 }
1734
1735 return 0;
1736}
1737
72246da4
FB
1738static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1739{
1740 struct dwc3_ep *dep;
1741 u8 epnum;
1742
1743 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1744 dep = dwc->eps[epnum];
6a1e3ef4
FB
1745 if (!dep)
1746 continue;
5bf8fae3
GC
1747 /*
1748 * Physical endpoints 0 and 1 are special; they form the
1749 * bi-directional USB endpoint 0.
1750 *
1751 * For those two physical endpoints, we don't allocate a TRB
1752 * pool nor do we add them to the endpoints list. Due to that, we
1753 * shouldn't do these two operations otherwise we would end up
1754 * with all sorts of bugs when removing dwc3.ko.
1755 */
1756 if (epnum != 0 && epnum != 1) {
1757 dwc3_free_trb_pool(dep);
72246da4 1758 list_del(&dep->endpoint.ep_list);
5bf8fae3 1759 }
72246da4
FB
1760
1761 kfree(dep);
1762 }
1763}
1764
72246da4 1765/* -------------------------------------------------------------------------- */
e5caff68 1766
e5ba5ec8
PA
1767static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1768 struct dwc3_request *req, struct dwc3_trb *trb,
72246da4
FB
1769 const struct dwc3_event_depevt *event, int status)
1770{
72246da4
FB
1771 unsigned int count;
1772 unsigned int s_pkt = 0;
d6d6ec7b 1773 unsigned int trb_status;
72246da4 1774
2c4cbe6e
FB
1775 trace_dwc3_complete_trb(dep, trb);
1776
e5ba5ec8
PA
1777 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1778 /*
1779 * We continue despite the error. There is not much we
1780 * can do. If we don't clean it up we loop forever. If
1781 * we skip the TRB then it gets overwritten after a
1782 * while since we use them in a ring buffer. A BUG()
1783 * would help. Let's hope that if this occurs, someone
1784 * fixes the root cause instead of looking away :)
1785 */
1786 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1787 dep->name, trb);
1788 count = trb->size & DWC3_TRB_SIZE_MASK;
1789
1790 if (dep->direction) {
1791 if (count) {
1792 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1793 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1794 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1795 dep->name);
1796 /*
1797 * If missed isoc occurred and there is
1798 * no request queued then issue END
1799 * TRANSFER, so that core generates
1800 * next xfernotready and we will issue
1801 * a fresh START TRANSFER.
1802 * If there are still queued requests
1803 * then wait, do not issue either END
1804 * or UPDATE TRANSFER, just attach the next
1805 * request in request_list during
1806 * giveback. If any future queued request
1807 * is successfully transferred then we
1808 * will issue UPDATE TRANSFER for all
1809 * requests in the request_list.
1810 */
1811 dep->flags |= DWC3_EP_MISSED_ISOC;
1812 } else {
1813 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1814 dep->name);
1815 status = -ECONNRESET;
1816 }
1817 } else {
1818 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1819 }
1820 } else {
1821 if (count && (event->status & DEPEVT_STATUS_SHORT))
1822 s_pkt = 1;
1823 }
1824
1825 /*
1826 * We assume here we will always receive the entire data block
1827 * which we should receive. Meaning, if we program RX to
1828 * receive 4K but we receive only 2K, we assume that's all we
1829 * should receive and we simply bounce the request back to the
1830 * gadget driver for further processing.
1831 */
1832 req->request.actual += req->request.length - count;
1833 if (s_pkt)
1834 return 1;
1835 if ((event->status & DEPEVT_STATUS_LST) &&
1836 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1837 DWC3_TRB_CTRL_HWO)))
1838 return 1;
1839 if ((event->status & DEPEVT_STATUS_IOC) &&
1840 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1841 return 1;
1842 return 0;
1843}
1844
1845static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1846 const struct dwc3_event_depevt *event, int status)
1847{
1848 struct dwc3_request *req;
1849 struct dwc3_trb *trb;
1850 unsigned int slot;
1851 unsigned int i;
1852 int ret;
1853
72246da4
FB
1854 do {
1855 req = next_request(&dep->req_queued);
d39ee7be
SAS
1856 if (!req) {
1857 WARN_ON_ONCE(1);
1858 return 1;
1859 }
e5ba5ec8
PA
1860 i = 0;
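		/*
		 * Walk every TRB this request consumed (one per mapped SG
		 * entry), starting at start_slot, skipping the link TRB at
		 * the end of an isochronous ring and wrapping around.
		 */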
1861 do {
1862 slot = req->start_slot + i;
1863 if ((slot == DWC3_TRB_NUM - 1) &&
1864 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1865 slot++;
1866 slot %= DWC3_TRB_NUM;
1867 trb = &dep->trb_pool[slot];
72246da4 1868
e5ba5ec8
PA
1869 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1870 event, status);
1871 if (ret)
1872 break;
1873 } while (++i < req->request.num_mapped_sgs);
72246da4 1874
72246da4 1875 dwc3_gadget_giveback(dep, req, status);
e5ba5ec8
PA
1876
1877 if (ret)
72246da4
FB
1878 break;
1879 } while (1);
1880
cdc359dd
PA
1881 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1882 list_empty(&dep->req_queued)) {
1883 if (list_empty(&dep->request_list)) {
1884 /*
1885 * If there is no entry in the request list, do
1886 * not issue END TRANSFER now. Just set the PENDING
1887 * flag, so that END TRANSFER is issued when an
1888 * entry is added to the request list.
1889 */
1890 dep->flags = DWC3_EP_PENDING_REQUEST;
1891 } else {
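			/*
			 * End the active transfer so that the core
			 * generates a new XferNotReady and the next
			 * queued request starts a fresh transfer.
			 */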
b992e681 1892 dwc3_stop_active_transfer(dwc, dep->number, true);
cdc359dd
PA
1893 dep->flags = DWC3_EP_ENABLED;
1894 }
7efea86c
PA
1895 return 1;
1896 }
1897
72246da4
FB
1898 return 1;
1899}
1900
1901static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
029d97ff 1902 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
72246da4
FB
1903{
1904 unsigned status = 0;
1905 int clean_busy;
1906
1907 if (event->status & DEPEVT_STATUS_BUSERR)
1908 status = -ECONNRESET;
1909
1d046793 1910 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
c2df85ca 1911 if (clean_busy)
72246da4 1912 dep->flags &= ~DWC3_EP_BUSY;
fae2b904
FB
1913
1914 /*
1915 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1916 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1917 */
1918 if (dwc->revision < DWC3_REVISION_183A) {
1919 u32 reg;
1920 int i;
1921
1922 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
348e026f 1923 dep = dwc->eps[i];
fae2b904
FB
1924
1925 if (!(dep->flags & DWC3_EP_ENABLED))
1926 continue;
1927
1928 if (!list_empty(&dep->req_queued))
1929 return;
1930 }
1931
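		/*
		 * No enabled endpoint has work pending anymore, so
		 * restore the U1/U2 enable bits saved in the first
		 * half of the workaround.
		 */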
1932 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1933 reg |= dwc->u1u2;
1934 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1935
1936 dwc->u1u2 = 0;
1937 }
72246da4
FB
1938}
1939
72246da4
FB
1940static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1941 const struct dwc3_event_depevt *event)
1942{
1943 struct dwc3_ep *dep;
1944 u8 epnum = event->endpoint_number;
1945
1946 dep = dwc->eps[epnum];
1947
3336abb5
FB
1948 if (!(dep->flags & DWC3_EP_ENABLED))
1949 return;
1950
72246da4
FB
1951 if (epnum == 0 || epnum == 1) {
1952 dwc3_ep0_interrupt(dwc, event);
1953 return;
1954 }
1955
1956 switch (event->endpoint_event) {
1957 case DWC3_DEPEVT_XFERCOMPLETE:
b4996a86 1958 dep->resource_index = 0;
c2df85ca 1959
16e78db7 1960 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1961 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1962 dep->name);
1963 return;
1964 }
1965
029d97ff 1966 dwc3_endpoint_transfer_complete(dwc, dep, event);
72246da4
FB
1967 break;
1968 case DWC3_DEPEVT_XFERINPROGRESS:
029d97ff 1969 dwc3_endpoint_transfer_complete(dwc, dep, event);
72246da4
FB
1970 break;
1971 case DWC3_DEPEVT_XFERNOTREADY:
16e78db7 1972 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1973 dwc3_gadget_start_isoc(dwc, dep, event);
1974 } else {
1975 int ret;
1976
1977 dev_vdbg(dwc->dev, "%s: reason %s\n",
40aa41fb
FB
1978 dep->name, event->status &
1979 DEPEVT_STATUS_TRANSFER_ACTIVE
72246da4
FB
1980 ? "Transfer Active"
1981 : "Transfer Not Active");
1982
1983 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1984 if (!ret || ret == -EBUSY)
1985 return;
1986
1987 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1988 dep->name);
1989 }
1990
879631aa
FB
1991 break;
1992 case DWC3_DEPEVT_STREAMEVT:
16e78db7 1993 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
879631aa
FB
1994 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1995 dep->name);
1996 return;
1997 }
1998
1999 switch (event->status) {
2000 case DEPEVT_STREAMEVT_FOUND:
2001 dev_vdbg(dwc->dev, "Stream %d found and started\n",
2002 event->parameters);
2003
2004 break;
2005 case DEPEVT_STREAMEVT_NOTFOUND:
2006 /* FALLTHROUGH */
2007 default:
2008 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
2009 }
72246da4
FB
2010 break;
2011 case DWC3_DEPEVT_RXTXFIFOEVT:
2012 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
2013 break;
72246da4 2014 case DWC3_DEPEVT_EPCMDCMPLT:
ea53b882 2015 dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
72246da4
FB
2016 break;
2017 }
2018}
2019
2020static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2021{
2022 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2023 spin_unlock(&dwc->lock);
2024 dwc->gadget_driver->disconnect(&dwc->gadget);
2025 spin_lock(&dwc->lock);
2026 }
2027}
2028
bc5ba2e0
FB
2029static void dwc3_suspend_gadget(struct dwc3 *dwc)
2030{
73a30bfc 2031 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
bc5ba2e0
FB
2032 spin_unlock(&dwc->lock);
2033 dwc->gadget_driver->suspend(&dwc->gadget);
2034 spin_lock(&dwc->lock);
2035 }
2036}
2037
2038static void dwc3_resume_gadget(struct dwc3 *dwc)
2039{
73a30bfc 2040 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
bc5ba2e0
FB
2041 spin_unlock(&dwc->lock);
2042 dwc->gadget_driver->resume(&dwc->gadget);
 spin_lock(&dwc->lock);
8e74475b
FB
2043 }
2044}
2045
2046static void dwc3_reset_gadget(struct dwc3 *dwc)
2047{
2048 if (!dwc->gadget_driver)
2049 return;
2050
2051 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2052 spin_unlock(&dwc->lock);
2053 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
bc5ba2e0
FB
2054 spin_lock(&dwc->lock);
2055 }
2056}
2057
b992e681 2058static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
72246da4
FB
2059{
2060 struct dwc3_ep *dep;
2061 struct dwc3_gadget_ep_cmd_params params;
2062 u32 cmd;
2063 int ret;
2064
2065 dep = dwc->eps[epnum];
2066
b4996a86 2067 if (!dep->resource_index)
3daf74d7
PA
2068 return;
2069
57911504
PA
2070 /*
2071 * NOTICE: We are violating what the Databook says about the
2072 * EndTransfer command. Ideally we would _always_ wait for the
2073 * EndTransfer Command Completion IRQ, but that's causing too
2074 * much trouble synchronizing between us and gadget driver.
2075 *
2076 * We have discussed this with the IP Provider and it was
2077 * suggested to giveback all requests here, but give HW some
2078 * extra time to synchronize with the interconnect. We're using
2079 * an arbitrary 100us delay for that.
2080 *
2081 * Note also that a similar handling was tested by Synopsys
2082 * (thanks a lot Paul) and nothing bad has come out of it.
2083 * In short, what we're doing is:
2084 *
2085 * - Issue EndTransfer WITH CMDIOC bit set
2086 * - Wait 100us
2087 */
2088
3daf74d7 2089 cmd = DWC3_DEPCMD_ENDTRANSFER;
b992e681
PZ
2090 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2091 cmd |= DWC3_DEPCMD_CMDIOC;
b4996a86 2092 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
3daf74d7
PA
2093 memset(&params, 0, sizeof(params));
2094 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2095 WARN_ON_ONCE(ret);
b4996a86 2096 dep->resource_index = 0;
041d81f4 2097 dep->flags &= ~DWC3_EP_BUSY;
57911504 2098 udelay(100);
72246da4
FB
2099}
2100
2101static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2102{
2103 u32 epnum;
2104
2105 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2106 struct dwc3_ep *dep;
2107
2108 dep = dwc->eps[epnum];
6a1e3ef4
FB
2109 if (!dep)
2110 continue;
2111
72246da4
FB
2112 if (!(dep->flags & DWC3_EP_ENABLED))
2113 continue;
2114
624407f9 2115 dwc3_remove_requests(dwc, dep);
72246da4
FB
2116 }
2117}
2118
2119static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2120{
2121 u32 epnum;
2122
2123 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2124 struct dwc3_ep *dep;
2125 struct dwc3_gadget_ep_cmd_params params;
2126 int ret;
2127
2128 dep = dwc->eps[epnum];
6a1e3ef4
FB
2129 if (!dep)
2130 continue;
72246da4
FB
2131
2132 if (!(dep->flags & DWC3_EP_STALL))
2133 continue;
2134
2135 dep->flags &= ~DWC3_EP_STALL;
2136
2137 memset(&params, 0, sizeof(params));
2138 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2139 DWC3_DEPCMD_CLEARSTALL, &params);
2140 WARN_ON_ONCE(ret);
2141 }
2142}
2143
2144static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2145{
c4430a26
FB
2146 u32 reg;
2147
72246da4
FB
2148 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2149 reg &= ~DWC3_DCTL_INITU1ENA;
2150 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2151
2152 reg &= ~DWC3_DCTL_INITU2ENA;
2153 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
72246da4 2154
72246da4 2155 dwc3_disconnect_gadget(dwc);
b23c8439 2156 dwc->start_config_issued = false;
72246da4
FB
2157
2158 dwc->gadget.speed = USB_SPEED_UNKNOWN;
df62df56 2159 dwc->setup_packet_pending = false;
06a374ed 2160 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
72246da4
FB
2161}
2162
72246da4
FB
2163static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2164{
2165 u32 reg;
2166
df62df56
FB
2167 /*
2168 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2169 * would cause a missing Disconnect Event if there's a
2170 * pending Setup Packet in the FIFO.
2171 *
2172 * There's no suggested workaround on the official Bug
2173 * report, which states that "unless the driver/application
2174 * is doing any special handling of a disconnect event,
2175 * there is no functional issue".
2176 *
2177 * Unfortunately, it turns out that we _do_ some special
2178 * handling of a disconnect event, namely complete all
2179 * pending transfers, notify gadget driver of the
2180 * disconnection, and so on.
2181 *
2182 * Our suggested workaround is to follow the Disconnect
2183 * Event steps here, instead, based on a setup_packet_pending
2184 * flag. The flag is set whenever we have an XferNotReady
2185 * event on EP0 and gets cleared on XferComplete for the
2186 * same endpoint.
2187 *
2188 * Refers to:
2189 *
2190 * STAR#9000466709: RTL: Device : Disconnect event not
2191 * generated if setup packet pending in FIFO
2192 */
2193 if (dwc->revision < DWC3_REVISION_188A) {
2194 if (dwc->setup_packet_pending)
2195 dwc3_gadget_disconnect_interrupt(dwc);
2196 }
2197
8e74475b 2198 dwc3_reset_gadget(dwc);
72246da4
FB
2199
2200 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2201 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2202 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3b637367 2203 dwc->test_mode = false;
72246da4
FB
2204
2205 dwc3_stop_active_transfers(dwc);
2206 dwc3_clear_stall_all_ep(dwc);
b23c8439 2207 dwc->start_config_issued = false;
72246da4
FB
2208
2209 /* Reset device address to zero */
2210 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2211 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2212 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
72246da4
FB
2213}
2214
2215static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2216{
2217 u32 reg;
2218 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2219
2220 /*
2221 * We change the clock only at SuperSpeed. The reason is not entirely
2222 * clear; it may become part of the power saving plan.
2223 */
2224
2225 if (speed != DWC3_DSTS_SUPERSPEED)
2226 return;
2227
2228 /*
2229 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2230 * each time on Connect Done.
2231 */
2232 if (!usb30_clock)
2233 return;
2234
2235 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2236 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2237 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2238}
2239
72246da4
FB
2240static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2241{
72246da4
FB
2242 struct dwc3_ep *dep;
2243 int ret;
2244 u32 reg;
2245 u8 speed;
2246
72246da4
FB
2247 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2248 speed = reg & DWC3_DSTS_CONNECTSPD;
2249 dwc->speed = speed;
2250
2251 dwc3_update_ram_clk_sel(dwc, speed);
2252
2253 switch (speed) {
2254 case DWC3_DCFG_SUPERSPEED:
05870c5b
FB
2255 /*
2256 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2257 * would cause a missing USB3 Reset event.
2258 *
2259 * In such situations, we should force a USB3 Reset
2260 * event by calling our dwc3_gadget_reset_interrupt()
2261 * routine.
2262 *
2263 * Refers to:
2264 *
2265 * STAR#9000483510: RTL: SS : USB3 reset event may
2266 * not be generated always when the link enters poll
2267 */
2268 if (dwc->revision < DWC3_REVISION_190A)
2269 dwc3_gadget_reset_interrupt(dwc);
2270
72246da4
FB
2271 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2272 dwc->gadget.ep0->maxpacket = 512;
2273 dwc->gadget.speed = USB_SPEED_SUPER;
2274 break;
2275 case DWC3_DCFG_HIGHSPEED:
2276 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2277 dwc->gadget.ep0->maxpacket = 64;
2278 dwc->gadget.speed = USB_SPEED_HIGH;
2279 break;
2280 case DWC3_DCFG_FULLSPEED2:
2281 case DWC3_DCFG_FULLSPEED1:
2282 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2283 dwc->gadget.ep0->maxpacket = 64;
2284 dwc->gadget.speed = USB_SPEED_FULL;
2285 break;
2286 case DWC3_DCFG_LOWSPEED:
2287 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2288 dwc->gadget.ep0->maxpacket = 8;
2289 dwc->gadget.speed = USB_SPEED_LOW;
2290 break;
2291 }
2292
2b758350
PA
2293 /* Enable USB2 LPM Capability */
2294
2295 if ((dwc->revision > DWC3_REVISION_194A)
2296 && (speed != DWC3_DCFG_SUPERSPEED)) {
2297 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2298 reg |= DWC3_DCFG_LPM_CAP;
2299 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2300
2301 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2302 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2303
460d098c 2304 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2b758350 2305
80caf7d2
HR
2306 /*
2307 * When dwc3 revision is >= 2.40a, the LPM Erratum is enabled,
2308 * and DCFG.LPMCap is set, the core responds with an ACK if the
2309 * BESL value in the LPM token is less than or equal to the LPM
2310 * NYET threshold.
2311 */
2312 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2313 && dwc->has_lpm_erratum,
2314 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2315
2316 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2317 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2318
356363bf
FB
2319 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2320 } else {
2321 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2322 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2b758350
PA
2323 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2324 }
2325
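	/*
	 * Re-enable both directions of ep0 with the wMaxPacketSize
	 * that matches the speed we just negotiated.
	 */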
72246da4 2326 dep = dwc->eps[0];
265b70a7
PZ
2327 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2328 false);
72246da4
FB
2329 if (ret) {
2330 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2331 return;
2332 }
2333
2334 dep = dwc->eps[1];
265b70a7
PZ
2335 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2336 false);
72246da4
FB
2337 if (ret) {
2338 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2339 return;
2340 }
2341
2342 /*
2343 * Configure PHY via GUSB3PIPECTLn if required.
2344 *
2345 * Update GTXFIFOSIZn
2346 *
2347 * In both cases reset values should be sufficient.
2348 */
2349}
2350
2351static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2352{
72246da4
FB
2353 /*
2354 * TODO take core out of low power mode when that's
2355 * implemented.
2356 */
2357
2358 dwc3_resume_gadget(dwc);
2359}
2360
2361static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2362 unsigned int evtinfo)
2363{
fae2b904 2364 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
0b0cc1cd
FB
2365 unsigned int pwropt;
2366
2367 /*
2368 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2369 * Hibernation mode enabled which would show up when device detects
2370 * host-initiated U3 exit.
2371 *
2372 * In that case, device will generate a Link State Change Interrupt
2373 * from U3 to RESUME which is only necessary if Hibernation is
2374 * configured in.
2375 *
2376 * There are no functional changes due to such spurious event and we
2377 * just need to ignore it.
2378 *
2379 * Refers to:
2380 *
2381 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2382 * operational mode
2383 */
2384 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2385 if ((dwc->revision < DWC3_REVISION_250A) &&
2386 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2387 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2388 (next == DWC3_LINK_STATE_RESUME)) {
2389 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2390 return;
2391 }
2392 }
fae2b904
FB
2393
2394 /*
2395 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
2396 * on the link partner, the USB session might do multiple entry/exit
2397 * of low power states before a transfer takes place.
2398 *
2399 * Due to this problem, we might experience lower throughput. The
2400 * suggested workaround is to disable DCTL[12:9] bits if we're
2401 * transitioning from U1/U2 to U0 and enable those bits again
2402 * after a transfer completes and there are no pending transfers
2403 * on any of the enabled endpoints.
2404 *
2405 * This is the first half of that workaround.
2406 *
2407 * Refers to:
2408 *
2409 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2410 * core send LGO_Ux entering U0
2411 */
2412 if (dwc->revision < DWC3_REVISION_183A) {
2413 if (next == DWC3_LINK_STATE_U0) {
2414 u32 u1u2;
2415 u32 reg;
2416
2417 switch (dwc->link_state) {
2418 case DWC3_LINK_STATE_U1:
2419 case DWC3_LINK_STATE_U2:
2420 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2421 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2422 | DWC3_DCTL_ACCEPTU2ENA
2423 | DWC3_DCTL_INITU1ENA
2424 | DWC3_DCTL_ACCEPTU1ENA);
2425
2426 if (!dwc->u1u2)
2427 dwc->u1u2 = reg & u1u2;
2428
2429 reg &= ~u1u2;
2430
2431 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2432 break;
2433 default:
2434 /* do nothing */
2435 break;
2436 }
2437 }
2438 }
2439
bc5ba2e0
FB
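	/*
	 * Notify the gadget driver: entering a low power link state
	 * (U1 only counts at SuperSpeed) is treated as suspend, and a
	 * Resume transition as resume.
	 */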
2440 switch (next) {
2441 case DWC3_LINK_STATE_U1:
2442 if (dwc->speed == USB_SPEED_SUPER)
2443 dwc3_suspend_gadget(dwc);
2444 break;
2445 case DWC3_LINK_STATE_U2:
2446 case DWC3_LINK_STATE_U3:
2447 dwc3_suspend_gadget(dwc);
2448 break;
2449 case DWC3_LINK_STATE_RESUME:
2450 dwc3_resume_gadget(dwc);
2451 break;
2452 default:
2453 /* do nothing */
2454 break;
2455 }
2456
e57ebc1d 2457 dwc->link_state = next;
72246da4
FB
2458}
2459
e1dadd3b
FB
2460static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2461 unsigned int evtinfo)
2462{
2463 unsigned int is_ss = evtinfo & BIT(4);
2464
2465 /*
2466 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2467 * has a known issue which can cause USB CV TD.9.23 to fail
2468 * randomly.
2469 *
2470 * Because of this issue, core could generate bogus hibernation
2471 * events which SW needs to ignore.
2472 *
2473 * Refers to:
2474 *
2475 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2476 * Device Fallback from SuperSpeed
2477 */
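	/*
	 * A bogus event is detected when its SuperSpeed flag does not
	 * match the speed we are currently operating at; ignore it.
	 */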
2478 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2479 return;
2480
2481 /* enter hibernation here */
2482}
2483
72246da4
FB
2484static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2485 const struct dwc3_event_devt *event)
2486{
2487 switch (event->type) {
2488 case DWC3_DEVICE_EVENT_DISCONNECT:
2489 dwc3_gadget_disconnect_interrupt(dwc);
2490 break;
2491 case DWC3_DEVICE_EVENT_RESET:
2492 dwc3_gadget_reset_interrupt(dwc);
2493 break;
2494 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2495 dwc3_gadget_conndone_interrupt(dwc);
2496 break;
2497 case DWC3_DEVICE_EVENT_WAKEUP:
2498 dwc3_gadget_wakeup_interrupt(dwc);
2499 break;
e1dadd3b
FB
2500 case DWC3_DEVICE_EVENT_HIBER_REQ:
2501 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2502 "unexpected hibernation event\n"))
2503 break;
2504
2505 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2506 break;
72246da4
FB
2507 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2508 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2509 break;
2510 case DWC3_DEVICE_EVENT_EOPF:
2511 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2512 break;
2513 case DWC3_DEVICE_EVENT_SOF:
2514 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2515 break;
2516 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2517 dev_vdbg(dwc->dev, "Erratic Error\n");
2518 break;
2519 case DWC3_DEVICE_EVENT_CMD_CMPL:
2520 dev_vdbg(dwc->dev, "Command Complete\n");
2521 break;
2522 case DWC3_DEVICE_EVENT_OVERFLOW:
2523 dev_vdbg(dwc->dev, "Overflow\n");
2524 break;
2525 default:
2526 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2527 }
2528}
2529
2530static void dwc3_process_event_entry(struct dwc3 *dwc,
2531 const union dwc3_event *event)
2532{
2c4cbe6e
FB
2533 trace_dwc3_event(event->raw);
2534
72246da4
FB
2535 /* Endpoint IRQ, handle it and return early */
2536 if (event->type.is_devspec == 0) {
2537 /* depevt */
2538 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2539 }
2540
2541 switch (event->type.type) {
2542 case DWC3_EVENT_TYPE_DEV:
2543 dwc3_gadget_interrupt(dwc, &event->devt);
2544 break;
2545 /* REVISIT what to do with Carkit and I2C events ? */
2546 default:
2547 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2548 }
2549}
2550
f42f2447 2551static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
b15a762f 2552{
f42f2447 2553 struct dwc3_event_buffer *evt;
b15a762f 2554 irqreturn_t ret = IRQ_NONE;
f42f2447 2555 int left;
e8adfc30 2556 u32 reg;
b15a762f 2557
f42f2447
FB
2558 evt = dwc->ev_buffs[buf];
2559 left = evt->count;
b15a762f 2560
f42f2447
FB
2561 if (!(evt->flags & DWC3_EVENT_PENDING))
2562 return IRQ_NONE;
b15a762f 2563
f42f2447
FB
2564 while (left > 0) {
2565 union dwc3_event event;
b15a762f 2566
f42f2447 2567 event.raw = *(u32 *) (evt->buf + evt->lpos);
b15a762f 2568
f42f2447 2569 dwc3_process_event_entry(dwc, &event);
b15a762f 2570
f42f2447
FB
2571 /*
2572 * FIXME we wrap around correctly to the next entry as
2573 * almost all entries are 4 bytes in size. There is one
2574 * entry type which is 12 bytes: a regular entry
2575 * followed by 8 bytes of data. At the moment we don't know
2576 * how things are laid out if such an entry lands next to a
2577 * buffer boundary, so we will worry about that once we
2578 * start handling those entries.
2579 */
2580 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2581 left -= 4;
b15a762f 2582
f42f2447
FB
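		/*
		 * Acknowledge the 4 bytes we just consumed so the
		 * controller can reuse that part of the event buffer.
		 */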
2583 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2584 }
b15a762f 2585
f42f2447
FB
2586 evt->count = 0;
2587 evt->flags &= ~DWC3_EVENT_PENDING;
2588 ret = IRQ_HANDLED;
b15a762f 2589
f42f2447
FB
2590 /* Unmask interrupt */
2591 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2592 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2593 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
b15a762f 2594
f42f2447
FB
2595 return ret;
2596}
e8adfc30 2597
f42f2447
FB
2598static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2599{
2600 struct dwc3 *dwc = _dwc;
2601 unsigned long flags;
2602 irqreturn_t ret = IRQ_NONE;
2603 int i;
2604
2605 spin_lock_irqsave(&dwc->lock, flags);
2606
2607 for (i = 0; i < dwc->num_event_buffers; i++)
2608 ret |= dwc3_process_event_buf(dwc, i);
b15a762f
FB
2609
2610 spin_unlock_irqrestore(&dwc->lock, flags);
2611
2612 return ret;
2613}
2614
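/*
 * Runs in hard interrupt context: latch how many event bytes are pending
 * for this buffer, mark it as pending and mask its interrupt until the
 * threaded handler has drained it.
 */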
7f97aa98 2615static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
72246da4
FB
2616{
2617 struct dwc3_event_buffer *evt;
72246da4 2618 u32 count;
e8adfc30 2619 u32 reg;
72246da4 2620
b15a762f
FB
2621 evt = dwc->ev_buffs[buf];
2622
72246da4
FB
2623 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2624 count &= DWC3_GEVNTCOUNT_MASK;
2625 if (!count)
2626 return IRQ_NONE;
2627
b15a762f
FB
2628 evt->count = count;
2629 evt->flags |= DWC3_EVENT_PENDING;
72246da4 2630
e8adfc30
FB
2631 /* Mask interrupt */
2632 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2633 reg |= DWC3_GEVNTSIZ_INTMASK;
2634 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2635
b15a762f 2636 return IRQ_WAKE_THREAD;
72246da4
FB
2637}
2638
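/*
 * Top-half interrupt handler: scan every event buffer and wake the IRQ
 * thread if any of them has pending events; the actual event processing
 * happens in dwc3_thread_interrupt().
 */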
2639static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2640{
2641 struct dwc3 *dwc = _dwc;
2642 int i;
2643 irqreturn_t ret = IRQ_NONE;
2644
2645 spin_lock(&dwc->lock);
2646
9f622b2a 2647 for (i = 0; i < dwc->num_event_buffers; i++) {
72246da4
FB
2648 irqreturn_t status;
2649
7f97aa98 2650 status = dwc3_check_event_buf(dwc, i);
b15a762f 2651 if (status == IRQ_WAKE_THREAD)
72246da4
FB
2652 ret = status;
2653 }
2654
2655 spin_unlock(&dwc->lock);
2656
2657 return ret;
2658}
2659
2660/**
2661 * dwc3_gadget_init - Initializes gadget related registers
1d046793 2662 * @dwc: pointer to our controller context structure
72246da4
FB
2663 *
2664 * Returns 0 on success otherwise negative errno.
2665 */
41ac7b3a 2666int dwc3_gadget_init(struct dwc3 *dwc)
72246da4 2667{
72246da4 2668 int ret;
72246da4
FB
2669
2670 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2671 &dwc->ctrl_req_addr, GFP_KERNEL);
2672 if (!dwc->ctrl_req) {
2673 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2674 ret = -ENOMEM;
2675 goto err0;
2676 }
2677
2678 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2679 &dwc->ep0_trb_addr, GFP_KERNEL);
2680 if (!dwc->ep0_trb) {
2681 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2682 ret = -ENOMEM;
2683 goto err1;
2684 }
2685
3ef35faf 2686 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
72246da4 2687 if (!dwc->setup_buf) {
72246da4
FB
2688 ret = -ENOMEM;
2689 goto err2;
2690 }
2691
5812b1c2 2692 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
3ef35faf
FB
2693 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2694 GFP_KERNEL);
5812b1c2
FB
2695 if (!dwc->ep0_bounce) {
2696 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2697 ret = -ENOMEM;
2698 goto err3;
2699 }
2700
72246da4 2701 dwc->gadget.ops = &dwc3_gadget_ops;
d327ab5b 2702 dwc->gadget.max_speed = USB_SPEED_SUPER;
72246da4 2703 dwc->gadget.speed = USB_SPEED_UNKNOWN;
eeb720fb 2704 dwc->gadget.sg_supported = true;
72246da4
FB
2705 dwc->gadget.name = "dwc3-gadget";
2706
a4b9d94b
DC
2707 /*
2708 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2709 * on ep out.
2710 */
2711 dwc->gadget.quirk_ep_out_aligned_size = true;
2712
72246da4
FB
2713 /*
2714 * REVISIT: Here we should clear all pending IRQs to be
2715 * sure we're starting from a well known location.
2716 */
2717
2718 ret = dwc3_gadget_init_endpoints(dwc);
2719 if (ret)
5812b1c2 2720 goto err4;
72246da4 2721
72246da4
FB
2722 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2723 if (ret) {
2724 dev_err(dwc->dev, "failed to register udc\n");
e1f80467 2725 goto err4;
72246da4
FB
2726 }
2727
2728 return 0;
2729
5812b1c2 2730err4:
e1f80467 2731 dwc3_gadget_free_endpoints(dwc);
3ef35faf
FB
2732 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2733 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2734
72246da4 2735err3:
0fc9a1be 2736 kfree(dwc->setup_buf);
72246da4
FB
2737
2738err2:
2739 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2740 dwc->ep0_trb, dwc->ep0_trb_addr);
2741
2742err1:
2743 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2744 dwc->ctrl_req, dwc->ctrl_req_addr);
2745
2746err0:
2747 return ret;
2748}
2749
7415f17c
FB
2750/* -------------------------------------------------------------------------- */
2751
72246da4
FB
2752void dwc3_gadget_exit(struct dwc3 *dwc)
2753{
72246da4 2754 usb_del_gadget_udc(&dwc->gadget);
72246da4 2755
72246da4
FB
2756 dwc3_gadget_free_endpoints(dwc);
2757
3ef35faf
FB
2758 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2759 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2760
0fc9a1be 2761 kfree(dwc->setup_buf);
72246da4
FB
2762
2763 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2764 dwc->ep0_trb, dwc->ep0_trb_addr);
2765
2766 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2767 dwc->ctrl_req, dwc->ctrl_req_addr);
72246da4 2768}
7415f17c 2769
0b0231aa 2770int dwc3_gadget_suspend(struct dwc3 *dwc)
7415f17c 2771{
7b2a0368 2772 if (dwc->pullups_connected) {
7415f17c 2773 dwc3_gadget_disable_irq(dwc);
7b2a0368
FB
2774 dwc3_gadget_run_stop(dwc, true, true);
2775 }
7415f17c 2776
7415f17c
FB
2777 __dwc3_gadget_ep_disable(dwc->eps[0]);
2778 __dwc3_gadget_ep_disable(dwc->eps[1]);
2779
2780 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2781
2782 return 0;
2783}
2784
2785int dwc3_gadget_resume(struct dwc3 *dwc)
2786{
2787 struct dwc3_ep *dep;
2788 int ret;
2789
2790 /* Start with SuperSpeed Default */
2791 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2792
2793 dep = dwc->eps[0];
265b70a7
PZ
2794 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2795 false);
7415f17c
FB
2796 if (ret)
2797 goto err0;
2798
2799 dep = dwc->eps[1];
265b70a7
PZ
2800 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2801 false);
7415f17c
FB
2802 if (ret)
2803 goto err1;
2804
2805 /* begin to receive SETUP packets */
2806 dwc->ep0state = EP0_SETUP_PHASE;
2807 dwc3_ep0_out_start(dwc);
2808
2809 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2810
0b0231aa
FB
2811 if (dwc->pullups_connected) {
2812 dwc3_gadget_enable_irq(dwc);
2813 dwc3_gadget_run_stop(dwc, true, false);
2814 }
2815
7415f17c
FB
2816 return 0;
2817
2818err1:
2819 __dwc3_gadget_ep_disable(dwc->eps[0]);
2820
2821err0:
2822 return ret;
2823}