72246da4
FB
1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
72246da4
FB
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
5945f789
FB
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
72246da4 12 *
5945f789
FB
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
72246da4
FB
17 */
18
19#include <linux/kernel.h>
20#include <linux/delay.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23#include <linux/platform_device.h>
24#include <linux/pm_runtime.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/list.h>
28#include <linux/dma-mapping.h>
29
30#include <linux/usb/ch9.h>
31#include <linux/usb/gadget.h>
32
80977dc9 33#include "debug.h"
72246da4
FB
34#include "core.h"
35#include "gadget.h"
36#include "io.h"
37
04a9bfcd
FB
38/**
39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40 * @dwc: pointer to our context structure
41 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
42 *
43 * Caller should take care of locking. This function will
44 * return 0 on success or -EINVAL if a wrong Test Selector
45 * is passed
46 */
47int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48{
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69}
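/*
 * Illustrative use, assuming the standard USB 2.0 test selector values
 * (TEST_J = 1 ... TEST_FORCE_EN = 5) from <linux/usb/ch9.h>: passing
 * TEST_PACKET writes 4 << 1 into the bits covered by
 * DWC3_DCTL_TSTCTRL_MASK, i.e. the TstCtl field of DCTL.
 *
 *	ret = dwc3_gadget_set_test_mode(dwc, TEST_PACKET);
 */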
70
911f1f88
PZ
71/**
72 * dwc3_gadget_get_link_state - Gets current state of USB Link
73 * @dwc: pointer to our context structure
74 *
75 * Caller should take care of locking. This function
76 * returns the current link state.
77 */
78int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79{
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85}
86
8598bde7
FB
87/**
88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89 * @dwc: pointer to our context structure
90 * @state: the state to put link into
91 *
92 * Caller should take care of locking. This function will
aee63e3c 93 * return 0 on success or -ETIMEDOUT.
8598bde7
FB
94 */
95int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96{
aee63e3c 97 int retries = 10000;
8598bde7
FB
98 u32 reg;
99
802fde98
PZ
100 /*
101 * Wait until device controller is ready. Only applies to 1.94a and
102 * later RTL.
103 */
104 if (dwc->revision >= DWC3_REVISION_194A) {
105 while (--retries) {
106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 if (reg & DWC3_DSTS_DCNRD)
108 udelay(5);
109 else
110 break;
111 }
112
113 if (retries <= 0)
114 return -ETIMEDOUT;
115 }
116
8598bde7
FB
117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120 /* set requested state */
121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
802fde98
PZ
124 /*
125 * The following code is racy when called from dwc3_gadget_wakeup,
126 * and is not needed, at least on newer versions
127 */
128 if (dwc->revision >= DWC3_REVISION_194A)
129 return 0;
130
8598bde7 131 /* wait for a change in DSTS */
aed430e5 132 retries = 10000;
8598bde7
FB
133 while (--retries) {
134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
8598bde7
FB
136 if (DWC3_DSTS_USBLNKST(reg) == state)
137 return 0;
138
aee63e3c 139 udelay(5);
8598bde7
FB
140 }
141
73815280
FB
142 dwc3_trace(trace_dwc3_gadget,
143 "link state change request timed out");
8598bde7
FB
144
145 return -ETIMEDOUT;
146}
147
dca0119c
JY
148/**
149 * dwc3_ep_inc_trb() - Increment a TRB index.
150 * @index: Pointer to the TRB index to increment.
151 *
152 * The index should never point to the link TRB. After incrementing,
153 * if it points to the link TRB, wrap around to the beginning. The
154 * link TRB is always at the last TRB entry.
155 */
156static void dwc3_ep_inc_trb(u8 *index)
457e84b6 157{
dca0119c
JY
158 (*index)++;
159 if (*index == (DWC3_TRB_NUM - 1))
160 *index = 0;
ef966b9d 161}
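/*
 * Sketch of the wrap-around, using a hypothetical ring of DWC3_TRB_NUM = 8
 * entries where slot 7 holds the link TRB:
 *
 *	index: 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 0 -> 1 -> ...
 *
 * so the enqueue/dequeue indices only ever address slots 0..6 and never
 * land on the link TRB itself.
 */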
457e84b6 162
dca0119c 163static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
ef966b9d 164{
dca0119c 165 dwc3_ep_inc_trb(&dep->trb_enqueue);
ef966b9d 166}
457e84b6 167
dca0119c 168static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
ef966b9d 169{
dca0119c 170 dwc3_ep_inc_trb(&dep->trb_dequeue);
457e84b6
FB
171}
172
72246da4
FB
173void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
174 int status)
175{
176 struct dwc3 *dwc = dep->dwc;
e5ba5ec8 177 int i;
72246da4 178
aa3342c8 179 if (req->started) {
e5ba5ec8
PA
180 i = 0;
181 do {
ef966b9d 182 dwc3_ep_inc_deq(dep);
e5ba5ec8 183 } while(++i < req->request.num_mapped_sgs);
aa3342c8 184 req->started = false;
72246da4
FB
185 }
186 list_del(&req->list);
eeb720fb 187 req->trb = NULL;
72246da4
FB
188
189 if (req->request.status == -EINPROGRESS)
190 req->request.status = status;
191
0416e494
PA
192 if (dwc->ep0_bounced && dep->number == 0)
193 dwc->ep0_bounced = false;
194 else
195 usb_gadget_unmap_request(&dwc->gadget, &req->request,
196 req->direction);
72246da4 197
2c4cbe6e 198 trace_dwc3_gadget_giveback(req);
72246da4
FB
199
200 spin_unlock(&dwc->lock);
304f7e5e 201 usb_gadget_giveback_request(&dep->endpoint, &req->request);
72246da4 202 spin_lock(&dwc->lock);
fc8bb91b
FB
203
204 if (dep->number > 1)
205 pm_runtime_put(dwc->dev);
72246da4
FB
206}
207
3ece0ec4 208int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
b09bb642
FB
209{
210 u32 timeout = 500;
71f7e702 211 int status = 0;
0fe886cd 212 int ret = 0;
b09bb642
FB
213 u32 reg;
214
215 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
216 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
217
218 do {
219 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
220 if (!(reg & DWC3_DGCMD_CMDACT)) {
71f7e702
FB
221 status = DWC3_DGCMD_STATUS(reg);
222 if (status)
0fe886cd
FB
223 ret = -EINVAL;
224 break;
b09bb642 225 }
0fe886cd
FB
226 } while (timeout--);
227
228 if (!timeout) {
0fe886cd 229 ret = -ETIMEDOUT;
71f7e702 230 status = -ETIMEDOUT;
0fe886cd
FB
231 }
232
71f7e702
FB
233 trace_dwc3_gadget_generic_cmd(cmd, param, status);
234
0fe886cd 235 return ret;
b09bb642
FB
236}
237
c36d8e94
FB
238static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
239
2cd4718d
FB
240int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
241 struct dwc3_gadget_ep_cmd_params *params)
72246da4 242{
2cd4718d 243 struct dwc3 *dwc = dep->dwc;
61d58242 244 u32 timeout = 500;
72246da4
FB
245 u32 reg;
246
0933df15 247 int cmd_status = 0;
2b0f11df 248 int susphy = false;
c0ca324d 249 int ret = -EINVAL;
72246da4 250
2b0f11df
FB
251 /*
252 * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
253 * we're issuing an endpoint command, we must check if
254 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
255 *
256 * We will also set SUSPHY bit to what it was before returning as stated
257 * by the same section on Synopsys databook.
258 */
ab2a92e7
FB
259 if (dwc->gadget.speed <= USB_SPEED_HIGH) {
260 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
261 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
262 susphy = true;
263 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
264 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
265 }
2b0f11df
FB
266 }
267
c36d8e94
FB
268 if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
269 int needs_wakeup;
270
271 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
272 dwc->link_state == DWC3_LINK_STATE_U2 ||
273 dwc->link_state == DWC3_LINK_STATE_U3);
274
275 if (unlikely(needs_wakeup)) {
276 ret = __dwc3_gadget_wakeup(dwc);
277 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
278 ret);
279 }
280 }
281
2eb88016
FB
282 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
283 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
284 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
72246da4 285
2eb88016 286 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
72246da4 287 do {
2eb88016 288 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
72246da4 289 if (!(reg & DWC3_DEPCMD_CMDACT)) {
0933df15 290 cmd_status = DWC3_DEPCMD_STATUS(reg);
7b9cc7a2 291
7b9cc7a2
KL
292 switch (cmd_status) {
293 case 0:
294 ret = 0;
295 break;
296 case DEPEVT_TRANSFER_NO_RESOURCE:
7b9cc7a2 297 ret = -EINVAL;
c0ca324d 298 break;
7b9cc7a2
KL
299 case DEPEVT_TRANSFER_BUS_EXPIRY:
300 /*
301 * SW issues START TRANSFER command to
302 * isochronous ep with future frame interval. If
303 * future interval time has already passed when
304 * core receives the command, it will respond
305 * with an error status of 'Bus Expiry'.
306 *
307 * Instead of always returning -EINVAL, let's
308 * give a hint to the gadget driver that this is
309 * the case by returning -EAGAIN.
310 */
7b9cc7a2
KL
311 ret = -EAGAIN;
312 break;
313 default:
314 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
315 }
316
c0ca324d 317 break;
72246da4 318 }
f6bb225b 319 } while (--timeout);
72246da4 320
f6bb225b 321 if (timeout == 0) {
f6bb225b 322 ret = -ETIMEDOUT;
0933df15 323 cmd_status = -ETIMEDOUT;
f6bb225b 324 }
c0ca324d 325
0933df15
FB
326 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
327
2b0f11df
FB
328 if (unlikely(susphy)) {
329 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
330 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
331 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
332 }
333
c0ca324d 334 return ret;
72246da4
FB
335}
336
50c763f8
JY
337static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
338{
339 struct dwc3 *dwc = dep->dwc;
340 struct dwc3_gadget_ep_cmd_params params;
341 u32 cmd = DWC3_DEPCMD_CLEARSTALL;
342
343 /*
344 * As of core revision 2.60a the recommended programming model
345 * is to set the ClearPendIN bit when issuing a Clear Stall EP
346 * command for IN endpoints. This is to prevent an issue where
347 * some (non-compliant) hosts may not send ACK TPs for pending
348 * IN transfers due to a mishandled error condition. Synopsys
349 * STAR 9000614252.
350 */
351 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
352 cmd |= DWC3_DEPCMD_CLEARPENDIN;
353
354 memset(&params, 0, sizeof(params));
355
2cd4718d 356 return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
50c763f8
JY
357}
358
72246da4 359static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
f6bafc6a 360 struct dwc3_trb *trb)
72246da4 361{
c439ef87 362 u32 offset = (char *) trb - (char *) dep->trb_pool;
72246da4
FB
363
364 return dep->trb_pool_dma + offset;
365}
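/*
 * Sketch of the offset math, assuming struct dwc3_trb is the usual four
 * 32-bit words (bpl, bph, size, ctrl), i.e. 16 bytes per TRB: the third
 * TRB in the pool is reported to the hardware as
 * dep->trb_pool_dma + 2 * 16 = trb_pool_dma + 32.
 */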
366
367static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
368{
369 struct dwc3 *dwc = dep->dwc;
370
371 if (dep->trb_pool)
372 return 0;
373
72246da4
FB
374 dep->trb_pool = dma_alloc_coherent(dwc->dev,
375 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
376 &dep->trb_pool_dma, GFP_KERNEL);
377 if (!dep->trb_pool) {
378 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
379 dep->name);
380 return -ENOMEM;
381 }
382
383 return 0;
384}
385
386static void dwc3_free_trb_pool(struct dwc3_ep *dep)
387{
388 struct dwc3 *dwc = dep->dwc;
389
390 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
391 dep->trb_pool, dep->trb_pool_dma);
392
393 dep->trb_pool = NULL;
394 dep->trb_pool_dma = 0;
395}
396
c4509601
JY
397static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
398
399/**
400 * dwc3_gadget_start_config - Configure EP resources
401 * @dwc: pointer to our controller context structure
402 * @dep: endpoint that is being enabled
403 *
404 * The assignment of transfer resources cannot perfectly follow the
405 * data book due to the fact that the controller driver does not have
406 * all knowledge of the configuration in advance. It is given this
407 * information piecemeal by the composite gadget framework after every
408 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
409 * programming model in this scenario can cause errors, for two
410 * reasons:
411 *
412 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
413 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
414 * multiple interfaces.
415 *
416 * 2) The databook does not mention doing more DEPXFERCFG for new
417 * endpoint on alt setting (8.1.6).
418 *
419 * The following simplified method is used instead:
420 *
421 * All hardware endpoints can be assigned a transfer resource and this
422 * setting will stay persistent until either a core reset or
423 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
424 * do DEPXFERCFG for every hardware endpoint as well. We are
425 * guaranteed that there are as many transfer resources as endpoints.
426 *
427 * This function is called for each endpoint when it is being enabled
428 * but is triggered only when called for EP0-out, which always happens
429 * first, and which should only happen in one of the above conditions.
430 */
72246da4
FB
431static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
432{
433 struct dwc3_gadget_ep_cmd_params params;
434 u32 cmd;
c4509601
JY
435 int i;
436 int ret;
437
438 if (dep->number)
439 return 0;
72246da4
FB
440
441 memset(&params, 0x00, sizeof(params));
c4509601 442 cmd = DWC3_DEPCMD_DEPSTARTCFG;
72246da4 443
2cd4718d 444 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
c4509601
JY
445 if (ret)
446 return ret;
447
448 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
449 struct dwc3_ep *dep = dwc->eps[i];
72246da4 450
c4509601
JY
451 if (!dep)
452 continue;
453
454 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
455 if (ret)
456 return ret;
72246da4
FB
457 }
458
459 return 0;
460}
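/*
 * Put differently, the command sequence issued when EP0-out is enabled
 * boils down to (simplified):
 *
 *	DEPSTARTCFG on ep0out (transfer resource index 0)
 *	DEPXFERCFG with one transfer resource for every allocated endpoint
 *
 * so each hardware endpoint keeps its single transfer resource until a
 * core reset or hibernation, as described above.
 */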
461
462static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
c90bfaec 463 const struct usb_endpoint_descriptor *desc,
4b345c9a 464 const struct usb_ss_ep_comp_descriptor *comp_desc,
265b70a7 465 bool ignore, bool restore)
72246da4
FB
466{
467 struct dwc3_gadget_ep_cmd_params params;
468
469 memset(&params, 0x00, sizeof(params));
470
dc1c70a7 471 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
d2e9a13a
CP
472 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
473
474 /* Burst size is only needed in SuperSpeed mode */
ee5cd41c 475 if (dwc->gadget.speed >= USB_SPEED_SUPER) {
676e3497 476 u32 burst = dep->endpoint.maxburst;
676e3497 477 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
d2e9a13a 478 }
72246da4 479
4b345c9a
FB
480 if (ignore)
481 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
482
265b70a7
PZ
483 if (restore) {
484 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
485 params.param2 |= dep->saved_state;
486 }
487
dc1c70a7
FB
488 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
489 | DWC3_DEPCFG_XFER_NOT_READY_EN;
72246da4 490
18b7ede5 491 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
dc1c70a7
FB
492 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
493 | DWC3_DEPCFG_STREAM_EVENT_EN;
879631aa
FB
494 dep->stream_capable = true;
495 }
496
0b93a4c8 497 if (!usb_endpoint_xfer_control(desc))
dc1c70a7 498 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
72246da4
FB
499
500 /*
501 * We are doing 1:1 mapping for endpoints, meaning
502 * Physical Endpoint 2 maps to Logical Endpoint 2 and
503 * so on. We consider the direction bit as part of the physical
504 * endpoint number. So USB endpoint 0x81 is 0x03.
505 */
dc1c70a7 506 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
72246da4
FB
507
508 /*
509 * We must use the lower 16 TX FIFOs even though
510 * HW might have more
511 */
512 if (dep->direction)
dc1c70a7 513 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
72246da4
FB
514
515 if (desc->bInterval) {
dc1c70a7 516 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
72246da4
FB
517 dep->interval = 1 << (desc->bInterval - 1);
518 }
519
2cd4718d 520 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
72246da4
FB
521}
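/*
 * Example of the interval math above, assuming a high-speed interrupt
 * endpoint with bInterval = 4 in its descriptor:
 *
 *	dep->interval = 1 << (4 - 1) = 8 (micro)frames, i.e. 1 ms at HS
 *
 * while DEPCFG parameter 1 carries bInterval - 1 = 3.
 */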
522
523static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
524{
525 struct dwc3_gadget_ep_cmd_params params;
526
527 memset(&params, 0x00, sizeof(params));
528
dc1c70a7 529 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
72246da4 530
2cd4718d
FB
531 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
532 &params);
72246da4
FB
533}
534
535/**
536 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
537 * @dep: endpoint to be initialized
538 * @desc: USB Endpoint Descriptor
539 *
540 * Caller should take care of locking
541 */
542static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
c90bfaec 543 const struct usb_endpoint_descriptor *desc,
4b345c9a 544 const struct usb_ss_ep_comp_descriptor *comp_desc,
265b70a7 545 bool ignore, bool restore)
72246da4
FB
546{
547 struct dwc3 *dwc = dep->dwc;
548 u32 reg;
b09e99ee 549 int ret;
72246da4 550
73815280 551 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
ff62d6b6 552
72246da4
FB
553 if (!(dep->flags & DWC3_EP_ENABLED)) {
554 ret = dwc3_gadget_start_config(dwc, dep);
555 if (ret)
556 return ret;
557 }
558
265b70a7
PZ
559 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
560 restore);
72246da4
FB
561 if (ret)
562 return ret;
563
564 if (!(dep->flags & DWC3_EP_ENABLED)) {
f6bafc6a
FB
565 struct dwc3_trb *trb_st_hw;
566 struct dwc3_trb *trb_link;
72246da4 567
16e78db7 568 dep->endpoint.desc = desc;
c90bfaec 569 dep->comp_desc = comp_desc;
72246da4
FB
570 dep->type = usb_endpoint_type(desc);
571 dep->flags |= DWC3_EP_ENABLED;
572
573 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
574 reg |= DWC3_DALEPENA_EP(dep->number);
575 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
576
36b68aae 577 if (usb_endpoint_xfer_control(desc))
7ab373aa 578 return 0;
72246da4 579
0d25744a
JY
580 /* Initialize the TRB ring */
581 dep->trb_dequeue = 0;
582 dep->trb_enqueue = 0;
583 memset(dep->trb_pool, 0,
584 sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
585
36b68aae 586 /* Link TRB. The HWO bit is never reset */
72246da4
FB
587 trb_st_hw = &dep->trb_pool[0];
588
f6bafc6a 589 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
f6bafc6a
FB
590 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
591 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
592 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
593 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
72246da4
FB
594 }
595
596 return 0;
597}
598
b992e681 599static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
624407f9 600static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
72246da4
FB
601{
602 struct dwc3_request *req;
603
aa3342c8 604 if (!list_empty(&dep->started_list)) {
b992e681 605 dwc3_stop_active_transfer(dwc, dep->number, true);
624407f9 606
57911504 607 /* - giveback all requests to gadget driver */
aa3342c8
FB
608 while (!list_empty(&dep->started_list)) {
609 req = next_request(&dep->started_list);
1591633e
PA
610
611 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
612 }
ea53b882
FB
613 }
614
aa3342c8
FB
615 while (!list_empty(&dep->pending_list)) {
616 req = next_request(&dep->pending_list);
72246da4 617
624407f9 618 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
72246da4 619 }
72246da4
FB
620}
621
622/**
623 * __dwc3_gadget_ep_disable - Disables a HW endpoint
624 * @dep: the endpoint to disable
625 *
624407f9
SAS
626 * This function also removes requests which are currently processed by the
627 * hardware and those which are not yet scheduled.
628 * Caller should take care of locking.
72246da4 629 */
72246da4
FB
630static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
631{
632 struct dwc3 *dwc = dep->dwc;
633 u32 reg;
634
7eaeac5c
FB
635 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
636
624407f9 637 dwc3_remove_requests(dwc, dep);
72246da4 638
687ef981
FB
639 /* make sure HW endpoint isn't stalled */
640 if (dep->flags & DWC3_EP_STALL)
7a608559 641 __dwc3_gadget_ep_set_halt(dep, 0, false);
687ef981 642
72246da4
FB
643 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
644 reg &= ~DWC3_DALEPENA_EP(dep->number);
645 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
646
879631aa 647 dep->stream_capable = false;
f9c56cdd 648 dep->endpoint.desc = NULL;
c90bfaec 649 dep->comp_desc = NULL;
72246da4 650 dep->type = 0;
879631aa 651 dep->flags = 0;
72246da4
FB
652
653 return 0;
654}
655
656/* -------------------------------------------------------------------------- */
657
658static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
659 const struct usb_endpoint_descriptor *desc)
660{
661 return -EINVAL;
662}
663
664static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
665{
666 return -EINVAL;
667}
668
669/* -------------------------------------------------------------------------- */
670
671static int dwc3_gadget_ep_enable(struct usb_ep *ep,
672 const struct usb_endpoint_descriptor *desc)
673{
674 struct dwc3_ep *dep;
675 struct dwc3 *dwc;
676 unsigned long flags;
677 int ret;
678
679 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
680 pr_debug("dwc3: invalid parameters\n");
681 return -EINVAL;
682 }
683
684 if (!desc->wMaxPacketSize) {
685 pr_debug("dwc3: missing wMaxPacketSize\n");
686 return -EINVAL;
687 }
688
689 dep = to_dwc3_ep(ep);
690 dwc = dep->dwc;
691
95ca961c
FB
692 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
693 "%s is already enabled\n",
694 dep->name))
c6f83f38 695 return 0;
c6f83f38 696
72246da4 697 spin_lock_irqsave(&dwc->lock, flags);
265b70a7 698 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
72246da4
FB
699 spin_unlock_irqrestore(&dwc->lock, flags);
700
701 return ret;
702}
703
704static int dwc3_gadget_ep_disable(struct usb_ep *ep)
705{
706 struct dwc3_ep *dep;
707 struct dwc3 *dwc;
708 unsigned long flags;
709 int ret;
710
711 if (!ep) {
712 pr_debug("dwc3: invalid parameters\n");
713 return -EINVAL;
714 }
715
716 dep = to_dwc3_ep(ep);
717 dwc = dep->dwc;
718
95ca961c
FB
719 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
720 "%s is already disabled\n",
721 dep->name))
72246da4 722 return 0;
72246da4 723
72246da4
FB
724 spin_lock_irqsave(&dwc->lock, flags);
725 ret = __dwc3_gadget_ep_disable(dep);
726 spin_unlock_irqrestore(&dwc->lock, flags);
727
728 return ret;
729}
730
731static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
732 gfp_t gfp_flags)
733{
734 struct dwc3_request *req;
735 struct dwc3_ep *dep = to_dwc3_ep(ep);
72246da4
FB
736
737 req = kzalloc(sizeof(*req), gfp_flags);
734d5a53 738 if (!req)
72246da4 739 return NULL;
72246da4
FB
740
741 req->epnum = dep->number;
742 req->dep = dep;
72246da4 743
68d34c8a
FB
744 dep->allocated_requests++;
745
2c4cbe6e
FB
746 trace_dwc3_alloc_request(req);
747
72246da4
FB
748 return &req->request;
749}
750
751static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
752 struct usb_request *request)
753{
754 struct dwc3_request *req = to_dwc3_request(request);
68d34c8a 755 struct dwc3_ep *dep = to_dwc3_ep(ep);
72246da4 756
68d34c8a 757 dep->allocated_requests--;
2c4cbe6e 758 trace_dwc3_free_request(req);
72246da4
FB
759 kfree(req);
760}
761
c71fc37c
FB
762/**
763 * dwc3_prepare_one_trb - setup one TRB from one request
764 * @dep: endpoint for which this request is prepared
765 * @req: dwc3_request pointer
766 */
68e823e2 767static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
eeb720fb 768 struct dwc3_request *req, dma_addr_t dma,
e5ba5ec8 769 unsigned length, unsigned last, unsigned chain, unsigned node)
c71fc37c 770{
f6bafc6a 771 struct dwc3_trb *trb;
c71fc37c 772
73815280 773 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
eeb720fb
FB
774 dep->name, req, (unsigned long long) dma,
775 length, last ? " last" : "",
776 chain ? " chain" : "");
777
915e202a 778
4faf7550 779 trb = &dep->trb_pool[dep->trb_enqueue];
c71fc37c 780
eeb720fb 781 if (!req->trb) {
aa3342c8 782 dwc3_gadget_move_started_request(req);
f6bafc6a
FB
783 req->trb = trb;
784 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
4faf7550 785 req->first_trb_index = dep->trb_enqueue;
eeb720fb 786 }
c71fc37c 787
ef966b9d 788 dwc3_ep_inc_enq(dep);
e5ba5ec8 789
f6bafc6a
FB
790 trb->size = DWC3_TRB_SIZE_LENGTH(length);
791 trb->bpl = lower_32_bits(dma);
792 trb->bph = upper_32_bits(dma);
c71fc37c 793
16e78db7 794 switch (usb_endpoint_type(dep->endpoint.desc)) {
c71fc37c 795 case USB_ENDPOINT_XFER_CONTROL:
f6bafc6a 796 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
c71fc37c
FB
797 break;
798
799 case USB_ENDPOINT_XFER_ISOC:
e5ba5ec8
PA
800 if (!node)
801 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
802 else
803 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
ca4d44ea
FB
804
805 /* always enable Interrupt on Missed ISOC */
806 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
c71fc37c
FB
807 break;
808
809 case USB_ENDPOINT_XFER_BULK:
810 case USB_ENDPOINT_XFER_INT:
f6bafc6a 811 trb->ctrl = DWC3_TRBCTL_NORMAL;
c71fc37c
FB
812 break;
813 default:
814 /*
815 * This is only possible with faulty memory because we
816 * checked it already :)
817 */
818 BUG();
819 }
820
ca4d44ea
FB
821 /* always enable Continue on Short Packet */
822 trb->ctrl |= DWC3_TRB_CTRL_CSP;
f3af3651 823
f3af3651 824 if (!req->request.no_interrupt && !chain)
ca4d44ea 825 trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
f3af3651 826
ca4d44ea 827 if (last)
e5ba5ec8 828 trb->ctrl |= DWC3_TRB_CTRL_LST;
c71fc37c 829
e5ba5ec8
PA
830 if (chain)
831 trb->ctrl |= DWC3_TRB_CTRL_CHN;
832
16e78db7 833 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
f6bafc6a 834 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
c71fc37c 835
f6bafc6a 836 trb->ctrl |= DWC3_TRB_CTRL_HWO;
2c4cbe6e 837
68d34c8a
FB
838 dep->queued_requests++;
839
2c4cbe6e 840 trace_dwc3_prepare_trb(dep, trb);
c71fc37c
FB
841}
842
361572b5
JY
843/**
844 * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
845 * @dep: The endpoint with the TRB ring
846 * @index: The index of the current TRB in the ring
847 *
848 * Returns the TRB prior to the one pointed to by the index. If the
849 * index is 0, we will wrap backwards, skip the link TRB, and return
850 * the one just before that.
851 */
852static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
853{
854 if (!index)
855 index = DWC3_TRB_NUM - 2;
856 else
857 index = dep->trb_enqueue - 1;
858
859 return &dep->trb_pool[index];
860}
861
c4233573
FB
862static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
863{
864 struct dwc3_trb *tmp;
32db3d94 865 u8 trbs_left;
c4233573
FB
866
867 /*
868 * If enqueue & dequeue are equal than it is either full or empty.
869 *
870 * One way to know for sure is if the TRB right before us has HWO bit
871 * set or not. If it has, then we're definitely full and can't fit any
872 * more transfers in our ring.
873 */
874 if (dep->trb_enqueue == dep->trb_dequeue) {
361572b5
JY
875 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
876 if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
877 return 0;
c4233573
FB
878
879 return DWC3_TRB_NUM - 1;
880 }
881
32db3d94 882 trbs_left = dep->trb_dequeue - dep->trb_enqueue;
3de2685f 883 trbs_left &= (DWC3_TRB_NUM - 1);
32db3d94 884
7d0a038b
JY
885 if (dep->trb_dequeue < dep->trb_enqueue)
886 trbs_left--;
887
32db3d94 888 return trbs_left;
c4233573
FB
889}
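/*
 * Worked example of the arithmetic above, taking DWC3_TRB_NUM as 256
 * purely for illustration:
 *
 *	trb_enqueue = 250, trb_dequeue = 10
 *	trbs_left   = (10 - 250) & 255 = 16
 *
 * Since the free region wraps past the last slot, which holds the link
 * TRB, one entry is subtracted and 15 TRBs are actually usable.
 */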
890
5ee85d89
FB
891static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
892 struct dwc3_request *req, unsigned int trbs_left)
893{
894 struct usb_request *request = &req->request;
895 struct scatterlist *sg = request->sg;
896 struct scatterlist *s;
897 unsigned int last = false;
898 unsigned int length;
899 dma_addr_t dma;
900 int i;
901
902 for_each_sg(sg, s, request->num_mapped_sgs, i) {
903 unsigned chain = true;
904
905 length = sg_dma_len(s);
906 dma = sg_dma_address(s);
907
908 if (sg_is_last(s)) {
909 if (list_is_last(&req->list, &dep->pending_list))
910 last = true;
911
912 chain = false;
913 }
914
915 if (!trbs_left)
916 last = true;
917
918 if (last)
919 chain = false;
920
921 dwc3_prepare_one_trb(dep, req, dma, length,
922 last, chain, i);
923
924 if (last)
925 break;
926 }
927}
928
929static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
930 struct dwc3_request *req, unsigned int trbs_left)
931{
932 unsigned int last = false;
933 unsigned int length;
934 dma_addr_t dma;
935
936 dma = req->request.dma;
937 length = req->request.length;
938
939 if (!trbs_left)
940 last = true;
941
942 /* Is this the last request? */
943 if (list_is_last(&req->list, &dep->pending_list))
944 last = true;
945
946 dwc3_prepare_one_trb(dep, req, dma, length,
947 last, false, 0);
948}
949
72246da4
FB
950/*
951 * dwc3_prepare_trbs - setup TRBs from requests
952 * @dep: endpoint for which requests are being prepared
72246da4 953 *
1d046793
PZ
954 * The function goes through the requests list and sets up TRBs for the
955 * transfers. The function returns once there are no more TRBs available or
956 * it runs out of requests.
72246da4 957 */
c4233573 958static void dwc3_prepare_trbs(struct dwc3_ep *dep)
72246da4 959{
68e823e2 960 struct dwc3_request *req, *n;
72246da4
FB
961 u32 trbs_left;
962
963 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
964
c4233573 965 trbs_left = dwc3_calc_trbs_left(dep);
89bc856e
JY
966 if (!trbs_left)
967 return;
72246da4 968
aa3342c8 969 list_for_each_entry_safe(req, n, &dep->pending_list, list) {
5ee85d89
FB
970 if (req->request.num_mapped_sgs > 0)
971 dwc3_prepare_one_trb_sg(dep, req, trbs_left--);
972 else
973 dwc3_prepare_one_trb_linear(dep, req, trbs_left--);
72246da4 974
5ee85d89
FB
975 if (!trbs_left)
976 return;
72246da4 977 }
72246da4
FB
978}
979
4fae2e3e 980static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
72246da4
FB
981{
982 struct dwc3_gadget_ep_cmd_params params;
983 struct dwc3_request *req;
984 struct dwc3 *dwc = dep->dwc;
4fae2e3e 985 int starting;
72246da4
FB
986 int ret;
987 u32 cmd;
988
4fae2e3e 989 starting = !(dep->flags & DWC3_EP_BUSY);
72246da4 990
4fae2e3e
FB
991 dwc3_prepare_trbs(dep);
992 req = next_request(&dep->started_list);
72246da4
FB
993 if (!req) {
994 dep->flags |= DWC3_EP_PENDING_REQUEST;
995 return 0;
996 }
997
998 memset(&params, 0, sizeof(params));
72246da4 999
4fae2e3e 1000 if (starting) {
1877d6c9
PA
1001 params.param0 = upper_32_bits(req->trb_dma);
1002 params.param1 = lower_32_bits(req->trb_dma);
b6b1c6db
FB
1003 cmd = DWC3_DEPCMD_STARTTRANSFER |
1004 DWC3_DEPCMD_PARAM(cmd_param);
1877d6c9 1005 } else {
b6b1c6db
FB
1006 cmd = DWC3_DEPCMD_UPDATETRANSFER |
1007 DWC3_DEPCMD_PARAM(dep->resource_index);
1877d6c9 1008 }
72246da4 1009
2cd4718d 1010 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
72246da4 1011 if (ret < 0) {
72246da4
FB
1012 /*
1013 * FIXME we need to iterate over the list of requests
1014 * here and stop, unmap, free and del each of the linked
1d046793 1015 * requests instead of what we do now.
72246da4 1016 */
0fc9a1be
FB
1017 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1018 req->direction);
72246da4
FB
1019 list_del(&req->list);
1020 return ret;
1021 }
1022
1023 dep->flags |= DWC3_EP_BUSY;
25b8ff68 1024
4fae2e3e 1025 if (starting) {
2eb88016 1026 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
b4996a86 1027 WARN_ON_ONCE(!dep->resource_index);
f898ae09 1028 }
25b8ff68 1029
72246da4
FB
1030 return 0;
1031}
1032
d6d6ec7b
PA
1033static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1034 struct dwc3_ep *dep, u32 cur_uf)
1035{
1036 u32 uf;
1037
aa3342c8 1038 if (list_empty(&dep->pending_list)) {
73815280
FB
1039 dwc3_trace(trace_dwc3_gadget,
1040 "ISOC ep %s run out for requests",
1041 dep->name);
f4a53c55 1042 dep->flags |= DWC3_EP_PENDING_REQUEST;
d6d6ec7b
PA
1043 return;
1044 }
1045
1046 /* 4 micro frames in the future */
1047 uf = cur_uf + dep->interval * 4;
1048
4fae2e3e 1049 __dwc3_gadget_kick_transfer(dep, uf);
d6d6ec7b
PA
1050}
1051
1052static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1053 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1054{
1055 u32 cur_uf, mask;
1056
1057 mask = ~(dep->interval - 1);
1058 cur_uf = event->parameters & mask;
1059
1060 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1061}
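/*
 * Sketch of the microframe math, assuming dep->interval = 8 and an
 * XferNotReady event whose parameters field reads 0x1235:
 *
 *	mask   = ~(8 - 1)      = ~7
 *	cur_uf = 0x1235 & ~7   = 0x1230
 *	uf     = 0x1230 + 8*4  = 0x1250
 *
 * i.e. the transfer is started four intervals into the future.
 */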
1062
72246da4
FB
1063static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1064{
0fc9a1be
FB
1065 struct dwc3 *dwc = dep->dwc;
1066 int ret;
1067
bb423984 1068 if (!dep->endpoint.desc) {
ec5e795c 1069 dwc3_trace(trace_dwc3_gadget,
60cfb37a 1070 "trying to queue request %p to disabled %s",
bb423984
FB
1071 &req->request, dep->endpoint.name);
1072 return -ESHUTDOWN;
1073 }
1074
1075 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1076 &req->request, req->dep->name)) {
60cfb37a 1077 dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
ec5e795c 1078 &req->request, req->dep->name);
bb423984
FB
1079 return -EINVAL;
1080 }
1081
fc8bb91b
FB
1082 pm_runtime_get(dwc->dev);
1083
72246da4
FB
1084 req->request.actual = 0;
1085 req->request.status = -EINPROGRESS;
1086 req->direction = dep->direction;
1087 req->epnum = dep->number;
1088
fe84f522
FB
1089 trace_dwc3_ep_queue(req);
1090
72246da4
FB
1091 /*
1092 * We only add to our list of requests now and
1093 * start consuming the list once we get XferNotReady
1094 * IRQ.
1095 *
1096 * That way, we avoid doing anything that we don't need
1097 * to do now and defer it until the point we receive a
1098 * particular token from the Host side.
1099 *
1100 * This will also avoid Host cancelling URBs due to too
1d046793 1101 * many NAKs.
72246da4 1102 */
0fc9a1be
FB
1103 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1104 dep->direction);
1105 if (ret)
1106 return ret;
1107
aa3342c8 1108 list_add_tail(&req->list, &dep->pending_list);
72246da4 1109
1d6a3918
FB
1110 /*
1111 * If there are no pending requests and the endpoint isn't already
1112 * busy, we will just start the request straight away.
1113 *
1114 * This will save one IRQ (XFER_NOT_READY) and possibly make it a
1115 * little bit faster.
1116 */
1117 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
62e345ae 1118 !usb_endpoint_xfer_int(dep->endpoint.desc) &&
1d6a3918 1119 !(dep->flags & DWC3_EP_BUSY)) {
4fae2e3e 1120 ret = __dwc3_gadget_kick_transfer(dep, 0);
a8f32817 1121 goto out;
1d6a3918
FB
1122 }
1123
72246da4 1124 /*
b511e5e7 1125 * There are a few special cases:
72246da4 1126 *
f898ae09
PZ
1127 * 1. XferNotReady with empty list of requests. We need to kick the
1128 * transfer here in that situation, otherwise we will be NAKing
1129 * forever. If we get XferNotReady before gadget driver has a
1130 * chance to queue a request, we will ACK the IRQ but won't be
1131 * able to receive the data until the next request is queued.
1132 * The following code is handling exactly that.
72246da4 1133 *
72246da4
FB
1134 */
1135 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
f4a53c55
PA
1136 /*
1137 * If xfernotready is already elapsed and it is a case
1138 * of isoc transfer, then issue END TRANSFER, so that
1139 * you can receive xfernotready again and can have
1140 * notion of current microframe.
1141 */
1142 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
aa3342c8 1143 if (list_empty(&dep->started_list)) {
b992e681 1144 dwc3_stop_active_transfer(dwc, dep->number, true);
cdc359dd
PA
1145 dep->flags = DWC3_EP_ENABLED;
1146 }
f4a53c55
PA
1147 return 0;
1148 }
1149
4fae2e3e 1150 ret = __dwc3_gadget_kick_transfer(dep, 0);
89185916
FB
1151 if (!ret)
1152 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
1153
a8f32817 1154 goto out;
b511e5e7 1155 }
72246da4 1156
b511e5e7
FB
1157 /*
1158 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1159 * kick the transfer here after queuing a request, otherwise the
1160 * core may not see the modified TRB(s).
1161 */
1162 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
79c9046e
PA
1163 (dep->flags & DWC3_EP_BUSY) &&
1164 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
b4996a86 1165 WARN_ON_ONCE(!dep->resource_index);
4fae2e3e 1166 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index);
a8f32817 1167 goto out;
a0925324 1168 }
72246da4 1169
b997ada5
FB
1170 /*
1171 * 4. Stream Capable Bulk Endpoints. We need to start the transfer
1172 * right away, otherwise host will not know we have streams to be
1173 * handled.
1174 */
a8f32817 1175 if (dep->stream_capable)
4fae2e3e 1176 ret = __dwc3_gadget_kick_transfer(dep, 0);
b997ada5 1177
a8f32817
FB
1178out:
1179 if (ret && ret != -EBUSY)
ec5e795c 1180 dwc3_trace(trace_dwc3_gadget,
60cfb37a 1181 "%s: failed to kick transfers",
a8f32817
FB
1182 dep->name);
1183 if (ret == -EBUSY)
1184 ret = 0;
1185
1186 return ret;
72246da4
FB
1187}
1188
04c03d10
FB
1189static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
1190 struct usb_request *request)
1191{
1192 dwc3_gadget_ep_free_request(ep, request);
1193}
1194
1195static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
1196{
1197 struct dwc3_request *req;
1198 struct usb_request *request;
1199 struct usb_ep *ep = &dep->endpoint;
1200
60cfb37a 1201 dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
04c03d10
FB
1202 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
1203 if (!request)
1204 return -ENOMEM;
1205
1206 request->length = 0;
1207 request->buf = dwc->zlp_buf;
1208 request->complete = __dwc3_gadget_ep_zlp_complete;
1209
1210 req = to_dwc3_request(request);
1211
1212 return __dwc3_gadget_ep_queue(dep, req);
1213}
1214
72246da4
FB
1215static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1216 gfp_t gfp_flags)
1217{
1218 struct dwc3_request *req = to_dwc3_request(request);
1219 struct dwc3_ep *dep = to_dwc3_ep(ep);
1220 struct dwc3 *dwc = dep->dwc;
1221
1222 unsigned long flags;
1223
1224 int ret;
1225
fdee4eba 1226 spin_lock_irqsave(&dwc->lock, flags);
72246da4 1227 ret = __dwc3_gadget_ep_queue(dep, req);
04c03d10
FB
1228
1229 /*
1230 * Okay, here's the thing, if the gadget driver has requested a ZLP by
1231 * setting request->zero, instead of doing magic, we will just queue an
1232 * extra usb_request ourselves so that it gets handled the same way as
1233 * any other request.
1234 */
d9261898
JY
1235 if (ret == 0 && request->zero && request->length &&
1236 (request->length % ep->maxpacket == 0))
04c03d10
FB
1237 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
1238
72246da4
FB
1239 spin_unlock_irqrestore(&dwc->lock, flags);
1240
1241 return ret;
1242}
1243
1244static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1245 struct usb_request *request)
1246{
1247 struct dwc3_request *req = to_dwc3_request(request);
1248 struct dwc3_request *r = NULL;
1249
1250 struct dwc3_ep *dep = to_dwc3_ep(ep);
1251 struct dwc3 *dwc = dep->dwc;
1252
1253 unsigned long flags;
1254 int ret = 0;
1255
2c4cbe6e
FB
1256 trace_dwc3_ep_dequeue(req);
1257
72246da4
FB
1258 spin_lock_irqsave(&dwc->lock, flags);
1259
aa3342c8 1260 list_for_each_entry(r, &dep->pending_list, list) {
72246da4
FB
1261 if (r == req)
1262 break;
1263 }
1264
1265 if (r != req) {
aa3342c8 1266 list_for_each_entry(r, &dep->started_list, list) {
72246da4
FB
1267 if (r == req)
1268 break;
1269 }
1270 if (r == req) {
1271 /* wait until it is processed */
b992e681 1272 dwc3_stop_active_transfer(dwc, dep->number, true);
e8d4e8be 1273 goto out1;
72246da4
FB
1274 }
1275 dev_err(dwc->dev, "request %p was not queued to %s\n",
1276 request, ep->name);
1277 ret = -EINVAL;
1278 goto out0;
1279 }
1280
e8d4e8be 1281out1:
72246da4
FB
1282 /* giveback the request */
1283 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1284
1285out0:
1286 spin_unlock_irqrestore(&dwc->lock, flags);
1287
1288 return ret;
1289}
1290
7a608559 1291int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
72246da4
FB
1292{
1293 struct dwc3_gadget_ep_cmd_params params;
1294 struct dwc3 *dwc = dep->dwc;
1295 int ret;
1296
5ad02fb8
FB
1297 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1298 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1299 return -EINVAL;
1300 }
1301
72246da4
FB
1302 memset(&params, 0x00, sizeof(params));
1303
1304 if (value) {
7a608559 1305 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
aa3342c8
FB
1306 (!list_empty(&dep->started_list) ||
1307 !list_empty(&dep->pending_list)))) {
ec5e795c 1308 dwc3_trace(trace_dwc3_gadget,
052ba52e 1309 "%s: pending request, cannot halt",
7a608559
FB
1310 dep->name);
1311 return -EAGAIN;
1312 }
1313
2cd4718d
FB
1314 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
1315 &params);
72246da4 1316 if (ret)
3f89204b 1317 dev_err(dwc->dev, "failed to set STALL on %s\n",
72246da4
FB
1318 dep->name);
1319 else
1320 dep->flags |= DWC3_EP_STALL;
1321 } else {
2cd4718d 1322
50c763f8 1323 ret = dwc3_send_clear_stall_ep_cmd(dep);
72246da4 1324 if (ret)
3f89204b 1325 dev_err(dwc->dev, "failed to clear STALL on %s\n",
72246da4
FB
1326 dep->name);
1327 else
a535d81c 1328 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
72246da4 1329 }
5275455a 1330
72246da4
FB
1331 return ret;
1332}
1333
1334static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1335{
1336 struct dwc3_ep *dep = to_dwc3_ep(ep);
1337 struct dwc3 *dwc = dep->dwc;
1338
1339 unsigned long flags;
1340
1341 int ret;
1342
1343 spin_lock_irqsave(&dwc->lock, flags);
7a608559 1344 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
72246da4
FB
1345 spin_unlock_irqrestore(&dwc->lock, flags);
1346
1347 return ret;
1348}
1349
1350static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1351{
1352 struct dwc3_ep *dep = to_dwc3_ep(ep);
249a4569
PZ
1353 struct dwc3 *dwc = dep->dwc;
1354 unsigned long flags;
95aa4e8d 1355 int ret;
72246da4 1356
249a4569 1357 spin_lock_irqsave(&dwc->lock, flags);
72246da4
FB
1358 dep->flags |= DWC3_EP_WEDGE;
1359
08f0d966 1360 if (dep->number == 0 || dep->number == 1)
95aa4e8d 1361 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
08f0d966 1362 else
7a608559 1363 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
95aa4e8d
FB
1364 spin_unlock_irqrestore(&dwc->lock, flags);
1365
1366 return ret;
72246da4
FB
1367}
1368
1369/* -------------------------------------------------------------------------- */
1370
1371static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1372 .bLength = USB_DT_ENDPOINT_SIZE,
1373 .bDescriptorType = USB_DT_ENDPOINT,
1374 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1375};
1376
1377static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1378 .enable = dwc3_gadget_ep0_enable,
1379 .disable = dwc3_gadget_ep0_disable,
1380 .alloc_request = dwc3_gadget_ep_alloc_request,
1381 .free_request = dwc3_gadget_ep_free_request,
1382 .queue = dwc3_gadget_ep0_queue,
1383 .dequeue = dwc3_gadget_ep_dequeue,
08f0d966 1384 .set_halt = dwc3_gadget_ep0_set_halt,
72246da4
FB
1385 .set_wedge = dwc3_gadget_ep_set_wedge,
1386};
1387
1388static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1389 .enable = dwc3_gadget_ep_enable,
1390 .disable = dwc3_gadget_ep_disable,
1391 .alloc_request = dwc3_gadget_ep_alloc_request,
1392 .free_request = dwc3_gadget_ep_free_request,
1393 .queue = dwc3_gadget_ep_queue,
1394 .dequeue = dwc3_gadget_ep_dequeue,
1395 .set_halt = dwc3_gadget_ep_set_halt,
1396 .set_wedge = dwc3_gadget_ep_set_wedge,
1397};
1398
1399/* -------------------------------------------------------------------------- */
1400
1401static int dwc3_gadget_get_frame(struct usb_gadget *g)
1402{
1403 struct dwc3 *dwc = gadget_to_dwc(g);
1404 u32 reg;
1405
1406 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1407 return DWC3_DSTS_SOFFN(reg);
1408}
1409
218ef7b6 1410static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
72246da4 1411{
72246da4 1412 unsigned long timeout;
72246da4 1413
218ef7b6 1414 int ret;
72246da4
FB
1415 u32 reg;
1416
72246da4
FB
1417 u8 link_state;
1418 u8 speed;
1419
72246da4
FB
1420 /*
1421 * According to the Databook, a remote wakeup request should
1422 * be issued only when the device is in early suspend state.
1423 *
1424 * We can check that via USB Link State bits in DSTS register.
1425 */
1426 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1427
1428 speed = reg & DWC3_DSTS_CONNECTSPD;
ee5cd41c
JY
1429 if ((speed == DWC3_DSTS_SUPERSPEED) ||
1430 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
60cfb37a 1431 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
6b742899 1432 return 0;
72246da4
FB
1433 }
1434
1435 link_state = DWC3_DSTS_USBLNKST(reg);
1436
1437 switch (link_state) {
1438 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1439 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1440 break;
1441 default:
ec5e795c 1442 dwc3_trace(trace_dwc3_gadget,
60cfb37a 1443 "can't wakeup from '%s'",
ec5e795c 1444 dwc3_gadget_link_string(link_state));
218ef7b6 1445 return -EINVAL;
72246da4
FB
1446 }
1447
8598bde7
FB
1448 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1449 if (ret < 0) {
1450 dev_err(dwc->dev, "failed to put link in Recovery\n");
218ef7b6 1451 return ret;
8598bde7 1452 }
72246da4 1453
802fde98
PZ
1454 /* Recent versions do this automatically */
1455 if (dwc->revision < DWC3_REVISION_194A) {
1456 /* write zeroes to Link Change Request */
fcc023c7 1457 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
802fde98
PZ
1458 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1459 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1460 }
72246da4 1461
1d046793 1462 /* poll until Link State changes to ON */
72246da4
FB
1463 timeout = jiffies + msecs_to_jiffies(100);
1464
1d046793 1465 while (!time_after(jiffies, timeout)) {
72246da4
FB
1466 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1467
1468 /* in HS, means ON */
1469 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1470 break;
1471 }
1472
1473 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1474 dev_err(dwc->dev, "failed to send remote wakeup\n");
218ef7b6 1475 return -EINVAL;
72246da4
FB
1476 }
1477
218ef7b6
FB
1478 return 0;
1479}
1480
1481static int dwc3_gadget_wakeup(struct usb_gadget *g)
1482{
1483 struct dwc3 *dwc = gadget_to_dwc(g);
1484 unsigned long flags;
1485 int ret;
1486
1487 spin_lock_irqsave(&dwc->lock, flags);
1488 ret = __dwc3_gadget_wakeup(dwc);
72246da4
FB
1489 spin_unlock_irqrestore(&dwc->lock, flags);
1490
1491 return ret;
1492}
1493
1494static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1495 int is_selfpowered)
1496{
1497 struct dwc3 *dwc = gadget_to_dwc(g);
249a4569 1498 unsigned long flags;
72246da4 1499
249a4569 1500 spin_lock_irqsave(&dwc->lock, flags);
bcdea503 1501 g->is_selfpowered = !!is_selfpowered;
249a4569 1502 spin_unlock_irqrestore(&dwc->lock, flags);
72246da4
FB
1503
1504 return 0;
1505}
1506
7b2a0368 1507static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
72246da4
FB
1508{
1509 u32 reg;
61d58242 1510 u32 timeout = 500;
72246da4 1511
fc8bb91b
FB
1512 if (pm_runtime_suspended(dwc->dev))
1513 return 0;
1514
72246da4 1515 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
8db7ed15 1516 if (is_on) {
802fde98
PZ
1517 if (dwc->revision <= DWC3_REVISION_187A) {
1518 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1519 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1520 }
1521
1522 if (dwc->revision >= DWC3_REVISION_194A)
1523 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1524 reg |= DWC3_DCTL_RUN_STOP;
7b2a0368
FB
1525
1526 if (dwc->has_hibernation)
1527 reg |= DWC3_DCTL_KEEP_CONNECT;
1528
9fcb3bd8 1529 dwc->pullups_connected = true;
8db7ed15 1530 } else {
72246da4 1531 reg &= ~DWC3_DCTL_RUN_STOP;
7b2a0368
FB
1532
1533 if (dwc->has_hibernation && !suspend)
1534 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1535
9fcb3bd8 1536 dwc->pullups_connected = false;
8db7ed15 1537 }
72246da4
FB
1538
1539 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1540
1541 do {
1542 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1543 if (is_on) {
1544 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1545 break;
1546 } else {
1547 if (reg & DWC3_DSTS_DEVCTRLHLT)
1548 break;
1549 }
72246da4
FB
1550 timeout--;
1551 if (!timeout)
6f17f74b 1552 return -ETIMEDOUT;
61d58242 1553 udelay(1);
72246da4
FB
1554 } while (1);
1555
73815280 1556 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
72246da4
FB
1557 dwc->gadget_driver
1558 ? dwc->gadget_driver->function : "no-function",
1559 is_on ? "connect" : "disconnect");
6f17f74b
PA
1560
1561 return 0;
72246da4
FB
1562}
1563
1564static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1565{
1566 struct dwc3 *dwc = gadget_to_dwc(g);
1567 unsigned long flags;
6f17f74b 1568 int ret;
72246da4
FB
1569
1570 is_on = !!is_on;
1571
1572 spin_lock_irqsave(&dwc->lock, flags);
7b2a0368 1573 ret = dwc3_gadget_run_stop(dwc, is_on, false);
72246da4
FB
1574 spin_unlock_irqrestore(&dwc->lock, flags);
1575
6f17f74b 1576 return ret;
72246da4
FB
1577}
1578
8698e2ac
FB
1579static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1580{
1581 u32 reg;
1582
1583 /* Enable all but Start and End of Frame IRQs */
1584 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1585 DWC3_DEVTEN_EVNTOVERFLOWEN |
1586 DWC3_DEVTEN_CMDCMPLTEN |
1587 DWC3_DEVTEN_ERRTICERREN |
1588 DWC3_DEVTEN_WKUPEVTEN |
1589 DWC3_DEVTEN_ULSTCNGEN |
1590 DWC3_DEVTEN_CONNECTDONEEN |
1591 DWC3_DEVTEN_USBRSTEN |
1592 DWC3_DEVTEN_DISCONNEVTEN);
1593
1594 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1595}
1596
1597static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1598{
1599 /* mask all interrupts */
1600 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1601}
1602
1603static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
b15a762f 1604static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
8698e2ac 1605
4e99472b
FB
1606/**
1607 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
1608 * @dwc: pointer to our context structure
1609 *
1610 * The following looks complex but it's actually very simple. In order to
1611 * calculate the number of packets we can burst at once on OUT transfers, we're
1612 * gonna use RxFIFO size.
1613 *
1614 * To calculate RxFIFO size we need two numbers:
1615 * MDWIDTH = size, in bits, of the internal memory bus
1616 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
1617 *
1618 * Given these two numbers, the formula is simple:
1619 *
1620 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
1621 *
1622 * 24 bytes is for 3x SETUP packets
1623 * 16 bytes is a clock domain crossing tolerance
1624 *
1625 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
1626 */
1627static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
1628{
1629 u32 ram2_depth;
1630 u32 mdwidth;
1631 u32 nump;
1632 u32 reg;
1633
1634 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
1635 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
1636
1637 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
1638 nump = min_t(u32, nump, 16);
1639
1640 /* update NumP */
1641 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1642 reg &= ~DWC3_DCFG_NUMP_MASK;
1643 reg |= nump << DWC3_DCFG_NUMP_SHIFT;
1644 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1645}
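/*
 * Worked example with illustrative hardware parameters, mdwidth = 64 bits
 * and ram2_depth = 2048:
 *
 *	RxFIFO = 2048 * 64 / 8 - 24 - 16 = 16344 bytes
 *	nump   = 16344 / 1024            = 15, clamped to at most 16
 */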
1646
d7be2952 1647static int __dwc3_gadget_start(struct dwc3 *dwc)
72246da4 1648{
72246da4 1649 struct dwc3_ep *dep;
72246da4
FB
1650 int ret = 0;
1651 u32 reg;
1652
72246da4
FB
1653 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1654 reg &= ~(DWC3_DCFG_SPEED_MASK);
07e7f47b
FB
1655
1656 /**
1657 * WORKAROUND: DWC3 revisions < 2.20a have an issue
1658 * which would cause metastability state on Run/Stop
1659 * bit if we try to force the IP to USB2-only mode.
1660 *
1661 * Because of that, we cannot configure the IP to any
1662 * speed other than SuperSpeed.
1663 *
1664 * Refers to:
1665 *
1666 * STAR#9000525659: Clock Domain Crossing on DCTL in
1667 * USB 2.0 Mode
1668 */
f7e846f0 1669 if (dwc->revision < DWC3_REVISION_220A) {
07e7f47b 1670 reg |= DWC3_DCFG_SUPERSPEED;
f7e846f0
FB
1671 } else {
1672 switch (dwc->maximum_speed) {
1673 case USB_SPEED_LOW:
2da9ad76 1674 reg |= DWC3_DCFG_LOWSPEED;
f7e846f0
FB
1675 break;
1676 case USB_SPEED_FULL:
2da9ad76 1677 reg |= DWC3_DCFG_FULLSPEED1;
f7e846f0
FB
1678 break;
1679 case USB_SPEED_HIGH:
2da9ad76 1680 reg |= DWC3_DCFG_HIGHSPEED;
f7e846f0 1681 break;
7580862b 1682 case USB_SPEED_SUPER_PLUS:
2da9ad76 1683 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
7580862b 1684 break;
f7e846f0 1685 default:
77966eb8
JY
1686 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
1687 dwc->maximum_speed);
1688 /* fall through */
1689 case USB_SPEED_SUPER:
1690 reg |= DWC3_DCFG_SUPERSPEED;
1691 break;
f7e846f0
FB
1692 }
1693 }
72246da4
FB
1694 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1695
2a58f9c1
FB
1696 /*
1697 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
1698 * field instead of letting dwc3 itself calculate that automatically.
1699 *
1700 * This way, we maximize the chances that we'll be able to get several
1701 * bursts of data without going through any sort of endpoint throttling.
1702 */
1703 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1704 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
1705 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1706
4e99472b
FB
1707 dwc3_gadget_setup_nump(dwc);
1708
72246da4
FB
1709 /* Start with SuperSpeed Default */
1710 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1711
1712 dep = dwc->eps[0];
265b70a7
PZ
1713 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1714 false);
72246da4
FB
1715 if (ret) {
1716 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
d7be2952 1717 goto err0;
72246da4
FB
1718 }
1719
1720 dep = dwc->eps[1];
265b70a7
PZ
1721 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1722 false);
72246da4
FB
1723 if (ret) {
1724 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
d7be2952 1725 goto err1;
72246da4
FB
1726 }
1727
1728 /* begin to receive SETUP packets */
c7fcdeb2 1729 dwc->ep0state = EP0_SETUP_PHASE;
72246da4
FB
1730 dwc3_ep0_out_start(dwc);
1731
8698e2ac
FB
1732 dwc3_gadget_enable_irq(dwc);
1733
72246da4
FB
1734 return 0;
1735
b0d7ffd4 1736err1:
d7be2952 1737 __dwc3_gadget_ep_disable(dwc->eps[0]);
b0d7ffd4
FB
1738
1739err0:
72246da4
FB
1740 return ret;
1741}
1742
d7be2952
FB
1743static int dwc3_gadget_start(struct usb_gadget *g,
1744 struct usb_gadget_driver *driver)
72246da4
FB
1745{
1746 struct dwc3 *dwc = gadget_to_dwc(g);
1747 unsigned long flags;
d7be2952 1748 int ret = 0;
8698e2ac 1749 int irq;
72246da4 1750
d7be2952
FB
1751 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1752 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1753 IRQF_SHARED, "dwc3", dwc->ev_buf);
1754 if (ret) {
1755 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1756 irq, ret);
1757 goto err0;
1758 }
3f308d17 1759 dwc->irq_gadget = irq;
d7be2952 1760
72246da4 1761 spin_lock_irqsave(&dwc->lock, flags);
d7be2952
FB
1762 if (dwc->gadget_driver) {
1763 dev_err(dwc->dev, "%s is already bound to %s\n",
1764 dwc->gadget.name,
1765 dwc->gadget_driver->driver.name);
1766 ret = -EBUSY;
1767 goto err1;
1768 }
1769
1770 dwc->gadget_driver = driver;
1771
fc8bb91b
FB
1772 if (pm_runtime_active(dwc->dev))
1773 __dwc3_gadget_start(dwc);
1774
d7be2952
FB
1775 spin_unlock_irqrestore(&dwc->lock, flags);
1776
1777 return 0;
1778
1779err1:
1780 spin_unlock_irqrestore(&dwc->lock, flags);
1781 free_irq(irq, dwc);
1782
1783err0:
1784 return ret;
1785}
72246da4 1786
d7be2952
FB
1787static void __dwc3_gadget_stop(struct dwc3 *dwc)
1788{
8698e2ac 1789 dwc3_gadget_disable_irq(dwc);
72246da4
FB
1790 __dwc3_gadget_ep_disable(dwc->eps[0]);
1791 __dwc3_gadget_ep_disable(dwc->eps[1]);
d7be2952 1792}
72246da4 1793
d7be2952
FB
1794static int dwc3_gadget_stop(struct usb_gadget *g)
1795{
1796 struct dwc3 *dwc = gadget_to_dwc(g);
1797 unsigned long flags;
72246da4 1798
d7be2952
FB
1799 spin_lock_irqsave(&dwc->lock, flags);
1800 __dwc3_gadget_stop(dwc);
1801 dwc->gadget_driver = NULL;
72246da4
FB
1802 spin_unlock_irqrestore(&dwc->lock, flags);
1803
3f308d17 1804 free_irq(dwc->irq_gadget, dwc->ev_buf);
b0d7ffd4 1805
72246da4
FB
1806 return 0;
1807}
802fde98 1808
72246da4
FB
1809static const struct usb_gadget_ops dwc3_gadget_ops = {
1810 .get_frame = dwc3_gadget_get_frame,
1811 .wakeup = dwc3_gadget_wakeup,
1812 .set_selfpowered = dwc3_gadget_set_selfpowered,
1813 .pullup = dwc3_gadget_pullup,
1814 .udc_start = dwc3_gadget_start,
1815 .udc_stop = dwc3_gadget_stop,
1816};
1817
1818/* -------------------------------------------------------------------------- */
1819
6a1e3ef4
FB
1820static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1821 u8 num, u32 direction)
72246da4
FB
1822{
1823 struct dwc3_ep *dep;
6a1e3ef4 1824 u8 i;
72246da4 1825
6a1e3ef4 1826 for (i = 0; i < num; i++) {
d07fa665 1827 u8 epnum = (i << 1) | (direction ? 1 : 0);
72246da4 1828
72246da4 1829 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
734d5a53 1830 if (!dep)
72246da4 1831 return -ENOMEM;
72246da4
FB
1832
1833 dep->dwc = dwc;
1834 dep->number = epnum;
9aa62ae4 1835 dep->direction = !!direction;
2eb88016 1836 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
72246da4
FB
1837 dwc->eps[epnum] = dep;
1838
1839 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1840 (epnum & 1) ? "in" : "out");
6a1e3ef4 1841
72246da4 1842 dep->endpoint.name = dep->name;
74674cbf 1843 spin_lock_init(&dep->lock);
72246da4 1844
73815280 1845 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
653df35e 1846
72246da4 1847 if (epnum == 0 || epnum == 1) {
e117e742 1848 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
6048e4c6 1849 dep->endpoint.maxburst = 1;
72246da4
FB
1850 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1851 if (!epnum)
1852 dwc->gadget.ep0 = &dep->endpoint;
1853 } else {
1854 int ret;
1855
e117e742 1856 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
12d36c16 1857 dep->endpoint.max_streams = 15;
72246da4
FB
1858 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1859 list_add_tail(&dep->endpoint.ep_list,
1860 &dwc->gadget.ep_list);
1861
1862 ret = dwc3_alloc_trb_pool(dep);
25b8ff68 1863 if (ret)
72246da4 1864 return ret;
72246da4 1865 }
25b8ff68 1866
a474d3b7
RB
1867 if (epnum == 0 || epnum == 1) {
1868 dep->endpoint.caps.type_control = true;
1869 } else {
1870 dep->endpoint.caps.type_iso = true;
1871 dep->endpoint.caps.type_bulk = true;
1872 dep->endpoint.caps.type_int = true;
1873 }
1874
1875 dep->endpoint.caps.dir_in = !!direction;
1876 dep->endpoint.caps.dir_out = !direction;
1877
aa3342c8
FB
1878 INIT_LIST_HEAD(&dep->pending_list);
1879 INIT_LIST_HEAD(&dep->started_list);
72246da4
FB
1880 }
1881
1882 return 0;
1883}
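/*
 * Illustrative sketch, not part of the driver: the physical endpoint
 * numbering used above, assuming epnum = (i << 1) | direction. Even
 * physical endpoints are OUT, odd ones are IN, and physical endpoints
 * 0 and 1 together form the bi-directional control endpoint 0
 * (ep0out -> 0, ep0in -> 1, ep1out -> 2, ep1in -> 3, ...).
 */
static inline u8 dwc3_example_phys_epnum(u8 logical_ep, bool is_in)
{
	return (logical_ep << 1) | (is_in ? 1 : 0);
}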
1884
6a1e3ef4
FB
1885static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1886{
1887 int ret;
1888
1889 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1890
1891 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1892 if (ret < 0) {
73815280
FB
1893 dwc3_trace(trace_dwc3_gadget,
1894 "failed to allocate OUT endpoints");
6a1e3ef4
FB
1895 return ret;
1896 }
1897
1898 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1899 if (ret < 0) {
73815280
FB
1900 dwc3_trace(trace_dwc3_gadget,
1901 "failed to allocate IN endpoints");
6a1e3ef4
FB
1902 return ret;
1903 }
1904
1905 return 0;
1906}
1907
72246da4
FB
1908static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1909{
1910 struct dwc3_ep *dep;
1911 u8 epnum;
1912
1913 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1914 dep = dwc->eps[epnum];
6a1e3ef4
FB
1915 if (!dep)
1916 continue;
5bf8fae3
GC
1917 /*
1918 * Physical endpoints 0 and 1 are special; they form the
1919 * bi-directional USB endpoint 0.
1920 *
1921 * For those two physical endpoints, we don't allocate a TRB
1922		 * pool nor do we add them to the gadget's endpoint list. Due to
1923		 * that, we must also skip those two operations here, otherwise we
1924		 * would end up with all sorts of bugs when removing dwc3.ko.
1925 */
1926 if (epnum != 0 && epnum != 1) {
1927 dwc3_free_trb_pool(dep);
72246da4 1928 list_del(&dep->endpoint.ep_list);
5bf8fae3 1929 }
72246da4
FB
1930
1931 kfree(dep);
1932 }
1933}
1934
72246da4 1935/* -------------------------------------------------------------------------- */
e5caff68 1936
e5ba5ec8
PA
1937static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1938 struct dwc3_request *req, struct dwc3_trb *trb,
72246da4
FB
1939 const struct dwc3_event_depevt *event, int status)
1940{
72246da4
FB
1941 unsigned int count;
1942 unsigned int s_pkt = 0;
d6d6ec7b 1943 unsigned int trb_status;
72246da4 1944
68d34c8a 1945 dep->queued_requests--;
2c4cbe6e
FB
1946 trace_dwc3_complete_trb(dep, trb);
1947
e5ba5ec8
PA
1948 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1949 /*
1950 * We continue despite the error. There is not much we
1951 * can do. If we don't clean it up we loop forever. If
1952 * we skip the TRB then it gets overwritten after a
1953 * while since we use them in a ring buffer. A BUG()
1954		 * would help. Let's hope that if this occurs, someone
1955 * fixes the root cause instead of looking away :)
1956 */
1957 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1958 dep->name, trb);
1959 count = trb->size & DWC3_TRB_SIZE_MASK;
1960
1961 if (dep->direction) {
1962 if (count) {
1963 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1964 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
ec5e795c 1965 dwc3_trace(trace_dwc3_gadget,
60cfb37a 1966 "%s: incomplete IN transfer",
e5ba5ec8
PA
1967 dep->name);
1968 /*
1969				 * If a missed isoc occurred and there is
1970				 * no request queued, issue END
1971				 * TRANSFER so that the core generates the
1972				 * next xfernotready and we can issue
1973				 * a fresh START TRANSFER.
1974				 * If there are still queued requests,
1975				 * wait and do not issue either END
1976				 * or UPDATE TRANSFER; just attach the next
aa3342c8 1977				 * request in pending_list during
e5ba5ec8
PA
1978				 * giveback. If any future queued request
1979				 * is successfully transferred then we
1980				 * will issue UPDATE TRANSFER for all
aa3342c8 1981				 * requests in the pending_list.
e5ba5ec8
PA
1982 */
1983 dep->flags |= DWC3_EP_MISSED_ISOC;
1984 } else {
1985 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1986 dep->name);
1987 status = -ECONNRESET;
1988 }
1989 } else {
1990 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1991 }
1992 } else {
1993 if (count && (event->status & DEPEVT_STATUS_SHORT))
1994 s_pkt = 1;
1995 }
1996
1997 /*
1998	 * We assume here that we always receive the entire data block we
1999	 * asked for. That is, if we program RX to receive 4K but only 2K
2000	 * arrive, the TRB reports 2K remaining, request.actual becomes
2001	 * 4K - 2K = 2K, and we simply hand the request back to the
2002	 * gadget driver for further processing.
2003 */
2004 req->request.actual += req->request.length - count;
2005 if (s_pkt)
2006 return 1;
2007 if ((event->status & DEPEVT_STATUS_LST) &&
2008 (trb->ctrl & (DWC3_TRB_CTRL_LST |
2009 DWC3_TRB_CTRL_HWO)))
2010 return 1;
2011 if ((event->status & DEPEVT_STATUS_IOC) &&
2012 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2013 return 1;
2014 return 0;
2015}
2016
2017static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
2018 const struct dwc3_event_depevt *event, int status)
2019{
2020 struct dwc3_request *req;
2021 struct dwc3_trb *trb;
2022 unsigned int slot;
2023 unsigned int i;
2024 int ret;
2025
72246da4 2026 do {
aa3342c8 2027 req = next_request(&dep->started_list);
ac7bdcc1 2028 if (WARN_ON_ONCE(!req))
d115d705 2029 return 1;
ac7bdcc1 2030
d115d705
VS
2031 i = 0;
2032 do {
53fd8818 2033 slot = req->first_trb_index + i;
36b68aae 2034 if (slot == DWC3_TRB_NUM - 1)
d115d705
VS
2035 slot++;
2036 slot %= DWC3_TRB_NUM;
2037 trb = &dep->trb_pool[slot];
2038
2039 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2040 event, status);
2041 if (ret)
2042 break;
2043 } while (++i < req->request.num_mapped_sgs);
2044
2045 dwc3_gadget_giveback(dep, req, status);
e5ba5ec8
PA
2046
2047 if (ret)
72246da4 2048 break;
d115d705 2049 } while (1);
72246da4 2050
4cb42217
FB
2051 /*
2052 * Our endpoint might get disabled by another thread during
2053	 * dwc3_gadget_giveback(). If that happens, we simply return 1
2054	 * early so that the DWC3_EP_BUSY flag gets cleared.
2055 */
2056 if (!dep->endpoint.desc)
2057 return 1;
2058
cdc359dd 2059 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
aa3342c8
FB
2060 list_empty(&dep->started_list)) {
2061 if (list_empty(&dep->pending_list)) {
cdc359dd
PA
2062 /*
2063 * If there is no entry in request list then do
2064 * not issue END TRANSFER now. Just set PENDING
2065 * flag, so that END TRANSFER is issued when an
2066 * entry is added into request list.
2067 */
2068 dep->flags = DWC3_EP_PENDING_REQUEST;
2069 } else {
b992e681 2070 dwc3_stop_active_transfer(dwc, dep->number, true);
cdc359dd
PA
2071 dep->flags = DWC3_EP_ENABLED;
2072 }
7efea86c
PA
2073 return 1;
2074 }
2075
9cad39fe
KL
2076 if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
2077 if ((event->status & DEPEVT_STATUS_IOC) &&
2078 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2079 return 0;
72246da4
FB
2080 return 1;
2081}
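/*
 * Illustrative sketch, not part of the driver: the TRB slot arithmetic
 * used in dwc3_cleanup_done_reqs() above. The last slot of the pool
 * (DWC3_TRB_NUM - 1) is reserved for the link TRB that closes the ring,
 * so walking completed TRBs skips it and wraps back to slot 0.
 */
static inline unsigned int dwc3_example_trb_slot(unsigned int first_trb_index,
						 unsigned int i)
{
	unsigned int slot = first_trb_index + i;

	if (slot == DWC3_TRB_NUM - 1)	/* link TRB, never holds data */
		slot++;

	return slot % DWC3_TRB_NUM;
}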
2082
2083static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
029d97ff 2084 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
72246da4
FB
2085{
2086 unsigned status = 0;
2087 int clean_busy;
e18b7975
FB
2088 u32 is_xfer_complete;
2089
2090 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
72246da4
FB
2091
2092 if (event->status & DEPEVT_STATUS_BUSERR)
2093 status = -ECONNRESET;
2094
1d046793 2095 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
4cb42217 2096 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
e18b7975 2097 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
72246da4 2098 dep->flags &= ~DWC3_EP_BUSY;
fae2b904
FB
2099
2100 /*
2101 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
2102 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
2103 */
2104 if (dwc->revision < DWC3_REVISION_183A) {
2105 u32 reg;
2106 int i;
2107
2108 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
348e026f 2109 dep = dwc->eps[i];
fae2b904
FB
2110
2111 if (!(dep->flags & DWC3_EP_ENABLED))
2112 continue;
2113
aa3342c8 2114 if (!list_empty(&dep->started_list))
fae2b904
FB
2115 return;
2116 }
2117
2118 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2119 reg |= dwc->u1u2;
2120 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2121
2122 dwc->u1u2 = 0;
2123 }
8a1a9c9e 2124
4cb42217
FB
2125 /*
2126 * Our endpoint might get disabled by another thread during
2127	 * dwc3_gadget_giveback(). If that happens, we simply return
2128	 * early instead of touching a now-invalid endpoint.
2129 */
2130 if (!dep->endpoint.desc)
2131 return;
2132
e6e709b7 2133 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
8a1a9c9e
FB
2134 int ret;
2135
4fae2e3e 2136 ret = __dwc3_gadget_kick_transfer(dep, 0);
8a1a9c9e
FB
2137 if (!ret || ret == -EBUSY)
2138 return;
2139 }
72246da4
FB
2140}
2141
72246da4
FB
2142static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2143 const struct dwc3_event_depevt *event)
2144{
2145 struct dwc3_ep *dep;
2146 u8 epnum = event->endpoint_number;
2147
2148 dep = dwc->eps[epnum];
2149
3336abb5
FB
2150 if (!(dep->flags & DWC3_EP_ENABLED))
2151 return;
2152
72246da4
FB
2153 if (epnum == 0 || epnum == 1) {
2154 dwc3_ep0_interrupt(dwc, event);
2155 return;
2156 }
2157
2158 switch (event->endpoint_event) {
2159 case DWC3_DEPEVT_XFERCOMPLETE:
b4996a86 2160 dep->resource_index = 0;
c2df85ca 2161
16e78db7 2162 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
ec5e795c 2163 dwc3_trace(trace_dwc3_gadget,
60cfb37a 2164 "%s is an Isochronous endpoint",
72246da4
FB
2165 dep->name);
2166 return;
2167 }
2168
029d97ff 2169 dwc3_endpoint_transfer_complete(dwc, dep, event);
72246da4
FB
2170 break;
2171 case DWC3_DEPEVT_XFERINPROGRESS:
029d97ff 2172 dwc3_endpoint_transfer_complete(dwc, dep, event);
72246da4
FB
2173 break;
2174 case DWC3_DEPEVT_XFERNOTREADY:
16e78db7 2175 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
2176 dwc3_gadget_start_isoc(dwc, dep, event);
2177 } else {
6bb4fe12 2178 int active;
72246da4
FB
2179 int ret;
2180
6bb4fe12
FB
2181 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
2182
73815280 2183 dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
6bb4fe12 2184 dep->name, active ? "Transfer Active"
72246da4
FB
2185 : "Transfer Not Active");
2186
4fae2e3e 2187 ret = __dwc3_gadget_kick_transfer(dep, 0);
72246da4
FB
2188 if (!ret || ret == -EBUSY)
2189 return;
2190
ec5e795c 2191 dwc3_trace(trace_dwc3_gadget,
60cfb37a 2192 "%s: failed to kick transfers",
72246da4
FB
2193 dep->name);
2194 }
2195
879631aa
FB
2196 break;
2197 case DWC3_DEPEVT_STREAMEVT:
16e78db7 2198 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
879631aa
FB
2199 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2200 dep->name);
2201 return;
2202 }
2203
2204 switch (event->status) {
2205 case DEPEVT_STREAMEVT_FOUND:
73815280
FB
2206 dwc3_trace(trace_dwc3_gadget,
2207 "Stream %d found and started",
879631aa
FB
2208 event->parameters);
2209
2210 break;
2211 case DEPEVT_STREAMEVT_NOTFOUND:
2212 /* FALLTHROUGH */
2213 default:
ec5e795c 2214 dwc3_trace(trace_dwc3_gadget,
60cfb37a 2215 "unable to find suitable stream");
879631aa 2216 }
72246da4
FB
2217 break;
2218 case DWC3_DEPEVT_RXTXFIFOEVT:
60cfb37a 2219 dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
72246da4 2220 break;
72246da4 2221 case DWC3_DEPEVT_EPCMDCMPLT:
73815280 2222 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
72246da4
FB
2223 break;
2224 }
2225}
2226
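/*
 * Note (assumption, not from the original comments): the callbacks into
 * the gadget driver below are made with dwc->lock temporarily dropped.
 * The function driver may call back into endpoint operations that take
 * dwc->lock themselves, so keeping the lock held across the callback
 * could deadlock.
 */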
2227static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2228{
2229 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2230 spin_unlock(&dwc->lock);
2231 dwc->gadget_driver->disconnect(&dwc->gadget);
2232 spin_lock(&dwc->lock);
2233 }
2234}
2235
bc5ba2e0
FB
2236static void dwc3_suspend_gadget(struct dwc3 *dwc)
2237{
73a30bfc 2238 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
bc5ba2e0
FB
2239 spin_unlock(&dwc->lock);
2240 dwc->gadget_driver->suspend(&dwc->gadget);
2241 spin_lock(&dwc->lock);
2242 }
2243}
2244
2245static void dwc3_resume_gadget(struct dwc3 *dwc)
2246{
73a30bfc 2247 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
bc5ba2e0
FB
2248 spin_unlock(&dwc->lock);
2249 dwc->gadget_driver->resume(&dwc->gadget);
5c7b3b02 2250 spin_lock(&dwc->lock);
8e74475b
FB
2251 }
2252}
2253
2254static void dwc3_reset_gadget(struct dwc3 *dwc)
2255{
2256 if (!dwc->gadget_driver)
2257 return;
2258
2259 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2260 spin_unlock(&dwc->lock);
2261 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
bc5ba2e0
FB
2262 spin_lock(&dwc->lock);
2263 }
2264}
2265
b992e681 2266static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
72246da4
FB
2267{
2268 struct dwc3_ep *dep;
2269 struct dwc3_gadget_ep_cmd_params params;
2270 u32 cmd;
2271 int ret;
2272
2273 dep = dwc->eps[epnum];
2274
b4996a86 2275 if (!dep->resource_index)
3daf74d7
PA
2276 return;
2277
57911504
PA
2278 /*
2279 * NOTICE: We are violating what the Databook says about the
2280 * EndTransfer command. Ideally we would _always_ wait for the
2281 * EndTransfer Command Completion IRQ, but that's causing too
2282 * much trouble synchronizing between us and gadget driver.
2283 *
2284 * We have discussed this with the IP Provider and it was
2285 * suggested to giveback all requests here, but give HW some
2286 * extra time to synchronize with the interconnect. We're using
dc93b41a 2287 * an arbitrary 100us delay for that.
57911504
PA
2288 *
2289 * Note also that a similar handling was tested by Synopsys
2290 * (thanks a lot Paul) and nothing bad has come out of it.
2291 * In short, what we're doing is:
2292 *
2293 * - Issue EndTransfer WITH CMDIOC bit set
2294 * - Wait 100us
2295 */
2296
3daf74d7 2297 cmd = DWC3_DEPCMD_ENDTRANSFER;
b992e681
PZ
2298 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2299 cmd |= DWC3_DEPCMD_CMDIOC;
b4996a86 2300 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
3daf74d7 2301 memset(&params, 0, sizeof(params));
2cd4718d 2302 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
3daf74d7 2303 WARN_ON_ONCE(ret);
b4996a86 2304 dep->resource_index = 0;
041d81f4 2305 dep->flags &= ~DWC3_EP_BUSY;
57911504 2306 udelay(100);
72246da4
FB
2307}
2308
2309static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2310{
2311 u32 epnum;
2312
2313 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2314 struct dwc3_ep *dep;
2315
2316 dep = dwc->eps[epnum];
6a1e3ef4
FB
2317 if (!dep)
2318 continue;
2319
72246da4
FB
2320 if (!(dep->flags & DWC3_EP_ENABLED))
2321 continue;
2322
624407f9 2323 dwc3_remove_requests(dwc, dep);
72246da4
FB
2324 }
2325}
2326
2327static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2328{
2329 u32 epnum;
2330
2331 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2332 struct dwc3_ep *dep;
72246da4
FB
2333 int ret;
2334
2335 dep = dwc->eps[epnum];
6a1e3ef4
FB
2336 if (!dep)
2337 continue;
72246da4
FB
2338
2339 if (!(dep->flags & DWC3_EP_STALL))
2340 continue;
2341
2342 dep->flags &= ~DWC3_EP_STALL;
2343
50c763f8 2344 ret = dwc3_send_clear_stall_ep_cmd(dep);
72246da4
FB
2345 WARN_ON_ONCE(ret);
2346 }
2347}
2348
2349static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2350{
c4430a26
FB
2351 int reg;
2352
72246da4
FB
2353 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2354 reg &= ~DWC3_DCTL_INITU1ENA;
2355 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2356
2357 reg &= ~DWC3_DCTL_INITU2ENA;
2358 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
72246da4 2359
72246da4
FB
2360 dwc3_disconnect_gadget(dwc);
2361
2362 dwc->gadget.speed = USB_SPEED_UNKNOWN;
df62df56 2363 dwc->setup_packet_pending = false;
06a374ed 2364 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
fc8bb91b
FB
2365
2366 dwc->connected = false;
72246da4
FB
2367}
2368
72246da4
FB
2369static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2370{
2371 u32 reg;
2372
fc8bb91b
FB
2373 dwc->connected = true;
2374
df62df56
FB
2375 /*
2376 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2377 * would cause a missing Disconnect Event if there's a
2378 * pending Setup Packet in the FIFO.
2379 *
2380 * There's no suggested workaround on the official Bug
2381 * report, which states that "unless the driver/application
2382 * is doing any special handling of a disconnect event,
2383 * there is no functional issue".
2384 *
2385 * Unfortunately, it turns out that we _do_ some special
2386 * handling of a disconnect event, namely complete all
2387 * pending transfers, notify gadget driver of the
2388 * disconnection, and so on.
2389 *
2390 * Our suggested workaround is to follow the Disconnect
2391 * Event steps here, instead, based on a setup_packet_pending
b5d335e5
FB
2392 * flag. Such flag gets set whenever we have a SETUP_PENDING
2393 * status for EP0 TRBs and gets cleared on XferComplete for the
df62df56
FB
2394 * same endpoint.
2395 *
2396 * Refers to:
2397 *
2398 * STAR#9000466709: RTL: Device : Disconnect event not
2399 * generated if setup packet pending in FIFO
2400 */
2401 if (dwc->revision < DWC3_REVISION_188A) {
2402 if (dwc->setup_packet_pending)
2403 dwc3_gadget_disconnect_interrupt(dwc);
2404 }
2405
8e74475b 2406 dwc3_reset_gadget(dwc);
72246da4
FB
2407
2408 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2409 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2410 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3b637367 2411 dwc->test_mode = false;
72246da4
FB
2412
2413 dwc3_stop_active_transfers(dwc);
2414 dwc3_clear_stall_all_ep(dwc);
2415
2416 /* Reset device address to zero */
2417 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2418 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2419 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
72246da4
FB
2420}
2421
2422static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2423{
2424 u32 reg;
2425 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2426
2427 /*
2428	 * We change the clock only at SuperSpeed; the exact reason is not
2429	 * documented, but it is presumably part of the power-saving scheme.
2430 */
2431
ee5cd41c
JY
2432 if ((speed != DWC3_DSTS_SUPERSPEED) &&
2433 (speed != DWC3_DSTS_SUPERSPEED_PLUS))
72246da4
FB
2434 return;
2435
2436 /*
2437 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2438 * each time on Connect Done.
2439 */
2440 if (!usb30_clock)
2441 return;
2442
2443 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2444 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2445 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2446}
2447
72246da4
FB
2448static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2449{
72246da4
FB
2450 struct dwc3_ep *dep;
2451 int ret;
2452 u32 reg;
2453 u8 speed;
2454
72246da4
FB
2455 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2456 speed = reg & DWC3_DSTS_CONNECTSPD;
2457 dwc->speed = speed;
2458
2459 dwc3_update_ram_clk_sel(dwc, speed);
2460
2461 switch (speed) {
2da9ad76 2462 case DWC3_DSTS_SUPERSPEED_PLUS:
7580862b
JY
2463 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2464 dwc->gadget.ep0->maxpacket = 512;
2465 dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
2466 break;
2da9ad76 2467 case DWC3_DSTS_SUPERSPEED:
05870c5b
FB
2468 /*
2469 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2470 * would cause a missing USB3 Reset event.
2471 *
2472 * In such situations, we should force a USB3 Reset
2473 * event by calling our dwc3_gadget_reset_interrupt()
2474 * routine.
2475 *
2476 * Refers to:
2477 *
2478 * STAR#9000483510: RTL: SS : USB3 reset event may
2479 * not be generated always when the link enters poll
2480 */
2481 if (dwc->revision < DWC3_REVISION_190A)
2482 dwc3_gadget_reset_interrupt(dwc);
2483
72246da4
FB
2484 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2485 dwc->gadget.ep0->maxpacket = 512;
2486 dwc->gadget.speed = USB_SPEED_SUPER;
2487 break;
2da9ad76 2488 case DWC3_DSTS_HIGHSPEED:
72246da4
FB
2489 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2490 dwc->gadget.ep0->maxpacket = 64;
2491 dwc->gadget.speed = USB_SPEED_HIGH;
2492 break;
2da9ad76
JY
2493 case DWC3_DSTS_FULLSPEED2:
2494 case DWC3_DSTS_FULLSPEED1:
72246da4
FB
2495 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2496 dwc->gadget.ep0->maxpacket = 64;
2497 dwc->gadget.speed = USB_SPEED_FULL;
2498 break;
2da9ad76 2499 case DWC3_DSTS_LOWSPEED:
72246da4
FB
2500 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2501 dwc->gadget.ep0->maxpacket = 8;
2502 dwc->gadget.speed = USB_SPEED_LOW;
2503 break;
2504 }
2505
2b758350
PA
2506 /* Enable USB2 LPM Capability */
2507
ee5cd41c 2508 if ((dwc->revision > DWC3_REVISION_194A) &&
2da9ad76
JY
2509 (speed != DWC3_DSTS_SUPERSPEED) &&
2510 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
2b758350
PA
2511 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2512 reg |= DWC3_DCFG_LPM_CAP;
2513 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2514
2515 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2516 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2517
460d098c 2518 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2b758350 2519
80caf7d2
HR
2520 /*
2521		 * On dwc3 revisions >= 2.40a, when the LPM Erratum is enabled and
2522		 * DCFG.LPMCap is set, the core responds with an ACK if the
2523		 * BESL value in the LPM token is less than or equal to the LPM
2524		 * NYET threshold.
2525 */
2526 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2527 && dwc->has_lpm_erratum,
2528 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2529
2530 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2531 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2532
356363bf
FB
2533 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2534 } else {
2535 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2536 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2b758350
PA
2537 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2538 }
2539
72246da4 2540 dep = dwc->eps[0];
265b70a7
PZ
2541 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2542 false);
72246da4
FB
2543 if (ret) {
2544 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2545 return;
2546 }
2547
2548 dep = dwc->eps[1];
265b70a7
PZ
2549 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2550 false);
72246da4
FB
2551 if (ret) {
2552 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2553 return;
2554 }
2555
2556 /*
2557 * Configure PHY via GUSB3PIPECTLn if required.
2558 *
2559 * Update GTXFIFOSIZn
2560 *
2561 * In both cases reset values should be sufficient.
2562 */
2563}
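/*
 * Illustrative sketch, not part of the driver: the connect-speed
 * handling above, condensed into a table. The dwc3_example_* names are
 * made up for this sketch; the mapping itself mirrors the switch
 * statement in dwc3_gadget_conndone_interrupt().
 */
struct dwc3_example_speed_map {
	u8			dsts_speed;	/* DWC3_DSTS_* connect speed */
	u16			ep0_maxpacket;	/* wMaxPacketSize for ep0 */
	enum usb_device_speed	gadget_speed;
};

static const struct dwc3_example_speed_map dwc3_example_speeds[] __maybe_unused = {
	{ DWC3_DSTS_SUPERSPEED_PLUS,	512,	USB_SPEED_SUPER_PLUS },
	{ DWC3_DSTS_SUPERSPEED,		512,	USB_SPEED_SUPER },
	{ DWC3_DSTS_HIGHSPEED,		64,	USB_SPEED_HIGH },
	{ DWC3_DSTS_FULLSPEED1,		64,	USB_SPEED_FULL },
	{ DWC3_DSTS_FULLSPEED2,		64,	USB_SPEED_FULL },
	{ DWC3_DSTS_LOWSPEED,		8,	USB_SPEED_LOW },
};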
2564
2565static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2566{
72246da4
FB
2567 /*
2568 * TODO take core out of low power mode when that's
2569 * implemented.
2570 */
2571
ad14d4e0
JL
2572 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2573 spin_unlock(&dwc->lock);
2574 dwc->gadget_driver->resume(&dwc->gadget);
2575 spin_lock(&dwc->lock);
2576 }
72246da4
FB
2577}
2578
2579static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2580 unsigned int evtinfo)
2581{
fae2b904 2582 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
0b0cc1cd
FB
2583 unsigned int pwropt;
2584
2585 /*
2586 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2587 * Hibernation mode enabled which would show up when device detects
2588 * host-initiated U3 exit.
2589 *
2590 * In that case, device will generate a Link State Change Interrupt
2591 * from U3 to RESUME which is only necessary if Hibernation is
2592 * configured in.
2593 *
2594 * There are no functional changes due to such spurious event and we
2595 * just need to ignore it.
2596 *
2597 * Refers to:
2598 *
2599 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2600 * operational mode
2601 */
2602 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2603 if ((dwc->revision < DWC3_REVISION_250A) &&
2604 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2605 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2606 (next == DWC3_LINK_STATE_RESUME)) {
73815280
FB
2607 dwc3_trace(trace_dwc3_gadget,
2608 "ignoring transition U3 -> Resume");
0b0cc1cd
FB
2609 return;
2610 }
2611 }
fae2b904
FB
2612
2613 /*
2614	 * WORKAROUND: DWC3 revisions <1.83a have an issue where, depending
2615	 * on the link partner, the USB session might do multiple entries/exits
2616	 * of low power states before a transfer takes place.
2617 *
2618 * Due to this problem, we might experience lower throughput. The
2619 * suggested workaround is to disable DCTL[12:9] bits if we're
2620 * transitioning from U1/U2 to U0 and enable those bits again
2621 * after a transfer completes and there are no pending transfers
2622 * on any of the enabled endpoints.
2623 *
2624 * This is the first half of that workaround.
2625 *
2626 * Refers to:
2627 *
2628 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2629 * core send LGO_Ux entering U0
2630 */
2631 if (dwc->revision < DWC3_REVISION_183A) {
2632 if (next == DWC3_LINK_STATE_U0) {
2633 u32 u1u2;
2634 u32 reg;
2635
2636 switch (dwc->link_state) {
2637 case DWC3_LINK_STATE_U1:
2638 case DWC3_LINK_STATE_U2:
2639 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2640 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2641 | DWC3_DCTL_ACCEPTU2ENA
2642 | DWC3_DCTL_INITU1ENA
2643 | DWC3_DCTL_ACCEPTU1ENA);
2644
2645 if (!dwc->u1u2)
2646 dwc->u1u2 = reg & u1u2;
2647
2648 reg &= ~u1u2;
2649
2650 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2651 break;
2652 default:
2653 /* do nothing */
2654 break;
2655 }
2656 }
2657 }
2658
bc5ba2e0
FB
2659 switch (next) {
2660 case DWC3_LINK_STATE_U1:
2661 if (dwc->speed == USB_SPEED_SUPER)
2662 dwc3_suspend_gadget(dwc);
2663 break;
2664 case DWC3_LINK_STATE_U2:
2665 case DWC3_LINK_STATE_U3:
2666 dwc3_suspend_gadget(dwc);
2667 break;
2668 case DWC3_LINK_STATE_RESUME:
2669 dwc3_resume_gadget(dwc);
2670 break;
2671 default:
2672 /* do nothing */
2673 break;
2674 }
2675
e57ebc1d 2676 dwc->link_state = next;
72246da4
FB
2677}
2678
e1dadd3b
FB
2679static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2680 unsigned int evtinfo)
2681{
2682 unsigned int is_ss = evtinfo & BIT(4);
2683
2684	/*
2685	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2686	 * has a known issue which can cause USB CV TD.9.23 to fail
2687 * randomly.
2688 *
2689 * Because of this issue, core could generate bogus hibernation
2690 * events which SW needs to ignore.
2691 *
2692 * Refers to:
2693 *
2694 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2695 * Device Fallback from SuperSpeed
2696 */
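	/*
	 * The event is one of the bogus ones described above if its
	 * SuperSpeed flag does not match our current speed; ignore it then.
	 */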
2697 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2698 return;
2699
2700 /* enter hibernation here */
2701}
2702
72246da4
FB
2703static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2704 const struct dwc3_event_devt *event)
2705{
2706 switch (event->type) {
2707 case DWC3_DEVICE_EVENT_DISCONNECT:
2708 dwc3_gadget_disconnect_interrupt(dwc);
2709 break;
2710 case DWC3_DEVICE_EVENT_RESET:
2711 dwc3_gadget_reset_interrupt(dwc);
2712 break;
2713 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2714 dwc3_gadget_conndone_interrupt(dwc);
2715 break;
2716 case DWC3_DEVICE_EVENT_WAKEUP:
2717 dwc3_gadget_wakeup_interrupt(dwc);
2718 break;
e1dadd3b
FB
2719 case DWC3_DEVICE_EVENT_HIBER_REQ:
2720 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2721 "unexpected hibernation event\n"))
2722 break;
2723
2724 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2725 break;
72246da4
FB
2726 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2727 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2728 break;
2729 case DWC3_DEVICE_EVENT_EOPF:
73815280 2730 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
72246da4
FB
2731 break;
2732 case DWC3_DEVICE_EVENT_SOF:
73815280 2733 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
72246da4
FB
2734 break;
2735 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
73815280 2736 dwc3_trace(trace_dwc3_gadget, "Erratic Error");
72246da4
FB
2737 break;
2738 case DWC3_DEVICE_EVENT_CMD_CMPL:
73815280 2739 dwc3_trace(trace_dwc3_gadget, "Command Complete");
72246da4
FB
2740 break;
2741 case DWC3_DEVICE_EVENT_OVERFLOW:
73815280 2742 dwc3_trace(trace_dwc3_gadget, "Overflow");
72246da4
FB
2743 break;
2744 default:
e9f2aa87 2745 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
72246da4
FB
2746 }
2747}
2748
2749static void dwc3_process_event_entry(struct dwc3 *dwc,
2750 const union dwc3_event *event)
2751{
2c4cbe6e
FB
2752 trace_dwc3_event(event->raw);
2753
72246da4
FB
2754 /* Endpoint IRQ, handle it and return early */
2755 if (event->type.is_devspec == 0) {
2756 /* depevt */
2757 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2758 }
2759
2760 switch (event->type.type) {
2761 case DWC3_EVENT_TYPE_DEV:
2762 dwc3_gadget_interrupt(dwc, &event->devt);
2763 break;
2764 /* REVISIT what to do with Carkit and I2C events ? */
2765 default:
2766 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2767 }
2768}
2769
dea520a4 2770static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
b15a762f 2771{
dea520a4 2772 struct dwc3 *dwc = evt->dwc;
b15a762f 2773 irqreturn_t ret = IRQ_NONE;
f42f2447 2774 int left;
e8adfc30 2775 u32 reg;
b15a762f 2776
f42f2447 2777 left = evt->count;
b15a762f 2778
f42f2447
FB
2779 if (!(evt->flags & DWC3_EVENT_PENDING))
2780 return IRQ_NONE;
b15a762f 2781
f42f2447
FB
2782 while (left > 0) {
2783 union dwc3_event event;
b15a762f 2784
f42f2447 2785 event.raw = *(u32 *) (evt->buf + evt->lpos);
b15a762f 2786
f42f2447 2787 dwc3_process_event_entry(dwc, &event);
b15a762f 2788
f42f2447
FB
2789 /*
2790		 * FIXME: we wrap around correctly to the next entry because
2791		 * almost all entries are 4 bytes in size. There is one
2792		 * 12-byte entry, which is a regular entry followed by 8
2793		 * bytes of data. It is not yet clear how such an entry is
2794		 * laid out when it crosses the buffer boundary, so that
2795		 * case will have to be revisited once we try to handle
2796		 * that event type.
2797 */
2798 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2799 left -= 4;
b15a762f 2800
660e9bde 2801 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
f42f2447 2802 }
b15a762f 2803
f42f2447
FB
2804 evt->count = 0;
2805 evt->flags &= ~DWC3_EVENT_PENDING;
2806 ret = IRQ_HANDLED;
b15a762f 2807
f42f2447 2808 /* Unmask interrupt */
660e9bde 2809 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
f42f2447 2810 reg &= ~DWC3_GEVNTSIZ_INTMASK;
660e9bde 2811 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
b15a762f 2812
f42f2447
FB
2813 return ret;
2814}
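/*
 * Illustrative sketch, not part of the driver: the ring-buffer arithmetic
 * used in dwc3_process_event_buf() above. Each event entry is 4 bytes;
 * lpos is the software read position and wraps at DWC3_EVENT_BUFFERS_SIZE,
 * while writing 4 to GEVNTCOUNT after each entry hands that slot back to
 * the controller.
 */
static inline unsigned int dwc3_example_next_lpos(unsigned int lpos)
{
	return (lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
}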
e8adfc30 2815
dea520a4 2816static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
f42f2447 2817{
dea520a4
FB
2818 struct dwc3_event_buffer *evt = _evt;
2819 struct dwc3 *dwc = evt->dwc;
e5f68b4a 2820 unsigned long flags;
f42f2447 2821 irqreturn_t ret = IRQ_NONE;
f42f2447 2822
e5f68b4a 2823 spin_lock_irqsave(&dwc->lock, flags);
dea520a4 2824 ret = dwc3_process_event_buf(evt);
e5f68b4a 2825 spin_unlock_irqrestore(&dwc->lock, flags);
b15a762f
FB
2826
2827 return ret;
2828}
2829
dea520a4 2830static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
72246da4 2831{
dea520a4 2832 struct dwc3 *dwc = evt->dwc;
72246da4 2833 u32 count;
e8adfc30 2834 u32 reg;
72246da4 2835
fc8bb91b
FB
2836 if (pm_runtime_suspended(dwc->dev)) {
2837 pm_runtime_get(dwc->dev);
2838 disable_irq_nosync(dwc->irq_gadget);
2839 dwc->pending_events = true;
2840 return IRQ_HANDLED;
2841 }
2842
660e9bde 2843 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
72246da4
FB
2844 count &= DWC3_GEVNTCOUNT_MASK;
2845 if (!count)
2846 return IRQ_NONE;
2847
b15a762f
FB
2848 evt->count = count;
2849 evt->flags |= DWC3_EVENT_PENDING;
72246da4 2850
e8adfc30 2851 /* Mask interrupt */
660e9bde 2852 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
e8adfc30 2853 reg |= DWC3_GEVNTSIZ_INTMASK;
660e9bde 2854 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
e8adfc30 2855
b15a762f 2856 return IRQ_WAKE_THREAD;
72246da4
FB
2857}
2858
dea520a4 2859static irqreturn_t dwc3_interrupt(int irq, void *_evt)
72246da4 2860{
dea520a4 2861 struct dwc3_event_buffer *evt = _evt;
72246da4 2862
dea520a4 2863 return dwc3_check_event_buf(evt);
72246da4
FB
2864}
2865
2866/**
2867 * dwc3_gadget_init - Initializes gadget related registers
1d046793 2868 * @dwc: pointer to our controller context structure
72246da4
FB
2869 *
2870 * Returns 0 on success otherwise negative errno.
2871 */
41ac7b3a 2872int dwc3_gadget_init(struct dwc3 *dwc)
72246da4 2873{
72246da4 2874 int ret;
72246da4
FB
2875
2876 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2877 &dwc->ctrl_req_addr, GFP_KERNEL);
2878 if (!dwc->ctrl_req) {
2879 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2880 ret = -ENOMEM;
2881 goto err0;
2882 }
2883
2abd9d5f 2884 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
72246da4
FB
2885 &dwc->ep0_trb_addr, GFP_KERNEL);
2886 if (!dwc->ep0_trb) {
2887 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2888 ret = -ENOMEM;
2889 goto err1;
2890 }
2891
3ef35faf 2892 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
72246da4 2893 if (!dwc->setup_buf) {
72246da4
FB
2894 ret = -ENOMEM;
2895 goto err2;
2896 }
2897
5812b1c2 2898 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
3ef35faf
FB
2899 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2900 GFP_KERNEL);
5812b1c2
FB
2901 if (!dwc->ep0_bounce) {
2902 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2903 ret = -ENOMEM;
2904 goto err3;
2905 }
2906
04c03d10
FB
2907 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
2908 if (!dwc->zlp_buf) {
2909 ret = -ENOMEM;
2910 goto err4;
2911 }
2912
72246da4 2913 dwc->gadget.ops = &dwc3_gadget_ops;
72246da4 2914 dwc->gadget.speed = USB_SPEED_UNKNOWN;
eeb720fb 2915 dwc->gadget.sg_supported = true;
72246da4 2916 dwc->gadget.name = "dwc3-gadget";
6a4290cc 2917 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
72246da4 2918
b9e51b2b
BM
2919 /*
2920 * FIXME We might be setting max_speed to <SUPER, however versions
2921 * <2.20a of dwc3 have an issue with metastability (documented
2922 * elsewhere in this driver) which tells us we can't set max speed to
2923 * anything lower than SUPER.
2924 *
2925 * Because gadget.max_speed is only used by composite.c and function
2926 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
2927 * to happen so we avoid sending SuperSpeed Capability descriptor
2928	 * together with our BOS descriptor as that could confuse the host into
2929 * thinking we can handle super speed.
2930 *
2931 * Note that, in fact, we won't even support GetBOS requests when speed
2932	 * is less than super speed, because we don't yet have a means to tell
2933 * composite.c that we are USB 2.0 + LPM ECN.
2934 */
2935 if (dwc->revision < DWC3_REVISION_220A)
2936 dwc3_trace(trace_dwc3_gadget,
60cfb37a 2937 "Changing max_speed on rev %08x",
b9e51b2b
BM
2938 dwc->revision);
2939
2940 dwc->gadget.max_speed = dwc->maximum_speed;
2941
a4b9d94b
DC
2942 /*
2943 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2944 * on ep out.
2945 */
2946 dwc->gadget.quirk_ep_out_aligned_size = true;
2947
72246da4
FB
2948 /*
2949 * REVISIT: Here we should clear all pending IRQs to be
2950 * sure we're starting from a well known location.
2951 */
2952
2953 ret = dwc3_gadget_init_endpoints(dwc);
2954 if (ret)
04c03d10 2955 goto err5;
72246da4 2956
72246da4
FB
2957 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2958 if (ret) {
2959 dev_err(dwc->dev, "failed to register udc\n");
04c03d10 2960 goto err5;
72246da4
FB
2961 }
2962
2963 return 0;
2964
04c03d10
FB
2965err5:
2966 kfree(dwc->zlp_buf);
2967
5812b1c2 2968err4:
e1f80467 2969 dwc3_gadget_free_endpoints(dwc);
3ef35faf
FB
2970 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2971 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2972
72246da4 2973err3:
0fc9a1be 2974 kfree(dwc->setup_buf);
72246da4
FB
2975
2976err2:
2977	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2978 dwc->ep0_trb, dwc->ep0_trb_addr);
2979
2980err1:
2981 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2982 dwc->ctrl_req, dwc->ctrl_req_addr);
2983
2984err0:
2985 return ret;
2986}
2987
7415f17c
FB
2988/* -------------------------------------------------------------------------- */
2989
72246da4
FB
2990void dwc3_gadget_exit(struct dwc3 *dwc)
2991{
72246da4 2992 usb_del_gadget_udc(&dwc->gadget);
72246da4 2993
72246da4
FB
2994 dwc3_gadget_free_endpoints(dwc);
2995
3ef35faf
FB
2996 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2997 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2998
0fc9a1be 2999 kfree(dwc->setup_buf);
04c03d10 3000 kfree(dwc->zlp_buf);
72246da4
FB
3001
3002	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3003 dwc->ep0_trb, dwc->ep0_trb_addr);
3004
3005 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3006 dwc->ctrl_req, dwc->ctrl_req_addr);
72246da4 3007}
7415f17c 3008
0b0231aa 3009int dwc3_gadget_suspend(struct dwc3 *dwc)
7415f17c 3010{
9f8a67b6
FB
3011 int ret;
3012
9772b47a
RQ
3013 if (!dwc->gadget_driver)
3014 return 0;
3015
9f8a67b6
FB
3016 ret = dwc3_gadget_run_stop(dwc, false, false);
3017 if (ret < 0)
3018 return ret;
7415f17c 3019
9f8a67b6
FB
3020 dwc3_disconnect_gadget(dwc);
3021 __dwc3_gadget_stop(dwc);
7415f17c
FB
3022
3023 return 0;
3024}
3025
3026int dwc3_gadget_resume(struct dwc3 *dwc)
3027{
7415f17c
FB
3028 int ret;
3029
9772b47a
RQ
3030 if (!dwc->gadget_driver)
3031 return 0;
3032
9f8a67b6
FB
3033 ret = __dwc3_gadget_start(dwc);
3034 if (ret < 0)
7415f17c
FB
3035 goto err0;
3036
9f8a67b6
FB
3037 ret = dwc3_gadget_run_stop(dwc, true, false);
3038 if (ret < 0)
7415f17c
FB
3039 goto err1;
3040
7415f17c
FB
3041 return 0;
3042
3043err1:
9f8a67b6 3044 __dwc3_gadget_stop(dwc);
7415f17c
FB
3045
3046err0:
3047 return ret;
3048}
fc8bb91b
FB
3049
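/*
 * Note (assumption, not from the original comments): this pairs with the
 * pm_runtime_suspended() branch in dwc3_check_event_buf(). While the
 * controller is runtime-suspended, the hard IRQ handler only records
 * that events are pending and disables the interrupt line; once resumed,
 * the core is expected to call this helper to replay the interrupt and
 * re-enable the line.
 */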
3050void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
3051{
3052 if (dwc->pending_events) {
3053 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
3054 dwc->pending_events = false;
3055 enable_irq(dwc->irq_gadget);
3056 }
3057}