1/**
 2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 3 *
 4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 5 *
 6 * Authors: Felipe Balbi <balbi@ti.com>,
 7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 8 *
 9 * This program is free software: you can redistribute it and/or modify
 10 * it under the terms of the GNU General Public License version 2 of
 11 * the License as published by the Free Software Foundation.
 12 *
 13 * This program is distributed in the hope that it will be useful,
 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 16 * GNU General Public License for more details.
 17 */
18
19#include <linux/kernel.h>
20#include <linux/delay.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23#include <linux/platform_device.h>
24#include <linux/pm_runtime.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/list.h>
28#include <linux/dma-mapping.h>
29
30#include <linux/usb/ch9.h>
31#include <linux/usb/gadget.h>
32
33#include "debug.h"
34#include "core.h"
35#include "gadget.h"
36#include "io.h"
37
38/**
 39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 40 * @dwc: pointer to our context structure
 41 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
 42 *
 43 * Caller should take care of locking. This function will
 44 * return 0 on success or -EINVAL if an invalid Test Selector
 45 * is passed.
 46 */
47int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48{
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69}
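/*
 * For illustration (assuming the standard ch9 test selector values,
 * TEST_J = 1 through TEST_FORCE_EN = 5): the "mode << 1" above places
 * the selector into the DCTL.TSTCTRL field, e.g. TEST_PACKET (4) ends
 * up being written as 0x8 into that field.
 */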
70
71/**
 72 * dwc3_gadget_get_link_state - Gets current state of USB Link
 73 * @dwc: pointer to our context structure
 74 *
 75 * Caller should take care of locking. This function returns the
 76 * current link state as read from DSTS.USBLNKST.
 77 */
78int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79{
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85}
86
87/**
 88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 89 * @dwc: pointer to our context structure
 90 * @state: the state to put link into
 91 *
 92 * Caller should take care of locking. This function will
 93 * return 0 on success or -ETIMEDOUT.
 94 */
95int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
 96{
 97 int retries = 10000;
 98 u32 reg;
 99
 100 /*
 101 * Wait until device controller is ready. Only applies to 1.94a and
 102 * later RTL.
 103 */
 104 if (dwc->revision >= DWC3_REVISION_194A) {
 105 while (--retries) {
 106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
 107 if (reg & DWC3_DSTS_DCNRD)
 108 udelay(5);
 109 else
 110 break;
 111 }
 112
 113 if (retries <= 0)
 114 return -ETIMEDOUT;
 115 }
 116
 117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
 119
 120 /* set requested state */
 121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
 122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 123
 124 /*
 125 * The following code is racy when called from dwc3_gadget_wakeup,
 126 * and is not needed, at least on newer versions
 127 */
 128 if (dwc->revision >= DWC3_REVISION_194A)
 129 return 0;
 130
 131 /* wait for a change in DSTS */
 132 retries = 10000;
 133 while (--retries) {
 134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
 135
 136 if (DWC3_DSTS_USBLNKST(reg) == state)
 137 return 0;
 138
 139 udelay(5);
 140 }
 141
 142 dwc3_trace(trace_dwc3_gadget,
 143 "link state change request timed out");
 144
 145 return -ETIMEDOUT;
 146}
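/*
 * Typical usage (see __dwc3_gadget_wakeup() further below): to signal
 * remote wakeup, the caller requests DWC3_LINK_STATE_RECOV here and then
 * polls DSTS until the link reports U0 again.
 */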
147
148/**
 149 * dwc3_ep_inc_trb() - Increment a TRB index.
 150 * @index: Pointer to the TRB index to increment.
 151 *
 152 * The index should never point to the link TRB. After incrementing,
 153 * if it points to the link TRB, wrap around to the beginning. The
 154 * link TRB is always at the last TRB entry.
 155 */
156static void dwc3_ep_inc_trb(u8 *index)
 157{
 158 (*index)++;
 159 if (*index == (DWC3_TRB_NUM - 1))
 160 *index = 0;
 161}
 162
 163static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
 164{
 165 dwc3_ep_inc_trb(&dep->trb_enqueue);
 166}
 167
 168static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
 169{
 170 dwc3_ep_inc_trb(&dep->trb_dequeue);
 171}
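/*
 * Worked example (assuming DWC3_TRB_NUM == 256): trb_enqueue advances
 * 0, 1, ..., 253, 254 and then wraps back to 0, so index 255 -- the
 * link TRB slot -- is never handed out by dwc3_ep_inc_trb().
 */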
172
72246da4
FB
173void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
174 int status)
175{
176 struct dwc3 *dwc = dep->dwc;
e5ba5ec8 177 int i;
72246da4 178
aa3342c8 179 if (req->started) {
e5ba5ec8
PA
180 i = 0;
181 do {
ef966b9d 182 dwc3_ep_inc_deq(dep);
e5ba5ec8 183 } while(++i < req->request.num_mapped_sgs);
aa3342c8 184 req->started = false;
72246da4
FB
185 }
186 list_del(&req->list);
eeb720fb 187 req->trb = NULL;
72246da4
FB
188
189 if (req->request.status == -EINPROGRESS)
190 req->request.status = status;
191
0416e494
PA
192 if (dwc->ep0_bounced && dep->number == 0)
193 dwc->ep0_bounced = false;
194 else
195 usb_gadget_unmap_request(&dwc->gadget, &req->request,
196 req->direction);
72246da4 197
2c4cbe6e 198 trace_dwc3_gadget_giveback(req);
199
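 /*
 * The lock is dropped across the completion callback below because
 * gadget drivers commonly queue the next request from ->complete(),
 * which would otherwise recurse on dwc->lock.
 */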
200 spin_unlock(&dwc->lock);
304f7e5e 201 usb_gadget_giveback_request(&dep->endpoint, &req->request);
72246da4 202 spin_lock(&dwc->lock);
fc8bb91b
FB
203
204 if (dep->number > 1)
205 pm_runtime_put(dwc->dev);
72246da4
FB
206}
207
208int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
 209{
 210 u32 timeout = 500;
 211 int status = 0;
 212 int ret = 0;
 213 u32 reg;
 214
 215 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
 216 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
 217
 218 do {
 219 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
 220 if (!(reg & DWC3_DGCMD_CMDACT)) {
 221 status = DWC3_DGCMD_STATUS(reg);
 222 if (status)
 223 ret = -EINVAL;
 224 break;
 225 }
 226 } while (timeout--);
 227
 228 if (!timeout) {
 229 ret = -ETIMEDOUT;
 230 status = -ETIMEDOUT;
 231 }
 232
 233 trace_dwc3_gadget_generic_cmd(cmd, param, status);
 234
 235 return ret;
 236}
237
238static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
239
240int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
241 struct dwc3_gadget_ep_cmd_params *params)
72246da4 242{
2cd4718d 243 struct dwc3 *dwc = dep->dwc;
61d58242 244 u32 timeout = 500;
245 u32 reg;
246
0933df15 247 int cmd_status = 0;
2b0f11df 248 int susphy = false;
c0ca324d 249 int ret = -EINVAL;
72246da4 250
251 /*
 252 * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
 253 * we're issuing an endpoint command, we must check if
 254 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
 255 *
 256 * We will also restore the SUSPHY bit to its previous value before
 257 * returning, as stated by the same section of the databook.
 258 */
259 if (dwc->gadget.speed <= USB_SPEED_HIGH) {
260 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
261 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
262 susphy = true;
263 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
264 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
265 }
266 }
267
268 if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
269 int needs_wakeup;
270
271 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
272 dwc->link_state == DWC3_LINK_STATE_U2 ||
273 dwc->link_state == DWC3_LINK_STATE_U3);
274
275 if (unlikely(needs_wakeup)) {
276 ret = __dwc3_gadget_wakeup(dwc);
277 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
278 ret);
279 }
280 }
281
282 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
283 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
284 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
72246da4 285
2eb88016 286 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
72246da4 287 do {
2eb88016 288 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
72246da4 289 if (!(reg & DWC3_DEPCMD_CMDACT)) {
0933df15 290 cmd_status = DWC3_DEPCMD_STATUS(reg);
291
292 switch (cmd_status) {
293 case 0:
294 ret = 0;
295 break;
296 case DEPEVT_TRANSFER_NO_RESOURCE:
7b9cc7a2 297 ret = -EINVAL;
c0ca324d 298 break;
299 case DEPEVT_TRANSFER_BUS_EXPIRY:
300 /*
301 * SW issues START TRANSFER command to
302 * isochronous ep with future frame interval. If
303 * future interval time has already passed when
304 * core receives the command, it will respond
305 * with an error status of 'Bus Expiry'.
306 *
307 * Instead of always returning -EINVAL, let's
308 * give a hint to the gadget driver that this is
309 * the case by returning -EAGAIN.
310 */
311 ret = -EAGAIN;
312 break;
313 default:
314 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
315 }
316
c0ca324d 317 break;
72246da4 318 }
f6bb225b 319 } while (--timeout);
72246da4 320
f6bb225b 321 if (timeout == 0) {
f6bb225b 322 ret = -ETIMEDOUT;
0933df15 323 cmd_status = -ETIMEDOUT;
f6bb225b 324 }
c0ca324d 325
326 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
327
328 if (unlikely(susphy)) {
329 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
330 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
331 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
332 }
333
c0ca324d 334 return ret;
335}
336
50c763f8
JY
337static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
338{
339 struct dwc3 *dwc = dep->dwc;
340 struct dwc3_gadget_ep_cmd_params params;
341 u32 cmd = DWC3_DEPCMD_CLEARSTALL;
342
343 /*
344 * As of core revision 2.60a the recommended programming model
345 * is to set the ClearPendIN bit when issuing a Clear Stall EP
346 * command for IN endpoints. This is to prevent an issue where
347 * some (non-compliant) hosts may not send ACK TPs for pending
348 * IN transfers due to a mishandled error condition. Synopsys
349 * STAR 9000614252.
350 */
351 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
352 cmd |= DWC3_DEPCMD_CLEARPENDIN;
353
354 memset(&params, 0, sizeof(params));
355
2cd4718d 356 return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
50c763f8
JY
357}
358
72246da4 359static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
f6bafc6a 360 struct dwc3_trb *trb)
72246da4 361{
c439ef87 362 u32 offset = (char *) trb - (char *) dep->trb_pool;
72246da4
FB
363
364 return dep->trb_pool_dma + offset;
365}
366
367static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
368{
369 struct dwc3 *dwc = dep->dwc;
370
371 if (dep->trb_pool)
372 return 0;
373
72246da4
FB
374 dep->trb_pool = dma_alloc_coherent(dwc->dev,
375 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
376 &dep->trb_pool_dma, GFP_KERNEL);
377 if (!dep->trb_pool) {
378 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
379 dep->name);
380 return -ENOMEM;
381 }
382
383 return 0;
384}
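/*
 * Sizing note (assuming DWC3_TRB_NUM == 256 and a 16-byte struct
 * dwc3_trb with bpl/bph/size/ctrl fields): each endpoint gets a 4 KiB
 * coherent TRB pool here, with the last slot reserved for the link TRB
 * that __dwc3_gadget_ep_enable() sets up.
 */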
385
386static void dwc3_free_trb_pool(struct dwc3_ep *dep)
387{
388 struct dwc3 *dwc = dep->dwc;
389
390 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
391 dep->trb_pool, dep->trb_pool_dma);
392
393 dep->trb_pool = NULL;
394 dep->trb_pool_dma = 0;
395}
396
c4509601
JY
397static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
398
399/**
400 * dwc3_gadget_start_config - Configure EP resources
401 * @dwc: pointer to our controller context structure
402 * @dep: endpoint that is being enabled
403 *
404 * The assignment of transfer resources cannot perfectly follow the
405 * data book due to the fact that the controller driver does not have
406 * all knowledge of the configuration in advance. It is given this
407 * information piecemeal by the composite gadget framework after every
408 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
409 * programming model in this scenario can cause errors, for two
410 * reasons:
411 *
412 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
413 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
414 * multiple interfaces.
415 *
416 * 2) The databook does not mention doing more DEPXFERCFG for a new
 417 * endpoint on an alt setting (8.1.6).
418 *
419 * The following simplified method is used instead:
420 *
421 * All hardware endpoints can be assigned a transfer resource and this
422 * setting will stay persistent until either a core reset or
423 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
424 * do DEPXFERCFG for every hardware endpoint as well. We are
425 * guaranteed that there are as many transfer resources as endpoints.
426 *
427 * This function is called for each endpoint when it is being enabled
428 * but is triggered only when called for EP0-out, which always happens
429 * first, and which should only happen in one of the above conditions.
430 */
72246da4
FB
431static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
432{
433 struct dwc3_gadget_ep_cmd_params params;
434 u32 cmd;
c4509601
JY
435 int i;
436 int ret;
437
438 if (dep->number)
439 return 0;
72246da4
FB
440
441 memset(&params, 0x00, sizeof(params));
c4509601 442 cmd = DWC3_DEPCMD_DEPSTARTCFG;
72246da4 443
2cd4718d 444 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
c4509601
JY
445 if (ret)
446 return ret;
447
448 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
449 struct dwc3_ep *dep = dwc->eps[i];
72246da4 450
c4509601
JY
451 if (!dep)
452 continue;
453
454 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
455 if (ret)
456 return ret;
72246da4
FB
457 }
458
459 return 0;
460}
461
462static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
c90bfaec 463 const struct usb_endpoint_descriptor *desc,
4b345c9a 464 const struct usb_ss_ep_comp_descriptor *comp_desc,
21e64bf2 465 bool modify, bool restore)
72246da4
FB
466{
467 struct dwc3_gadget_ep_cmd_params params;
468
21e64bf2
FB
469 if (dev_WARN_ONCE(dwc->dev, modify && restore,
470 "Can't modify and restore\n"))
471 return -EINVAL;
472
72246da4
FB
473 memset(&params, 0x00, sizeof(params));
474
dc1c70a7 475 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
d2e9a13a
CP
476 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
477
478 /* Burst size is only needed in SuperSpeed mode */
ee5cd41c 479 if (dwc->gadget.speed >= USB_SPEED_SUPER) {
676e3497 480 u32 burst = dep->endpoint.maxburst;
676e3497 481 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
d2e9a13a 482 }
72246da4 483
21e64bf2
FB
484 if (modify) {
485 params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
486 } else if (restore) {
265b70a7
PZ
487 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
488 params.param2 |= dep->saved_state;
21e64bf2
FB
489 } else {
490 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
265b70a7
PZ
491 }
492
13fa2e69
FB
493 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
494
495 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
496 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
72246da4 497
18b7ede5 498 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
dc1c70a7
FB
499 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
500 | DWC3_DEPCFG_STREAM_EVENT_EN;
879631aa
FB
501 dep->stream_capable = true;
502 }
503
0b93a4c8 504 if (!usb_endpoint_xfer_control(desc))
dc1c70a7 505 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
72246da4
FB
506
507 /*
508 * We are doing 1:1 mapping for endpoints, meaning
509 * Physical Endpoint 2 maps to Logical Endpoint 2 and
510 * so on. We consider the direction bit as part of the physical
511 * endpoint number. So USB endpoint 0x81 is 0x03.
512 */
dc1c70a7 513 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
72246da4
FB
514
515 /*
516 * We must use the lower 16 TX FIFOs even though
517 * HW might have more
518 */
519 if (dep->direction)
dc1c70a7 520 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
72246da4
FB
521
522 if (desc->bInterval) {
dc1c70a7 523 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
72246da4
FB
524 dep->interval = 1 << (desc->bInterval - 1);
525 }
526
2cd4718d 527 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
72246da4
FB
528}
529
530static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
531{
532 struct dwc3_gadget_ep_cmd_params params;
533
534 memset(&params, 0x00, sizeof(params));
535
dc1c70a7 536 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
72246da4 537
2cd4718d
FB
538 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
539 &params);
72246da4
FB
540}
541
542/**
543 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
544 * @dep: endpoint to be initialized
545 * @desc: USB Endpoint Descriptor
546 *
547 * Caller should take care of locking
548 */
549static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
c90bfaec 550 const struct usb_endpoint_descriptor *desc,
4b345c9a 551 const struct usb_ss_ep_comp_descriptor *comp_desc,
21e64bf2 552 bool modify, bool restore)
72246da4
FB
553{
554 struct dwc3 *dwc = dep->dwc;
555 u32 reg;
b09e99ee 556 int ret;
72246da4 557
73815280 558 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
ff62d6b6 559
72246da4
FB
560 if (!(dep->flags & DWC3_EP_ENABLED)) {
561 ret = dwc3_gadget_start_config(dwc, dep);
562 if (ret)
563 return ret;
564 }
565
21e64bf2 566 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
265b70a7 567 restore);
72246da4
FB
568 if (ret)
569 return ret;
570
571 if (!(dep->flags & DWC3_EP_ENABLED)) {
f6bafc6a
FB
572 struct dwc3_trb *trb_st_hw;
573 struct dwc3_trb *trb_link;
72246da4 574
16e78db7 575 dep->endpoint.desc = desc;
c90bfaec 576 dep->comp_desc = comp_desc;
72246da4
FB
577 dep->type = usb_endpoint_type(desc);
578 dep->flags |= DWC3_EP_ENABLED;
579
580 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
581 reg |= DWC3_DALEPENA_EP(dep->number);
582 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
583
36b68aae 584 if (usb_endpoint_xfer_control(desc))
7ab373aa 585 return 0;
72246da4 586
0d25744a
JY
587 /* Initialize the TRB ring */
588 dep->trb_dequeue = 0;
589 dep->trb_enqueue = 0;
590 memset(dep->trb_pool, 0,
591 sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
592
36b68aae 593 /* Link TRB. The HWO bit is never reset */
72246da4
FB
594 trb_st_hw = &dep->trb_pool[0];
595
f6bafc6a 596 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
f6bafc6a
FB
597 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
598 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
599 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
600 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
72246da4
FB
601 }
602
603 return 0;
604}
605
b992e681 606static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
624407f9 607static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
72246da4
FB
608{
609 struct dwc3_request *req;
610
0e146028 611 dwc3_stop_active_transfer(dwc, dep->number, true);
624407f9 612
0e146028
FB
613 /* - giveback all requests to gadget driver */
614 while (!list_empty(&dep->started_list)) {
615 req = next_request(&dep->started_list);
1591633e 616
0e146028 617 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
ea53b882
FB
618 }
619
aa3342c8
FB
620 while (!list_empty(&dep->pending_list)) {
621 req = next_request(&dep->pending_list);
72246da4 622
624407f9 623 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
72246da4 624 }
72246da4
FB
625}
626
627/**
628 * __dwc3_gadget_ep_disable - Disables a HW endpoint
629 * @dep: the endpoint to disable
630 *
631 * This function also removes requests which are currently processed by the
 632 * hardware and those which are not yet scheduled.
633 * Caller should take care of locking.
72246da4 634 */
72246da4
FB
635static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
636{
637 struct dwc3 *dwc = dep->dwc;
638 u32 reg;
639
7eaeac5c
FB
640 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
641
624407f9 642 dwc3_remove_requests(dwc, dep);
72246da4 643
687ef981
FB
644 /* make sure HW endpoint isn't stalled */
645 if (dep->flags & DWC3_EP_STALL)
7a608559 646 __dwc3_gadget_ep_set_halt(dep, 0, false);
687ef981 647
72246da4
FB
648 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
649 reg &= ~DWC3_DALEPENA_EP(dep->number);
650 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
651
879631aa 652 dep->stream_capable = false;
f9c56cdd 653 dep->endpoint.desc = NULL;
c90bfaec 654 dep->comp_desc = NULL;
72246da4 655 dep->type = 0;
879631aa 656 dep->flags = 0;
72246da4
FB
657
658 return 0;
659}
660
661/* -------------------------------------------------------------------------- */
662
663static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
664 const struct usb_endpoint_descriptor *desc)
665{
666 return -EINVAL;
667}
668
669static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
670{
671 return -EINVAL;
672}
673
674/* -------------------------------------------------------------------------- */
675
676static int dwc3_gadget_ep_enable(struct usb_ep *ep,
677 const struct usb_endpoint_descriptor *desc)
678{
679 struct dwc3_ep *dep;
680 struct dwc3 *dwc;
681 unsigned long flags;
682 int ret;
683
684 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
685 pr_debug("dwc3: invalid parameters\n");
686 return -EINVAL;
687 }
688
689 if (!desc->wMaxPacketSize) {
690 pr_debug("dwc3: missing wMaxPacketSize\n");
691 return -EINVAL;
692 }
693
694 dep = to_dwc3_ep(ep);
695 dwc = dep->dwc;
696
95ca961c
FB
697 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
698 "%s is already enabled\n",
699 dep->name))
c6f83f38 700 return 0;
c6f83f38 701
72246da4 702 spin_lock_irqsave(&dwc->lock, flags);
265b70a7 703 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
72246da4
FB
704 spin_unlock_irqrestore(&dwc->lock, flags);
705
706 return ret;
707}
708
709static int dwc3_gadget_ep_disable(struct usb_ep *ep)
710{
711 struct dwc3_ep *dep;
712 struct dwc3 *dwc;
713 unsigned long flags;
714 int ret;
715
716 if (!ep) {
717 pr_debug("dwc3: invalid parameters\n");
718 return -EINVAL;
719 }
720
721 dep = to_dwc3_ep(ep);
722 dwc = dep->dwc;
723
95ca961c
FB
724 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
725 "%s is already disabled\n",
726 dep->name))
72246da4 727 return 0;
72246da4 728
72246da4
FB
729 spin_lock_irqsave(&dwc->lock, flags);
730 ret = __dwc3_gadget_ep_disable(dep);
731 spin_unlock_irqrestore(&dwc->lock, flags);
732
733 return ret;
734}
735
736static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
737 gfp_t gfp_flags)
738{
739 struct dwc3_request *req;
740 struct dwc3_ep *dep = to_dwc3_ep(ep);
72246da4
FB
741
742 req = kzalloc(sizeof(*req), gfp_flags);
734d5a53 743 if (!req)
72246da4 744 return NULL;
72246da4
FB
745
746 req->epnum = dep->number;
747 req->dep = dep;
72246da4 748
68d34c8a
FB
749 dep->allocated_requests++;
750
2c4cbe6e
FB
751 trace_dwc3_alloc_request(req);
752
72246da4
FB
753 return &req->request;
754}
755
756static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
757 struct usb_request *request)
758{
759 struct dwc3_request *req = to_dwc3_request(request);
68d34c8a 760 struct dwc3_ep *dep = to_dwc3_ep(ep);
72246da4 761
68d34c8a 762 dep->allocated_requests--;
2c4cbe6e 763 trace_dwc3_free_request(req);
72246da4
FB
764 kfree(req);
765}
766
c71fc37c
FB
767/**
768 * dwc3_prepare_one_trb - setup one TRB from one request
769 * @dep: endpoint for which this request is prepared
770 * @req: dwc3_request pointer
771 */
68e823e2 772static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
eeb720fb 773 struct dwc3_request *req, dma_addr_t dma,
e5ba5ec8 774 unsigned length, unsigned last, unsigned chain, unsigned node)
c71fc37c 775{
f6bafc6a 776 struct dwc3_trb *trb;
c71fc37c 777
73815280 778 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
eeb720fb
FB
779 dep->name, req, (unsigned long long) dma,
780 length, last ? " last" : "",
781 chain ? " chain" : "");
782
915e202a 783
4faf7550 784 trb = &dep->trb_pool[dep->trb_enqueue];
c71fc37c 785
eeb720fb 786 if (!req->trb) {
aa3342c8 787 dwc3_gadget_move_started_request(req);
f6bafc6a
FB
788 req->trb = trb;
789 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
4faf7550 790 req->first_trb_index = dep->trb_enqueue;
eeb720fb 791 }
c71fc37c 792
ef966b9d 793 dwc3_ep_inc_enq(dep);
e5ba5ec8 794
f6bafc6a
FB
795 trb->size = DWC3_TRB_SIZE_LENGTH(length);
796 trb->bpl = lower_32_bits(dma);
797 trb->bph = upper_32_bits(dma);
c71fc37c 798
16e78db7 799 switch (usb_endpoint_type(dep->endpoint.desc)) {
c71fc37c 800 case USB_ENDPOINT_XFER_CONTROL:
f6bafc6a 801 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
c71fc37c
FB
802 break;
803
804 case USB_ENDPOINT_XFER_ISOC:
e5ba5ec8
PA
805 if (!node)
806 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
807 else
808 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
ca4d44ea
FB
809
810 /* always enable Interrupt on Missed ISOC */
811 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
c71fc37c
FB
812 break;
813
814 case USB_ENDPOINT_XFER_BULK:
815 case USB_ENDPOINT_XFER_INT:
f6bafc6a 816 trb->ctrl = DWC3_TRBCTL_NORMAL;
c71fc37c
FB
817 break;
818 default:
819 /*
820 * This is only possible with faulty memory because we
821 * checked it already :)
822 */
823 BUG();
824 }
825
ca4d44ea
FB
826 /* always enable Continue on Short Packet */
827 trb->ctrl |= DWC3_TRB_CTRL_CSP;
f3af3651 828
f3af3651 829 if (!req->request.no_interrupt && !chain)
ca4d44ea 830 trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
f3af3651 831
ca4d44ea 832 if (last)
e5ba5ec8 833 trb->ctrl |= DWC3_TRB_CTRL_LST;
c71fc37c 834
e5ba5ec8
PA
835 if (chain)
836 trb->ctrl |= DWC3_TRB_CTRL_CHN;
837
16e78db7 838 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
f6bafc6a 839 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
c71fc37c 840
f6bafc6a 841 trb->ctrl |= DWC3_TRB_CTRL_HWO;
2c4cbe6e 842
68d34c8a
FB
843 dep->queued_requests++;
844
2c4cbe6e 845 trace_dwc3_prepare_trb(dep, trb);
c71fc37c
FB
846}
847
848/**
849 * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
850 * @dep: The endpoint with the TRB ring
851 * @index: The index of the current TRB in the ring
852 *
853 * Returns the TRB prior to the one pointed to by the index. If the
854 * index is 0, we will wrap backwards, skip the link TRB, and return
855 * the one just before that.
856 */
857static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
858{
859 if (!index)
860 index = DWC3_TRB_NUM - 2;
861 else
862 index = dep->trb_enqueue - 1;
863
864 return &dep->trb_pool[index];
865}
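/*
 * Example: if the current index is 0, this returns the TRB at slot
 * DWC3_TRB_NUM - 2, i.e. the last usable slot just before the link TRB.
 */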
866
867static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
868{
869 struct dwc3_trb *tmp;
32db3d94 870 u8 trbs_left;
c4233573
FB
871
872 /*
873 * If enqueue & dequeue are equal then the ring is either full or empty.
874 *
875 * One way to know for sure is if the TRB right before us has HWO bit
876 * set or not. If it has, then we're definitely full and can't fit any
877 * more transfers in our ring.
878 */
879 if (dep->trb_enqueue == dep->trb_dequeue) {
880 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
881 if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
882 return 0;
883
884 return DWC3_TRB_NUM - 1;
885 }
886
32db3d94 887 trbs_left = dep->trb_dequeue - dep->trb_enqueue;
3de2685f 888 trbs_left &= (DWC3_TRB_NUM - 1);
32db3d94 889
7d0a038b
JY
890 if (dep->trb_dequeue < dep->trb_enqueue)
891 trbs_left--;
892
32db3d94 893 return trbs_left;
c4233573
FB
894}
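/*
 * Worked example (assuming DWC3_TRB_NUM == 256): with trb_dequeue == 7
 * and trb_enqueue == 10 there are three TRBs owned by the controller,
 * so (7 - 10) & 255 = 253, and the extra decrement gives 252 free slots
 * out of the 255 usable ones (the last slot holds the link TRB).
 */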
895
5ee85d89 896static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
55a0237f
FB
897 struct dwc3_request *req, unsigned int trbs_left,
898 unsigned int more_coming)
5ee85d89
FB
899{
900 struct usb_request *request = &req->request;
901 struct scatterlist *sg = request->sg;
902 struct scatterlist *s;
903 unsigned int last = false;
904 unsigned int length;
905 dma_addr_t dma;
906 int i;
907
908 for_each_sg(sg, s, request->num_mapped_sgs, i) {
909 unsigned chain = true;
910
911 length = sg_dma_len(s);
912 dma = sg_dma_address(s);
913
914 if (sg_is_last(s)) {
55a0237f
FB
915 if (usb_endpoint_xfer_int(dep->endpoint.desc) ||
916 !more_coming)
5ee85d89
FB
917 last = true;
918
919 chain = false;
920 }
921
d6dc2e76 922 if (!trbs_left--)
5ee85d89
FB
923 last = true;
924
925 if (last)
926 chain = false;
927
928 dwc3_prepare_one_trb(dep, req, dma, length,
929 last, chain, i);
930
931 if (last)
932 break;
933 }
934}
935
936static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
55a0237f
FB
937 struct dwc3_request *req, unsigned int trbs_left,
938 unsigned int more_coming)
5ee85d89
FB
939{
940 unsigned int last = false;
941 unsigned int length;
942 dma_addr_t dma;
943
944 dma = req->request.dma;
945 length = req->request.length;
946
947 if (!trbs_left)
948 last = true;
949
950 /* Is this the last request? */
55a0237f 951 if (usb_endpoint_xfer_int(dep->endpoint.desc) || !more_coming)
5ee85d89
FB
952 last = true;
953
954 dwc3_prepare_one_trb(dep, req, dma, length,
955 last, false, 0);
956}
957
958/*
 959 * dwc3_prepare_trbs - setup TRBs from requests
 960 * @dep: endpoint for which requests are being prepared
 961 *
 962 * The function goes through the requests list and sets up TRBs for the
 963 * transfers. It returns once there are no more TRBs available or it
 964 * runs out of requests.
 965 */
c4233573 966static void dwc3_prepare_trbs(struct dwc3_ep *dep)
72246da4 967{
68e823e2 968 struct dwc3_request *req, *n;
55a0237f 969 unsigned int more_coming;
72246da4
FB
970 u32 trbs_left;
971
972 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
973
c4233573 974 trbs_left = dwc3_calc_trbs_left(dep);
89bc856e
JY
975 if (!trbs_left)
976 return;
72246da4 977
55a0237f
FB
978 more_coming = dep->allocated_requests - dep->queued_requests;
979
aa3342c8 980 list_for_each_entry_safe(req, n, &dep->pending_list, list) {
5ee85d89 981 if (req->request.num_mapped_sgs > 0)
55a0237f
FB
982 dwc3_prepare_one_trb_sg(dep, req, trbs_left--,
983 more_coming);
5ee85d89 984 else
55a0237f
FB
985 dwc3_prepare_one_trb_linear(dep, req, trbs_left--,
986 more_coming);
72246da4 987
5ee85d89
FB
988 if (!trbs_left)
989 return;
72246da4 990 }
72246da4
FB
991}
992
4fae2e3e 993static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
72246da4
FB
994{
995 struct dwc3_gadget_ep_cmd_params params;
996 struct dwc3_request *req;
997 struct dwc3 *dwc = dep->dwc;
4fae2e3e 998 int starting;
72246da4
FB
999 int ret;
1000 u32 cmd;
1001
4fae2e3e 1002 starting = !(dep->flags & DWC3_EP_BUSY);
72246da4 1003
4fae2e3e
FB
1004 dwc3_prepare_trbs(dep);
1005 req = next_request(&dep->started_list);
72246da4
FB
1006 if (!req) {
1007 dep->flags |= DWC3_EP_PENDING_REQUEST;
1008 return 0;
1009 }
1010
1011 memset(&params, 0, sizeof(params));
72246da4 1012
4fae2e3e 1013 if (starting) {
1877d6c9
PA
1014 params.param0 = upper_32_bits(req->trb_dma);
1015 params.param1 = lower_32_bits(req->trb_dma);
b6b1c6db
FB
1016 cmd = DWC3_DEPCMD_STARTTRANSFER |
1017 DWC3_DEPCMD_PARAM(cmd_param);
1877d6c9 1018 } else {
b6b1c6db
FB
1019 cmd = DWC3_DEPCMD_UPDATETRANSFER |
1020 DWC3_DEPCMD_PARAM(dep->resource_index);
1877d6c9 1021 }
72246da4 1022
2cd4718d 1023 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
72246da4 1024 if (ret < 0) {
72246da4
FB
1025 /*
1026 * FIXME we need to iterate over the list of requests
1027 * here and stop, unmap, free and del each of the linked
1d046793 1028 * requests instead of what we do now.
72246da4 1029 */
0fc9a1be
FB
1030 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1031 req->direction);
72246da4
FB
1032 list_del(&req->list);
1033 return ret;
1034 }
1035
1036 dep->flags |= DWC3_EP_BUSY;
25b8ff68 1037
4fae2e3e 1038 if (starting) {
2eb88016 1039 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
b4996a86 1040 WARN_ON_ONCE(!dep->resource_index);
f898ae09 1041 }
25b8ff68 1042
72246da4
FB
1043 return 0;
1044}
1045
d6d6ec7b
PA
1046static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1047 struct dwc3_ep *dep, u32 cur_uf)
1048{
1049 u32 uf;
1050
aa3342c8 1051 if (list_empty(&dep->pending_list)) {
73815280
FB
1052 dwc3_trace(trace_dwc3_gadget,
1053 "ISOC ep %s ran out of requests",
1054 dep->name);
f4a53c55 1055 dep->flags |= DWC3_EP_PENDING_REQUEST;
d6d6ec7b
PA
1056 return;
1057 }
1058
1059 /* 4 micro frames in the future */
1060 uf = cur_uf + dep->interval * 4;
1061
4fae2e3e 1062 __dwc3_gadget_kick_transfer(dep, uf);
d6d6ec7b
PA
1063}
1064
1065static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1066 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1067{
1068 u32 cur_uf, mask;
1069
1070 mask = ~(dep->interval - 1);
1071 cur_uf = event->parameters & mask;
1072
1073 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1074}
1075
72246da4
FB
1076static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1077{
0fc9a1be
FB
1078 struct dwc3 *dwc = dep->dwc;
1079 int ret;
1080
bb423984 1081 if (!dep->endpoint.desc) {
ec5e795c 1082 dwc3_trace(trace_dwc3_gadget,
60cfb37a 1083 "trying to queue request %p to disabled %s",
bb423984
FB
1084 &req->request, dep->endpoint.name);
1085 return -ESHUTDOWN;
1086 }
1087
1088 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1089 &req->request, req->dep->name)) {
60cfb37a 1090 dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
ec5e795c 1091 &req->request, req->dep->name);
bb423984
FB
1092 return -EINVAL;
1093 }
1094
fc8bb91b
FB
1095 pm_runtime_get(dwc->dev);
1096
72246da4
FB
1097 req->request.actual = 0;
1098 req->request.status = -EINPROGRESS;
1099 req->direction = dep->direction;
1100 req->epnum = dep->number;
1101
fe84f522
FB
1102 trace_dwc3_ep_queue(req);
1103
72246da4
FB
1104 /*
1105 * We only add to our list of requests now and
1106 * start consuming the list once we get XferNotReady
1107 * IRQ.
1108 *
1109 * That way, we avoid doing anything that we don't need
1110 * to do now and defer it until the point we receive a
1111 * particular token from the Host side.
1112 *
1113 * This will also avoid Host cancelling URBs due to too
1d046793 1114 * many NAKs.
72246da4 1115 */
0fc9a1be
FB
1116 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1117 dep->direction);
1118 if (ret)
1119 return ret;
1120
aa3342c8 1121 list_add_tail(&req->list, &dep->pending_list);
72246da4 1122
1d6a3918
FB
1123 /*
1124 * If there are no pending requests and the endpoint isn't already
1125 * busy, we will just start the request straight away.
1126 *
1127 * This will save one IRQ (XFER_NOT_READY) and possibly make it a
1128 * little bit faster.
1129 */
1130 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
ba62c09d 1131 !usb_endpoint_xfer_int(dep->endpoint.desc)) {
4fae2e3e 1132 ret = __dwc3_gadget_kick_transfer(dep, 0);
a8f32817 1133 goto out;
1d6a3918
FB
1134 }
1135
72246da4 1136 /*
b511e5e7 1137 * There are a few special cases:
72246da4 1138 *
f898ae09
PZ
1139 * 1. XferNotReady with empty list of requests. We need to kick the
1140 * transfer here in that situation, otherwise we will be NAKing
1141 * forever. If we get XferNotReady before gadget driver has a
1142 * chance to queue a request, we will ACK the IRQ but won't be
1143 * able to receive the data until the next request is queued.
1144 * The following code is handling exactly that.
72246da4 1145 *
72246da4
FB
1146 */
1147 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1148 /*
 1149 * If xfernotready has already elapsed and this is an isoc
 1150 * transfer, issue END TRANSFER so that we can receive
 1151 * xfernotready again and regain a notion of the current
 1152 * microframe.
1153 */
1154 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
aa3342c8 1155 if (list_empty(&dep->started_list)) {
b992e681 1156 dwc3_stop_active_transfer(dwc, dep->number, true);
cdc359dd
PA
1157 dep->flags = DWC3_EP_ENABLED;
1158 }
f4a53c55
PA
1159 return 0;
1160 }
1161
4fae2e3e 1162 ret = __dwc3_gadget_kick_transfer(dep, 0);
89185916
FB
1163 if (!ret)
1164 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
1165
a8f32817 1166 goto out;
b511e5e7 1167 }
72246da4 1168
b511e5e7
FB
1169 /*
1170 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1171 * kick the transfer here after queuing a request, otherwise the
1172 * core may not see the modified TRB(s).
1173 */
1174 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
79c9046e
PA
1175 (dep->flags & DWC3_EP_BUSY) &&
1176 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
b4996a86 1177 WARN_ON_ONCE(!dep->resource_index);
4fae2e3e 1178 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index);
a8f32817 1179 goto out;
a0925324 1180 }
72246da4 1181
b997ada5
FB
1182 /*
1183 * 4. Stream Capable Bulk Endpoints. We need to start the transfer
1184 * right away, otherwise host will not know we have streams to be
1185 * handled.
1186 */
a8f32817 1187 if (dep->stream_capable)
4fae2e3e 1188 ret = __dwc3_gadget_kick_transfer(dep, 0);
b997ada5 1189
a8f32817
FB
1190out:
1191 if (ret && ret != -EBUSY)
ec5e795c 1192 dwc3_trace(trace_dwc3_gadget,
60cfb37a 1193 "%s: failed to kick transfers",
a8f32817
FB
1194 dep->name);
1195 if (ret == -EBUSY)
1196 ret = 0;
1197
1198 return ret;
72246da4
FB
1199}
1200
04c03d10
FB
1201static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
1202 struct usb_request *request)
1203{
1204 dwc3_gadget_ep_free_request(ep, request);
1205}
1206
1207static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
1208{
1209 struct dwc3_request *req;
1210 struct usb_request *request;
1211 struct usb_ep *ep = &dep->endpoint;
1212
60cfb37a 1213 dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
04c03d10
FB
1214 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
1215 if (!request)
1216 return -ENOMEM;
1217
1218 request->length = 0;
1219 request->buf = dwc->zlp_buf;
1220 request->complete = __dwc3_gadget_ep_zlp_complete;
1221
1222 req = to_dwc3_request(request);
1223
1224 return __dwc3_gadget_ep_queue(dep, req);
1225}
1226
72246da4
FB
1227static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1228 gfp_t gfp_flags)
1229{
1230 struct dwc3_request *req = to_dwc3_request(request);
1231 struct dwc3_ep *dep = to_dwc3_ep(ep);
1232 struct dwc3 *dwc = dep->dwc;
1233
1234 unsigned long flags;
1235
1236 int ret;
1237
fdee4eba 1238 spin_lock_irqsave(&dwc->lock, flags);
72246da4 1239 ret = __dwc3_gadget_ep_queue(dep, req);
04c03d10
FB
1240
1241 /*
1242 * Okay, here's the thing, if the gadget driver has requested a ZLP by
1243 * setting request->zero, instead of doing magic, we will just queue an
1244 * extra usb_request ourselves so that it gets handled the same way as
1245 * any other request.
1246 */
d9261898
JY
1247 if (ret == 0 && request->zero && request->length &&
1248 (request->length % ep->maxpacket == 0))
04c03d10
FB
1249 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
1250
72246da4
FB
1251 spin_unlock_irqrestore(&dwc->lock, flags);
1252
1253 return ret;
1254}
1255
1256static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1257 struct usb_request *request)
1258{
1259 struct dwc3_request *req = to_dwc3_request(request);
1260 struct dwc3_request *r = NULL;
1261
1262 struct dwc3_ep *dep = to_dwc3_ep(ep);
1263 struct dwc3 *dwc = dep->dwc;
1264
1265 unsigned long flags;
1266 int ret = 0;
1267
2c4cbe6e
FB
1268 trace_dwc3_ep_dequeue(req);
1269
72246da4
FB
1270 spin_lock_irqsave(&dwc->lock, flags);
1271
aa3342c8 1272 list_for_each_entry(r, &dep->pending_list, list) {
72246da4
FB
1273 if (r == req)
1274 break;
1275 }
1276
1277 if (r != req) {
aa3342c8 1278 list_for_each_entry(r, &dep->started_list, list) {
72246da4
FB
1279 if (r == req)
1280 break;
1281 }
1282 if (r == req) {
1283 /* wait until it is processed */
b992e681 1284 dwc3_stop_active_transfer(dwc, dep->number, true);
e8d4e8be 1285 goto out1;
72246da4
FB
1286 }
1287 dev_err(dwc->dev, "request %p was not queued to %s\n",
1288 request, ep->name);
1289 ret = -EINVAL;
1290 goto out0;
1291 }
1292
e8d4e8be 1293out1:
72246da4
FB
1294 /* giveback the request */
1295 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1296
1297out0:
1298 spin_unlock_irqrestore(&dwc->lock, flags);
1299
1300 return ret;
1301}
1302
7a608559 1303int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
72246da4
FB
1304{
1305 struct dwc3_gadget_ep_cmd_params params;
1306 struct dwc3 *dwc = dep->dwc;
1307 int ret;
1308
5ad02fb8
FB
1309 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1310 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1311 return -EINVAL;
1312 }
1313
72246da4
FB
1314 memset(&params, 0x00, sizeof(params));
1315
1316 if (value) {
69450c4d
FB
1317 struct dwc3_trb *trb;
1318
1319 unsigned transfer_in_flight;
1320 unsigned started;
1321
1322 if (dep->number > 1)
1323 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
1324 else
1325 trb = &dwc->ep0_trb[dep->trb_enqueue];
1326
1327 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
1328 started = !list_empty(&dep->started_list);
1329
1330 if (!protocol && ((dep->direction && transfer_in_flight) ||
1331 (!dep->direction && started))) {
ec5e795c 1332 dwc3_trace(trace_dwc3_gadget,
052ba52e 1333 "%s: pending request, cannot halt",
7a608559
FB
1334 dep->name);
1335 return -EAGAIN;
1336 }
1337
2cd4718d
FB
1338 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
1339 &params);
72246da4 1340 if (ret)
3f89204b 1341 dev_err(dwc->dev, "failed to set STALL on %s\n",
72246da4
FB
1342 dep->name);
1343 else
1344 dep->flags |= DWC3_EP_STALL;
1345 } else {
2cd4718d 1346
50c763f8 1347 ret = dwc3_send_clear_stall_ep_cmd(dep);
72246da4 1348 if (ret)
3f89204b 1349 dev_err(dwc->dev, "failed to clear STALL on %s\n",
72246da4
FB
1350 dep->name);
1351 else
a535d81c 1352 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
72246da4 1353 }
5275455a 1354
72246da4
FB
1355 return ret;
1356}
1357
1358static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1359{
1360 struct dwc3_ep *dep = to_dwc3_ep(ep);
1361 struct dwc3 *dwc = dep->dwc;
1362
1363 unsigned long flags;
1364
1365 int ret;
1366
1367 spin_lock_irqsave(&dwc->lock, flags);
7a608559 1368 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
72246da4
FB
1369 spin_unlock_irqrestore(&dwc->lock, flags);
1370
1371 return ret;
1372}
1373
1374static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1375{
1376 struct dwc3_ep *dep = to_dwc3_ep(ep);
249a4569
PZ
1377 struct dwc3 *dwc = dep->dwc;
1378 unsigned long flags;
95aa4e8d 1379 int ret;
72246da4 1380
249a4569 1381 spin_lock_irqsave(&dwc->lock, flags);
72246da4
FB
1382 dep->flags |= DWC3_EP_WEDGE;
1383
08f0d966 1384 if (dep->number == 0 || dep->number == 1)
95aa4e8d 1385 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
08f0d966 1386 else
7a608559 1387 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
95aa4e8d
FB
1388 spin_unlock_irqrestore(&dwc->lock, flags);
1389
1390 return ret;
72246da4
FB
1391}
1392
1393/* -------------------------------------------------------------------------- */
1394
1395static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1396 .bLength = USB_DT_ENDPOINT_SIZE,
1397 .bDescriptorType = USB_DT_ENDPOINT,
1398 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1399};
1400
1401static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1402 .enable = dwc3_gadget_ep0_enable,
1403 .disable = dwc3_gadget_ep0_disable,
1404 .alloc_request = dwc3_gadget_ep_alloc_request,
1405 .free_request = dwc3_gadget_ep_free_request,
1406 .queue = dwc3_gadget_ep0_queue,
1407 .dequeue = dwc3_gadget_ep_dequeue,
08f0d966 1408 .set_halt = dwc3_gadget_ep0_set_halt,
72246da4
FB
1409 .set_wedge = dwc3_gadget_ep_set_wedge,
1410};
1411
1412static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1413 .enable = dwc3_gadget_ep_enable,
1414 .disable = dwc3_gadget_ep_disable,
1415 .alloc_request = dwc3_gadget_ep_alloc_request,
1416 .free_request = dwc3_gadget_ep_free_request,
1417 .queue = dwc3_gadget_ep_queue,
1418 .dequeue = dwc3_gadget_ep_dequeue,
1419 .set_halt = dwc3_gadget_ep_set_halt,
1420 .set_wedge = dwc3_gadget_ep_set_wedge,
1421};
1422
1423/* -------------------------------------------------------------------------- */
1424
1425static int dwc3_gadget_get_frame(struct usb_gadget *g)
1426{
1427 struct dwc3 *dwc = gadget_to_dwc(g);
1428 u32 reg;
1429
1430 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1431 return DWC3_DSTS_SOFFN(reg);
1432}
1433
218ef7b6 1434static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
72246da4 1435{
72246da4 1436 unsigned long timeout;
72246da4 1437
218ef7b6 1438 int ret;
72246da4
FB
1439 u32 reg;
1440
72246da4
FB
1441 u8 link_state;
1442 u8 speed;
1443
72246da4
FB
1444 /*
1445 * According to the Databook, a remote wakeup request should
 1446 * be issued only when the device is in the early suspend state.
1447 *
1448 * We can check that via USB Link State bits in DSTS register.
1449 */
1450 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1451
1452 speed = reg & DWC3_DSTS_CONNECTSPD;
ee5cd41c
JY
1453 if ((speed == DWC3_DSTS_SUPERSPEED) ||
1454 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
60cfb37a 1455 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
6b742899 1456 return 0;
72246da4
FB
1457 }
1458
1459 link_state = DWC3_DSTS_USBLNKST(reg);
1460
1461 switch (link_state) {
1462 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1463 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1464 break;
1465 default:
ec5e795c 1466 dwc3_trace(trace_dwc3_gadget,
60cfb37a 1467 "can't wakeup from '%s'",
ec5e795c 1468 dwc3_gadget_link_string(link_state));
218ef7b6 1469 return -EINVAL;
72246da4
FB
1470 }
1471
8598bde7
FB
1472 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1473 if (ret < 0) {
1474 dev_err(dwc->dev, "failed to put link in Recovery\n");
218ef7b6 1475 return ret;
8598bde7 1476 }
72246da4 1477
802fde98
PZ
1478 /* Recent versions do this automatically */
1479 if (dwc->revision < DWC3_REVISION_194A) {
1480 /* write zeroes to Link Change Request */
fcc023c7 1481 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
802fde98
PZ
1482 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1483 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1484 }
72246da4 1485
1d046793 1486 /* poll until Link State changes to ON */
72246da4
FB
1487 timeout = jiffies + msecs_to_jiffies(100);
1488
1d046793 1489 while (!time_after(jiffies, timeout)) {
72246da4
FB
1490 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1491
1492 /* in HS, means ON */
1493 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1494 break;
1495 }
1496
1497 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1498 dev_err(dwc->dev, "failed to send remote wakeup\n");
218ef7b6 1499 return -EINVAL;
72246da4
FB
1500 }
1501
218ef7b6
FB
1502 return 0;
1503}
1504
1505static int dwc3_gadget_wakeup(struct usb_gadget *g)
1506{
1507 struct dwc3 *dwc = gadget_to_dwc(g);
1508 unsigned long flags;
1509 int ret;
1510
1511 spin_lock_irqsave(&dwc->lock, flags);
1512 ret = __dwc3_gadget_wakeup(dwc);
72246da4
FB
1513 spin_unlock_irqrestore(&dwc->lock, flags);
1514
1515 return ret;
1516}
1517
1518static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1519 int is_selfpowered)
1520{
1521 struct dwc3 *dwc = gadget_to_dwc(g);
249a4569 1522 unsigned long flags;
72246da4 1523
249a4569 1524 spin_lock_irqsave(&dwc->lock, flags);
bcdea503 1525 g->is_selfpowered = !!is_selfpowered;
249a4569 1526 spin_unlock_irqrestore(&dwc->lock, flags);
72246da4
FB
1527
1528 return 0;
1529}
1530
7b2a0368 1531static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
72246da4
FB
1532{
1533 u32 reg;
61d58242 1534 u32 timeout = 500;
72246da4 1535
fc8bb91b
FB
1536 if (pm_runtime_suspended(dwc->dev))
1537 return 0;
1538
72246da4 1539 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
8db7ed15 1540 if (is_on) {
802fde98
PZ
1541 if (dwc->revision <= DWC3_REVISION_187A) {
1542 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1543 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1544 }
1545
1546 if (dwc->revision >= DWC3_REVISION_194A)
1547 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1548 reg |= DWC3_DCTL_RUN_STOP;
7b2a0368
FB
1549
1550 if (dwc->has_hibernation)
1551 reg |= DWC3_DCTL_KEEP_CONNECT;
1552
9fcb3bd8 1553 dwc->pullups_connected = true;
8db7ed15 1554 } else {
72246da4 1555 reg &= ~DWC3_DCTL_RUN_STOP;
7b2a0368
FB
1556
1557 if (dwc->has_hibernation && !suspend)
1558 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1559
9fcb3bd8 1560 dwc->pullups_connected = false;
8db7ed15 1561 }
72246da4
FB
1562
1563 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1564
1565 do {
1566 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
b6d4e16e
FB
1567 reg &= DWC3_DSTS_DEVCTRLHLT;
1568 } while (--timeout && !(!is_on ^ !reg));
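 /*
 * The loop above waits for DSTS.DEVCTRLHLT to reflect the requested
 * state: the controller clears the halted bit once it is running
 * (is_on) and sets it once it has actually stopped (!is_on).
 */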
1569
1570 if (!timeout)
1571 return -ETIMEDOUT;
72246da4 1572
73815280 1573 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
72246da4
FB
1574 dwc->gadget_driver
1575 ? dwc->gadget_driver->function : "no-function",
1576 is_on ? "connect" : "disconnect");
6f17f74b
PA
1577
1578 return 0;
72246da4
FB
1579}
1580
1581static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1582{
1583 struct dwc3 *dwc = gadget_to_dwc(g);
1584 unsigned long flags;
6f17f74b 1585 int ret;
72246da4
FB
1586
1587 is_on = !!is_on;
1588
1589 spin_lock_irqsave(&dwc->lock, flags);
7b2a0368 1590 ret = dwc3_gadget_run_stop(dwc, is_on, false);
72246da4
FB
1591 spin_unlock_irqrestore(&dwc->lock, flags);
1592
6f17f74b 1593 return ret;
72246da4
FB
1594}
1595
8698e2ac
FB
1596static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1597{
1598 u32 reg;
1599
1600 /* Enable all but Start and End of Frame IRQs */
1601 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1602 DWC3_DEVTEN_EVNTOVERFLOWEN |
1603 DWC3_DEVTEN_CMDCMPLTEN |
1604 DWC3_DEVTEN_ERRTICERREN |
1605 DWC3_DEVTEN_WKUPEVTEN |
1606 DWC3_DEVTEN_ULSTCNGEN |
1607 DWC3_DEVTEN_CONNECTDONEEN |
1608 DWC3_DEVTEN_USBRSTEN |
1609 DWC3_DEVTEN_DISCONNEVTEN);
1610
1611 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1612}
1613
1614static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1615{
1616 /* mask all interrupts */
1617 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1618}
1619
1620static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
b15a762f 1621static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
8698e2ac 1622
4e99472b
FB
1623/**
1624 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
1625 * @dwc: pointer to our context structure
 1626 *
 1627 * The following looks complex but it's actually very simple. In order to
1628 * calculate the number of packets we can burst at once on OUT transfers, we're
1629 * gonna use RxFIFO size.
1630 *
1631 * To calculate RxFIFO size we need two numbers:
1632 * MDWIDTH = size, in bits, of the internal memory bus
1633 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
1634 *
1635 * Given these two numbers, the formula is simple:
1636 *
1637 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
1638 *
1639 * 24 bytes is for 3x SETUP packets
1640 * 16 bytes is a clock domain crossing tolerance
1641 *
1642 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
1643 */
1644static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
1645{
1646 u32 ram2_depth;
1647 u32 mdwidth;
1648 u32 nump;
1649 u32 reg;
1650
1651 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
1652 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
1653
1654 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
1655 nump = min_t(u32, nump, 16);
1656
1657 /* update NumP */
1658 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1659 reg &= ~DWC3_DCFG_NUMP_MASK;
1660 reg |= nump << DWC3_DCFG_NUMP_SHIFT;
1661 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1662}
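/*
 * Worked numbers (hypothetical hardware, e.g. MDWIDTH = 64 bits and
 * RAM2_DEPTH = 2048): RxFIFO = 2048 * 64 / 8 - 24 - 16 = 16344 bytes,
 * so nump = 16344 / 1024 = 15, which the min_t() above caps at 16.
 */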
1663
d7be2952 1664static int __dwc3_gadget_start(struct dwc3 *dwc)
72246da4 1665{
72246da4 1666 struct dwc3_ep *dep;
72246da4
FB
1667 int ret = 0;
1668 u32 reg;
1669
72246da4
FB
1670 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1671 reg &= ~(DWC3_DCFG_SPEED_MASK);
07e7f47b
FB
1672
1673 /**
1674 * WORKAROUND: DWC3 revisions < 2.20a have an issue
 1675 * which would cause a metastability state on the Run/Stop
 1676 * bit if we try to force the IP to USB2-only mode.
 1677 *
 1678 * Because of that, we cannot configure the IP to any
 1679 * speed other than SuperSpeed.
1680 *
1681 * Refers to:
1682 *
1683 * STAR#9000525659: Clock Domain Crossing on DCTL in
1684 * USB 2.0 Mode
1685 */
f7e846f0 1686 if (dwc->revision < DWC3_REVISION_220A) {
07e7f47b 1687 reg |= DWC3_DCFG_SUPERSPEED;
f7e846f0
FB
1688 } else {
1689 switch (dwc->maximum_speed) {
1690 case USB_SPEED_LOW:
2da9ad76 1691 reg |= DWC3_DCFG_LOWSPEED;
f7e846f0
FB
1692 break;
1693 case USB_SPEED_FULL:
2da9ad76 1694 reg |= DWC3_DCFG_FULLSPEED1;
f7e846f0
FB
1695 break;
1696 case USB_SPEED_HIGH:
2da9ad76 1697 reg |= DWC3_DCFG_HIGHSPEED;
f7e846f0 1698 break;
7580862b 1699 case USB_SPEED_SUPER_PLUS:
2da9ad76 1700 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
7580862b 1701 break;
f7e846f0 1702 default:
77966eb8
JY
1703 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
1704 dwc->maximum_speed);
1705 /* fall through */
1706 case USB_SPEED_SUPER:
1707 reg |= DWC3_DCFG_SUPERSPEED;
1708 break;
f7e846f0
FB
1709 }
1710 }
72246da4
FB
1711 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1712
2a58f9c1
FB
1713 /*
1714 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
1715 * field instead of letting dwc3 itself calculate that automatically.
1716 *
1717 * This way, we maximize the chances that we'll be able to get several
1718 * bursts of data without going through any sort of endpoint throttling.
1719 */
1720 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1721 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
1722 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1723
4e99472b
FB
1724 dwc3_gadget_setup_nump(dwc);
1725
72246da4
FB
1726 /* Start with SuperSpeed Default */
1727 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1728
1729 dep = dwc->eps[0];
265b70a7
PZ
1730 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1731 false);
72246da4
FB
1732 if (ret) {
1733 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
d7be2952 1734 goto err0;
72246da4
FB
1735 }
1736
1737 dep = dwc->eps[1];
265b70a7
PZ
1738 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1739 false);
72246da4
FB
1740 if (ret) {
1741 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
d7be2952 1742 goto err1;
72246da4
FB
1743 }
1744
1745 /* begin to receive SETUP packets */
c7fcdeb2 1746 dwc->ep0state = EP0_SETUP_PHASE;
72246da4
FB
1747 dwc3_ep0_out_start(dwc);
1748
8698e2ac
FB
1749 dwc3_gadget_enable_irq(dwc);
1750
72246da4
FB
1751 return 0;
1752
b0d7ffd4 1753err1:
d7be2952 1754 __dwc3_gadget_ep_disable(dwc->eps[0]);
b0d7ffd4
FB
1755
1756err0:
72246da4
FB
1757 return ret;
1758}
1759
d7be2952
FB
1760static int dwc3_gadget_start(struct usb_gadget *g,
1761 struct usb_gadget_driver *driver)
72246da4
FB
1762{
1763 struct dwc3 *dwc = gadget_to_dwc(g);
1764 unsigned long flags;
d7be2952 1765 int ret = 0;
8698e2ac 1766 int irq;
72246da4 1767
d7be2952
FB
1768 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1769 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1770 IRQF_SHARED, "dwc3", dwc->ev_buf);
1771 if (ret) {
1772 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1773 irq, ret);
1774 goto err0;
1775 }
3f308d17 1776 dwc->irq_gadget = irq;
d7be2952 1777
72246da4 1778 spin_lock_irqsave(&dwc->lock, flags);
d7be2952
FB
1779 if (dwc->gadget_driver) {
1780 dev_err(dwc->dev, "%s is already bound to %s\n",
1781 dwc->gadget.name,
1782 dwc->gadget_driver->driver.name);
1783 ret = -EBUSY;
1784 goto err1;
1785 }
1786
1787 dwc->gadget_driver = driver;
1788
fc8bb91b
FB
1789 if (pm_runtime_active(dwc->dev))
1790 __dwc3_gadget_start(dwc);
1791
d7be2952
FB
1792 spin_unlock_irqrestore(&dwc->lock, flags);
1793
1794 return 0;
1795
1796err1:
1797 spin_unlock_irqrestore(&dwc->lock, flags);
1798 free_irq(irq, dwc->ev_buf);
1799
1800err0:
1801 return ret;
1802}
72246da4 1803
d7be2952
FB
1804static void __dwc3_gadget_stop(struct dwc3 *dwc)
1805{
da1410be
BW
1806 if (pm_runtime_suspended(dwc->dev))
1807 return;
1808
8698e2ac 1809 dwc3_gadget_disable_irq(dwc);
72246da4
FB
1810 __dwc3_gadget_ep_disable(dwc->eps[0]);
1811 __dwc3_gadget_ep_disable(dwc->eps[1]);
d7be2952 1812}
72246da4 1813
d7be2952
FB
1814static int dwc3_gadget_stop(struct usb_gadget *g)
1815{
1816 struct dwc3 *dwc = gadget_to_dwc(g);
1817 unsigned long flags;
72246da4 1818
d7be2952
FB
1819 spin_lock_irqsave(&dwc->lock, flags);
1820 __dwc3_gadget_stop(dwc);
1821 dwc->gadget_driver = NULL;
72246da4
FB
1822 spin_unlock_irqrestore(&dwc->lock, flags);
1823
3f308d17 1824 free_irq(dwc->irq_gadget, dwc->ev_buf);
b0d7ffd4 1825
72246da4
FB
1826 return 0;
1827}
802fde98 1828
72246da4
FB
1829static const struct usb_gadget_ops dwc3_gadget_ops = {
1830 .get_frame = dwc3_gadget_get_frame,
1831 .wakeup = dwc3_gadget_wakeup,
1832 .set_selfpowered = dwc3_gadget_set_selfpowered,
1833 .pullup = dwc3_gadget_pullup,
1834 .udc_start = dwc3_gadget_start,
1835 .udc_stop = dwc3_gadget_stop,
1836};
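/*
 * These ops are invoked by the UDC core on behalf of a function driver:
 * for example, binding a gadget driver via usb_gadget_probe_driver() ends
 * up calling .udc_start, i.e. dwc3_gadget_start() above, and unbinding
 * calls .udc_stop.
 */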
1837
1838/* -------------------------------------------------------------------------- */
1839
6a1e3ef4
FB
1840static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1841 u8 num, u32 direction)
72246da4
FB
1842{
1843 struct dwc3_ep *dep;
6a1e3ef4 1844 u8 i;
72246da4 1845
6a1e3ef4 1846 for (i = 0; i < num; i++) {
d07fa665 1847 u8 epnum = (i << 1) | (direction ? 1 : 0);
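		/*
		 * Physical endpoint numbers interleave the two directions:
		 * even numbers are OUT endpoints, odd numbers are IN, so
		 * logical ep1out maps to physical endpoint 2 and ep1in to
		 * physical endpoint 3.
		 */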
72246da4 1848
72246da4 1849 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
734d5a53 1850 if (!dep)
72246da4 1851 return -ENOMEM;
72246da4
FB
1852
1853 dep->dwc = dwc;
1854 dep->number = epnum;
9aa62ae4 1855 dep->direction = !!direction;
2eb88016 1856 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
72246da4
FB
1857 dwc->eps[epnum] = dep;
1858
1859 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1860 (epnum & 1) ? "in" : "out");
6a1e3ef4 1861
72246da4 1862 dep->endpoint.name = dep->name;
74674cbf 1863 spin_lock_init(&dep->lock);
72246da4 1864
73815280 1865 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
653df35e 1866
72246da4 1867 if (epnum == 0 || epnum == 1) {
e117e742 1868 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
6048e4c6 1869 dep->endpoint.maxburst = 1;
72246da4
FB
1870 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1871 if (!epnum)
1872 dwc->gadget.ep0 = &dep->endpoint;
1873 } else {
1874 int ret;
1875
e117e742 1876 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
12d36c16 1877 dep->endpoint.max_streams = 15;
72246da4
FB
1878 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1879 list_add_tail(&dep->endpoint.ep_list,
1880 &dwc->gadget.ep_list);
1881
1882 ret = dwc3_alloc_trb_pool(dep);
25b8ff68 1883 if (ret)
72246da4 1884 return ret;
72246da4 1885 }
25b8ff68 1886
a474d3b7
RB
1887 if (epnum == 0 || epnum == 1) {
1888 dep->endpoint.caps.type_control = true;
1889 } else {
1890 dep->endpoint.caps.type_iso = true;
1891 dep->endpoint.caps.type_bulk = true;
1892 dep->endpoint.caps.type_int = true;
1893 }
1894
1895 dep->endpoint.caps.dir_in = !!direction;
1896 dep->endpoint.caps.dir_out = !direction;
1897
aa3342c8
FB
1898 INIT_LIST_HEAD(&dep->pending_list);
1899 INIT_LIST_HEAD(&dep->started_list);
72246da4
FB
1900 }
1901
1902 return 0;
1903}
1904
6a1e3ef4
FB
1905static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1906{
1907 int ret;
1908
1909 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1910
1911 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1912 if (ret < 0) {
73815280
FB
1913 dwc3_trace(trace_dwc3_gadget,
1914 "failed to allocate OUT endpoints");
6a1e3ef4
FB
1915 return ret;
1916 }
1917
1918 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1919 if (ret < 0) {
73815280
FB
1920 dwc3_trace(trace_dwc3_gadget,
1921 "failed to allocate IN endpoints");
6a1e3ef4
FB
1922 return ret;
1923 }
1924
1925 return 0;
1926}
1927
72246da4
FB
1928static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1929{
1930 struct dwc3_ep *dep;
1931 u8 epnum;
1932
1933 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1934 dep = dwc->eps[epnum];
6a1e3ef4
FB
1935 if (!dep)
1936 continue;
5bf8fae3
GC
1937 /*
1938 * Physical endpoints 0 and 1 are special; they form the
1939 * bi-directional USB endpoint 0.
1940 *
1941 * For those two physical endpoints, we don't allocate a TRB
1942	 * pool nor do we add them to the endpoints list. Because of
1943	 * that, we must skip those two operations here; otherwise we
1944	 * would end up with all sorts of bugs when removing dwc3.ko.
1945 */
1946 if (epnum != 0 && epnum != 1) {
1947 dwc3_free_trb_pool(dep);
72246da4 1948 list_del(&dep->endpoint.ep_list);
5bf8fae3 1949 }
72246da4
FB
1950
1951 kfree(dep);
1952 }
1953}
1954
72246da4 1955/* -------------------------------------------------------------------------- */
e5caff68 1956
e5ba5ec8
PA
1957static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1958 struct dwc3_request *req, struct dwc3_trb *trb,
72246da4
FB
1959 const struct dwc3_event_depevt *event, int status)
1960{
72246da4
FB
1961 unsigned int count;
1962 unsigned int s_pkt = 0;
d6d6ec7b 1963 unsigned int trb_status;
72246da4 1964
68d34c8a 1965 dep->queued_requests--;
2c4cbe6e
FB
1966 trace_dwc3_complete_trb(dep, trb);
1967
e5ba5ec8
PA
1968 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1969 /*
1970 * We continue despite the error. There is not much we
1971 * can do. If we don't clean it up we loop forever. If
1972 * we skip the TRB then it gets overwritten after a
1973 * while since we use them in a ring buffer. A BUG()
1974	 * would help. Let's hope that if this occurs, someone
1975 * fixes the root cause instead of looking away :)
1976 */
1977 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1978 dep->name, trb);
1979 count = trb->size & DWC3_TRB_SIZE_MASK;
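	/*
	 * count now holds the TRB's BUFSIZ residue, i.e. the number of
	 * bytes that were NOT transferred; the actual transfer length is
	 * derived from it further down.
	 */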
1980
1981 if (dep->direction) {
1982 if (count) {
1983 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1984 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
ec5e795c 1985 dwc3_trace(trace_dwc3_gadget,
60cfb37a 1986 "%s: incomplete IN transfer",
e5ba5ec8
PA
1987 dep->name);
1988 /*
1989 * If missed isoc occurred and there is
1990 * no request queued then issue END
1991 * TRANSFER, so that core generates
1992 * next xfernotready and we will issue
1993 * a fresh START TRANSFER.
1994				 * If there are still queued requests
1995 * then wait, do not issue either END
1996 * or UPDATE TRANSFER, just attach next
aa3342c8 1997 * request in pending_list during
e5ba5ec8
PA
1998				 * giveback. If any future queued request
1999				 * is successfully transferred then we
2000				 * will issue UPDATE TRANSFER for all
aa3342c8 2001				 * requests in the pending_list.
e5ba5ec8
PA
2002 */
2003 dep->flags |= DWC3_EP_MISSED_ISOC;
2004 } else {
2005 dev_err(dwc->dev, "incomplete IN transfer %s\n",
2006 dep->name);
2007 status = -ECONNRESET;
2008 }
2009 } else {
2010 dep->flags &= ~DWC3_EP_MISSED_ISOC;
2011 }
2012 } else {
2013 if (count && (event->status & DEPEVT_STATUS_SHORT))
2014 s_pkt = 1;
2015 }
2016
2017 /*
2018 * We assume here we will always receive the entire data block
2019 * which we should receive. Meaning, if we program RX to
2020 * receive 4K but we receive only 2K, we assume that's all we
2021 * should receive and we simply bounce the request back to the
2022 * gadget driver for further processing.
2023 */
2024 req->request.actual += req->request.length - count;
2025 if (s_pkt)
2026 return 1;
2027 if ((event->status & DEPEVT_STATUS_LST) &&
2028 (trb->ctrl & (DWC3_TRB_CTRL_LST |
2029 DWC3_TRB_CTRL_HWO)))
2030 return 1;
2031 if ((event->status & DEPEVT_STATUS_IOC) &&
2032 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2033 return 1;
2034 return 0;
2035}
2036
2037static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
2038 const struct dwc3_event_depevt *event, int status)
2039{
2040 struct dwc3_request *req;
2041 struct dwc3_trb *trb;
2042 unsigned int slot;
2043 unsigned int i;
2044 int ret;
2045
72246da4 2046 do {
aa3342c8 2047 req = next_request(&dep->started_list);
ac7bdcc1 2048 if (WARN_ON_ONCE(!req))
d115d705 2049 return 1;
ac7bdcc1 2050
d115d705
VS
2051 i = 0;
2052 do {
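			/*
			 * The last slot of the TRB pool is reserved for the
			 * Link TRB that wraps the ring, so it is skipped when
			 * walking the TRBs belonging to this request.
			 */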
53fd8818 2053 slot = req->first_trb_index + i;
36b68aae 2054 if (slot == DWC3_TRB_NUM - 1)
d115d705
VS
2055 slot++;
2056 slot %= DWC3_TRB_NUM;
2057 trb = &dep->trb_pool[slot];
2058
2059 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2060 event, status);
2061 if (ret)
2062 break;
2063 } while (++i < req->request.num_mapped_sgs);
2064
2065 dwc3_gadget_giveback(dep, req, status);
e5ba5ec8
PA
2066
2067 if (ret)
72246da4 2068 break;
d115d705 2069 } while (1);
72246da4 2070
4cb42217
FB
2071 /*
2072 * Our endpoint might get disabled by another thread during
2073	 * dwc3_gadget_giveback(). If that happens, we just return 1
2074	 * early so the DWC3_EP_BUSY flag gets cleared.
2075 */
2076 if (!dep->endpoint.desc)
2077 return 1;
2078
cdc359dd 2079 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
aa3342c8
FB
2080 list_empty(&dep->started_list)) {
2081 if (list_empty(&dep->pending_list)) {
cdc359dd
PA
2082 /*
2083 * If there is no entry in request list then do
2084 * not issue END TRANSFER now. Just set PENDING
2085 * flag, so that END TRANSFER is issued when an
2086 * entry is added into request list.
2087 */
2088 dep->flags = DWC3_EP_PENDING_REQUEST;
2089 } else {
b992e681 2090 dwc3_stop_active_transfer(dwc, dep->number, true);
cdc359dd
PA
2091 dep->flags = DWC3_EP_ENABLED;
2092 }
7efea86c
PA
2093 return 1;
2094 }
2095
9cad39fe
KL
2096 if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
2097 if ((event->status & DEPEVT_STATUS_IOC) &&
2098 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2099 return 0;
72246da4
FB
2100 return 1;
2101}
2102
2103static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
029d97ff 2104 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
72246da4
FB
2105{
2106 unsigned status = 0;
2107 int clean_busy;
e18b7975
FB
2108 u32 is_xfer_complete;
2109
2110 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
72246da4
FB
2111
2112 if (event->status & DEPEVT_STATUS_BUSERR)
2113 status = -ECONNRESET;
2114
1d046793 2115 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
4cb42217 2116 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
e18b7975 2117 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
72246da4 2118 dep->flags &= ~DWC3_EP_BUSY;
fae2b904
FB
2119
2120 /*
2121 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
2122 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
2123 */
2124 if (dwc->revision < DWC3_REVISION_183A) {
2125 u32 reg;
2126 int i;
2127
2128 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
348e026f 2129 dep = dwc->eps[i];
fae2b904
FB
2130
2131 if (!(dep->flags & DWC3_EP_ENABLED))
2132 continue;
2133
aa3342c8 2134 if (!list_empty(&dep->started_list))
fae2b904
FB
2135 return;
2136 }
2137
2138 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2139 reg |= dwc->u1u2;
2140 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2141
2142 dwc->u1u2 = 0;
2143 }
8a1a9c9e 2144
4cb42217
FB
2145 /*
2146 * Our endpoint might get disabled by another thread during
2147	 * dwc3_gadget_giveback(). If that happens, we just return
2148	 * early; there is nothing left to do for this endpoint.
2149 */
2150 if (!dep->endpoint.desc)
2151 return;
2152
e6e709b7 2153 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
8a1a9c9e
FB
2154 int ret;
2155
4fae2e3e 2156 ret = __dwc3_gadget_kick_transfer(dep, 0);
8a1a9c9e
FB
2157 if (!ret || ret == -EBUSY)
2158 return;
2159 }
72246da4
FB
2160}
2161
72246da4
FB
2162static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2163 const struct dwc3_event_depevt *event)
2164{
2165 struct dwc3_ep *dep;
2166 u8 epnum = event->endpoint_number;
2167
2168 dep = dwc->eps[epnum];
2169
3336abb5
FB
2170 if (!(dep->flags & DWC3_EP_ENABLED))
2171 return;
2172
72246da4
FB
2173 if (epnum == 0 || epnum == 1) {
2174 dwc3_ep0_interrupt(dwc, event);
2175 return;
2176 }
2177
2178 switch (event->endpoint_event) {
2179 case DWC3_DEPEVT_XFERCOMPLETE:
b4996a86 2180 dep->resource_index = 0;
c2df85ca 2181
16e78db7 2182 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
ec5e795c 2183 dwc3_trace(trace_dwc3_gadget,
60cfb37a 2184 "%s is an Isochronous endpoint",
72246da4
FB
2185 dep->name);
2186 return;
2187 }
2188
029d97ff 2189 dwc3_endpoint_transfer_complete(dwc, dep, event);
72246da4
FB
2190 break;
2191 case DWC3_DEPEVT_XFERINPROGRESS:
029d97ff 2192 dwc3_endpoint_transfer_complete(dwc, dep, event);
72246da4
FB
2193 break;
2194 case DWC3_DEPEVT_XFERNOTREADY:
16e78db7 2195 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
2196 dwc3_gadget_start_isoc(dwc, dep, event);
2197 } else {
6bb4fe12 2198 int active;
72246da4
FB
2199 int ret;
2200
6bb4fe12
FB
2201 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
2202
73815280 2203 dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
6bb4fe12 2204 dep->name, active ? "Transfer Active"
72246da4
FB
2205 : "Transfer Not Active");
2206
4fae2e3e 2207 ret = __dwc3_gadget_kick_transfer(dep, 0);
72246da4
FB
2208 if (!ret || ret == -EBUSY)
2209 return;
2210
ec5e795c 2211 dwc3_trace(trace_dwc3_gadget,
60cfb37a 2212 "%s: failed to kick transfers",
72246da4
FB
2213 dep->name);
2214 }
2215
879631aa
FB
2216 break;
2217 case DWC3_DEPEVT_STREAMEVT:
16e78db7 2218 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
879631aa
FB
2219 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2220 dep->name);
2221 return;
2222 }
2223
2224 switch (event->status) {
2225 case DEPEVT_STREAMEVT_FOUND:
73815280
FB
2226 dwc3_trace(trace_dwc3_gadget,
2227 "Stream %d found and started",
879631aa
FB
2228 event->parameters);
2229
2230 break;
2231 case DEPEVT_STREAMEVT_NOTFOUND:
2232 /* FALLTHROUGH */
2233 default:
ec5e795c 2234 dwc3_trace(trace_dwc3_gadget,
60cfb37a 2235 "unable to find suitable stream");
879631aa 2236 }
72246da4
FB
2237 break;
2238 case DWC3_DEPEVT_RXTXFIFOEVT:
60cfb37a 2239 dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
72246da4 2240 break;
72246da4 2241 case DWC3_DEPEVT_EPCMDCMPLT:
73815280 2242 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
72246da4
FB
2243 break;
2244 }
2245}
2246
2247static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2248{
2249 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2250 spin_unlock(&dwc->lock);
2251 dwc->gadget_driver->disconnect(&dwc->gadget);
2252 spin_lock(&dwc->lock);
2253 }
2254}
2255
bc5ba2e0
FB
2256static void dwc3_suspend_gadget(struct dwc3 *dwc)
2257{
73a30bfc 2258 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
bc5ba2e0
FB
2259 spin_unlock(&dwc->lock);
2260 dwc->gadget_driver->suspend(&dwc->gadget);
2261 spin_lock(&dwc->lock);
2262 }
2263}
2264
2265static void dwc3_resume_gadget(struct dwc3 *dwc)
2266{
73a30bfc 2267 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
bc5ba2e0
FB
2268 spin_unlock(&dwc->lock);
2269 dwc->gadget_driver->resume(&dwc->gadget);
5c7b3b02 2270 spin_lock(&dwc->lock);
8e74475b
FB
2271 }
2272}
2273
2274static void dwc3_reset_gadget(struct dwc3 *dwc)
2275{
2276 if (!dwc->gadget_driver)
2277 return;
2278
2279 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2280 spin_unlock(&dwc->lock);
2281 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
bc5ba2e0
FB
2282 spin_lock(&dwc->lock);
2283 }
2284}
2285
b992e681 2286static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
72246da4
FB
2287{
2288 struct dwc3_ep *dep;
2289 struct dwc3_gadget_ep_cmd_params params;
2290 u32 cmd;
2291 int ret;
2292
2293 dep = dwc->eps[epnum];
2294
b4996a86 2295 if (!dep->resource_index)
3daf74d7
PA
2296 return;
2297
57911504
PA
2298 /*
2299 * NOTICE: We are violating what the Databook says about the
2300 * EndTransfer command. Ideally we would _always_ wait for the
2301 * EndTransfer Command Completion IRQ, but that's causing too
2302 * much trouble synchronizing between us and gadget driver.
2303 *
2304 * We have discussed this with the IP Provider and it was
2305 * suggested to giveback all requests here, but give HW some
2306 * extra time to synchronize with the interconnect. We're using
dc93b41a 2307 * an arbitrary 100us delay for that.
57911504
PA
2308 *
2309 * Note also that a similar handling was tested by Synopsys
2310 * (thanks a lot Paul) and nothing bad has come out of it.
2311 * In short, what we're doing is:
2312 *
2313 * - Issue EndTransfer WITH CMDIOC bit set
2314 * - Wait 100us
2315 */
2316
3daf74d7 2317 cmd = DWC3_DEPCMD_ENDTRANSFER;
b992e681
PZ
2318 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2319 cmd |= DWC3_DEPCMD_CMDIOC;
b4996a86 2320 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
3daf74d7 2321 memset(&params, 0, sizeof(params));
2cd4718d 2322 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
3daf74d7 2323 WARN_ON_ONCE(ret);
b4996a86 2324 dep->resource_index = 0;
041d81f4 2325 dep->flags &= ~DWC3_EP_BUSY;
57911504 2326 udelay(100);
72246da4
FB
2327}
2328
2329static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2330{
2331 u32 epnum;
2332
2333 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2334 struct dwc3_ep *dep;
2335
2336 dep = dwc->eps[epnum];
6a1e3ef4
FB
2337 if (!dep)
2338 continue;
2339
72246da4
FB
2340 if (!(dep->flags & DWC3_EP_ENABLED))
2341 continue;
2342
624407f9 2343 dwc3_remove_requests(dwc, dep);
72246da4
FB
2344 }
2345}
2346
2347static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2348{
2349 u32 epnum;
2350
2351 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2352 struct dwc3_ep *dep;
72246da4
FB
2353 int ret;
2354
2355 dep = dwc->eps[epnum];
6a1e3ef4
FB
2356 if (!dep)
2357 continue;
72246da4
FB
2358
2359 if (!(dep->flags & DWC3_EP_STALL))
2360 continue;
2361
2362 dep->flags &= ~DWC3_EP_STALL;
2363
50c763f8 2364 ret = dwc3_send_clear_stall_ep_cmd(dep);
72246da4
FB
2365 WARN_ON_ONCE(ret);
2366 }
2367}
2368
2369static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2370{
c4430a26
FB
2371 int reg;
2372
72246da4
FB
2373 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2374 reg &= ~DWC3_DCTL_INITU1ENA;
2375 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2376
2377 reg &= ~DWC3_DCTL_INITU2ENA;
2378 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
72246da4 2379
72246da4
FB
2380 dwc3_disconnect_gadget(dwc);
2381
2382 dwc->gadget.speed = USB_SPEED_UNKNOWN;
df62df56 2383 dwc->setup_packet_pending = false;
06a374ed 2384 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
fc8bb91b
FB
2385
2386 dwc->connected = false;
72246da4
FB
2387}
2388
72246da4
FB
2389static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2390{
2391 u32 reg;
2392
fc8bb91b
FB
2393 dwc->connected = true;
2394
df62df56
FB
2395 /*
2396 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2397 * would cause a missing Disconnect Event if there's a
2398 * pending Setup Packet in the FIFO.
2399 *
2400 * There's no suggested workaround on the official Bug
2401 * report, which states that "unless the driver/application
2402 * is doing any special handling of a disconnect event,
2403 * there is no functional issue".
2404 *
2405 * Unfortunately, it turns out that we _do_ some special
2406 * handling of a disconnect event, namely complete all
2407 * pending transfers, notify gadget driver of the
2408 * disconnection, and so on.
2409 *
2410 * Our suggested workaround is to follow the Disconnect
2411 * Event steps here, instead, based on a setup_packet_pending
b5d335e5
FB
2412	 * flag. Such a flag gets set whenever we have a SETUP_PENDING
2413 * status for EP0 TRBs and gets cleared on XferComplete for the
df62df56
FB
2414 * same endpoint.
2415 *
2416 * Refers to:
2417 *
2418 * STAR#9000466709: RTL: Device : Disconnect event not
2419 * generated if setup packet pending in FIFO
2420 */
2421 if (dwc->revision < DWC3_REVISION_188A) {
2422 if (dwc->setup_packet_pending)
2423 dwc3_gadget_disconnect_interrupt(dwc);
2424 }
2425
8e74475b 2426 dwc3_reset_gadget(dwc);
72246da4
FB
2427
2428 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2429 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2430 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3b637367 2431 dwc->test_mode = false;
72246da4
FB
2432
2433 dwc3_stop_active_transfers(dwc);
2434 dwc3_clear_stall_all_ep(dwc);
2435
2436 /* Reset device address to zero */
2437 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2438 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2439 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
72246da4
FB
2440}
2441
2442static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2443{
2444 u32 reg;
2445 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2446
2447 /*
2448	 * We change the clock only at SS, but it is not clear why this
2449	 * would be wanted. Maybe it becomes part of the power saving plan.
2450 */
2451
ee5cd41c
JY
2452 if ((speed != DWC3_DSTS_SUPERSPEED) &&
2453 (speed != DWC3_DSTS_SUPERSPEED_PLUS))
72246da4
FB
2454 return;
2455
2456 /*
2457 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2458 * each time on Connect Done.
2459 */
2460 if (!usb30_clock)
2461 return;
2462
2463 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2464 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2465 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2466}
2467
72246da4
FB
2468static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2469{
72246da4
FB
2470 struct dwc3_ep *dep;
2471 int ret;
2472 u32 reg;
2473 u8 speed;
2474
72246da4
FB
2475 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2476 speed = reg & DWC3_DSTS_CONNECTSPD;
2477 dwc->speed = speed;
2478
2479 dwc3_update_ram_clk_sel(dwc, speed);
2480
2481 switch (speed) {
2da9ad76 2482 case DWC3_DSTS_SUPERSPEED_PLUS:
7580862b
JY
2483 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2484 dwc->gadget.ep0->maxpacket = 512;
2485 dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
2486 break;
2da9ad76 2487 case DWC3_DSTS_SUPERSPEED:
05870c5b
FB
2488 /*
2489 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2490 * would cause a missing USB3 Reset event.
2491 *
2492 * In such situations, we should force a USB3 Reset
2493 * event by calling our dwc3_gadget_reset_interrupt()
2494 * routine.
2495 *
2496 * Refers to:
2497 *
2498 * STAR#9000483510: RTL: SS : USB3 reset event may
2499 * not be generated always when the link enters poll
2500 */
2501 if (dwc->revision < DWC3_REVISION_190A)
2502 dwc3_gadget_reset_interrupt(dwc);
2503
72246da4
FB
2504 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2505 dwc->gadget.ep0->maxpacket = 512;
2506 dwc->gadget.speed = USB_SPEED_SUPER;
2507 break;
2da9ad76 2508 case DWC3_DSTS_HIGHSPEED:
72246da4
FB
2509 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2510 dwc->gadget.ep0->maxpacket = 64;
2511 dwc->gadget.speed = USB_SPEED_HIGH;
2512 break;
2da9ad76
JY
2513 case DWC3_DSTS_FULLSPEED2:
2514 case DWC3_DSTS_FULLSPEED1:
72246da4
FB
2515 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2516 dwc->gadget.ep0->maxpacket = 64;
2517 dwc->gadget.speed = USB_SPEED_FULL;
2518 break;
2da9ad76 2519 case DWC3_DSTS_LOWSPEED:
72246da4
FB
2520 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2521 dwc->gadget.ep0->maxpacket = 8;
2522 dwc->gadget.speed = USB_SPEED_LOW;
2523 break;
2524 }
2525
2b758350
PA
2526 /* Enable USB2 LPM Capability */
2527
ee5cd41c 2528 if ((dwc->revision > DWC3_REVISION_194A) &&
2da9ad76
JY
2529 (speed != DWC3_DSTS_SUPERSPEED) &&
2530 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
2b758350
PA
2531 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2532 reg |= DWC3_DCFG_LPM_CAP;
2533 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2534
2535 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2536 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2537
460d098c 2538 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2b758350 2539
80caf7d2
HR
2540 /*
2541		 * When dwc3 revisions >= 2.40a, the LPM Erratum is enabled and
2542		 * DCFG.LPMCap is set, the core responds with an ACK only if the
2543		 * BESL value in the LPM token is less than or equal to the LPM
2544		 * NYET threshold.
2545 */
2546 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2547 && dwc->has_lpm_erratum,
2548 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2549
2550 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2551 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2552
356363bf
FB
2553 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2554 } else {
2555 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2556 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2b758350
PA
2557 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2558 }
2559
72246da4 2560 dep = dwc->eps[0];
265b70a7
PZ
2561 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2562 false);
72246da4
FB
2563 if (ret) {
2564 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2565 return;
2566 }
2567
2568 dep = dwc->eps[1];
265b70a7
PZ
2569 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2570 false);
72246da4
FB
2571 if (ret) {
2572 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2573 return;
2574 }
2575
2576 /*
2577 * Configure PHY via GUSB3PIPECTLn if required.
2578 *
2579 * Update GTXFIFOSIZn
2580 *
2581 * In both cases reset values should be sufficient.
2582 */
2583}
2584
2585static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2586{
72246da4
FB
2587 /*
2588 * TODO take core out of low power mode when that's
2589 * implemented.
2590 */
2591
ad14d4e0
JL
2592 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2593 spin_unlock(&dwc->lock);
2594 dwc->gadget_driver->resume(&dwc->gadget);
2595 spin_lock(&dwc->lock);
2596 }
72246da4
FB
2597}
2598
2599static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2600 unsigned int evtinfo)
2601{
fae2b904 2602 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
0b0cc1cd
FB
2603 unsigned int pwropt;
2604
2605 /*
2606 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2607 * Hibernation mode enabled which would show up when device detects
2608 * host-initiated U3 exit.
2609 *
2610 * In that case, device will generate a Link State Change Interrupt
2611 * from U3 to RESUME which is only necessary if Hibernation is
2612 * configured in.
2613 *
2614 * There are no functional changes due to such spurious event and we
2615 * just need to ignore it.
2616 *
2617 * Refers to:
2618 *
2619 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2620 * operational mode
2621 */
2622 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2623 if ((dwc->revision < DWC3_REVISION_250A) &&
2624 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2625 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2626 (next == DWC3_LINK_STATE_RESUME)) {
73815280
FB
2627 dwc3_trace(trace_dwc3_gadget,
2628 "ignoring transition U3 -> Resume");
0b0cc1cd
FB
2629 return;
2630 }
2631 }
fae2b904
FB
2632
2633 /*
2634 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
2635 * on the link partner, the USB session might do multiple entry/exit
2636 * of low power states before a transfer takes place.
2637 *
2638 * Due to this problem, we might experience lower throughput. The
2639 * suggested workaround is to disable DCTL[12:9] bits if we're
2640 * transitioning from U1/U2 to U0 and enable those bits again
2641 * after a transfer completes and there are no pending transfers
2642 * on any of the enabled endpoints.
2643 *
2644 * This is the first half of that workaround.
2645 *
2646 * Refers to:
2647 *
2648 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2649 * core send LGO_Ux entering U0
2650 */
2651 if (dwc->revision < DWC3_REVISION_183A) {
2652 if (next == DWC3_LINK_STATE_U0) {
2653 u32 u1u2;
2654 u32 reg;
2655
2656 switch (dwc->link_state) {
2657 case DWC3_LINK_STATE_U1:
2658 case DWC3_LINK_STATE_U2:
2659 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2660 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2661 | DWC3_DCTL_ACCEPTU2ENA
2662 | DWC3_DCTL_INITU1ENA
2663 | DWC3_DCTL_ACCEPTU1ENA);
2664
2665 if (!dwc->u1u2)
2666 dwc->u1u2 = reg & u1u2;
2667
2668 reg &= ~u1u2;
2669
2670 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2671 break;
2672 default:
2673 /* do nothing */
2674 break;
2675 }
2676 }
2677 }
2678
bc5ba2e0
FB
2679 switch (next) {
2680 case DWC3_LINK_STATE_U1:
2681 if (dwc->speed == USB_SPEED_SUPER)
2682 dwc3_suspend_gadget(dwc);
2683 break;
2684 case DWC3_LINK_STATE_U2:
2685 case DWC3_LINK_STATE_U3:
2686 dwc3_suspend_gadget(dwc);
2687 break;
2688 case DWC3_LINK_STATE_RESUME:
2689 dwc3_resume_gadget(dwc);
2690 break;
2691 default:
2692 /* do nothing */
2693 break;
2694 }
2695
e57ebc1d 2696 dwc->link_state = next;
72246da4
FB
2697}
2698
e1dadd3b
FB
2699static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2700 unsigned int evtinfo)
2701{
2702 unsigned int is_ss = evtinfo & BIT(4);
2703
2704 /**
2705	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2706	 * has a known issue which can cause USB CV TD.9.23 to fail
2707 * randomly.
2708 *
2709 * Because of this issue, core could generate bogus hibernation
2710 * events which SW needs to ignore.
2711 *
2712 * Refers to:
2713 *
2714 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2715 * Device Fallback from SuperSpeed
2716 */
2717 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2718 return;
2719
2720 /* enter hibernation here */
2721}
2722
72246da4
FB
2723static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2724 const struct dwc3_event_devt *event)
2725{
2726 switch (event->type) {
2727 case DWC3_DEVICE_EVENT_DISCONNECT:
2728 dwc3_gadget_disconnect_interrupt(dwc);
2729 break;
2730 case DWC3_DEVICE_EVENT_RESET:
2731 dwc3_gadget_reset_interrupt(dwc);
2732 break;
2733 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2734 dwc3_gadget_conndone_interrupt(dwc);
2735 break;
2736 case DWC3_DEVICE_EVENT_WAKEUP:
2737 dwc3_gadget_wakeup_interrupt(dwc);
2738 break;
e1dadd3b
FB
2739 case DWC3_DEVICE_EVENT_HIBER_REQ:
2740 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2741 "unexpected hibernation event\n"))
2742 break;
2743
2744 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2745 break;
72246da4
FB
2746 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2747 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2748 break;
2749 case DWC3_DEVICE_EVENT_EOPF:
73815280 2750 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
72246da4
FB
2751 break;
2752 case DWC3_DEVICE_EVENT_SOF:
73815280 2753 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
72246da4
FB
2754 break;
2755 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
73815280 2756 dwc3_trace(trace_dwc3_gadget, "Erratic Error");
72246da4
FB
2757 break;
2758 case DWC3_DEVICE_EVENT_CMD_CMPL:
73815280 2759 dwc3_trace(trace_dwc3_gadget, "Command Complete");
72246da4
FB
2760 break;
2761 case DWC3_DEVICE_EVENT_OVERFLOW:
73815280 2762 dwc3_trace(trace_dwc3_gadget, "Overflow");
72246da4
FB
2763 break;
2764 default:
e9f2aa87 2765 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
72246da4
FB
2766 }
2767}
2768
2769static void dwc3_process_event_entry(struct dwc3 *dwc,
2770 const union dwc3_event *event)
2771{
2c4cbe6e
FB
2772 trace_dwc3_event(event->raw);
2773
72246da4
FB
2774 /* Endpoint IRQ, handle it and return early */
2775 if (event->type.is_devspec == 0) {
2776 /* depevt */
2777 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2778 }
2779
2780 switch (event->type.type) {
2781 case DWC3_EVENT_TYPE_DEV:
2782 dwc3_gadget_interrupt(dwc, &event->devt);
2783 break;
2784 /* REVISIT what to do with Carkit and I2C events ? */
2785 default:
2786 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2787 }
2788}
2789
dea520a4 2790static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
b15a762f 2791{
dea520a4 2792 struct dwc3 *dwc = evt->dwc;
b15a762f 2793 irqreturn_t ret = IRQ_NONE;
f42f2447 2794 int left;
e8adfc30 2795 u32 reg;
b15a762f 2796
f42f2447 2797 left = evt->count;
b15a762f 2798
f42f2447
FB
2799 if (!(evt->flags & DWC3_EVENT_PENDING))
2800 return IRQ_NONE;
b15a762f 2801
f42f2447
FB
2802 while (left > 0) {
2803 union dwc3_event event;
b15a762f 2804
f42f2447 2805 event.raw = *(u32 *) (evt->buf + evt->lpos);
b15a762f 2806
f42f2447 2807 dwc3_process_event_entry(dwc, &event);
b15a762f 2808
f42f2447
FB
2809 /*
2810		 * FIXME we wrap around correctly to the next entry as
2811		 * almost all entries are 4 bytes in size. There is one
2812		 * entry which is 12 bytes: a regular entry followed by
2813		 * 8 bytes of data. At the moment it is not clear how such
2814		 * an entry is laid out when it lands next to the buffer
2815		 * boundary, so we will worry about that once we try to
2816		 * handle it.
2817 */
2818 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2819 left -= 4;
b15a762f 2820
660e9bde 2821 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
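		/*
		 * The write above hands the consumed bytes back to the
		 * controller via GEVNTCOUNT so that this portion of the
		 * event buffer can be reused for new events.
		 */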
f42f2447 2822 }
b15a762f 2823
f42f2447
FB
2824 evt->count = 0;
2825 evt->flags &= ~DWC3_EVENT_PENDING;
2826 ret = IRQ_HANDLED;
b15a762f 2827
f42f2447 2828 /* Unmask interrupt */
660e9bde 2829 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
f42f2447 2830 reg &= ~DWC3_GEVNTSIZ_INTMASK;
660e9bde 2831 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
b15a762f 2832
f42f2447
FB
2833 return ret;
2834}
e8adfc30 2835
dea520a4 2836static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
f42f2447 2837{
dea520a4
FB
2838 struct dwc3_event_buffer *evt = _evt;
2839 struct dwc3 *dwc = evt->dwc;
e5f68b4a 2840 unsigned long flags;
f42f2447 2841 irqreturn_t ret = IRQ_NONE;
f42f2447 2842
e5f68b4a 2843 spin_lock_irqsave(&dwc->lock, flags);
dea520a4 2844 ret = dwc3_process_event_buf(evt);
e5f68b4a 2845 spin_unlock_irqrestore(&dwc->lock, flags);
b15a762f
FB
2846
2847 return ret;
2848}
2849
dea520a4 2850static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
72246da4 2851{
dea520a4 2852 struct dwc3 *dwc = evt->dwc;
72246da4 2853 u32 count;
e8adfc30 2854 u32 reg;
72246da4 2855
fc8bb91b
FB
2856 if (pm_runtime_suspended(dwc->dev)) {
2857 pm_runtime_get(dwc->dev);
2858 disable_irq_nosync(dwc->irq_gadget);
2859 dwc->pending_events = true;
2860 return IRQ_HANDLED;
2861 }
2862
660e9bde 2863 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
72246da4
FB
2864 count &= DWC3_GEVNTCOUNT_MASK;
2865 if (!count)
2866 return IRQ_NONE;
2867
b15a762f
FB
2868 evt->count = count;
2869 evt->flags |= DWC3_EVENT_PENDING;
72246da4 2870
e8adfc30 2871 /* Mask interrupt */
660e9bde 2872 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
e8adfc30 2873 reg |= DWC3_GEVNTSIZ_INTMASK;
660e9bde 2874 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
e8adfc30 2875
b15a762f 2876 return IRQ_WAKE_THREAD;
72246da4
FB
2877}
2878
dea520a4 2879static irqreturn_t dwc3_interrupt(int irq, void *_evt)
72246da4 2880{
dea520a4 2881 struct dwc3_event_buffer *evt = _evt;
72246da4 2882
dea520a4 2883 return dwc3_check_event_buf(evt);
72246da4
FB
2884}
2885
2886/**
2887 * dwc3_gadget_init - Initializes gadget related registers
1d046793 2888 * @dwc: pointer to our controller context structure
72246da4
FB
2889 *
2890 * Returns 0 on success otherwise negative errno.
2891 */
41ac7b3a 2892int dwc3_gadget_init(struct dwc3 *dwc)
72246da4 2893{
72246da4 2894 int ret;
72246da4
FB
2895
2896 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2897 &dwc->ctrl_req_addr, GFP_KERNEL);
2898 if (!dwc->ctrl_req) {
2899 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2900 ret = -ENOMEM;
2901 goto err0;
2902 }
2903
2abd9d5f 2904 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
72246da4
FB
2905 &dwc->ep0_trb_addr, GFP_KERNEL);
2906 if (!dwc->ep0_trb) {
2907 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2908 ret = -ENOMEM;
2909 goto err1;
2910 }
2911
3ef35faf 2912 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
72246da4 2913 if (!dwc->setup_buf) {
72246da4
FB
2914 ret = -ENOMEM;
2915 goto err2;
2916 }
2917
5812b1c2 2918 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
3ef35faf
FB
2919 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2920 GFP_KERNEL);
5812b1c2
FB
2921 if (!dwc->ep0_bounce) {
2922 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2923 ret = -ENOMEM;
2924 goto err3;
2925 }
2926
04c03d10
FB
2927 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
2928 if (!dwc->zlp_buf) {
2929 ret = -ENOMEM;
2930 goto err4;
2931 }
2932
72246da4 2933 dwc->gadget.ops = &dwc3_gadget_ops;
72246da4 2934 dwc->gadget.speed = USB_SPEED_UNKNOWN;
eeb720fb 2935 dwc->gadget.sg_supported = true;
72246da4 2936 dwc->gadget.name = "dwc3-gadget";
6a4290cc 2937 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
72246da4 2938
b9e51b2b
BM
2939 /*
2940 * FIXME We might be setting max_speed to <SUPER, however versions
2941 * <2.20a of dwc3 have an issue with metastability (documented
2942 * elsewhere in this driver) which tells us we can't set max speed to
2943 * anything lower than SUPER.
2944 *
2945 * Because gadget.max_speed is only used by composite.c and function
2946 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
2947	 * to happen so we avoid sending a SuperSpeed Capability descriptor
2948	 * together with our BOS descriptor, as that could confuse the host
2949	 * into thinking we can handle super speed.
2950 *
2951 * Note that, in fact, we won't even support GetBOS requests when speed
2952 * is less than super speed because we don't have means, yet, to tell
2953 * composite.c that we are USB 2.0 + LPM ECN.
2954 */
2955 if (dwc->revision < DWC3_REVISION_220A)
2956 dwc3_trace(trace_dwc3_gadget,
60cfb37a 2957 "Changing max_speed on rev %08x",
b9e51b2b
BM
2958 dwc->revision);
2959
2960 dwc->gadget.max_speed = dwc->maximum_speed;
2961
a4b9d94b
DC
2962 /*
2963 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2964 * on ep out.
2965 */
2966 dwc->gadget.quirk_ep_out_aligned_size = true;
2967
72246da4
FB
2968 /*
2969 * REVISIT: Here we should clear all pending IRQs to be
2970 * sure we're starting from a well known location.
2971 */
2972
2973 ret = dwc3_gadget_init_endpoints(dwc);
2974 if (ret)
04c03d10 2975 goto err5;
72246da4 2976
72246da4
FB
2977 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2978 if (ret) {
2979 dev_err(dwc->dev, "failed to register udc\n");
04c03d10 2980 goto err5;
72246da4
FB
2981 }
2982
2983 return 0;
2984
04c03d10
FB
2985err5:
2986 kfree(dwc->zlp_buf);
2987
5812b1c2 2988err4:
e1f80467 2989 dwc3_gadget_free_endpoints(dwc);
3ef35faf
FB
2990 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2991 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2992
72246da4 2993err3:
0fc9a1be 2994 kfree(dwc->setup_buf);
72246da4
FB
2995
2996err2:
2997	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2998 dwc->ep0_trb, dwc->ep0_trb_addr);
2999
3000err1:
3001 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3002 dwc->ctrl_req, dwc->ctrl_req_addr);
3003
3004err0:
3005 return ret;
3006}
3007
7415f17c
FB
3008/* -------------------------------------------------------------------------- */
3009
72246da4
FB
3010void dwc3_gadget_exit(struct dwc3 *dwc)
3011{
72246da4 3012 usb_del_gadget_udc(&dwc->gadget);
72246da4 3013
72246da4
FB
3014 dwc3_gadget_free_endpoints(dwc);
3015
3ef35faf
FB
3016 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3017 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 3018
0fc9a1be 3019 kfree(dwc->setup_buf);
04c03d10 3020 kfree(dwc->zlp_buf);
72246da4
FB
3021
3022	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3023 dwc->ep0_trb, dwc->ep0_trb_addr);
3024
3025 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3026 dwc->ctrl_req, dwc->ctrl_req_addr);
72246da4 3027}
7415f17c 3028
0b0231aa 3029int dwc3_gadget_suspend(struct dwc3 *dwc)
7415f17c 3030{
9f8a67b6
FB
3031 int ret;
3032
9772b47a
RQ
3033 if (!dwc->gadget_driver)
3034 return 0;
3035
9f8a67b6
FB
3036 ret = dwc3_gadget_run_stop(dwc, false, false);
3037 if (ret < 0)
3038 return ret;
7415f17c 3039
9f8a67b6
FB
3040 dwc3_disconnect_gadget(dwc);
3041 __dwc3_gadget_stop(dwc);
7415f17c
FB
3042
3043 return 0;
3044}
3045
3046int dwc3_gadget_resume(struct dwc3 *dwc)
3047{
7415f17c
FB
3048 int ret;
3049
9772b47a
RQ
3050 if (!dwc->gadget_driver)
3051 return 0;
3052
9f8a67b6
FB
3053 ret = __dwc3_gadget_start(dwc);
3054 if (ret < 0)
7415f17c
FB
3055 goto err0;
3056
9f8a67b6
FB
3057 ret = dwc3_gadget_run_stop(dwc, true, false);
3058 if (ret < 0)
7415f17c
FB
3059 goto err1;
3060
7415f17c
FB
3061 return 0;
3062
3063err1:
9f8a67b6 3064 __dwc3_gadget_stop(dwc);
7415f17c
FB
3065
3066err0:
3067 return ret;
3068}
fc8bb91b
FB
3069
3070void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
3071{
3072 if (dwc->pending_events) {
3073 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
3074 dwc->pending_events = false;
3075 enable_irq(dwc->irq_gadget);
3076 }
3077}