1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37
38 /**
39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40 * @dwc: pointer to our context structure
41 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
42 *
43 * Caller should take care of locking. This function will
44 * return 0 on success or -EINVAL if an invalid Test Selector
45 * is passed
46 */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
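/*
 * The test selector lives in the DCTL.TSTCTRL field, which starts at
 * bit 1 (see DWC3_DCTL_TSTCTRL_MASK cleared above), hence the shift.
 */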
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69 }
70
71 /**
72 * dwc3_gadget_get_link_state - Gets current state of USB Link
73 * @dwc: pointer to our context structure
74 *
75 * Caller should take care of locking. This function will
76 * return the current link state.
77 */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85 }
86
87 /**
88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89 * @dwc: pointer to our context structure
90 * @state: the state to put link into
91 *
92 * Caller should take care of locking. This function will
93 * return 0 on success or -ETIMEDOUT.
94 */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97 int retries = 10000;
98 u32 reg;
99
100 /*
101 * Wait until device controller is ready. Only applies to 1.94a and
102 * later RTL.
103 */
104 if (dwc->revision >= DWC3_REVISION_194A) {
105 while (--retries) {
106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 if (reg & DWC3_DSTS_DCNRD)
108 udelay(5);
109 else
110 break;
111 }
112
113 if (retries <= 0)
114 return -ETIMEDOUT;
115 }
116
117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120 /* set requested state */
121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
124 /*
125 * The following code is racy when called from dwc3_gadget_wakeup,
126 * and is not needed, at least on newer versions
127 */
128 if (dwc->revision >= DWC3_REVISION_194A)
129 return 0;
130
131 /* wait for a change in DSTS */
132 retries = 10000;
133 while (--retries) {
134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
136 if (DWC3_DSTS_USBLNKST(reg) == state)
137 return 0;
138
139 udelay(5);
140 }
141
142 dwc3_trace(trace_dwc3_gadget,
143 "link state change request timed out");
144
145 return -ETIMEDOUT;
146 }
147
148 /**
149 * dwc3_ep_inc_trb() - Increment a TRB index.
150 * @index: Pointer to the TRB index to increment.
151 *
152 * The index should never point to the link TRB. After incrementing,
153 * if it points to the link TRB, wrap around to the beginning. The
154 * link TRB is always at the last TRB entry.
155 */
156 static void dwc3_ep_inc_trb(u8 *index)
157 {
158 (*index)++;
159 if (*index == (DWC3_TRB_NUM - 1))
160 *index = 0;
161 }
162
163 static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
164 {
165 dwc3_ep_inc_trb(&dep->trb_enqueue);
166 }
167
168 static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
169 {
170 dwc3_ep_inc_trb(&dep->trb_dequeue);
171 }
172
173 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
174 int status)
175 {
176 struct dwc3 *dwc = dep->dwc;
177 int i;
178
179 if (req->started) {
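/*
 * Advance the dequeue pointer past every TRB this request consumed.
 * A linear (non-sg) request has num_mapped_sgs == 0, but the do/while
 * still advances once for its single TRB.
 */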
180 i = 0;
181 do {
182 dwc3_ep_inc_deq(dep);
183 } while (++i < req->request.num_mapped_sgs);
184 req->started = false;
185 }
186 list_del(&req->list);
187 req->trb = NULL;
188
189 if (req->request.status == -EINPROGRESS)
190 req->request.status = status;
191
192 if (dwc->ep0_bounced && dep->number == 0)
193 dwc->ep0_bounced = false;
194 else
195 usb_gadget_unmap_request(&dwc->gadget, &req->request,
196 req->direction);
197
198 trace_dwc3_gadget_giveback(req);
199
200 spin_unlock(&dwc->lock);
201 usb_gadget_giveback_request(&dep->endpoint, &req->request);
202 spin_lock(&dwc->lock);
203
204 if (dep->number > 1)
205 pm_runtime_put(dwc->dev);
206 }
207
208 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
209 {
210 u32 timeout = 500;
211 int status = 0;
212 int ret = 0;
213 u32 reg;
214
215 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
216 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
217
218 do {
219 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
220 if (!(reg & DWC3_DGCMD_CMDACT)) {
221 status = DWC3_DGCMD_STATUS(reg);
222 if (status)
223 ret = -EINVAL;
224 break;
225 }
226 } while (--timeout);
227
228 if (!timeout) {
229 ret = -ETIMEDOUT;
230 status = -ETIMEDOUT;
231 }
232
233 trace_dwc3_gadget_generic_cmd(cmd, param, status);
234
235 return ret;
236 }
237
238 static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
239
240 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
241 struct dwc3_gadget_ep_cmd_params *params)
242 {
243 struct dwc3 *dwc = dep->dwc;
244 u32 timeout = 500;
245 u32 reg;
246
247 int cmd_status = 0;
248 int susphy = false;
249 int ret = -EINVAL;
250
251 /*
252 * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
253 * we're issuing an endpoint command, we must check if
254 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
255 *
256 * We will also set SUSPHY bit to what it was before returning as stated
257 * by the same section on Synopsys databook.
258 */
259 if (dwc->gadget.speed <= USB_SPEED_HIGH) {
260 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
261 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
262 susphy = true;
263 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
264 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
265 }
266 }
267
268 if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
269 int needs_wakeup;
270
271 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
272 dwc->link_state == DWC3_LINK_STATE_U2 ||
273 dwc->link_state == DWC3_LINK_STATE_U3);
274
275 if (unlikely(needs_wakeup)) {
276 ret = __dwc3_gadget_wakeup(dwc);
277 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
278 ret);
279 }
280 }
281
282 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
283 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
284 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
285
286 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
287 do {
288 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
289 if (!(reg & DWC3_DEPCMD_CMDACT)) {
290 cmd_status = DWC3_DEPCMD_STATUS(reg);
291
292 switch (cmd_status) {
293 case 0:
294 ret = 0;
295 break;
296 case DEPEVT_TRANSFER_NO_RESOURCE:
297 ret = -EINVAL;
298 break;
299 case DEPEVT_TRANSFER_BUS_EXPIRY:
300 /*
301 * SW issues START TRANSFER command to
302 * isochronous ep with future frame interval. If
303 * future interval time has already passed when
304 * core receives the command, it will respond
305 * with an error status of 'Bus Expiry'.
306 *
307 * Instead of always returning -EINVAL, let's
308 * give a hint to the gadget driver that this is
309 * the case by returning -EAGAIN.
310 */
311 ret = -EAGAIN;
312 break;
313 default:
314 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
315 }
316
317 break;
318 }
319 } while (--timeout);
320
321 if (timeout == 0) {
322 ret = -ETIMEDOUT;
323 cmd_status = -ETIMEDOUT;
324 }
325
326 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
327
328 if (unlikely(susphy)) {
329 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
330 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
331 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
332 }
333
334 return ret;
335 }
336
337 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
338 {
339 struct dwc3 *dwc = dep->dwc;
340 struct dwc3_gadget_ep_cmd_params params;
341 u32 cmd = DWC3_DEPCMD_CLEARSTALL;
342
343 /*
344 * As of core revision 2.60a the recommended programming model
345 * is to set the ClearPendIN bit when issuing a Clear Stall EP
346 * command for IN endpoints. This is to prevent an issue where
347 * some (non-compliant) hosts may not send ACK TPs for pending
348 * IN transfers due to a mishandled error condition. Synopsys
349 * STAR 9000614252.
350 */
351 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
352 cmd |= DWC3_DEPCMD_CLEARPENDIN;
353
354 memset(&params, 0, sizeof(params));
355
356 return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
357 }
358
359 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
360 struct dwc3_trb *trb)
361 {
362 u32 offset = (char *) trb - (char *) dep->trb_pool;
363
364 return dep->trb_pool_dma + offset;
365 }
366
367 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
368 {
369 struct dwc3 *dwc = dep->dwc;
370
371 if (dep->trb_pool)
372 return 0;
373
374 dep->trb_pool = dma_alloc_coherent(dwc->dev,
375 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
376 &dep->trb_pool_dma, GFP_KERNEL);
377 if (!dep->trb_pool) {
378 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
379 dep->name);
380 return -ENOMEM;
381 }
382
383 return 0;
384 }
385
386 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
387 {
388 struct dwc3 *dwc = dep->dwc;
389
390 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
391 dep->trb_pool, dep->trb_pool_dma);
392
393 dep->trb_pool = NULL;
394 dep->trb_pool_dma = 0;
395 }
396
397 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
398
399 /**
400 * dwc3_gadget_start_config - Configure EP resources
401 * @dwc: pointer to our controller context structure
402 * @dep: endpoint that is being enabled
403 *
404 * The assignment of transfer resources cannot perfectly follow the
405 * data book due to the fact that the controller driver does not have
406 * all knowledge of the configuration in advance. It is given this
407 * information piecemeal by the composite gadget framework after every
408 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
409 * programming model in this scenario can cause errors, for two
410 * reasons:
411 *
412 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
413 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
414 * multiple interfaces.
415 *
416 * 2) The databook does not mention doing more DEPXFERCFG for new
417 * endpoint on alt setting (8.1.6).
418 *
419 * The following simplified method is used instead:
420 *
421 * All hardware endpoints can be assigned a transfer resource and this
422 * setting will stay persistent until either a core reset or
423 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
424 * do DEPXFERCFG for every hardware endpoint as well. We are
425 * guaranteed that there are as many transfer resources as endpoints.
426 *
427 * This function is called for each endpoint when it is being enabled
428 * but is triggered only when called for EP0-out, which always happens
429 * first, and which should only happen in one of the above conditions.
430 */
431 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
432 {
433 struct dwc3_gadget_ep_cmd_params params;
434 u32 cmd;
435 int i;
436 int ret;
437
438 if (dep->number)
439 return 0;
440
441 memset(&params, 0x00, sizeof(params));
442 cmd = DWC3_DEPCMD_DEPSTARTCFG;
443
444 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
445 if (ret)
446 return ret;
447
448 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
449 struct dwc3_ep *dep = dwc->eps[i];
450
451 if (!dep)
452 continue;
453
454 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
455 if (ret)
456 return ret;
457 }
458
459 return 0;
460 }
461
462 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
463 const struct usb_endpoint_descriptor *desc,
464 const struct usb_ss_ep_comp_descriptor *comp_desc,
465 bool modify, bool restore)
466 {
467 struct dwc3_gadget_ep_cmd_params params;
468
469 if (dev_WARN_ONCE(dwc->dev, modify && restore,
470 "Can't modify and restore\n"))
471 return -EINVAL;
472
473 memset(&params, 0x00, sizeof(params));
474
475 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
476 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
477
478 /* Burst size is only needed in SuperSpeed mode */
479 if (dwc->gadget.speed >= USB_SPEED_SUPER) {
480 u32 burst = dep->endpoint.maxburst;
481 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
482 }
483
484 if (modify) {
485 params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
486 } else if (restore) {
487 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
488 params.param2 |= dep->saved_state;
489 } else {
490 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
491 }
492
493 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
494
495 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
496 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
497
498 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
499 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
500 | DWC3_DEPCFG_STREAM_EVENT_EN;
501 dep->stream_capable = true;
502 }
503
504 if (!usb_endpoint_xfer_control(desc))
505 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
506
507 /*
508 * We are doing 1:1 mapping for endpoints, meaning
509 * Physical Endpoint 2 maps to Logical Endpoint 2 and
510 * so on. We consider the direction bit as part of the physical
511 * endpoint number. So USB endpoint 0x81 is 0x03.
512 */
513 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
514
515 /*
516 * We must use the lower 16 TX FIFOs even though
517 * HW might have more
518 */
519 if (dep->direction)
520 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
521
522 if (desc->bInterval) {
523 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
524 dep->interval = 1 << (desc->bInterval - 1);
525 }
526
527 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
528 }
529
530 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
531 {
532 struct dwc3_gadget_ep_cmd_params params;
533
534 memset(&params, 0x00, sizeof(params));
535
536 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
537
538 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
539 &params);
540 }
541
542 /**
543 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
544 * @dep: endpoint to be initialized
545 * @desc: USB Endpoint Descriptor
546 *
547 * Caller should take care of locking
548 */
549 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
550 const struct usb_endpoint_descriptor *desc,
551 const struct usb_ss_ep_comp_descriptor *comp_desc,
552 bool modify, bool restore)
553 {
554 struct dwc3 *dwc = dep->dwc;
555 u32 reg;
556 int ret;
557
558 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
559
560 if (!(dep->flags & DWC3_EP_ENABLED)) {
561 ret = dwc3_gadget_start_config(dwc, dep);
562 if (ret)
563 return ret;
564 }
565
566 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
567 restore);
568 if (ret)
569 return ret;
570
571 if (!(dep->flags & DWC3_EP_ENABLED)) {
572 struct dwc3_trb *trb_st_hw;
573 struct dwc3_trb *trb_link;
574
575 dep->endpoint.desc = desc;
576 dep->comp_desc = comp_desc;
577 dep->type = usb_endpoint_type(desc);
578 dep->flags |= DWC3_EP_ENABLED;
579
580 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
581 reg |= DWC3_DALEPENA_EP(dep->number);
582 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
583
584 if (usb_endpoint_xfer_control(desc))
585 return 0;
586
587 /* Initialize the TRB ring */
588 dep->trb_dequeue = 0;
589 dep->trb_enqueue = 0;
590 memset(dep->trb_pool, 0,
591 sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
592
593 /* Link TRB. The HWO bit is never reset */
594 trb_st_hw = &dep->trb_pool[0];
595
596 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
597 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
598 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
599 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
600 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
601 }
602
603 return 0;
604 }
605
606 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
607 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
608 {
609 struct dwc3_request *req;
610
611 dwc3_stop_active_transfer(dwc, dep->number, true);
612
613 /* giveback all requests to the gadget driver */
614 while (!list_empty(&dep->started_list)) {
615 req = next_request(&dep->started_list);
616
617 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
618 }
619
620 while (!list_empty(&dep->pending_list)) {
621 req = next_request(&dep->pending_list);
622
623 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
624 }
625 }
626
627 /**
628 * __dwc3_gadget_ep_disable - Disables a HW endpoint
629 * @dep: the endpoint to disable
630 *
631 * This function also removes requests which are currently processed by the
632 * hardware and those which are not yet scheduled.
633 * Caller should take care of locking.
634 */
635 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
636 {
637 struct dwc3 *dwc = dep->dwc;
638 u32 reg;
639
640 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
641
642 dwc3_remove_requests(dwc, dep);
643
644 /* make sure HW endpoint isn't stalled */
645 if (dep->flags & DWC3_EP_STALL)
646 __dwc3_gadget_ep_set_halt(dep, 0, false);
647
648 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
649 reg &= ~DWC3_DALEPENA_EP(dep->number);
650 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
651
652 dep->stream_capable = false;
653 dep->endpoint.desc = NULL;
654 dep->comp_desc = NULL;
655 dep->type = 0;
656 dep->flags = 0;
657
658 return 0;
659 }
660
661 /* -------------------------------------------------------------------------- */
662
663 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
664 const struct usb_endpoint_descriptor *desc)
665 {
666 return -EINVAL;
667 }
668
669 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
670 {
671 return -EINVAL;
672 }
673
674 /* -------------------------------------------------------------------------- */
675
676 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
677 const struct usb_endpoint_descriptor *desc)
678 {
679 struct dwc3_ep *dep;
680 struct dwc3 *dwc;
681 unsigned long flags;
682 int ret;
683
684 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
685 pr_debug("dwc3: invalid parameters\n");
686 return -EINVAL;
687 }
688
689 if (!desc->wMaxPacketSize) {
690 pr_debug("dwc3: missing wMaxPacketSize\n");
691 return -EINVAL;
692 }
693
694 dep = to_dwc3_ep(ep);
695 dwc = dep->dwc;
696
697 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
698 "%s is already enabled\n",
699 dep->name))
700 return 0;
701
702 spin_lock_irqsave(&dwc->lock, flags);
703 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
704 spin_unlock_irqrestore(&dwc->lock, flags);
705
706 return ret;
707 }
708
709 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
710 {
711 struct dwc3_ep *dep;
712 struct dwc3 *dwc;
713 unsigned long flags;
714 int ret;
715
716 if (!ep) {
717 pr_debug("dwc3: invalid parameters\n");
718 return -EINVAL;
719 }
720
721 dep = to_dwc3_ep(ep);
722 dwc = dep->dwc;
723
724 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
725 "%s is already disabled\n",
726 dep->name))
727 return 0;
728
729 spin_lock_irqsave(&dwc->lock, flags);
730 ret = __dwc3_gadget_ep_disable(dep);
731 spin_unlock_irqrestore(&dwc->lock, flags);
732
733 return ret;
734 }
735
736 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
737 gfp_t gfp_flags)
738 {
739 struct dwc3_request *req;
740 struct dwc3_ep *dep = to_dwc3_ep(ep);
741
742 req = kzalloc(sizeof(*req), gfp_flags);
743 if (!req)
744 return NULL;
745
746 req->epnum = dep->number;
747 req->dep = dep;
748
749 dep->allocated_requests++;
750
751 trace_dwc3_alloc_request(req);
752
753 return &req->request;
754 }
755
756 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
757 struct usb_request *request)
758 {
759 struct dwc3_request *req = to_dwc3_request(request);
760 struct dwc3_ep *dep = to_dwc3_ep(ep);
761
762 dep->allocated_requests--;
763 trace_dwc3_free_request(req);
764 kfree(req);
765 }
766
767 /**
768 * dwc3_prepare_one_trb - setup one TRB from one request
769 * @dep: endpoint for which this request is prepared
770 * @req: dwc3_request pointer
771 */
772 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
773 struct dwc3_request *req, dma_addr_t dma,
774 unsigned length, unsigned last, unsigned chain, unsigned node)
775 {
776 struct dwc3_trb *trb;
777
778 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
779 dep->name, req, (unsigned long long) dma,
780 length, last ? " last" : "",
781 chain ? " chain" : "");
782
783
784 trb = &dep->trb_pool[dep->trb_enqueue];
785
786 if (!req->trb) {
787 dwc3_gadget_move_started_request(req);
788 req->trb = trb;
789 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
790 req->first_trb_index = dep->trb_enqueue;
791 }
792
793 dwc3_ep_inc_enq(dep);
794
795 trb->size = DWC3_TRB_SIZE_LENGTH(length);
796 trb->bpl = lower_32_bits(dma);
797 trb->bph = upper_32_bits(dma);
798
799 switch (usb_endpoint_type(dep->endpoint.desc)) {
800 case USB_ENDPOINT_XFER_CONTROL:
801 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
802 break;
803
804 case USB_ENDPOINT_XFER_ISOC:
805 if (!node)
806 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
807 else
808 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
809
810 /* always enable Interrupt on Missed ISOC */
811 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
812 break;
813
814 case USB_ENDPOINT_XFER_BULK:
815 case USB_ENDPOINT_XFER_INT:
816 trb->ctrl = DWC3_TRBCTL_NORMAL;
817 break;
818 default:
819 /*
820 * This is only possible with faulty memory because we
821 * checked it already :)
822 */
823 BUG();
824 }
825
826 /* always enable Continue on Short Packet */
827 trb->ctrl |= DWC3_TRB_CTRL_CSP;
828
829 if (!req->request.no_interrupt && !chain)
830 trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
831
832 if (last)
833 trb->ctrl |= DWC3_TRB_CTRL_LST;
834
835 if (chain)
836 trb->ctrl |= DWC3_TRB_CTRL_CHN;
837
838 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
839 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
840
841 trb->ctrl |= DWC3_TRB_CTRL_HWO;
842
843 dep->queued_requests++;
844
845 trace_dwc3_prepare_trb(dep, trb);
846 }
847
848 /**
849 * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
850 * @dep: The endpoint with the TRB ring
851 * @index: The index of the current TRB in the ring
852 *
853 * Returns the TRB prior to the one pointed to by the index. If the
854 * index is 0, we will wrap backwards, skip the link TRB, and return
855 * the one just before that.
856 */
857 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
858 {
859 if (!index)
860 index = DWC3_TRB_NUM - 2;
861 else
862 index--;
863
864 return &dep->trb_pool[index];
865 }
866
867 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
868 {
869 struct dwc3_trb *tmp;
870 u8 trbs_left;
871
872 /*
873 * If enqueue & dequeue are equal then the ring is either full or empty.
874 *
875 * One way to know for sure is if the TRB right before us has HWO bit
876 * set or not. If it has, then we're definitely full and can't fit any
877 * more transfers in our ring.
878 */
879 if (dep->trb_enqueue == dep->trb_dequeue) {
880 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
881 if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
882 return 0;
883
884 return DWC3_TRB_NUM - 1;
885 }
886
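/*
 * Otherwise the number of free TRBs is the distance from enqueue to
 * dequeue, modulo the (power-of-two) ring size. When that distance
 * wraps past the end of the ring it also counts the slot reserved for
 * the link TRB, which can never carry a transfer, so drop one from
 * the result in that case.
 */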
887 trbs_left = dep->trb_dequeue - dep->trb_enqueue;
888 trbs_left &= (DWC3_TRB_NUM - 1);
889
890 if (dep->trb_dequeue < dep->trb_enqueue)
891 trbs_left--;
892
893 return trbs_left;
894 }
895
896 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
897 struct dwc3_request *req, unsigned int trbs_left,
898 unsigned int more_coming)
899 {
900 struct usb_request *request = &req->request;
901 struct scatterlist *sg = request->sg;
902 struct scatterlist *s;
903 unsigned int last = false;
904 unsigned int length;
905 dma_addr_t dma;
906 int i;
907
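/*
 * Each scatterlist entry gets its own TRB. All but the final TRB of
 * the request are chained together; the final TRB is only flagged
 * "last" for interrupt endpoints, when no further requests are
 * pending, or when we run out of free TRBs, so the transfer otherwise
 * stays open for subsequent requests.
 */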
908 for_each_sg(sg, s, request->num_mapped_sgs, i) {
909 unsigned chain = true;
910
911 length = sg_dma_len(s);
912 dma = sg_dma_address(s);
913
914 if (sg_is_last(s)) {
915 if (usb_endpoint_xfer_int(dep->endpoint.desc) ||
916 !more_coming)
917 last = true;
918
919 chain = false;
920 }
921
922 if (!trbs_left--)
923 last = true;
924
925 if (last)
926 chain = false;
927
928 dwc3_prepare_one_trb(dep, req, dma, length,
929 last, chain, i);
930
931 if (last)
932 break;
933 }
934 }
935
936 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
937 struct dwc3_request *req, unsigned int trbs_left,
938 unsigned int more_coming)
939 {
940 unsigned int last = false;
941 unsigned int length;
942 dma_addr_t dma;
943
944 dma = req->request.dma;
945 length = req->request.length;
946
947 if (!trbs_left)
948 last = true;
949
950 /* Is this the last request? */
951 if (usb_endpoint_xfer_int(dep->endpoint.desc) || !more_coming)
952 last = true;
953
954 dwc3_prepare_one_trb(dep, req, dma, length,
955 last, false, 0);
956 }
957
958 /*
959 * dwc3_prepare_trbs - setup TRBs from requests
960 * @dep: endpoint for which requests are being prepared
961 *
962 * The function goes through the requests list and sets up TRBs for the
963 * transfers. The function returns once there are no more TRBs available or
964 * it runs out of requests.
965 */
966 static void dwc3_prepare_trbs(struct dwc3_ep *dep)
967 {
968 struct dwc3_request *req, *n;
969 unsigned int more_coming;
970 u32 trbs_left;
971
972 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
973
974 trbs_left = dwc3_calc_trbs_left(dep);
975 if (!trbs_left)
976 return;
977
978 more_coming = dep->allocated_requests - dep->queued_requests;
979
980 list_for_each_entry_safe(req, n, &dep->pending_list, list) {
981 if (req->request.num_mapped_sgs > 0)
982 dwc3_prepare_one_trb_sg(dep, req, trbs_left--,
983 more_coming);
984 else
985 dwc3_prepare_one_trb_linear(dep, req, trbs_left--,
986 more_coming);
987
988 if (!trbs_left)
989 return;
990 }
991 }
992
993 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
994 {
995 struct dwc3_gadget_ep_cmd_params params;
996 struct dwc3_request *req;
997 struct dwc3 *dwc = dep->dwc;
998 int starting;
999 int ret;
1000 u32 cmd;
1001
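/*
 * If the endpoint isn't busy yet we issue START TRANSFER below;
 * otherwise the hardware is already processing a transfer and we only
 * send UPDATE TRANSFER against the existing transfer resource.
 */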
1002 starting = !(dep->flags & DWC3_EP_BUSY);
1003
1004 dwc3_prepare_trbs(dep);
1005 req = next_request(&dep->started_list);
1006 if (!req) {
1007 dep->flags |= DWC3_EP_PENDING_REQUEST;
1008 return 0;
1009 }
1010
1011 memset(&params, 0, sizeof(params));
1012
1013 if (starting) {
1014 params.param0 = upper_32_bits(req->trb_dma);
1015 params.param1 = lower_32_bits(req->trb_dma);
1016 cmd = DWC3_DEPCMD_STARTTRANSFER |
1017 DWC3_DEPCMD_PARAM(cmd_param);
1018 } else {
1019 cmd = DWC3_DEPCMD_UPDATETRANSFER |
1020 DWC3_DEPCMD_PARAM(dep->resource_index);
1021 }
1022
1023 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1024 if (ret < 0) {
1025 /*
1026 * FIXME we need to iterate over the list of requests
1027 * here and stop, unmap, free and del each of the linked
1028 * requests instead of what we do now.
1029 */
1030 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1031 req->direction);
1032 list_del(&req->list);
1033 return ret;
1034 }
1035
1036 dep->flags |= DWC3_EP_BUSY;
1037
1038 if (starting) {
1039 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
1040 WARN_ON_ONCE(!dep->resource_index);
1041 }
1042
1043 return 0;
1044 }
1045
1046 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1047 struct dwc3_ep *dep, u32 cur_uf)
1048 {
1049 u32 uf;
1050
1051 if (list_empty(&dep->pending_list)) {
1052 dwc3_trace(trace_dwc3_gadget,
1053 "ISOC ep %s run out for requests",
1054 dep->name);
1055 dep->flags |= DWC3_EP_PENDING_REQUEST;
1056 return;
1057 }
1058
1059 /* 4 micro frames in the future */
1060 uf = cur_uf + dep->interval * 4;
1061
1062 __dwc3_gadget_kick_transfer(dep, uf);
1063 }
1064
1065 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1066 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1067 {
1068 u32 cur_uf, mask;
1069
1070 mask = ~(dep->interval - 1);
1071 cur_uf = event->parameters & mask;
1072
1073 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1074 }
1075
1076 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1077 {
1078 struct dwc3 *dwc = dep->dwc;
1079 int ret;
1080
1081 if (!dep->endpoint.desc) {
1082 dwc3_trace(trace_dwc3_gadget,
1083 "trying to queue request %p to disabled %s",
1084 &req->request, dep->endpoint.name);
1085 return -ESHUTDOWN;
1086 }
1087
1088 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1089 &req->request, req->dep->name)) {
1090 dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
1091 &req->request, req->dep->name);
1092 return -EINVAL;
1093 }
1094
1095 pm_runtime_get(dwc->dev);
1096
1097 req->request.actual = 0;
1098 req->request.status = -EINPROGRESS;
1099 req->direction = dep->direction;
1100 req->epnum = dep->number;
1101
1102 trace_dwc3_ep_queue(req);
1103
1104 /*
1105 * We only add to our list of requests now and
1106 * start consuming the list once we get XferNotReady
1107 * IRQ.
1108 *
1109 * That way, we avoid doing anything that we don't need
1110 * to do now and defer it until the point we receive a
1111 * particular token from the Host side.
1112 *
1113 * This will also avoid Host cancelling URBs due to too
1114 * many NAKs.
1115 */
1116 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1117 dep->direction);
1118 if (ret)
1119 return ret;
1120
1121 list_add_tail(&req->list, &dep->pending_list);
1122
1123 /*
1124 * If there are no pending requests and the endpoint isn't already
1125 * busy, we will just start the request straight away.
1126 *
1127 * This will save one IRQ (XFER_NOT_READY) and possibly make it a
1128 * little bit faster.
1129 */
1130 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1131 !usb_endpoint_xfer_int(dep->endpoint.desc)) {
1132 ret = __dwc3_gadget_kick_transfer(dep, 0);
1133 goto out;
1134 }
1135
1136 /*
1137 * There are a few special cases:
1138 *
1139 * 1. XferNotReady with empty list of requests. We need to kick the
1140 * transfer here in that situation, otherwise we will be NAKing
1141 * forever. If we get XferNotReady before gadget driver has a
1142 * chance to queue a request, we will ACK the IRQ but won't be
1143 * able to receive the data until the next request is queued.
1144 * The following code is handling exactly that.
1145 *
1146 */
1147 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1148 /*
1149 * If xfernotready has already elapsed and this is an
1150 * isoc transfer, then issue END TRANSFER, so that we
1151 * can receive xfernotready again and regain the
1152 * notion of the current microframe.
1153 */
1154 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1155 if (list_empty(&dep->started_list)) {
1156 dwc3_stop_active_transfer(dwc, dep->number, true);
1157 dep->flags = DWC3_EP_ENABLED;
1158 }
1159 return 0;
1160 }
1161
1162 ret = __dwc3_gadget_kick_transfer(dep, 0);
1163 if (!ret)
1164 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
1165
1166 goto out;
1167 }
1168
1169 /*
1170 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1171 * kick the transfer here after queuing a request, otherwise the
1172 * core may not see the modified TRB(s).
1173 */
1174 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1175 (dep->flags & DWC3_EP_BUSY) &&
1176 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1177 WARN_ON_ONCE(!dep->resource_index);
1178 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index);
1179 goto out;
1180 }
1181
1182 /*
1183 * 3. Stream Capable Bulk Endpoints. We need to start the transfer
1184 * right away, otherwise the host will not know we have streams to be
1185 * handled.
1186 */
1187 if (dep->stream_capable)
1188 ret = __dwc3_gadget_kick_transfer(dep, 0);
1189
1190 out:
1191 if (ret && ret != -EBUSY)
1192 dwc3_trace(trace_dwc3_gadget,
1193 "%s: failed to kick transfers",
1194 dep->name);
1195 if (ret == -EBUSY)
1196 ret = 0;
1197
1198 return ret;
1199 }
1200
1201 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
1202 struct usb_request *request)
1203 {
1204 dwc3_gadget_ep_free_request(ep, request);
1205 }
1206
1207 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
1208 {
1209 struct dwc3_request *req;
1210 struct usb_request *request;
1211 struct usb_ep *ep = &dep->endpoint;
1212
1213 dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
1214 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
1215 if (!request)
1216 return -ENOMEM;
1217
1218 request->length = 0;
1219 request->buf = dwc->zlp_buf;
1220 request->complete = __dwc3_gadget_ep_zlp_complete;
1221
1222 req = to_dwc3_request(request);
1223
1224 return __dwc3_gadget_ep_queue(dep, req);
1225 }
1226
1227 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1228 gfp_t gfp_flags)
1229 {
1230 struct dwc3_request *req = to_dwc3_request(request);
1231 struct dwc3_ep *dep = to_dwc3_ep(ep);
1232 struct dwc3 *dwc = dep->dwc;
1233
1234 unsigned long flags;
1235
1236 int ret;
1237
1238 spin_lock_irqsave(&dwc->lock, flags);
1239 ret = __dwc3_gadget_ep_queue(dep, req);
1240
1241 /*
1242 * Okay, here's the thing: if the gadget driver has requested a ZLP by
1243 * setting request->zero, instead of doing magic, we will just queue an
1244 * extra usb_request ourselves so that it gets handled the same way as
1245 * any other request.
1246 */
1247 if (ret == 0 && request->zero && request->length &&
1248 (request->length % ep->maxpacket == 0))
1249 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
1250
1251 spin_unlock_irqrestore(&dwc->lock, flags);
1252
1253 return ret;
1254 }
1255
1256 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1257 struct usb_request *request)
1258 {
1259 struct dwc3_request *req = to_dwc3_request(request);
1260 struct dwc3_request *r = NULL;
1261
1262 struct dwc3_ep *dep = to_dwc3_ep(ep);
1263 struct dwc3 *dwc = dep->dwc;
1264
1265 unsigned long flags;
1266 int ret = 0;
1267
1268 trace_dwc3_ep_dequeue(req);
1269
1270 spin_lock_irqsave(&dwc->lock, flags);
1271
1272 list_for_each_entry(r, &dep->pending_list, list) {
1273 if (r == req)
1274 break;
1275 }
1276
1277 if (r != req) {
1278 list_for_each_entry(r, &dep->started_list, list) {
1279 if (r == req)
1280 break;
1281 }
1282 if (r == req) {
1283 /* wait until it is processed */
1284 dwc3_stop_active_transfer(dwc, dep->number, true);
1285 goto out1;
1286 }
1287 dev_err(dwc->dev, "request %p was not queued to %s\n",
1288 request, ep->name);
1289 ret = -EINVAL;
1290 goto out0;
1291 }
1292
1293 out1:
1294 /* giveback the request */
1295 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1296
1297 out0:
1298 spin_unlock_irqrestore(&dwc->lock, flags);
1299
1300 return ret;
1301 }
1302
1303 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1304 {
1305 struct dwc3_gadget_ep_cmd_params params;
1306 struct dwc3 *dwc = dep->dwc;
1307 int ret;
1308
1309 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1310 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1311 return -EINVAL;
1312 }
1313
1314 memset(&params, 0x00, sizeof(params));
1315
1316 if (value) {
1317 struct dwc3_trb *trb;
1318
1319 unsigned transfer_in_flight;
1320 unsigned started;
1321
1322 if (dep->number > 1)
1323 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
1324 else
1325 trb = &dwc->ep0_trb[dep->trb_enqueue];
1326
1327 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
1328 started = !list_empty(&dep->started_list);
1329
1330 if (!protocol && ((dep->direction && transfer_in_flight) ||
1331 (!dep->direction && started))) {
1332 dwc3_trace(trace_dwc3_gadget,
1333 "%s: pending request, cannot halt",
1334 dep->name);
1335 return -EAGAIN;
1336 }
1337
1338 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
1339 &params);
1340 if (ret)
1341 dev_err(dwc->dev, "failed to set STALL on %s\n",
1342 dep->name);
1343 else
1344 dep->flags |= DWC3_EP_STALL;
1345 } else {
1346
1347 ret = dwc3_send_clear_stall_ep_cmd(dep);
1348 if (ret)
1349 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1350 dep->name);
1351 else
1352 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1353 }
1354
1355 return ret;
1356 }
1357
1358 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1359 {
1360 struct dwc3_ep *dep = to_dwc3_ep(ep);
1361 struct dwc3 *dwc = dep->dwc;
1362
1363 unsigned long flags;
1364
1365 int ret;
1366
1367 spin_lock_irqsave(&dwc->lock, flags);
1368 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1369 spin_unlock_irqrestore(&dwc->lock, flags);
1370
1371 return ret;
1372 }
1373
1374 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1375 {
1376 struct dwc3_ep *dep = to_dwc3_ep(ep);
1377 struct dwc3 *dwc = dep->dwc;
1378 unsigned long flags;
1379 int ret;
1380
1381 spin_lock_irqsave(&dwc->lock, flags);
1382 dep->flags |= DWC3_EP_WEDGE;
1383
1384 if (dep->number == 0 || dep->number == 1)
1385 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1386 else
1387 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1388 spin_unlock_irqrestore(&dwc->lock, flags);
1389
1390 return ret;
1391 }
1392
1393 /* -------------------------------------------------------------------------- */
1394
1395 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1396 .bLength = USB_DT_ENDPOINT_SIZE,
1397 .bDescriptorType = USB_DT_ENDPOINT,
1398 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1399 };
1400
1401 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1402 .enable = dwc3_gadget_ep0_enable,
1403 .disable = dwc3_gadget_ep0_disable,
1404 .alloc_request = dwc3_gadget_ep_alloc_request,
1405 .free_request = dwc3_gadget_ep_free_request,
1406 .queue = dwc3_gadget_ep0_queue,
1407 .dequeue = dwc3_gadget_ep_dequeue,
1408 .set_halt = dwc3_gadget_ep0_set_halt,
1409 .set_wedge = dwc3_gadget_ep_set_wedge,
1410 };
1411
1412 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1413 .enable = dwc3_gadget_ep_enable,
1414 .disable = dwc3_gadget_ep_disable,
1415 .alloc_request = dwc3_gadget_ep_alloc_request,
1416 .free_request = dwc3_gadget_ep_free_request,
1417 .queue = dwc3_gadget_ep_queue,
1418 .dequeue = dwc3_gadget_ep_dequeue,
1419 .set_halt = dwc3_gadget_ep_set_halt,
1420 .set_wedge = dwc3_gadget_ep_set_wedge,
1421 };
1422
1423 /* -------------------------------------------------------------------------- */
1424
1425 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1426 {
1427 struct dwc3 *dwc = gadget_to_dwc(g);
1428 u32 reg;
1429
1430 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1431 return DWC3_DSTS_SOFFN(reg);
1432 }
1433
1434 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1435 {
1436 unsigned long timeout;
1437
1438 int ret;
1439 u32 reg;
1440
1441 u8 link_state;
1442 u8 speed;
1443
1444 /*
1445 * According to the Databook, a remote wakeup request should
1446 * be issued only when the device is in the early suspend state.
1447 *
1448 * We can check that via USB Link State bits in DSTS register.
1449 */
1450 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1451
1452 speed = reg & DWC3_DSTS_CONNECTSPD;
1453 if ((speed == DWC3_DSTS_SUPERSPEED) ||
1454 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
1455 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
1456 return 0;
1457 }
1458
1459 link_state = DWC3_DSTS_USBLNKST(reg);
1460
1461 switch (link_state) {
1462 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1463 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1464 break;
1465 default:
1466 dwc3_trace(trace_dwc3_gadget,
1467 "can't wakeup from '%s'",
1468 dwc3_gadget_link_string(link_state));
1469 return -EINVAL;
1470 }
1471
1472 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1473 if (ret < 0) {
1474 dev_err(dwc->dev, "failed to put link in Recovery\n");
1475 return ret;
1476 }
1477
1478 /* Recent versions do this automatically */
1479 if (dwc->revision < DWC3_REVISION_194A) {
1480 /* write zeroes to Link Change Request */
1481 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1482 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1483 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1484 }
1485
1486 /* poll until Link State changes to ON */
1487 timeout = jiffies + msecs_to_jiffies(100);
1488
1489 while (!time_after(jiffies, timeout)) {
1490 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1491
1492 /* in HS, means ON */
1493 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1494 break;
1495 }
1496
1497 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1498 dev_err(dwc->dev, "failed to send remote wakeup\n");
1499 return -EINVAL;
1500 }
1501
1502 return 0;
1503 }
1504
1505 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1506 {
1507 struct dwc3 *dwc = gadget_to_dwc(g);
1508 unsigned long flags;
1509 int ret;
1510
1511 spin_lock_irqsave(&dwc->lock, flags);
1512 ret = __dwc3_gadget_wakeup(dwc);
1513 spin_unlock_irqrestore(&dwc->lock, flags);
1514
1515 return ret;
1516 }
1517
1518 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1519 int is_selfpowered)
1520 {
1521 struct dwc3 *dwc = gadget_to_dwc(g);
1522 unsigned long flags;
1523
1524 spin_lock_irqsave(&dwc->lock, flags);
1525 g->is_selfpowered = !!is_selfpowered;
1526 spin_unlock_irqrestore(&dwc->lock, flags);
1527
1528 return 0;
1529 }
1530
1531 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1532 {
1533 u32 reg;
1534 u32 timeout = 500;
1535
1536 if (pm_runtime_suspended(dwc->dev))
1537 return 0;
1538
1539 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1540 if (is_on) {
1541 if (dwc->revision <= DWC3_REVISION_187A) {
1542 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1543 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1544 }
1545
1546 if (dwc->revision >= DWC3_REVISION_194A)
1547 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1548 reg |= DWC3_DCTL_RUN_STOP;
1549
1550 if (dwc->has_hibernation)
1551 reg |= DWC3_DCTL_KEEP_CONNECT;
1552
1553 dwc->pullups_connected = true;
1554 } else {
1555 reg &= ~DWC3_DCTL_RUN_STOP;
1556
1557 if (dwc->has_hibernation && !suspend)
1558 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1559
1560 dwc->pullups_connected = false;
1561 }
1562
1563 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1564
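/*
 * Poll DSTS.DEVCTRLHLT until it reflects the requested state: the
 * halted bit must clear after setting Run/Stop and must be set once
 * the controller has actually stopped.
 */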
1565 do {
1566 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1567 reg &= DWC3_DSTS_DEVCTRLHLT;
1568 } while (--timeout && !(!is_on ^ !reg));
1569
1570 if (!timeout)
1571 return -ETIMEDOUT;
1572
1573 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
1574 dwc->gadget_driver
1575 ? dwc->gadget_driver->function : "no-function",
1576 is_on ? "connect" : "disconnect");
1577
1578 return 0;
1579 }
1580
1581 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1582 {
1583 struct dwc3 *dwc = gadget_to_dwc(g);
1584 unsigned long flags;
1585 int ret;
1586
1587 is_on = !!is_on;
1588
1589 spin_lock_irqsave(&dwc->lock, flags);
1590 ret = dwc3_gadget_run_stop(dwc, is_on, false);
1591 spin_unlock_irqrestore(&dwc->lock, flags);
1592
1593 return ret;
1594 }
1595
1596 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1597 {
1598 u32 reg;
1599
1600 /* Enable all but Start and End of Frame IRQs */
1601 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1602 DWC3_DEVTEN_EVNTOVERFLOWEN |
1603 DWC3_DEVTEN_CMDCMPLTEN |
1604 DWC3_DEVTEN_ERRTICERREN |
1605 DWC3_DEVTEN_WKUPEVTEN |
1606 DWC3_DEVTEN_ULSTCNGEN |
1607 DWC3_DEVTEN_CONNECTDONEEN |
1608 DWC3_DEVTEN_USBRSTEN |
1609 DWC3_DEVTEN_DISCONNEVTEN);
1610
1611 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1612 }
1613
1614 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1615 {
1616 /* mask all interrupts */
1617 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1618 }
1619
1620 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1621 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1622
1623 /**
1624 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
1625 * @dwc: pointer to our context structure
1626 *
1627 * The following looks complex but it's actually very simple. In order to
1628 * calculate the number of packets we can burst at once on OUT transfers, we're
1629 * gonna use RxFIFO size.
1630 *
1631 * To calculate RxFIFO size we need two numbers:
1632 * MDWIDTH = size, in bits, of the internal memory bus
1633 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
1634 *
1635 * Given these two numbers, the formula is simple:
1636 *
1637 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
1638 *
1639 * 24 bytes is for 3x SETUP packets
1640 * 16 bytes is a clock domain crossing tolerance
1641 *
1642 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
1643 */
1644 static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
1645 {
1646 u32 ram2_depth;
1647 u32 mdwidth;
1648 u32 nump;
1649 u32 reg;
1650
1651 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
1652 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
1653
1654 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
1655 nump = min_t(u32, nump, 16);
1656
1657 /* update NumP */
1658 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1659 reg &= ~DWC3_DCFG_NUMP_MASK;
1660 reg |= nump << DWC3_DCFG_NUMP_SHIFT;
1661 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1662 }
1663
1664 static int __dwc3_gadget_start(struct dwc3 *dwc)
1665 {
1666 struct dwc3_ep *dep;
1667 int ret = 0;
1668 u32 reg;
1669
1670 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1671 reg &= ~(DWC3_DCFG_SPEED_MASK);
1672
1673 /**
1674 * WORKAROUND: DWC3 revisions < 2.20a have an issue
1675 * which would cause a metastability state on the Run/Stop
1676 * bit if we try to force the IP into USB2-only mode.
1677 *
1678 * Because of that, we cannot configure the IP to any
1679 * speed other than SuperSpeed.
1680 *
1681 * Refers to:
1682 *
1683 * STAR#9000525659: Clock Domain Crossing on DCTL in
1684 * USB 2.0 Mode
1685 */
1686 if (dwc->revision < DWC3_REVISION_220A) {
1687 reg |= DWC3_DCFG_SUPERSPEED;
1688 } else {
1689 switch (dwc->maximum_speed) {
1690 case USB_SPEED_LOW:
1691 reg |= DWC3_DCFG_LOWSPEED;
1692 break;
1693 case USB_SPEED_FULL:
1694 reg |= DWC3_DCFG_FULLSPEED1;
1695 break;
1696 case USB_SPEED_HIGH:
1697 reg |= DWC3_DCFG_HIGHSPEED;
1698 break;
1699 case USB_SPEED_SUPER_PLUS:
1700 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
1701 break;
1702 default:
1703 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
1704 dwc->maximum_speed);
1705 /* fall through */
1706 case USB_SPEED_SUPER:
1707 reg |= DWC3_DCFG_SUPERSPEED;
1708 break;
1709 }
1710 }
1711 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1712
1713 /*
1714 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
1715 * field instead of letting dwc3 itself calculate that automatically.
1716 *
1717 * This way, we maximize the chances that we'll be able to get several
1718 * bursts of data without going through any sort of endpoint throttling.
1719 */
1720 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1721 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
1722 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1723
1724 dwc3_gadget_setup_nump(dwc);
1725
1726 /* Start with SuperSpeed Default */
1727 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1728
1729 dep = dwc->eps[0];
1730 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1731 false);
1732 if (ret) {
1733 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1734 goto err0;
1735 }
1736
1737 dep = dwc->eps[1];
1738 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1739 false);
1740 if (ret) {
1741 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1742 goto err1;
1743 }
1744
1745 /* begin to receive SETUP packets */
1746 dwc->ep0state = EP0_SETUP_PHASE;
1747 dwc3_ep0_out_start(dwc);
1748
1749 dwc3_gadget_enable_irq(dwc);
1750
1751 return 0;
1752
1753 err1:
1754 __dwc3_gadget_ep_disable(dwc->eps[0]);
1755
1756 err0:
1757 return ret;
1758 }
1759
1760 static int dwc3_gadget_start(struct usb_gadget *g,
1761 struct usb_gadget_driver *driver)
1762 {
1763 struct dwc3 *dwc = gadget_to_dwc(g);
1764 unsigned long flags;
1765 int ret = 0;
1766 int irq;
1767
1768 irq = dwc->irq_gadget;
1769 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1770 IRQF_SHARED, "dwc3", dwc->ev_buf);
1771 if (ret) {
1772 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1773 irq, ret);
1774 goto err0;
1775 }
1776
1777 spin_lock_irqsave(&dwc->lock, flags);
1778 if (dwc->gadget_driver) {
1779 dev_err(dwc->dev, "%s is already bound to %s\n",
1780 dwc->gadget.name,
1781 dwc->gadget_driver->driver.name);
1782 ret = -EBUSY;
1783 goto err1;
1784 }
1785
1786 dwc->gadget_driver = driver;
1787
1788 if (pm_runtime_active(dwc->dev))
1789 __dwc3_gadget_start(dwc);
1790
1791 spin_unlock_irqrestore(&dwc->lock, flags);
1792
1793 return 0;
1794
1795 err1:
1796 spin_unlock_irqrestore(&dwc->lock, flags);
1797 free_irq(irq, dwc->ev_buf);
1798
1799 err0:
1800 return ret;
1801 }
1802
1803 static void __dwc3_gadget_stop(struct dwc3 *dwc)
1804 {
1805 if (pm_runtime_suspended(dwc->dev))
1806 return;
1807
1808 dwc3_gadget_disable_irq(dwc);
1809 __dwc3_gadget_ep_disable(dwc->eps[0]);
1810 __dwc3_gadget_ep_disable(dwc->eps[1]);
1811 }
1812
1813 static int dwc3_gadget_stop(struct usb_gadget *g)
1814 {
1815 struct dwc3 *dwc = gadget_to_dwc(g);
1816 unsigned long flags;
1817
1818 spin_lock_irqsave(&dwc->lock, flags);
1819 __dwc3_gadget_stop(dwc);
1820 dwc->gadget_driver = NULL;
1821 spin_unlock_irqrestore(&dwc->lock, flags);
1822
1823 free_irq(dwc->irq_gadget, dwc->ev_buf);
1824
1825 return 0;
1826 }
1827
1828 static const struct usb_gadget_ops dwc3_gadget_ops = {
1829 .get_frame = dwc3_gadget_get_frame,
1830 .wakeup = dwc3_gadget_wakeup,
1831 .set_selfpowered = dwc3_gadget_set_selfpowered,
1832 .pullup = dwc3_gadget_pullup,
1833 .udc_start = dwc3_gadget_start,
1834 .udc_stop = dwc3_gadget_stop,
1835 };
1836
1837 /* -------------------------------------------------------------------------- */
1838
1839 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1840 u8 num, u32 direction)
1841 {
1842 struct dwc3_ep *dep;
1843 u8 i;
1844
1845 for (i = 0; i < num; i++) {
1846 u8 epnum = (i << 1) | (direction ? 1 : 0);
1847
1848 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1849 if (!dep)
1850 return -ENOMEM;
1851
1852 dep->dwc = dwc;
1853 dep->number = epnum;
1854 dep->direction = !!direction;
1855 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
1856 dwc->eps[epnum] = dep;
1857
1858 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1859 (epnum & 1) ? "in" : "out");
1860
1861 dep->endpoint.name = dep->name;
1862 spin_lock_init(&dep->lock);
1863
1864 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
1865
1866 if (epnum == 0 || epnum == 1) {
1867 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1868 dep->endpoint.maxburst = 1;
1869 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1870 if (!epnum)
1871 dwc->gadget.ep0 = &dep->endpoint;
1872 } else {
1873 int ret;
1874
1875 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1876 dep->endpoint.max_streams = 15;
1877 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1878 list_add_tail(&dep->endpoint.ep_list,
1879 &dwc->gadget.ep_list);
1880
1881 ret = dwc3_alloc_trb_pool(dep);
1882 if (ret)
1883 return ret;
1884 }
1885
1886 if (epnum == 0 || epnum == 1) {
1887 dep->endpoint.caps.type_control = true;
1888 } else {
1889 dep->endpoint.caps.type_iso = true;
1890 dep->endpoint.caps.type_bulk = true;
1891 dep->endpoint.caps.type_int = true;
1892 }
1893
1894 dep->endpoint.caps.dir_in = !!direction;
1895 dep->endpoint.caps.dir_out = !direction;
1896
1897 INIT_LIST_HEAD(&dep->pending_list);
1898 INIT_LIST_HEAD(&dep->started_list);
1899 }
1900
1901 return 0;
1902 }
1903
1904 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1905 {
1906 int ret;
1907
1908 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1909
1910 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1911 if (ret < 0) {
1912 dwc3_trace(trace_dwc3_gadget,
1913 "failed to allocate OUT endpoints");
1914 return ret;
1915 }
1916
1917 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1918 if (ret < 0) {
1919 dwc3_trace(trace_dwc3_gadget,
1920 "failed to allocate IN endpoints");
1921 return ret;
1922 }
1923
1924 return 0;
1925 }
1926
1927 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1928 {
1929 struct dwc3_ep *dep;
1930 u8 epnum;
1931
1932 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1933 dep = dwc->eps[epnum];
1934 if (!dep)
1935 continue;
1936 /*
1937 * Physical endpoints 0 and 1 are special; they form the
1938 * bi-directional USB endpoint 0.
1939 *
1940 * For those two physical endpoints, we don't allocate a TRB
1941 * pool nor do we add them to the endpoints list. Due to that, we
1942 * shouldn't do these two operations otherwise we would end up
1943 * with all sorts of bugs when removing dwc3.ko.
1944 */
1945 if (epnum != 0 && epnum != 1) {
1946 dwc3_free_trb_pool(dep);
1947 list_del(&dep->endpoint.ep_list);
1948 }
1949
1950 kfree(dep);
1951 }
1952 }
1953
1954 /* -------------------------------------------------------------------------- */
1955
1956 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1957 struct dwc3_request *req, struct dwc3_trb *trb,
1958 const struct dwc3_event_depevt *event, int status)
1959 {
1960 unsigned int count;
1961 unsigned int s_pkt = 0;
1962 unsigned int trb_status;
1963
1964 dep->queued_requests--;
1965 trace_dwc3_complete_trb(dep, trb);
1966
1967 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1968 /*
1969 * We continue despite the error. There is not much we
1970 * can do. If we don't clean it up we loop forever. If
1971 * we skip the TRB then it gets overwritten after a
1972 * while since we use them in a ring buffer. A BUG()
1973 * would help. Let's hope that if this occurs, someone
1974 * fixes the root cause instead of looking away :)
1975 */
1976 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1977 dep->name, trb);
1978 count = trb->size & DWC3_TRB_SIZE_MASK;
1979
1980 if (dep->direction) {
1981 if (count) {
1982 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1983 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1984 dwc3_trace(trace_dwc3_gadget,
1985 "%s: incomplete IN transfer",
1986 dep->name);
1987 /*
1988 * If missed isoc occurred and there is
1989 * no request queued then issue END
1990 * TRANSFER, so that core generates
1991 * next xfernotready and we will issue
1992 * a fresh START TRANSFER.
1993 * If there are still queued requests,
1994 * then wait; do not issue either END
1995 * or UPDATE TRANSFER, just attach the next
1996 * request in pending_list during
1997 * giveback. If any future queued request
1998 * is successfully transferred then we
1999 * will issue UPDATE TRANSFER for all
2000 * requests in the pending_list.
2001 */
2002 dep->flags |= DWC3_EP_MISSED_ISOC;
2003 } else {
2004 dev_err(dwc->dev, "incomplete IN transfer %s\n",
2005 dep->name);
2006 status = -ECONNRESET;
2007 }
2008 } else {
2009 dep->flags &= ~DWC3_EP_MISSED_ISOC;
2010 }
2011 } else {
2012 if (count && (event->status & DEPEVT_STATUS_SHORT))
2013 s_pkt = 1;
2014 }
2015
2016 /*
2017 * We assume here we will always receive the entire data block
2018 * which we should receive. Meaning, if we program RX to
2019 * receive 4K but we receive only 2K, we assume that's all we
2020 * should receive and we simply bounce the request back to the
2021 * gadget driver for further processing.
2022 */
2023 req->request.actual += req->request.length - count;
2024 if (s_pkt)
2025 return 1;
2026 if ((event->status & DEPEVT_STATUS_LST) &&
2027 (trb->ctrl & (DWC3_TRB_CTRL_LST |
2028 DWC3_TRB_CTRL_HWO)))
2029 return 1;
2030 if ((event->status & DEPEVT_STATUS_IOC) &&
2031 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2032 return 1;
2033 return 0;
2034 }
2035
2036 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
2037 const struct dwc3_event_depevt *event, int status)
2038 {
2039 struct dwc3_request *req;
2040 struct dwc3_trb *trb;
2041 unsigned int slot;
2042 unsigned int i;
2043 int ret;
2044
2045 do {
2046 req = next_request(&dep->started_list);
2047 if (WARN_ON_ONCE(!req))
2048 return 1;
2049
2050 i = 0;
2051 do {
2052 slot = req->first_trb_index + i;
2053 if (slot == DWC3_TRB_NUM - 1)
2054 slot++;
2055 slot %= DWC3_TRB_NUM;
2056 trb = &dep->trb_pool[slot];
2057
2058 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2059 event, status);
2060 if (ret)
2061 break;
2062 } while (++i < req->request.num_mapped_sgs);
2063
2064 dwc3_gadget_giveback(dep, req, status);
2065
2066 if (ret)
2067 break;
2068 } while (1);
2069
2070 /*
2071 * Our endpoint might get disabled by another thread during
2072 * dwc3_gadget_giveback(). If that happens, we just return 1 early
2073 * so that the DWC3_EP_BUSY flag gets cleared.
2074 */
2075 if (!dep->endpoint.desc)
2076 return 1;
2077
2078 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
2079 list_empty(&dep->started_list)) {
2080 if (list_empty(&dep->pending_list)) {
2081 /*
2082 * If there is no entry in the request list then do
2083 * not issue END TRANSFER now. Just set the PENDING
2084 * flag, so that END TRANSFER is issued when an
2085 * entry is added to the request list.
2086 */
2087 dep->flags = DWC3_EP_PENDING_REQUEST;
2088 } else {
2089 dwc3_stop_active_transfer(dwc, dep->number, true);
2090 dep->flags = DWC3_EP_ENABLED;
2091 }
2092 return 1;
2093 }
2094
2095 if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
2096 if ((event->status & DEPEVT_STATUS_IOC) &&
2097 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2098 return 0;
2099 return 1;
2100 }
2101
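/*
 * Common handler for XferComplete and XferInProgress: clean up completed
 * requests, clear DWC3_EP_BUSY when appropriate, re-enable the U1/U2 bits
 * saved in dwc->u1u2 (second half of the <1.83a workaround) and kick the
 * next queued transfer on non-isochronous endpoints.
 */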
2102 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
2103 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
2104 {
2105 unsigned status = 0;
2106 int clean_busy;
2107 u32 is_xfer_complete;
2108
2109 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
2110
2111 if (event->status & DEPEVT_STATUS_BUSERR)
2112 status = -ECONNRESET;
2113
2114 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
2115 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
2116 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
2117 dep->flags &= ~DWC3_EP_BUSY;
2118
2119 /*
2120 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
2121 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
2122 */
2123 if (dwc->revision < DWC3_REVISION_183A) {
2124 u32 reg;
2125 int i;
2126
2127 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
2128 dep = dwc->eps[i];
2129
2130 if (!(dep->flags & DWC3_EP_ENABLED))
2131 continue;
2132
2133 if (!list_empty(&dep->started_list))
2134 return;
2135 }
2136
2137 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2138 reg |= dwc->u1u2;
2139 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2140
2141 dwc->u1u2 = 0;
2142 }
2143
2144 /*
2145 * Our endpoint might get disabled by another thread during
2146 * dwc3_gadget_giveback(). If that happens, we just return early
2147 * to avoid kicking transfers on a disabled endpoint.
2148 */
2149 if (!dep->endpoint.desc)
2150 return;
2151
2152 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2153 int ret;
2154
2155 ret = __dwc3_gadget_kick_transfer(dep, 0);
2156 if (!ret || ret == -EBUSY)
2157 return;
2158 }
2159 }
2160
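/*
 * Dispatch a single endpoint event. Events for physical endpoints 0 and 1
 * are routed to the ep0 handler; everything else is handled per event type
 * below.
 */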
2161 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2162 const struct dwc3_event_depevt *event)
2163 {
2164 struct dwc3_ep *dep;
2165 u8 epnum = event->endpoint_number;
2166
2167 dep = dwc->eps[epnum];
2168
2169 if (!(dep->flags & DWC3_EP_ENABLED))
2170 return;
2171
2172 if (epnum == 0 || epnum == 1) {
2173 dwc3_ep0_interrupt(dwc, event);
2174 return;
2175 }
2176
2177 switch (event->endpoint_event) {
2178 case DWC3_DEPEVT_XFERCOMPLETE:
2179 dep->resource_index = 0;
2180
2181 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2182 dwc3_trace(trace_dwc3_gadget,
2183 "%s is an Isochronous endpoint",
2184 dep->name);
2185 return;
2186 }
2187
2188 dwc3_endpoint_transfer_complete(dwc, dep, event);
2189 break;
2190 case DWC3_DEPEVT_XFERINPROGRESS:
2191 dwc3_endpoint_transfer_complete(dwc, dep, event);
2192 break;
2193 case DWC3_DEPEVT_XFERNOTREADY:
2194 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2195 dwc3_gadget_start_isoc(dwc, dep, event);
2196 } else {
2197 int active;
2198 int ret;
2199
2200 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
2201
2202 dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
2203 dep->name, active ? "Transfer Active"
2204 : "Transfer Not Active");
2205
2206 ret = __dwc3_gadget_kick_transfer(dep, 0);
2207 if (!ret || ret == -EBUSY)
2208 return;
2209
2210 dwc3_trace(trace_dwc3_gadget,
2211 "%s: failed to kick transfers",
2212 dep->name);
2213 }
2214
2215 break;
2216 case DWC3_DEPEVT_STREAMEVT:
2217 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2218 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2219 dep->name);
2220 return;
2221 }
2222
2223 switch (event->status) {
2224 case DEPEVT_STREAMEVT_FOUND:
2225 dwc3_trace(trace_dwc3_gadget,
2226 "Stream %d found and started",
2227 event->parameters);
2228
2229 break;
2230 case DEPEVT_STREAMEVT_NOTFOUND:
2231 /* FALLTHROUGH */
2232 default:
2233 dwc3_trace(trace_dwc3_gadget,
2234 "unable to find suitable stream");
2235 }
2236 break;
2237 case DWC3_DEPEVT_RXTXFIFOEVT:
2238 dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
2239 break;
2240 case DWC3_DEPEVT_EPCMDCMPLT:
2241 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
2242 break;
2243 }
2244 }
2245
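/*
 * The helpers below invoke gadget driver callbacks. dwc->lock is dropped
 * around each callback so that the gadget driver can safely call back into
 * dwc3 (e.g. to queue or dequeue requests) without deadlocking on the lock.
 */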
2246 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2247 {
2248 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2249 spin_unlock(&dwc->lock);
2250 dwc->gadget_driver->disconnect(&dwc->gadget);
2251 spin_lock(&dwc->lock);
2252 }
2253 }
2254
2255 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2256 {
2257 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2258 spin_unlock(&dwc->lock);
2259 dwc->gadget_driver->suspend(&dwc->gadget);
2260 spin_lock(&dwc->lock);
2261 }
2262 }
2263
2264 static void dwc3_resume_gadget(struct dwc3 *dwc)
2265 {
2266 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2267 spin_unlock(&dwc->lock);
2268 dwc->gadget_driver->resume(&dwc->gadget);
2269 spin_lock(&dwc->lock);
2270 }
2271 }
2272
2273 static void dwc3_reset_gadget(struct dwc3 *dwc)
2274 {
2275 if (!dwc->gadget_driver)
2276 return;
2277
2278 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2279 spin_unlock(&dwc->lock);
2280 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2281 spin_lock(&dwc->lock);
2282 }
2283 }
2284
2285 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2286 {
2287 struct dwc3_ep *dep;
2288 struct dwc3_gadget_ep_cmd_params params;
2289 u32 cmd;
2290 int ret;
2291
2292 dep = dwc->eps[epnum];
2293
2294 if (!dep->resource_index)
2295 return;
2296
2297 /*
2298 * NOTICE: We are violating what the Databook says about the
2299 * EndTransfer command. Ideally we would _always_ wait for the
2300 * EndTransfer Command Completion IRQ, but that's causing too
2301 * much trouble synchronizing between us and gadget driver.
2302 *
2303 * We have discussed this with the IP Provider and it was
2304 * suggested to giveback all requests here, but give HW some
2305 * extra time to synchronize with the interconnect. We're using
2306 * an arbitrary 100us delay for that.
2307 *
2308 * Note also that a similar handling was tested by Synopsys
2309 * (thanks a lot Paul) and nothing bad has come out of it.
2310 * In short, what we're doing is:
2311 *
2312 * - Issue EndTransfer WITH CMDIOC bit set
2313 * - Wait 100us
2314 */
2315
2316 cmd = DWC3_DEPCMD_ENDTRANSFER;
2317 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2318 cmd |= DWC3_DEPCMD_CMDIOC;
2319 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2320 memset(&params, 0, sizeof(params));
2321 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
2322 WARN_ON_ONCE(ret);
2323 dep->resource_index = 0;
2324 dep->flags &= ~DWC3_EP_BUSY;
2325 udelay(100);
2326 }
2327
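/*
 * Cancel every transfer still in flight on the non-control endpoints.
 * Called from the reset handler so that no stale requests survive a bus
 * reset.
 */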
2328 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2329 {
2330 u32 epnum;
2331
2332 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2333 struct dwc3_ep *dep;
2334
2335 dep = dwc->eps[epnum];
2336 if (!dep)
2337 continue;
2338
2339 if (!(dep->flags & DWC3_EP_ENABLED))
2340 continue;
2341
2342 dwc3_remove_requests(dwc, dep);
2343 }
2344 }
2345
2346 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2347 {
2348 u32 epnum;
2349
2350 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2351 struct dwc3_ep *dep;
2352 int ret;
2353
2354 dep = dwc->eps[epnum];
2355 if (!dep)
2356 continue;
2357
2358 if (!(dep->flags & DWC3_EP_STALL))
2359 continue;
2360
2361 dep->flags &= ~DWC3_EP_STALL;
2362
2363 ret = dwc3_send_clear_stall_ep_cmd(dep);
2364 WARN_ON_ONCE(ret);
2365 }
2366 }
2367
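/*
 * Handle a Disconnect event: stop initiating U1/U2, notify the gadget
 * driver, and mark the device as not attached.
 */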
2368 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2369 {
2370 int reg;
2371
2372 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2373 reg &= ~DWC3_DCTL_INITU1ENA;
2374 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2375
2376 reg &= ~DWC3_DCTL_INITU2ENA;
2377 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2378
2379 dwc3_disconnect_gadget(dwc);
2380
2381 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2382 dwc->setup_packet_pending = false;
2383 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2384
2385 dwc->connected = false;
2386 }
2387
2388 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2389 {
2390 u32 reg;
2391
2392 dwc->connected = true;
2393
2394 /*
2395 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2396 * would cause a missing Disconnect Event if there's a
2397 * pending Setup Packet in the FIFO.
2398 *
2399 * There's no suggested workaround on the official Bug
2400 * report, which states that "unless the driver/application
2401 * is doing any special handling of a disconnect event,
2402 * there is no functional issue".
2403 *
2404 * Unfortunately, it turns out that we _do_ some special
2405 * handling of a disconnect event, namely complete all
2406 * pending transfers, notify gadget driver of the
2407 * disconnection, and so on.
2408 *
2409 * Our suggested workaround is to follow the Disconnect
2410 * Event steps here, instead, based on a setup_packet_pending
2411 * flag. That flag gets set whenever we have a SETUP_PENDING
2412 * status for EP0 TRBs and gets cleared on XferComplete for the
2413 * same endpoint.
2414 *
2415 * Refers to:
2416 *
2417 * STAR#9000466709: RTL: Device : Disconnect event not
2418 * generated if setup packet pending in FIFO
2419 */
2420 if (dwc->revision < DWC3_REVISION_188A) {
2421 if (dwc->setup_packet_pending)
2422 dwc3_gadget_disconnect_interrupt(dwc);
2423 }
2424
2425 dwc3_reset_gadget(dwc);
2426
2427 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2428 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2429 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2430 dwc->test_mode = false;
2431
2432 dwc3_stop_active_transfers(dwc);
2433 dwc3_clear_stall_all_ep(dwc);
2434
2435 /* Reset device address to zero */
2436 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2437 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2438 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2439 }
2440
2441 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2442 {
2443 u32 reg;
2444 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2445
2446 /*
2447 * We change the clock only at SS, though it is not clear why we
2448 * would want to do this. Maybe it becomes part of the power saving plan.
2449 */
2450
2451 if ((speed != DWC3_DSTS_SUPERSPEED) &&
2452 (speed != DWC3_DSTS_SUPERSPEED_PLUS))
2453 return;
2454
2455 /*
2456 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2457 * each time on Connect Done.
2458 */
2459 if (!usb30_clock)
2460 return;
2461
2462 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2463 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2464 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2465 }
2466
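/*
 * Connect Done: read the negotiated speed from DSTS, program ep0's
 * wMaxPacketSize accordingly, optionally advertise USB2 LPM capability,
 * and re-enable both directions of the control endpoint.
 */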
2467 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2468 {
2469 struct dwc3_ep *dep;
2470 int ret;
2471 u32 reg;
2472 u8 speed;
2473
2474 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2475 speed = reg & DWC3_DSTS_CONNECTSPD;
2476 dwc->speed = speed;
2477
2478 dwc3_update_ram_clk_sel(dwc, speed);
2479
2480 switch (speed) {
2481 case DWC3_DSTS_SUPERSPEED_PLUS:
2482 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2483 dwc->gadget.ep0->maxpacket = 512;
2484 dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
2485 break;
2486 case DWC3_DSTS_SUPERSPEED:
2487 /*
2488 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2489 * would cause a missing USB3 Reset event.
2490 *
2491 * In such situations, we should force a USB3 Reset
2492 * event by calling our dwc3_gadget_reset_interrupt()
2493 * routine.
2494 *
2495 * Refers to:
2496 *
2497 * STAR#9000483510: RTL: SS : USB3 reset event may
2498 * not be generated always when the link enters poll
2499 */
2500 if (dwc->revision < DWC3_REVISION_190A)
2501 dwc3_gadget_reset_interrupt(dwc);
2502
2503 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2504 dwc->gadget.ep0->maxpacket = 512;
2505 dwc->gadget.speed = USB_SPEED_SUPER;
2506 break;
2507 case DWC3_DSTS_HIGHSPEED:
2508 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2509 dwc->gadget.ep0->maxpacket = 64;
2510 dwc->gadget.speed = USB_SPEED_HIGH;
2511 break;
2512 case DWC3_DSTS_FULLSPEED2:
2513 case DWC3_DSTS_FULLSPEED1:
2514 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2515 dwc->gadget.ep0->maxpacket = 64;
2516 dwc->gadget.speed = USB_SPEED_FULL;
2517 break;
2518 case DWC3_DSTS_LOWSPEED:
2519 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2520 dwc->gadget.ep0->maxpacket = 8;
2521 dwc->gadget.speed = USB_SPEED_LOW;
2522 break;
2523 }
2524
2525 /* Enable USB2 LPM Capability */
2526
2527 if ((dwc->revision > DWC3_REVISION_194A) &&
2528 (speed != DWC3_DSTS_SUPERSPEED) &&
2529 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
2530 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2531 reg |= DWC3_DCFG_LPM_CAP;
2532 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2533
2534 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2535 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2536
2537 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2538
2539 /*
2540 * When dwc3 revision is >= 2.40a, the LPM Erratum is enabled, and
2541 * DCFG.LPMCap is set, the core responds with an ACK if the
2542 * BESL value in the LPM token is less than or equal to the LPM
2543 * NYET threshold.
2544 */
2545 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2546 && dwc->has_lpm_erratum,
2547 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2548
2549 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2550 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2551
2552 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2553 } else {
2554 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2555 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2556 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2557 }
2558
2559 dep = dwc->eps[0];
2560 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2561 false);
2562 if (ret) {
2563 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2564 return;
2565 }
2566
2567 dep = dwc->eps[1];
2568 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2569 false);
2570 if (ret) {
2571 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2572 return;
2573 }
2574
2575 /*
2576 * Configure PHY via GUSB3PIPECTLn if required.
2577 *
2578 * Update GTXFIFOSIZn
2579 *
2580 * In both cases reset values should be sufficient.
2581 */
2582 }
2583
2584 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2585 {
2586 /*
2587 * TODO take core out of low power mode when that's
2588 * implemented.
2589 */
2590
2591 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2592 spin_unlock(&dwc->lock);
2593 dwc->gadget_driver->resume(&dwc->gadget);
2594 spin_lock(&dwc->lock);
2595 }
2596 }
2597
2598 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2599 unsigned int evtinfo)
2600 {
2601 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2602 unsigned int pwropt;
2603
2604 /*
2605 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2606 * Hibernation mode enabled which would show up when the device
2607 * detects a host-initiated U3 exit.
2608 *
2609 * In that case, the device will generate a Link State Change
2610 * Interrupt from U3 to RESUME, which is only necessary if
2611 * Hibernation is configured in.
2612 *
2613 * There are no functional changes due to such spurious event and we
2614 * just need to ignore it.
2615 *
2616 * Refers to:
2617 *
2618 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2619 * operational mode
2620 */
2621 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2622 if ((dwc->revision < DWC3_REVISION_250A) &&
2623 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2624 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2625 (next == DWC3_LINK_STATE_RESUME)) {
2626 dwc3_trace(trace_dwc3_gadget,
2627 "ignoring transition U3 -> Resume");
2628 return;
2629 }
2630 }
2631
2632 /*
2633 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2634 * on the link partner, the USB session might do multiple entries
2635 * into and exits from low power states before a transfer takes place.
2636 *
2637 * Due to this problem, we might experience lower throughput. The
2638 * suggested workaround is to disable DCTL[12:9] bits if we're
2639 * transitioning from U1/U2 to U0 and enable those bits again
2640 * after a transfer completes and there are no pending transfers
2641 * on any of the enabled endpoints.
2642 *
2643 * This is the first half of that workaround.
2644 *
2645 * Refers to:
2646 *
2647 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2648 * core send LGO_Ux entering U0
2649 */
2650 if (dwc->revision < DWC3_REVISION_183A) {
2651 if (next == DWC3_LINK_STATE_U0) {
2652 u32 u1u2;
2653 u32 reg;
2654
2655 switch (dwc->link_state) {
2656 case DWC3_LINK_STATE_U1:
2657 case DWC3_LINK_STATE_U2:
2658 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2659 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2660 | DWC3_DCTL_ACCEPTU2ENA
2661 | DWC3_DCTL_INITU1ENA
2662 | DWC3_DCTL_ACCEPTU1ENA);
2663
2664 if (!dwc->u1u2)
2665 dwc->u1u2 = reg & u1u2;
2666
2667 reg &= ~u1u2;
2668
2669 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2670 break;
2671 default:
2672 /* do nothing */
2673 break;
2674 }
2675 }
2676 }
2677
2678 switch (next) {
2679 case DWC3_LINK_STATE_U1:
2680 if (dwc->speed == USB_SPEED_SUPER)
2681 dwc3_suspend_gadget(dwc);
2682 break;
2683 case DWC3_LINK_STATE_U2:
2684 case DWC3_LINK_STATE_U3:
2685 dwc3_suspend_gadget(dwc);
2686 break;
2687 case DWC3_LINK_STATE_RESUME:
2688 dwc3_resume_gadget(dwc);
2689 break;
2690 default:
2691 /* do nothing */
2692 break;
2693 }
2694
2695 dwc->link_state = next;
2696 }
2697
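/*
 * U3/L1-L2 Suspend event: notify the gadget driver only on an actual
 * transition into U3.
 */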
2698 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
2699 unsigned int evtinfo)
2700 {
2701 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2702
2703 if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
2704 dwc3_suspend_gadget(dwc);
2705
2706 dwc->link_state = next;
2707 }
2708
2709 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2710 unsigned int evtinfo)
2711 {
2712 unsigned int is_ss = evtinfo & BIT(4);
2713
2714 /*
2715 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2716 * has a known issue which can cause USB CV TD.9.23 to fail
2717 * randomly.
2718 *
2719 * Because of this issue, core could generate bogus hibernation
2720 * events which SW needs to ignore.
2721 *
2722 * Refers to:
2723 *
2724 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2725 * Device Fallback from SuperSpeed
2726 */
2727 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2728 return;
2729
2730 /* enter hibernation here */
2731 }
2732
2733 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2734 const struct dwc3_event_devt *event)
2735 {
2736 switch (event->type) {
2737 case DWC3_DEVICE_EVENT_DISCONNECT:
2738 dwc3_gadget_disconnect_interrupt(dwc);
2739 break;
2740 case DWC3_DEVICE_EVENT_RESET:
2741 dwc3_gadget_reset_interrupt(dwc);
2742 break;
2743 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2744 dwc3_gadget_conndone_interrupt(dwc);
2745 break;
2746 case DWC3_DEVICE_EVENT_WAKEUP:
2747 dwc3_gadget_wakeup_interrupt(dwc);
2748 break;
2749 case DWC3_DEVICE_EVENT_HIBER_REQ:
2750 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2751 "unexpected hibernation event\n"))
2752 break;
2753
2754 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2755 break;
2756 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2757 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2758 break;
2759 case DWC3_DEVICE_EVENT_EOPF:
2760 /* It was changed to be a suspend event for version 2.30a and above */
2761 if (dwc->revision < DWC3_REVISION_230A) {
2762 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
2763 } else {
2764 dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
2765
2766 /*
2767 * Ignore suspend event until the gadget enters into
2768 * USB_STATE_CONFIGURED state.
2769 */
2770 if (dwc->gadget.state >= USB_STATE_CONFIGURED)
2771 dwc3_gadget_suspend_interrupt(dwc,
2772 event->event_info);
2773 }
2774 break;
2775 case DWC3_DEVICE_EVENT_SOF:
2776 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
2777 break;
2778 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2779 dwc3_trace(trace_dwc3_gadget, "Erratic Error");
2780 break;
2781 case DWC3_DEVICE_EVENT_CMD_CMPL:
2782 dwc3_trace(trace_dwc3_gadget, "Command Complete");
2783 break;
2784 case DWC3_DEVICE_EVENT_OVERFLOW:
2785 dwc3_trace(trace_dwc3_gadget, "Overflow");
2786 break;
2787 default:
2788 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2789 }
2790 }
2791
2792 static void dwc3_process_event_entry(struct dwc3 *dwc,
2793 const union dwc3_event *event)
2794 {
2795 trace_dwc3_event(event->raw);
2796
2797 /* Endpoint IRQ, handle it and return early */
2798 if (event->type.is_devspec == 0) {
2799 /* depevt */
2800 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2801 }
2802
2803 switch (event->type.type) {
2804 case DWC3_EVENT_TYPE_DEV:
2805 dwc3_gadget_interrupt(dwc, &event->devt);
2806 break;
2807 /* REVISIT what to do with Carkit and I2C events ? */
2808 default:
2809 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2810 }
2811 }
2812
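/*
 * Threaded-IRQ half: drain the event buffer one 4-byte entry at a time,
 * acknowledging each consumed entry in GEVNTCOUNT, then unmask the event
 * interrupt again.
 */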
2813 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
2814 {
2815 struct dwc3 *dwc = evt->dwc;
2816 irqreturn_t ret = IRQ_NONE;
2817 int left;
2818 u32 reg;
2819
2820 left = evt->count;
2821
2822 if (!(evt->flags & DWC3_EVENT_PENDING))
2823 return IRQ_NONE;
2824
2825 while (left > 0) {
2826 union dwc3_event event;
2827
2828 event.raw = *(u32 *) (evt->buf + evt->lpos);
2829
2830 dwc3_process_event_entry(dwc, &event);
2831
2832 /*
2833 * FIXME we wrap around correctly to the next entry as
2834 * almost all entries are 4 bytes in size. There is one
2835 * entry which has 12 bytes which is a regular entry
2836 * followed by 8 bytes of data. ATM I don't know how
2837 * things are organized if we get next to a
2838 * boundary, so I will worry about that once we try to
2839 * handle that case.
2840 */
2841 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2842 left -= 4;
2843
2844 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
2845 }
2846
2847 evt->count = 0;
2848 evt->flags &= ~DWC3_EVENT_PENDING;
2849 ret = IRQ_HANDLED;
2850
2851 /* Unmask interrupt */
2852 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2853 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2854 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2855
2856 return ret;
2857 }
2858
2859 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
2860 {
2861 struct dwc3_event_buffer *evt = _evt;
2862 struct dwc3 *dwc = evt->dwc;
2863 unsigned long flags;
2864 irqreturn_t ret = IRQ_NONE;
2865
2866 spin_lock_irqsave(&dwc->lock, flags);
2867 ret = dwc3_process_event_buf(evt);
2868 spin_unlock_irqrestore(&dwc->lock, flags);
2869
2870 return ret;
2871 }
2872
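/*
 * Hard-IRQ half: if the controller is runtime suspended, just note that
 * events are pending and trigger a resume; otherwise latch the event count,
 * mask the event interrupt and let the threaded handler do the real work.
 */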
2873 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
2874 {
2875 struct dwc3 *dwc = evt->dwc;
2876 u32 count;
2877 u32 reg;
2878
2879 if (pm_runtime_suspended(dwc->dev)) {
2880 pm_runtime_get(dwc->dev);
2881 disable_irq_nosync(dwc->irq_gadget);
2882 dwc->pending_events = true;
2883 return IRQ_HANDLED;
2884 }
2885
2886 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
2887 count &= DWC3_GEVNTCOUNT_MASK;
2888 if (!count)
2889 return IRQ_NONE;
2890
2891 evt->count = count;
2892 evt->flags |= DWC3_EVENT_PENDING;
2893
2894 /* Mask interrupt */
2895 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2896 reg |= DWC3_GEVNTSIZ_INTMASK;
2897 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2898
2899 return IRQ_WAKE_THREAD;
2900 }
2901
2902 static irqreturn_t dwc3_interrupt(int irq, void *_evt)
2903 {
2904 struct dwc3_event_buffer *evt = _evt;
2905
2906 return dwc3_check_event_buf(evt);
2907 }
2908
2909 /**
2910 * dwc3_gadget_init - Initializes gadget related registers
2911 * @dwc: pointer to our controller context structure
2912 *
2913 * Returns 0 on success otherwise negative errno.
2914 */
2915 int dwc3_gadget_init(struct dwc3 *dwc)
2916 {
2917 int ret, irq;
2918 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
2919
2920 irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
2921 if (irq == -EPROBE_DEFER)
2922 return irq;
2923
2924 if (irq <= 0) {
2925 irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
2926 if (irq == -EPROBE_DEFER)
2927 return irq;
2928
2929 if (irq <= 0) {
2930 irq = platform_get_irq(dwc3_pdev, 0);
2931 if (irq <= 0) {
2932 if (irq != -EPROBE_DEFER) {
2933 dev_err(dwc->dev,
2934 "missing peripheral IRQ\n");
2935 }
2936 if (!irq)
2937 irq = -EINVAL;
2938 return irq;
2939 }
2940 }
2941 }
2942
2943 dwc->irq_gadget = irq;
2944
2945 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2946 &dwc->ctrl_req_addr, GFP_KERNEL);
2947 if (!dwc->ctrl_req) {
2948 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2949 ret = -ENOMEM;
2950 goto err0;
2951 }
2952
2953 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2954 &dwc->ep0_trb_addr, GFP_KERNEL);
2955 if (!dwc->ep0_trb) {
2956 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2957 ret = -ENOMEM;
2958 goto err1;
2959 }
2960
2961 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2962 if (!dwc->setup_buf) {
2963 ret = -ENOMEM;
2964 goto err2;
2965 }
2966
2967 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2968 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2969 GFP_KERNEL);
2970 if (!dwc->ep0_bounce) {
2971 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2972 ret = -ENOMEM;
2973 goto err3;
2974 }
2975
2976 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
2977 if (!dwc->zlp_buf) {
2978 ret = -ENOMEM;
2979 goto err4;
2980 }
2981
2982 dwc->gadget.ops = &dwc3_gadget_ops;
2983 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2984 dwc->gadget.sg_supported = true;
2985 dwc->gadget.name = "dwc3-gadget";
2986 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
2987
2988 /*
2989 * FIXME We might be setting max_speed to <SUPER, however versions
2990 * <2.20a of dwc3 have an issue with metastability (documented
2991 * elsewhere in this driver) which tells us we can't set max speed to
2992 * anything lower than SUPER.
2993 *
2994 * Because gadget.max_speed is only used by composite.c and function
2995 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
2996 * to happen so we avoid sending a SuperSpeed Capability descriptor
2997 * together with our BOS descriptor as that could confuse the host
2998 * into thinking we can handle super speed.
2999 *
3000 * Note that, in fact, we won't even support GetBOS requests when speed
3001 * is less than super speed because we don't yet have the means to tell
3002 * composite.c that we are USB 2.0 + LPM ECN.
3003 */
3004 if (dwc->revision < DWC3_REVISION_220A)
3005 dwc3_trace(trace_dwc3_gadget,
3006 "Changing max_speed on rev %08x",
3007 dwc->revision);
3008
3009 dwc->gadget.max_speed = dwc->maximum_speed;
3010
3011 /*
3012 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
3013 * on ep out.
3014 */
3015 dwc->gadget.quirk_ep_out_aligned_size = true;
3016
3017 /*
3018 * REVISIT: Here we should clear all pending IRQs to be
3019 * sure we're starting from a well known location.
3020 */
3021
3022 ret = dwc3_gadget_init_endpoints(dwc);
3023 if (ret)
3024 goto err5;
3025
3026 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
3027 if (ret) {
3028 dev_err(dwc->dev, "failed to register udc\n");
3029 goto err5;
3030 }
3031
3032 return 0;
3033
3034 err5:
3035 kfree(dwc->zlp_buf);
3036
3037 err4:
3038 dwc3_gadget_free_endpoints(dwc);
3039 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3040 dwc->ep0_bounce, dwc->ep0_bounce_addr);
3041
3042 err3:
3043 kfree(dwc->setup_buf);
3044
3045 err2:
3046 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
3047 dwc->ep0_trb, dwc->ep0_trb_addr);
3048
3049 err1:
3050 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3051 dwc->ctrl_req, dwc->ctrl_req_addr);
3052
3053 err0:
3054 return ret;
3055 }
3056
3057 /* -------------------------------------------------------------------------- */
3058
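/*
 * Tear down everything set up by dwc3_gadget_init(): unregister the UDC,
 * free the endpoints and release the coherent DMA buffers.
 */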
3059 void dwc3_gadget_exit(struct dwc3 *dwc)
3060 {
3061 usb_del_gadget_udc(&dwc->gadget);
3062
3063 dwc3_gadget_free_endpoints(dwc);
3064
3065 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3066 dwc->ep0_bounce, dwc->ep0_bounce_addr);
3067
3068 kfree(dwc->setup_buf);
3069 kfree(dwc->zlp_buf);
3070
3071 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
3072 dwc->ep0_trb, dwc->ep0_trb_addr);
3073
3074 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3075 dwc->ctrl_req, dwc->ctrl_req_addr);
3076 }
3077
3078 int dwc3_gadget_suspend(struct dwc3 *dwc)
3079 {
3080 int ret;
3081
3082 if (!dwc->gadget_driver)
3083 return 0;
3084
3085 ret = dwc3_gadget_run_stop(dwc, false, false);
3086 if (ret < 0)
3087 return ret;
3088
3089 dwc3_disconnect_gadget(dwc);
3090 __dwc3_gadget_stop(dwc);
3091
3092 return 0;
3093 }
3094
3095 int dwc3_gadget_resume(struct dwc3 *dwc)
3096 {
3097 int ret;
3098
3099 if (!dwc->gadget_driver)
3100 return 0;
3101
3102 ret = __dwc3_gadget_start(dwc);
3103 if (ret < 0)
3104 goto err0;
3105
3106 ret = dwc3_gadget_run_stop(dwc, true, false);
3107 if (ret < 0)
3108 goto err1;
3109
3110 return 0;
3111
3112 err1:
3113 __dwc3_gadget_stop(dwc);
3114
3115 err0:
3116 return ret;
3117 }
3118
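/*
 * Replay events that arrived while the controller was runtime suspended
 * (see dwc3_check_event_buf()) and re-enable the gadget IRQ.
 */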
3119 void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
3120 {
3121 if (dwc->pending_events) {
3122 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
3123 dwc->pending_events = false;
3124 enable_irq(dwc->irq_gadget);
3125 }
3126 }