drivers/usb/dwc3/gadget.c
usb: dwc3: gadget: add a per-endpoint request queue lock
1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37
38 /**
39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40 * @dwc: pointer to our context structure
41 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
42 *
43 * Caller should take care of locking. This function will
44 * return 0 on success or -EINVAL if wrong Test Selector
45 * is passed
46 */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69 }
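
/*
 * Worked example, assuming the standard ch9.h test selectors (TEST_J = 1,
 * TEST_K = 2, TEST_SE0_NAK = 3, TEST_PACKET = 4, TEST_FORCE_EN = 5):
 * a request for TEST_PACKET first clears the field covered by
 * DWC3_DCTL_TSTCTRL_MASK and then writes 4 << 1 = 0x8 into DCTL, i.e.
 * the selector value lands directly in the TstCtrl field.
 */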
70
71 /**
72 * dwc3_gadget_get_link_state - Gets current state of USB Link
73 * @dwc: pointer to our context structure
74 *
75 * Caller should take care of locking. This function will
76 * return the link state on success (>= 0) or -ETIMEDOUT.
77 */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85 }
86
87 /**
88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89 * @dwc: pointer to our context structure
90 * @state: the state to put link into
91 *
92 * Caller should take care of locking. This function will
93 * return 0 on success or -ETIMEDOUT.
94 */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97 int retries = 10000;
98 u32 reg;
99
100 /*
101 * Wait until device controller is ready. Only applies to 1.94a and
102 * later RTL.
103 */
104 if (dwc->revision >= DWC3_REVISION_194A) {
105 while (--retries) {
106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 if (reg & DWC3_DSTS_DCNRD)
108 udelay(5);
109 else
110 break;
111 }
112
113 if (retries <= 0)
114 return -ETIMEDOUT;
115 }
116
117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120 /* set requested state */
121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
124 /*
125 * The following code is racy when called from dwc3_gadget_wakeup,
126 * and is not needed, at least on newer versions
127 */
128 if (dwc->revision >= DWC3_REVISION_194A)
129 return 0;
130
131 /* wait for a change in DSTS */
132 retries = 10000;
133 while (--retries) {
134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
136 if (DWC3_DSTS_USBLNKST(reg) == state)
137 return 0;
138
139 udelay(5);
140 }
141
142 dwc3_trace(trace_dwc3_gadget,
143 "link state change request timed out");
144
145 return -ETIMEDOUT;
146 }
147
148 static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
149 {
150 dep->trb_enqueue++;
151 dep->trb_enqueue %= DWC3_TRB_NUM;
152 }
153
154 static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
155 {
156 dep->trb_dequeue++;
157 dep->trb_dequeue %= DWC3_TRB_NUM;
158 }
159
160 static int dwc3_ep_is_last_trb(unsigned int index)
161 {
162 return index == DWC3_TRB_NUM - 1;
163 }
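
/*
 * Note on the three helpers above: the TRB ring has DWC3_TRB_NUM slots and
 * the last slot is reserved for the Link TRB that wraps the ring back to
 * slot 0 (see __dwc3_gadget_ep_enable()), which is why callers bump the
 * enqueue/dequeue index a second time whenever it lands on that slot.
 */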
164
165 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
166 int status)
167 {
168 struct dwc3 *dwc = dep->dwc;
169 int i;
170
171 if (req->started) {
172 i = 0;
173 do {
174 dwc3_ep_inc_deq(dep);
175 /*
176 * Skip LINK TRB. We can't use req->trb and check for
177 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
178 * just completed (not the LINK TRB).
179 */
180 if (dwc3_ep_is_last_trb(dep->trb_dequeue))
181 dwc3_ep_inc_deq(dep);
182 } while (++i < req->request.num_mapped_sgs);
183 req->started = false;
184 }
185 list_del(&req->list);
186 req->trb = NULL;
187
188 if (req->request.status == -EINPROGRESS)
189 req->request.status = status;
190
191 if (dwc->ep0_bounced && dep->number == 0)
192 dwc->ep0_bounced = false;
193 else
194 usb_gadget_unmap_request(&dwc->gadget, &req->request,
195 req->direction);
196
197 trace_dwc3_gadget_giveback(req);
198
199 spin_unlock(&dwc->lock);
200 usb_gadget_giveback_request(&dep->endpoint, &req->request);
201 spin_lock(&dwc->lock);
202
203 if (dep->number > 1)
204 pm_runtime_put(dwc->dev);
205 }
206
207 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
208 {
209 u32 timeout = 500;
210 u32 reg;
211
212 trace_dwc3_gadget_generic_cmd(cmd, param);
213
214 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
215 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
216
217 do {
218 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
219 if (!(reg & DWC3_DGCMD_CMDACT)) {
220 dwc3_trace(trace_dwc3_gadget,
221 "Command Complete --> %d",
222 DWC3_DGCMD_STATUS(reg));
223 if (DWC3_DGCMD_STATUS(reg))
224 return -EINVAL;
225 return 0;
226 }
227
228 /*
229 * We can't sleep here, because it's also called from
230 * interrupt context.
231 */
232 timeout--;
233 if (!timeout) {
234 dwc3_trace(trace_dwc3_gadget,
235 "Command Timed Out");
236 return -ETIMEDOUT;
237 }
238 udelay(1);
239 } while (1);
240 }
241
242 static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
243
244 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
245 struct dwc3_gadget_ep_cmd_params *params)
246 {
247 struct dwc3 *dwc = dep->dwc;
248 u32 timeout = 500;
249 u32 reg;
250
251 int susphy = false;
252 int ret = -EINVAL;
253
254 trace_dwc3_gadget_ep_cmd(dep, cmd, params);
255
256 /*
257 * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
258 * we're issuing an endpoint command, we must check if
259 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
260 *
261 * We will also set SUSPHY bit to what it was before returning as stated
262 * by the same section on Synopsys databook.
263 */
264 if (dwc->gadget.speed <= USB_SPEED_HIGH) {
265 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
266 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
267 susphy = true;
268 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
269 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
270 }
271 }
272
273 if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
274 int needs_wakeup;
275
276 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
277 dwc->link_state == DWC3_LINK_STATE_U2 ||
278 dwc->link_state == DWC3_LINK_STATE_U3);
279
280 if (unlikely(needs_wakeup)) {
281 ret = __dwc3_gadget_wakeup(dwc);
282 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
283 ret);
284 }
285 }
286
287 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
288 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
289 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
290
291 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
292 do {
293 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
294 if (!(reg & DWC3_DEPCMD_CMDACT)) {
295 int cmd_status = DWC3_DEPCMD_STATUS(reg);
296
297 dwc3_trace(trace_dwc3_gadget,
298 "Command Complete --> %d",
299 cmd_status);
300
301 switch (cmd_status) {
302 case 0:
303 ret = 0;
304 break;
305 case DEPEVT_TRANSFER_NO_RESOURCE:
306 dwc3_trace(trace_dwc3_gadget, "%s: no resource available");
307 ret = -EINVAL;
308 break;
309 case DEPEVT_TRANSFER_BUS_EXPIRY:
310 /*
311 * SW issues START TRANSFER command to
312 * isochronous ep with future frame interval. If
313 * future interval time has already passed when
314 * core receives the command, it will respond
315 * with an error status of 'Bus Expiry'.
316 *
317 * Instead of always returning -EINVAL, let's
318 * give a hint to the gadget driver that this is
319 * the case by returning -EAGAIN.
320 */
321 dwc3_trace(trace_dwc3_gadget, "%s: bus expiry");
322 ret = -EAGAIN;
323 break;
324 default:
325 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
326 }
327
328 break;
329 }
330
331 /*
332 * We can't sleep here, because it is also called from
333 * interrupt context.
334 */
335 timeout--;
336 if (!timeout) {
337 dwc3_trace(trace_dwc3_gadget,
338 "Command Timed Out");
339 ret = -ETIMEDOUT;
340 break;
341 }
342 } while (1);
343
344 if (unlikely(susphy)) {
345 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
346 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
347 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
348 }
349
350 return ret;
351 }
352
353 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
354 {
355 struct dwc3 *dwc = dep->dwc;
356 struct dwc3_gadget_ep_cmd_params params;
357 u32 cmd = DWC3_DEPCMD_CLEARSTALL;
358
359 /*
360 * As of core revision 2.60a the recommended programming model
361 * is to set the ClearPendIN bit when issuing a Clear Stall EP
362 * command for IN endpoints. This is to prevent an issue where
363 * some (non-compliant) hosts may not send ACK TPs for pending
364 * IN transfers due to a mishandled error condition. Synopsys
365 * STAR 9000614252.
366 */
367 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
368 cmd |= DWC3_DEPCMD_CLEARPENDIN;
369
370 memset(&params, 0, sizeof(params));
371
372 return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
373 }
374
375 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
376 struct dwc3_trb *trb)
377 {
378 u32 offset = (char *) trb - (char *) dep->trb_pool;
379
380 return dep->trb_pool_dma + offset;
381 }
382
383 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
384 {
385 struct dwc3 *dwc = dep->dwc;
386
387 if (dep->trb_pool)
388 return 0;
389
390 dep->trb_pool = dma_alloc_coherent(dwc->dev,
391 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
392 &dep->trb_pool_dma, GFP_KERNEL);
393 if (!dep->trb_pool) {
394 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
395 dep->name);
396 return -ENOMEM;
397 }
398
399 return 0;
400 }
401
402 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
403 {
404 struct dwc3 *dwc = dep->dwc;
405
406 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
407 dep->trb_pool, dep->trb_pool_dma);
408
409 dep->trb_pool = NULL;
410 dep->trb_pool_dma = 0;
411 }
412
413 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
414
415 /**
416 * dwc3_gadget_start_config - Configure EP resources
417 * @dwc: pointer to our controller context structure
418 * @dep: endpoint that is being enabled
419 *
420 * The assignment of transfer resources cannot perfectly follow the
421 * data book due to the fact that the controller driver does not have
422 * all knowledge of the configuration in advance. It is given this
423 * information piecemeal by the composite gadget framework after every
424 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
425 * programming model in this scenario can cause errors. For two
426 * reasons:
427 *
428 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
429 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
430 * multiple interfaces.
431 *
432 * 2) The databook does not mention doing more DEPXFERCFG for new
433 * endpoint on alt setting (8.1.6).
434 *
435 * The following simplified method is used instead:
436 *
437 * All hardware endpoints can be assigned a transfer resource and this
438 * setting will stay persistent until either a core reset or
439 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
440 * do DEPXFERCFG for every hardware endpoint as well. We are
441 * guaranteed that there are as many transfer resources as endpoints.
442 *
443 * This function is called for each endpoint when it is being enabled
444 * but is triggered only when called for EP0-out, which always happens
445 * first, and which should only happen in one of the above conditions.
446 */
447 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
448 {
449 struct dwc3_gadget_ep_cmd_params params;
450 u32 cmd;
451 int i;
452 int ret;
453
454 if (dep->number)
455 return 0;
456
457 memset(&params, 0x00, sizeof(params));
458 cmd = DWC3_DEPCMD_DEPSTARTCFG;
459
460 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
461 if (ret)
462 return ret;
463
464 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
465 struct dwc3_ep *dep = dwc->eps[i];
466
467 if (!dep)
468 continue;
469
470 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
471 if (ret)
472 return ret;
473 }
474
475 return 0;
476 }
477
478 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
479 const struct usb_endpoint_descriptor *desc,
480 const struct usb_ss_ep_comp_descriptor *comp_desc,
481 bool ignore, bool restore)
482 {
483 struct dwc3_gadget_ep_cmd_params params;
484
485 memset(&params, 0x00, sizeof(params));
486
487 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
488 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
489
490 /* Burst size is only needed in SuperSpeed mode */
491 if (dwc->gadget.speed >= USB_SPEED_SUPER) {
492 u32 burst = dep->endpoint.maxburst;
493 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
494 }
495
496 if (ignore)
497 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
498
499 if (restore) {
500 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
501 params.param2 |= dep->saved_state;
502 }
503
504 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
505 | DWC3_DEPCFG_XFER_NOT_READY_EN;
506
507 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
508 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
509 | DWC3_DEPCFG_STREAM_EVENT_EN;
510 dep->stream_capable = true;
511 }
512
513 if (!usb_endpoint_xfer_control(desc))
514 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
515
516 /*
517 * We are doing 1:1 mapping for endpoints, meaning
518 * Physical Endpoint 2 maps to Logical Endpoint 2 and
519 * so on. We consider the direction bit as part of the physical
520 * endpoint number. So USB endpoint 0x81 is 0x03.
521 */
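/*
 * A couple more worked examples of that mapping, matching the "ep%d%s"
 * names built in dwc3_gadget_init_hw_endpoints(): physical endpoint 3
 * ((1 << 1) | 1) is ep1in, i.e. USB endpoint 0x81, while physical
 * endpoint 4 ((2 << 1) | 0) is ep2out, i.e. USB endpoint 0x02.
 */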
522 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
523
524 /*
525 * We must use the lower 16 TX FIFOs even though
526 * HW might have more
527 */
528 if (dep->direction)
529 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
530
531 if (desc->bInterval) {
532 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
533 dep->interval = 1 << (desc->bInterval - 1);
534 }
535
536 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
537 }
538
539 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
540 {
541 struct dwc3_gadget_ep_cmd_params params;
542
543 memset(&params, 0x00, sizeof(params));
544
545 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
546
547 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
548 &params);
549 }
550
551 /**
552 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
553 * @dep: endpoint to be initialized
554 * @desc: USB Endpoint Descriptor
555 *
556 * Caller should take care of locking
557 */
558 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
559 const struct usb_endpoint_descriptor *desc,
560 const struct usb_ss_ep_comp_descriptor *comp_desc,
561 bool ignore, bool restore)
562 {
563 struct dwc3 *dwc = dep->dwc;
564 u32 reg;
565 int ret;
566
567 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
568
569 if (!(dep->flags & DWC3_EP_ENABLED)) {
570 ret = dwc3_gadget_start_config(dwc, dep);
571 if (ret)
572 return ret;
573 }
574
575 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
576 restore);
577 if (ret)
578 return ret;
579
580 if (!(dep->flags & DWC3_EP_ENABLED)) {
581 struct dwc3_trb *trb_st_hw;
582 struct dwc3_trb *trb_link;
583
584 dep->endpoint.desc = desc;
585 dep->comp_desc = comp_desc;
586 dep->type = usb_endpoint_type(desc);
587 dep->flags |= DWC3_EP_ENABLED;
588
589 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
590 reg |= DWC3_DALEPENA_EP(dep->number);
591 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
592
593 if (usb_endpoint_xfer_control(desc))
594 goto out;
595
596 /* Link TRB. The HWO bit is never reset */
597 trb_st_hw = &dep->trb_pool[0];
598
599 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
600 memset(trb_link, 0, sizeof(*trb_link));
601
602 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
603 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
604 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
605 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
606 }
607
608 out:
609 switch (usb_endpoint_type(desc)) {
610 case USB_ENDPOINT_XFER_CONTROL:
611 /* don't change name */
612 break;
613 case USB_ENDPOINT_XFER_ISOC:
614 strlcat(dep->name, "-isoc", sizeof(dep->name));
615 break;
616 case USB_ENDPOINT_XFER_BULK:
617 strlcat(dep->name, "-bulk", sizeof(dep->name));
618 break;
619 case USB_ENDPOINT_XFER_INT:
620 strlcat(dep->name, "-int", sizeof(dep->name));
621 break;
622 default:
623 dev_err(dwc->dev, "invalid endpoint transfer type\n");
624 }
625
626 return 0;
627 }
628
629 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
630 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
631 {
632 struct dwc3_request *req;
633
634 if (!list_empty(&dep->started_list)) {
635 dwc3_stop_active_transfer(dwc, dep->number, true);
636
637 /* giveback all requests to gadget driver */
638 while (!list_empty(&dep->started_list)) {
639 req = next_request(&dep->started_list);
640
641 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
642 }
643 }
644
645 while (!list_empty(&dep->pending_list)) {
646 req = next_request(&dep->pending_list);
647
648 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
649 }
650 }
651
652 /**
653 * __dwc3_gadget_ep_disable - Disables a HW endpoint
654 * @dep: the endpoint to disable
655 *
656 * This function also removes requests which are currently processed by the
657 * hardware and those which are not yet scheduled.
658 * Caller should take care of locking.
659 */
660 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
661 {
662 struct dwc3 *dwc = dep->dwc;
663 u32 reg;
664
665 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
666
667 dwc3_remove_requests(dwc, dep);
668
669 /* make sure HW endpoint isn't stalled */
670 if (dep->flags & DWC3_EP_STALL)
671 __dwc3_gadget_ep_set_halt(dep, 0, false);
672
673 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
674 reg &= ~DWC3_DALEPENA_EP(dep->number);
675 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
676
677 dep->stream_capable = false;
678 dep->endpoint.desc = NULL;
679 dep->comp_desc = NULL;
680 dep->type = 0;
681 dep->flags = 0;
682
683 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
684 dep->number >> 1,
685 (dep->number & 1) ? "in" : "out");
686
687 return 0;
688 }
689
690 /* -------------------------------------------------------------------------- */
691
692 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
693 const struct usb_endpoint_descriptor *desc)
694 {
695 return -EINVAL;
696 }
697
698 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
699 {
700 return -EINVAL;
701 }
702
703 /* -------------------------------------------------------------------------- */
704
705 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
706 const struct usb_endpoint_descriptor *desc)
707 {
708 struct dwc3_ep *dep;
709 struct dwc3 *dwc;
710 unsigned long flags;
711 int ret;
712
713 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
714 pr_debug("dwc3: invalid parameters\n");
715 return -EINVAL;
716 }
717
718 if (!desc->wMaxPacketSize) {
719 pr_debug("dwc3: missing wMaxPacketSize\n");
720 return -EINVAL;
721 }
722
723 dep = to_dwc3_ep(ep);
724 dwc = dep->dwc;
725
726 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
727 "%s is already enabled\n",
728 dep->name))
729 return 0;
730
731 spin_lock_irqsave(&dwc->lock, flags);
732 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
733 spin_unlock_irqrestore(&dwc->lock, flags);
734
735 return ret;
736 }
737
738 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
739 {
740 struct dwc3_ep *dep;
741 struct dwc3 *dwc;
742 unsigned long flags;
743 int ret;
744
745 if (!ep) {
746 pr_debug("dwc3: invalid parameters\n");
747 return -EINVAL;
748 }
749
750 dep = to_dwc3_ep(ep);
751 dwc = dep->dwc;
752
753 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
754 "%s is already disabled\n",
755 dep->name))
756 return 0;
757
758 spin_lock_irqsave(&dwc->lock, flags);
759 ret = __dwc3_gadget_ep_disable(dep);
760 spin_unlock_irqrestore(&dwc->lock, flags);
761
762 return ret;
763 }
764
765 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
766 gfp_t gfp_flags)
767 {
768 struct dwc3_request *req;
769 struct dwc3_ep *dep = to_dwc3_ep(ep);
770
771 req = kzalloc(sizeof(*req), gfp_flags);
772 if (!req)
773 return NULL;
774
775 req->epnum = dep->number;
776 req->dep = dep;
777
778 trace_dwc3_alloc_request(req);
779
780 return &req->request;
781 }
782
783 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
784 struct usb_request *request)
785 {
786 struct dwc3_request *req = to_dwc3_request(request);
787
788 trace_dwc3_free_request(req);
789 kfree(req);
790 }
791
792 /**
793 * dwc3_prepare_one_trb - setup one TRB from one request
794 * @dep: endpoint for which this request is prepared
795 * @req: dwc3_request pointer
796 */
797 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
798 struct dwc3_request *req, dma_addr_t dma,
799 unsigned length, unsigned last, unsigned chain, unsigned node)
800 {
801 struct dwc3_trb *trb;
802
803 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
804 dep->name, req, (unsigned long long) dma,
805 length, last ? " last" : "",
806 chain ? " chain" : "");
807
808
809 trb = &dep->trb_pool[dep->trb_enqueue];
810
811 if (!req->trb) {
812 dwc3_gadget_move_started_request(req);
813 req->trb = trb;
814 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
815 req->first_trb_index = dep->trb_enqueue;
816 }
817
818 dwc3_ep_inc_enq(dep);
819 /* Skip the LINK-TRB */
820 if (dwc3_ep_is_last_trb(dep->trb_enqueue))
821 dwc3_ep_inc_enq(dep);
822
823 trb->size = DWC3_TRB_SIZE_LENGTH(length);
824 trb->bpl = lower_32_bits(dma);
825 trb->bph = upper_32_bits(dma);
826
827 switch (usb_endpoint_type(dep->endpoint.desc)) {
828 case USB_ENDPOINT_XFER_CONTROL:
829 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
830 break;
831
832 case USB_ENDPOINT_XFER_ISOC:
833 if (!node)
834 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
835 else
836 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
837
838 /* always enable Interrupt on Missed ISOC */
839 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
840 break;
841
842 case USB_ENDPOINT_XFER_BULK:
843 case USB_ENDPOINT_XFER_INT:
844 trb->ctrl = DWC3_TRBCTL_NORMAL;
845 break;
846 default:
847 /*
848 * This is only possible with faulty memory because we
849 * checked it already :)
850 */
851 BUG();
852 }
853
854 /* always enable Continue on Short Packet */
855 trb->ctrl |= DWC3_TRB_CTRL_CSP;
856
857 if (!req->request.no_interrupt && !chain)
858 trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
859
860 if (last)
861 trb->ctrl |= DWC3_TRB_CTRL_LST;
862
863 if (chain)
864 trb->ctrl |= DWC3_TRB_CTRL_CHN;
865
866 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
867 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
868
869 trb->ctrl |= DWC3_TRB_CTRL_HWO;
870
871 trace_dwc3_prepare_trb(dep, trb);
872 }
873
874 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
875 {
876 struct dwc3_trb *tmp;
877
878 /*
879 * If enqueue & dequeue are equal then the ring is either full or empty.
880 *
881 * One way to know for sure is if the TRB right before us has HWO bit
882 * set or not. If it has, then we're definitely full and can't fit any
883 * more transfers in our ring.
884 */
885 if (dep->trb_enqueue == dep->trb_dequeue) {
886 /* If we're full, enqueue/dequeue are > 0 */
887 if (dep->trb_enqueue) {
888 tmp = &dep->trb_pool[dep->trb_enqueue - 1];
889 if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
890 return 0;
891 }
892
893 return DWC3_TRB_NUM - 1;
894 }
895
896 return dep->trb_dequeue - dep->trb_enqueue;
897 }
898
899 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
900 struct dwc3_request *req, unsigned int trbs_left)
901 {
902 struct usb_request *request = &req->request;
903 struct scatterlist *sg = request->sg;
904 struct scatterlist *s;
905 unsigned int last = false;
906 unsigned int length;
907 dma_addr_t dma;
908 int i;
909
910 for_each_sg(sg, s, request->num_mapped_sgs, i) {
911 unsigned chain = true;
912
913 length = sg_dma_len(s);
914 dma = sg_dma_address(s);
915
916 if (sg_is_last(s)) {
917 if (list_is_last(&req->list, &dep->pending_list))
918 last = true;
919
920 chain = false;
921 }
922
923 if (!trbs_left)
924 last = true;
925
926 if (last)
927 chain = false;
928
929 dwc3_prepare_one_trb(dep, req, dma, length,
930 last, chain, i);
931
932 if (last)
933 break;
934 }
935 }
936
937 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
938 struct dwc3_request *req, unsigned int trbs_left)
939 {
940 unsigned int last = false;
941 unsigned int length;
942 dma_addr_t dma;
943
944 dma = req->request.dma;
945 length = req->request.length;
946
947 if (!trbs_left)
948 last = true;
949
950 /* Is this the last request? */
951 if (list_is_last(&req->list, &dep->pending_list))
952 last = true;
953
954 dwc3_prepare_one_trb(dep, req, dma, length,
955 last, false, 0);
956 }
957
958 /*
959 * dwc3_prepare_trbs - setup TRBs from requests
960 * @dep: endpoint for which requests are being prepared
961 *
962 * The function goes through the requests list and sets up TRBs for the
963 * transfers. The function returns once there are no more TRBs available or
964 * it runs out of requests.
965 */
966 static void dwc3_prepare_trbs(struct dwc3_ep *dep)
967 {
968 struct dwc3_request *req, *n;
969 u32 trbs_left;
970
971 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
972
973 trbs_left = dwc3_calc_trbs_left(dep);
974
975 list_for_each_entry_safe(req, n, &dep->pending_list, list) {
976 if (req->request.num_mapped_sgs > 0)
977 dwc3_prepare_one_trb_sg(dep, req, trbs_left--);
978 else
979 dwc3_prepare_one_trb_linear(dep, req, trbs_left--);
980
981 if (!trbs_left)
982 return;
983 }
984 }
985
986 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
987 {
988 struct dwc3_gadget_ep_cmd_params params;
989 struct dwc3_request *req;
990 struct dwc3 *dwc = dep->dwc;
991 int starting;
992 int ret;
993 u32 cmd;
994
995 starting = !(dep->flags & DWC3_EP_BUSY);
996
997 dwc3_prepare_trbs(dep);
998 req = next_request(&dep->started_list);
999 if (!req) {
1000 dep->flags |= DWC3_EP_PENDING_REQUEST;
1001 return 0;
1002 }
1003
1004 memset(&params, 0, sizeof(params));
1005
1006 if (starting) {
1007 params.param0 = upper_32_bits(req->trb_dma);
1008 params.param1 = lower_32_bits(req->trb_dma);
1009 cmd = DWC3_DEPCMD_STARTTRANSFER;
1010 } else {
1011 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1012 }
1013
1014 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
1015 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1016 if (ret < 0) {
1017 /*
1018 * FIXME we need to iterate over the list of requests
1019 * here and stop, unmap, free and del each of the linked
1020 * requests instead of what we do now.
1021 */
1022 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1023 req->direction);
1024 list_del(&req->list);
1025 return ret;
1026 }
1027
1028 dep->flags |= DWC3_EP_BUSY;
1029
1030 if (starting) {
1031 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
1032 WARN_ON_ONCE(!dep->resource_index);
1033 }
1034
1035 return 0;
1036 }
1037
1038 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1039 struct dwc3_ep *dep, u32 cur_uf)
1040 {
1041 u32 uf;
1042
1043 if (list_empty(&dep->pending_list)) {
1044 dwc3_trace(trace_dwc3_gadget,
1045 "ISOC ep %s run out for requests",
1046 dep->name);
1047 dep->flags |= DWC3_EP_PENDING_REQUEST;
1048 return;
1049 }
1050
1051 /* 4 micro frames in the future */
1052 uf = cur_uf + dep->interval * 4;
1053
1054 __dwc3_gadget_kick_transfer(dep, uf);
1055 }
1056
1057 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1058 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1059 {
1060 u32 cur_uf, mask;
1061
1062 mask = ~(dep->interval - 1);
1063 cur_uf = event->parameters & mask;
1064
1065 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1066 }
1067
1068 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1069 {
1070 struct dwc3 *dwc = dep->dwc;
1071 int ret;
1072
1073 if (!dep->endpoint.desc) {
1074 dwc3_trace(trace_dwc3_gadget,
1075 "trying to queue request %p to disabled %s\n",
1076 &req->request, dep->endpoint.name);
1077 return -ESHUTDOWN;
1078 }
1079
1080 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1081 &req->request, req->dep->name)) {
1082 dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'\n",
1083 &req->request, req->dep->name);
1084 return -EINVAL;
1085 }
1086
1087 pm_runtime_get(dwc->dev);
1088
1089 req->request.actual = 0;
1090 req->request.status = -EINPROGRESS;
1091 req->direction = dep->direction;
1092 req->epnum = dep->number;
1093
1094 trace_dwc3_ep_queue(req);
1095
1096 /*
1097 * We only add to our list of requests now and
1098 * start consuming the list once we get XferNotReady
1099 * IRQ.
1100 *
1101 * That way, we avoid doing anything that we don't need
1102 * to do now and defer it until the point we receive a
1103 * particular token from the Host side.
1104 *
1105 * This will also avoid Host cancelling URBs due to too
1106 * many NAKs.
1107 */
1108 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1109 dep->direction);
1110 if (ret)
1111 return ret;
1112
1113 list_add_tail(&req->list, &dep->pending_list);
1114
1115 /*
1116 * If there are no pending requests and the endpoint isn't already
1117 * busy, we will just start the request straight away.
1118 *
1119 * This will save one IRQ (XFER_NOT_READY) and possibly make it a
1120 * little bit faster.
1121 */
1122 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1123 !usb_endpoint_xfer_int(dep->endpoint.desc) &&
1124 !(dep->flags & DWC3_EP_BUSY)) {
1125 ret = __dwc3_gadget_kick_transfer(dep, 0);
1126 goto out;
1127 }
1128
1129 /*
1130 * There are a few special cases:
1131 *
1132 * 1. XferNotReady with empty list of requests. We need to kick the
1133 * transfer here in that situation, otherwise we will be NAKing
1134 * forever. If we get XferNotReady before gadget driver has a
1135 * chance to queue a request, we will ACK the IRQ but won't be
1136 * able to receive the data until the next request is queued.
1137 * The following code is handling exactly that.
1138 *
1139 */
1140 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1141 /*
1142 * If XferNotReady has already elapsed and this is an isoc
1143 * transfer, then issue END TRANSFER, so that we can receive
1144 * XferNotReady again and regain the notion of the current
1145 * microframe.
1146 */
1147 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1148 if (list_empty(&dep->started_list)) {
1149 dwc3_stop_active_transfer(dwc, dep->number, true);
1150 dep->flags = DWC3_EP_ENABLED;
1151 }
1152 return 0;
1153 }
1154
1155 ret = __dwc3_gadget_kick_transfer(dep, 0);
1156 if (!ret)
1157 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
1158
1159 goto out;
1160 }
1161
1162 /*
1163 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1164 * kick the transfer here after queuing a request, otherwise the
1165 * core may not see the modified TRB(s).
1166 */
1167 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1168 (dep->flags & DWC3_EP_BUSY) &&
1169 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1170 WARN_ON_ONCE(!dep->resource_index);
1171 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index);
1172 goto out;
1173 }
1174
1175 /*
1176 * 4. Stream Capable Bulk Endpoints. We need to start the transfer
1177 * right away, otherwise host will not know we have streams to be
1178 * handled.
1179 */
1180 if (dep->stream_capable)
1181 ret = __dwc3_gadget_kick_transfer(dep, 0);
1182
1183 out:
1184 if (ret && ret != -EBUSY)
1185 dwc3_trace(trace_dwc3_gadget,
1186 "%s: failed to kick transfers\n",
1187 dep->name);
1188 if (ret == -EBUSY)
1189 ret = 0;
1190
1191 return ret;
1192 }
1193
1194 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
1195 struct usb_request *request)
1196 {
1197 dwc3_gadget_ep_free_request(ep, request);
1198 }
1199
1200 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
1201 {
1202 struct dwc3_request *req;
1203 struct usb_request *request;
1204 struct usb_ep *ep = &dep->endpoint;
1205
1206 dwc3_trace(trace_dwc3_gadget, "queueing ZLP\n");
1207 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
1208 if (!request)
1209 return -ENOMEM;
1210
1211 request->length = 0;
1212 request->buf = dwc->zlp_buf;
1213 request->complete = __dwc3_gadget_ep_zlp_complete;
1214
1215 req = to_dwc3_request(request);
1216
1217 return __dwc3_gadget_ep_queue(dep, req);
1218 }
1219
1220 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1221 gfp_t gfp_flags)
1222 {
1223 struct dwc3_request *req = to_dwc3_request(request);
1224 struct dwc3_ep *dep = to_dwc3_ep(ep);
1225 struct dwc3 *dwc = dep->dwc;
1226
1227 unsigned long flags;
1228
1229 int ret;
1230
1231 spin_lock_irqsave(&dwc->lock, flags);
1232 ret = __dwc3_gadget_ep_queue(dep, req);
1233
1234 /*
1235 * Okay, here's the thing, if gadget driver has requested for a ZLP by
1236 * setting request->zero, instead of doing magic, we will just queue an
1237 * extra usb_request ourselves so that it gets handled the same way as
1238 * any other request.
1239 */
1240 if (ret == 0 && request->zero && request->length &&
1241 (request->length % ep->maxpacket == 0))
1242 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
1243
1244 spin_unlock_irqrestore(&dwc->lock, flags);
1245
1246 return ret;
1247 }
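
/*
 * For reference, a (hypothetical) function driver would hit the ZLP path
 * above simply by flagging its request before queueing it:
 *
 *	req->length = 512;		// multiple of ep->maxpacket
 *	req->zero = 1;			// ask for a trailing ZLP
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * so the extra zero-length request is queued here on the driver's behalf.
 */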
1248
1249 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1250 struct usb_request *request)
1251 {
1252 struct dwc3_request *req = to_dwc3_request(request);
1253 struct dwc3_request *r = NULL;
1254
1255 struct dwc3_ep *dep = to_dwc3_ep(ep);
1256 struct dwc3 *dwc = dep->dwc;
1257
1258 unsigned long flags;
1259 int ret = 0;
1260
1261 trace_dwc3_ep_dequeue(req);
1262
1263 spin_lock_irqsave(&dwc->lock, flags);
1264
1265 list_for_each_entry(r, &dep->pending_list, list) {
1266 if (r == req)
1267 break;
1268 }
1269
1270 if (r != req) {
1271 list_for_each_entry(r, &dep->started_list, list) {
1272 if (r == req)
1273 break;
1274 }
1275 if (r == req) {
1276 /* wait until it is processed */
1277 dwc3_stop_active_transfer(dwc, dep->number, true);
1278 goto out1;
1279 }
1280 dev_err(dwc->dev, "request %p was not queued to %s\n",
1281 request, ep->name);
1282 ret = -EINVAL;
1283 goto out0;
1284 }
1285
1286 out1:
1287 /* giveback the request */
1288 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1289
1290 out0:
1291 spin_unlock_irqrestore(&dwc->lock, flags);
1292
1293 return ret;
1294 }
1295
1296 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1297 {
1298 struct dwc3_gadget_ep_cmd_params params;
1299 struct dwc3 *dwc = dep->dwc;
1300 int ret;
1301
1302 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1303 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1304 return -EINVAL;
1305 }
1306
1307 memset(&params, 0x00, sizeof(params));
1308
1309 if (value) {
1310 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1311 (!list_empty(&dep->started_list) ||
1312 !list_empty(&dep->pending_list)))) {
1313 dwc3_trace(trace_dwc3_gadget,
1314 "%s: pending request, cannot halt",
1315 dep->name);
1316 return -EAGAIN;
1317 }
1318
1319 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
1320 &params);
1321 if (ret)
1322 dev_err(dwc->dev, "failed to set STALL on %s\n",
1323 dep->name);
1324 else
1325 dep->flags |= DWC3_EP_STALL;
1326 } else {
1327
1328 ret = dwc3_send_clear_stall_ep_cmd(dep);
1329 if (ret)
1330 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1331 dep->name);
1332 else
1333 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1334 }
1335
1336 return ret;
1337 }
1338
1339 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1340 {
1341 struct dwc3_ep *dep = to_dwc3_ep(ep);
1342 struct dwc3 *dwc = dep->dwc;
1343
1344 unsigned long flags;
1345
1346 int ret;
1347
1348 spin_lock_irqsave(&dwc->lock, flags);
1349 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1350 spin_unlock_irqrestore(&dwc->lock, flags);
1351
1352 return ret;
1353 }
1354
1355 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1356 {
1357 struct dwc3_ep *dep = to_dwc3_ep(ep);
1358 struct dwc3 *dwc = dep->dwc;
1359 unsigned long flags;
1360 int ret;
1361
1362 spin_lock_irqsave(&dwc->lock, flags);
1363 dep->flags |= DWC3_EP_WEDGE;
1364
1365 if (dep->number == 0 || dep->number == 1)
1366 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1367 else
1368 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1369 spin_unlock_irqrestore(&dwc->lock, flags);
1370
1371 return ret;
1372 }
1373
1374 /* -------------------------------------------------------------------------- */
1375
1376 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1377 .bLength = USB_DT_ENDPOINT_SIZE,
1378 .bDescriptorType = USB_DT_ENDPOINT,
1379 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1380 };
1381
1382 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1383 .enable = dwc3_gadget_ep0_enable,
1384 .disable = dwc3_gadget_ep0_disable,
1385 .alloc_request = dwc3_gadget_ep_alloc_request,
1386 .free_request = dwc3_gadget_ep_free_request,
1387 .queue = dwc3_gadget_ep0_queue,
1388 .dequeue = dwc3_gadget_ep_dequeue,
1389 .set_halt = dwc3_gadget_ep0_set_halt,
1390 .set_wedge = dwc3_gadget_ep_set_wedge,
1391 };
1392
1393 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1394 .enable = dwc3_gadget_ep_enable,
1395 .disable = dwc3_gadget_ep_disable,
1396 .alloc_request = dwc3_gadget_ep_alloc_request,
1397 .free_request = dwc3_gadget_ep_free_request,
1398 .queue = dwc3_gadget_ep_queue,
1399 .dequeue = dwc3_gadget_ep_dequeue,
1400 .set_halt = dwc3_gadget_ep_set_halt,
1401 .set_wedge = dwc3_gadget_ep_set_wedge,
1402 };
1403
1404 /* -------------------------------------------------------------------------- */
1405
1406 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1407 {
1408 struct dwc3 *dwc = gadget_to_dwc(g);
1409 u32 reg;
1410
1411 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1412 return DWC3_DSTS_SOFFN(reg);
1413 }
1414
1415 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1416 {
1417 unsigned long timeout;
1418
1419 int ret;
1420 u32 reg;
1421
1422 u8 link_state;
1423 u8 speed;
1424
1425 /*
1426 * According to the Databook, a Remote Wakeup request should
1427 * be issued only when the device is in the Early Suspend state.
1428 *
1429 * We can check that via USB Link State bits in DSTS register.
1430 */
1431 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1432
1433 speed = reg & DWC3_DSTS_CONNECTSPD;
1434 if ((speed == DWC3_DSTS_SUPERSPEED) ||
1435 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
1436 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n");
1437 return 0;
1438 }
1439
1440 link_state = DWC3_DSTS_USBLNKST(reg);
1441
1442 switch (link_state) {
1443 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1444 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1445 break;
1446 default:
1447 dwc3_trace(trace_dwc3_gadget,
1448 "can't wakeup from '%s'\n",
1449 dwc3_gadget_link_string(link_state));
1450 return -EINVAL;
1451 }
1452
1453 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1454 if (ret < 0) {
1455 dev_err(dwc->dev, "failed to put link in Recovery\n");
1456 return ret;
1457 }
1458
1459 /* Recent versions do this automatically */
1460 if (dwc->revision < DWC3_REVISION_194A) {
1461 /* write zeroes to Link Change Request */
1462 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1463 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1464 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1465 }
1466
1467 /* poll until Link State changes to ON */
1468 timeout = jiffies + msecs_to_jiffies(100);
1469
1470 while (!time_after(jiffies, timeout)) {
1471 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1472
1473 /* in HS, means ON */
1474 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1475 break;
1476 }
1477
1478 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1479 dev_err(dwc->dev, "failed to send remote wakeup\n");
1480 return -EINVAL;
1481 }
1482
1483 return 0;
1484 }
1485
1486 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1487 {
1488 struct dwc3 *dwc = gadget_to_dwc(g);
1489 unsigned long flags;
1490 int ret;
1491
1492 spin_lock_irqsave(&dwc->lock, flags);
1493 ret = __dwc3_gadget_wakeup(dwc);
1494 spin_unlock_irqrestore(&dwc->lock, flags);
1495
1496 return ret;
1497 }
1498
1499 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1500 int is_selfpowered)
1501 {
1502 struct dwc3 *dwc = gadget_to_dwc(g);
1503 unsigned long flags;
1504
1505 spin_lock_irqsave(&dwc->lock, flags);
1506 g->is_selfpowered = !!is_selfpowered;
1507 spin_unlock_irqrestore(&dwc->lock, flags);
1508
1509 return 0;
1510 }
1511
1512 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1513 {
1514 u32 reg;
1515 u32 timeout = 500;
1516
1517 if (pm_runtime_suspended(dwc->dev))
1518 return 0;
1519
1520 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1521 if (is_on) {
1522 if (dwc->revision <= DWC3_REVISION_187A) {
1523 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1524 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1525 }
1526
1527 if (dwc->revision >= DWC3_REVISION_194A)
1528 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1529 reg |= DWC3_DCTL_RUN_STOP;
1530
1531 if (dwc->has_hibernation)
1532 reg |= DWC3_DCTL_KEEP_CONNECT;
1533
1534 dwc->pullups_connected = true;
1535 } else {
1536 reg &= ~DWC3_DCTL_RUN_STOP;
1537
1538 if (dwc->has_hibernation && !suspend)
1539 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1540
1541 dwc->pullups_connected = false;
1542 }
1543
1544 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1545
1546 do {
1547 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1548 if (is_on) {
1549 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1550 break;
1551 } else {
1552 if (reg & DWC3_DSTS_DEVCTRLHLT)
1553 break;
1554 }
1555 timeout--;
1556 if (!timeout)
1557 return -ETIMEDOUT;
1558 udelay(1);
1559 } while (1);
1560
1561 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
1562 dwc->gadget_driver
1563 ? dwc->gadget_driver->function : "no-function",
1564 is_on ? "connect" : "disconnect");
1565
1566 return 0;
1567 }
1568
1569 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1570 {
1571 struct dwc3 *dwc = gadget_to_dwc(g);
1572 unsigned long flags;
1573 int ret;
1574
1575 is_on = !!is_on;
1576
1577 spin_lock_irqsave(&dwc->lock, flags);
1578 ret = dwc3_gadget_run_stop(dwc, is_on, false);
1579 spin_unlock_irqrestore(&dwc->lock, flags);
1580
1581 return ret;
1582 }
1583
1584 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1585 {
1586 u32 reg;
1587
1588 /* Enable all but Start and End of Frame IRQs */
1589 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1590 DWC3_DEVTEN_EVNTOVERFLOWEN |
1591 DWC3_DEVTEN_CMDCMPLTEN |
1592 DWC3_DEVTEN_ERRTICERREN |
1593 DWC3_DEVTEN_WKUPEVTEN |
1594 DWC3_DEVTEN_ULSTCNGEN |
1595 DWC3_DEVTEN_CONNECTDONEEN |
1596 DWC3_DEVTEN_USBRSTEN |
1597 DWC3_DEVTEN_DISCONNEVTEN);
1598
1599 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1600 }
1601
1602 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1603 {
1604 /* mask all interrupts */
1605 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1606 }
1607
1608 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1609 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1610
1611 /**
1612 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
1613 * @dwc: pointer to our context structure
1614 *
1615 * The following looks complex but it's actually very simple. In order to
1616 * calculate the number of packets we can burst at once on OUT transfers, we're
1617 * gonna use RxFIFO size.
1618 *
1619 * To calculate RxFIFO size we need two numbers:
1620 * MDWIDTH = size, in bits, of the internal memory bus
1621 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
1622 *
1623 * Given these two numbers, the formula is simple:
1624 *
1625 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
1626 *
1627 * 24 bytes is for 3x SETUP packets
1628 * 16 bytes is a clock domain crossing tolerance
1629 *
1630 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
1631 */
1632 static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
1633 {
1634 u32 ram2_depth;
1635 u32 mdwidth;
1636 u32 nump;
1637 u32 reg;
1638
1639 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
1640 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
1641
1642 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
1643 nump = min_t(u32, nump, 16);
1644
1645 /* update NumP */
1646 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1647 reg &= ~DWC3_DCFG_NUMP_MASK;
1648 reg |= nump << DWC3_DCFG_NUMP_SHIFT;
1649 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1650 }
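
/*
 * Worked example with made-up hardware parameters: MDWIDTH = 64 bits and
 * RAM2_DEPTH = 2048 give RxFIFO = (2048 * 64 / 8) - 24 - 16 = 16344 bytes,
 * so nump = 16344 / 1024 = 15, which is below the cap of 16 and is what
 * ends up in the DCFG.NUMP field.
 */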
1651
1652 static int __dwc3_gadget_start(struct dwc3 *dwc)
1653 {
1654 struct dwc3_ep *dep;
1655 int ret = 0;
1656 u32 reg;
1657
1658 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1659 reg &= ~(DWC3_DCFG_SPEED_MASK);
1660
1661 /**
1662 * WORKAROUND: DWC3 revision < 2.20a have an issue
1663 * which would cause metastability state on Run/Stop
1664 * bit if we try to force the IP to USB2-only mode.
1665 *
1666 * Because of that, we cannot configure the IP to any
1667 * speed other than the SuperSpeed
1668 *
1669 * Refers to:
1670 *
1671 * STAR#9000525659: Clock Domain Crossing on DCTL in
1672 * USB 2.0 Mode
1673 */
1674 if (dwc->revision < DWC3_REVISION_220A) {
1675 reg |= DWC3_DCFG_SUPERSPEED;
1676 } else {
1677 switch (dwc->maximum_speed) {
1678 case USB_SPEED_LOW:
1679 reg |= DWC3_DSTS_LOWSPEED;
1680 break;
1681 case USB_SPEED_FULL:
1682 reg |= DWC3_DSTS_FULLSPEED1;
1683 break;
1684 case USB_SPEED_HIGH:
1685 reg |= DWC3_DSTS_HIGHSPEED;
1686 break;
1687 case USB_SPEED_SUPER_PLUS:
1688 reg |= DWC3_DSTS_SUPERSPEED_PLUS;
1689 break;
1690 default:
1691 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
1692 dwc->maximum_speed);
1693 /* fall through */
1694 case USB_SPEED_SUPER:
1695 reg |= DWC3_DCFG_SUPERSPEED;
1696 break;
1697 }
1698 }
1699 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1700
1701 /*
1702 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
1703 * field instead of letting dwc3 itself calculate that automatically.
1704 *
1705 * This way, we maximize the chances that we'll be able to get several
1706 * bursts of data without going through any sort of endpoint throttling.
1707 */
1708 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1709 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
1710 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1711
1712 dwc3_gadget_setup_nump(dwc);
1713
1714 /* Start with SuperSpeed Default */
1715 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1716
1717 dep = dwc->eps[0];
1718 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1719 false);
1720 if (ret) {
1721 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1722 goto err0;
1723 }
1724
1725 dep = dwc->eps[1];
1726 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1727 false);
1728 if (ret) {
1729 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1730 goto err1;
1731 }
1732
1733 /* begin to receive SETUP packets */
1734 dwc->ep0state = EP0_SETUP_PHASE;
1735 dwc3_ep0_out_start(dwc);
1736
1737 dwc3_gadget_enable_irq(dwc);
1738
1739 return 0;
1740
1741 err1:
1742 __dwc3_gadget_ep_disable(dwc->eps[0]);
1743
1744 err0:
1745 return ret;
1746 }
1747
1748 static int dwc3_gadget_start(struct usb_gadget *g,
1749 struct usb_gadget_driver *driver)
1750 {
1751 struct dwc3 *dwc = gadget_to_dwc(g);
1752 unsigned long flags;
1753 int ret = 0;
1754 int irq;
1755
1756 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1757 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1758 IRQF_SHARED, "dwc3", dwc->ev_buf);
1759 if (ret) {
1760 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1761 irq, ret);
1762 goto err0;
1763 }
1764 dwc->irq_gadget = irq;
1765
1766 spin_lock_irqsave(&dwc->lock, flags);
1767 if (dwc->gadget_driver) {
1768 dev_err(dwc->dev, "%s is already bound to %s\n",
1769 dwc->gadget.name,
1770 dwc->gadget_driver->driver.name);
1771 ret = -EBUSY;
1772 goto err1;
1773 }
1774
1775 dwc->gadget_driver = driver;
1776
1777 if (pm_runtime_active(dwc->dev))
1778 __dwc3_gadget_start(dwc);
1779
1780 spin_unlock_irqrestore(&dwc->lock, flags);
1781
1782 return 0;
1783
1784 err1:
1785 spin_unlock_irqrestore(&dwc->lock, flags);
1786 free_irq(irq, dwc);
1787
1788 err0:
1789 return ret;
1790 }
1791
1792 static void __dwc3_gadget_stop(struct dwc3 *dwc)
1793 {
1794 dwc3_gadget_disable_irq(dwc);
1795 __dwc3_gadget_ep_disable(dwc->eps[0]);
1796 __dwc3_gadget_ep_disable(dwc->eps[1]);
1797 }
1798
1799 static int dwc3_gadget_stop(struct usb_gadget *g)
1800 {
1801 struct dwc3 *dwc = gadget_to_dwc(g);
1802 unsigned long flags;
1803
1804 spin_lock_irqsave(&dwc->lock, flags);
1805 __dwc3_gadget_stop(dwc);
1806 dwc->gadget_driver = NULL;
1807 spin_unlock_irqrestore(&dwc->lock, flags);
1808
1809 free_irq(dwc->irq_gadget, dwc->ev_buf);
1810
1811 return 0;
1812 }
1813
1814 static const struct usb_gadget_ops dwc3_gadget_ops = {
1815 .get_frame = dwc3_gadget_get_frame,
1816 .wakeup = dwc3_gadget_wakeup,
1817 .set_selfpowered = dwc3_gadget_set_selfpowered,
1818 .pullup = dwc3_gadget_pullup,
1819 .udc_start = dwc3_gadget_start,
1820 .udc_stop = dwc3_gadget_stop,
1821 };
1822
1823 /* -------------------------------------------------------------------------- */
1824
1825 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1826 u8 num, u32 direction)
1827 {
1828 struct dwc3_ep *dep;
1829 u8 i;
1830
1831 for (i = 0; i < num; i++) {
1832 u8 epnum = (i << 1) | (!!direction);
1833
1834 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1835 if (!dep)
1836 return -ENOMEM;
1837
1838 dep->dwc = dwc;
1839 dep->number = epnum;
1840 dep->direction = !!direction;
1841 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
1842 dwc->eps[epnum] = dep;
1843
1844 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1845 (epnum & 1) ? "in" : "out");
1846
1847 dep->endpoint.name = dep->name;
1848 spin_lock_init(&dep->lock);
1849
1850 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
1851
1852 if (epnum == 0 || epnum == 1) {
1853 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1854 dep->endpoint.maxburst = 1;
1855 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1856 if (!epnum)
1857 dwc->gadget.ep0 = &dep->endpoint;
1858 } else {
1859 int ret;
1860
1861 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1862 dep->endpoint.max_streams = 15;
1863 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1864 list_add_tail(&dep->endpoint.ep_list,
1865 &dwc->gadget.ep_list);
1866
1867 ret = dwc3_alloc_trb_pool(dep);
1868 if (ret)
1869 return ret;
1870 }
1871
1872 if (epnum == 0 || epnum == 1) {
1873 dep->endpoint.caps.type_control = true;
1874 } else {
1875 dep->endpoint.caps.type_iso = true;
1876 dep->endpoint.caps.type_bulk = true;
1877 dep->endpoint.caps.type_int = true;
1878 }
1879
1880 dep->endpoint.caps.dir_in = !!direction;
1881 dep->endpoint.caps.dir_out = !direction;
1882
1883 INIT_LIST_HEAD(&dep->pending_list);
1884 INIT_LIST_HEAD(&dep->started_list);
1885 }
1886
1887 return 0;
1888 }
1889
1890 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1891 {
1892 int ret;
1893
1894 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1895
1896 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1897 if (ret < 0) {
1898 dwc3_trace(trace_dwc3_gadget,
1899 "failed to allocate OUT endpoints");
1900 return ret;
1901 }
1902
1903 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1904 if (ret < 0) {
1905 dwc3_trace(trace_dwc3_gadget,
1906 "failed to allocate IN endpoints");
1907 return ret;
1908 }
1909
1910 return 0;
1911 }
1912
1913 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1914 {
1915 struct dwc3_ep *dep;
1916 u8 epnum;
1917
1918 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1919 dep = dwc->eps[epnum];
1920 if (!dep)
1921 continue;
1922 /*
1923 * Physical endpoints 0 and 1 are special; they form the
1924 * bi-directional USB endpoint 0.
1925 *
1926 * For those two physical endpoints, we don't allocate a TRB
1927 * pool nor do we add them to the endpoints list. Due to that, we
1928 * shouldn't do these two operations otherwise we would end up
1929 * with all sorts of bugs when removing dwc3.ko.
1930 */
1931 if (epnum != 0 && epnum != 1) {
1932 dwc3_free_trb_pool(dep);
1933 list_del(&dep->endpoint.ep_list);
1934 }
1935
1936 kfree(dep);
1937 }
1938 }
1939
1940 /* -------------------------------------------------------------------------- */
1941
1942 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1943 struct dwc3_request *req, struct dwc3_trb *trb,
1944 const struct dwc3_event_depevt *event, int status)
1945 {
1946 unsigned int count;
1947 unsigned int s_pkt = 0;
1948 unsigned int trb_status;
1949
1950 trace_dwc3_complete_trb(dep, trb);
1951
1952 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1953 /*
1954 * We continue despite the error. There is not much we
1955 * can do. If we don't clean it up we loop forever. If
1956 * we skip the TRB then it gets overwritten after a
1957 * while since we use them in a ring buffer. A BUG()
1958 * would help. Let's hope that if this occurs, someone
1959 * fixes the root cause instead of looking away :)
1960 */
1961 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1962 dep->name, trb);
1963 count = trb->size & DWC3_TRB_SIZE_MASK;
1964
1965 if (dep->direction) {
1966 if (count) {
1967 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1968 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1969 dwc3_trace(trace_dwc3_gadget,
1970 "%s: incomplete IN transfer\n",
1971 dep->name);
1972 /*
1973 * If missed isoc occurred and there is
1974 * no request queued then issue END
1975 * TRANSFER, so that core generates
1976 * next xfernotready and we will issue
1977 * a fresh START TRANSFER.
1978 * If there are still queued requests
1979 * then wait, do not issue either END
1980 * or UPDATE TRANSFER, just attach next
1981 * request in pending_list during
1982 * giveback. If any future queued request
1983 * is successfully transferred then we
1984 * will issue UPDATE TRANSFER for all
1985 * requests in the pending_list.
1986 */
1987 dep->flags |= DWC3_EP_MISSED_ISOC;
1988 } else {
1989 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1990 dep->name);
1991 status = -ECONNRESET;
1992 }
1993 } else {
1994 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1995 }
1996 } else {
1997 if (count && (event->status & DEPEVT_STATUS_SHORT))
1998 s_pkt = 1;
1999 }
2000
2001 /*
2002 * We assume here we will always receive the entire data block
2003 * which we should receive. Meaning, if we program RX to
2004 * receive 4K but we receive only 2K, we assume that's all we
2005 * should receive and we simply bounce the request back to the
2006 * gadget driver for further processing.
2007 */
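/*
 * A short numeric illustration of the line below (hypothetical numbers):
 * a 4096-byte OUT request that completes with a remaining count of 2048
 * ends up with request.actual == 2048 and, being a short packet, is
 * given back to the gadget driver right away instead of waiting for the
 * missing 2 KiB.
 */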
2008 req->request.actual += req->request.length - count;
2009 if (s_pkt)
2010 return 1;
2011 if ((event->status & DEPEVT_STATUS_LST) &&
2012 (trb->ctrl & (DWC3_TRB_CTRL_LST |
2013 DWC3_TRB_CTRL_HWO)))
2014 return 1;
2015 if ((event->status & DEPEVT_STATUS_IOC) &&
2016 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2017 return 1;
2018 return 0;
2019 }
2020
2021 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
2022 const struct dwc3_event_depevt *event, int status)
2023 {
2024 struct dwc3_request *req;
2025 struct dwc3_trb *trb;
2026 unsigned int slot;
2027 unsigned int i;
2028 int ret;
2029
2030 do {
2031 req = next_request(&dep->started_list);
2032 if (WARN_ON_ONCE(!req))
2033 return 1;
2034
2035 i = 0;
2036 do {
2037 slot = req->first_trb_index + i;
2038 if (slot == DWC3_TRB_NUM - 1)
2039 slot++;
2040 slot %= DWC3_TRB_NUM;
2041 trb = &dep->trb_pool[slot];
2042
2043 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2044 event, status);
2045 if (ret)
2046 break;
2047 } while (++i < req->request.num_mapped_sgs);
2048
2049 dwc3_gadget_giveback(dep, req, status);
2050
2051 if (ret)
2052 break;
2053 } while (1);
2054
2055 /*
2056 * Our endpoint might get disabled by another thread during
2057 * dwc3_gadget_giveback(). If that happens, we just return 1
2058 * early so that the DWC3_EP_BUSY flag gets cleared.
2059 */
2060 if (!dep->endpoint.desc)
2061 return 1;
2062
2063 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
2064 list_empty(&dep->started_list)) {
2065 if (list_empty(&dep->pending_list)) {
2066 /*
2067 * If there is no entry in the request list then do
2068 * not issue END TRANSFER now. Just set the PENDING
2069 * flag, so that END TRANSFER is issued when an
2070 * entry is added into the request list.
2071 */
2072 dep->flags = DWC3_EP_PENDING_REQUEST;
2073 } else {
2074 dwc3_stop_active_transfer(dwc, dep->number, true);
2075 dep->flags = DWC3_EP_ENABLED;
2076 }
2077 return 1;
2078 }
2079
2080 if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
2081 if ((event->status & DEPEVT_STATUS_IOC) &&
2082 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2083 return 0;
2084 return 1;
2085 }
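/*
 * Note on the slot arithmetic in the inner loop above: the last slot
 * of the pool (DWC3_TRB_NUM - 1) holds the link TRB that wraps the
 * ring, so when a request's TRBs would land on it the index is
 * bumped past it and then wrapped with the modulo. For example, if
 * first_trb_index sits near the end of the pool, slot
 * DWC3_TRB_NUM - 1 is skipped and the walk continues from slot 0.
 */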
2086
2087 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
2088 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
2089 {
2090 unsigned status = 0;
2091 int clean_busy;
2092 u32 is_xfer_complete;
2093
2094 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
2095
2096 if (event->status & DEPEVT_STATUS_BUSERR)
2097 status = -ECONNRESET;
2098
2099 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
2100 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
2101 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
2102 dep->flags &= ~DWC3_EP_BUSY;
2103
2104 /*
2105 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
2106 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
2107 */
2108 if (dwc->revision < DWC3_REVISION_183A) {
2109 u32 reg;
2110 int i;
2111
2112 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
2113 dep = dwc->eps[i];
2114
2115 if (!(dep->flags & DWC3_EP_ENABLED))
2116 continue;
2117
2118 if (!list_empty(&dep->started_list))
2119 return;
2120 }
2121
2122 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2123 reg |= dwc->u1u2;
2124 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2125
2126 dwc->u1u2 = 0;
2127 }
2128
2129 /*
2130 * Our endpoint might get disabled by another thread during
2131 * dwc3_gadget_giveback(). If that happens, dep->endpoint.desc is
2132 * NULL, so return early instead of kicking another transfer.
2133 */
2134 if (!dep->endpoint.desc)
2135 return;
2136
2137 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2138 int ret;
2139
2140 ret = __dwc3_gadget_kick_transfer(dep, 0);
2141 if (!ret || ret == -EBUSY)
2142 return;
2143 }
2144 }
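/*
 * Summary of the two-part U1/U2 workaround for revisions < 1.83a:
 * dwc3_gadget_linksts_change_interrupt() (the first half) saves the
 * U1/U2 enable bits into dwc->u1u2 and clears them in DCTL when the
 * link returns to U0; the loop above (the second half) restores
 * those bits only once every enabled endpoint's started_list is
 * empty, i.e. once no transfers are pending.
 */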
2145
2146 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2147 const struct dwc3_event_depevt *event)
2148 {
2149 struct dwc3_ep *dep;
2150 u8 epnum = event->endpoint_number;
2151
2152 dep = dwc->eps[epnum];
2153
2154 if (!(dep->flags & DWC3_EP_ENABLED))
2155 return;
2156
2157 if (epnum == 0 || epnum == 1) {
2158 dwc3_ep0_interrupt(dwc, event);
2159 return;
2160 }
2161
2162 switch (event->endpoint_event) {
2163 case DWC3_DEPEVT_XFERCOMPLETE:
2164 dep->resource_index = 0;
2165
2166 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2167 dwc3_trace(trace_dwc3_gadget,
2168 "%s is an Isochronous endpoint\n",
2169 dep->name);
2170 return;
2171 }
2172
2173 dwc3_endpoint_transfer_complete(dwc, dep, event);
2174 break;
2175 case DWC3_DEPEVT_XFERINPROGRESS:
2176 dwc3_endpoint_transfer_complete(dwc, dep, event);
2177 break;
2178 case DWC3_DEPEVT_XFERNOTREADY:
2179 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2180 dwc3_gadget_start_isoc(dwc, dep, event);
2181 } else {
2182 int active;
2183 int ret;
2184
2185 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
2186
2187 dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
2188 dep->name, active ? "Transfer Active"
2189 : "Transfer Not Active");
2190
2191 ret = __dwc3_gadget_kick_transfer(dep, 0);
2192 if (!ret || ret == -EBUSY)
2193 return;
2194
2195 dwc3_trace(trace_dwc3_gadget,
2196 "%s: failed to kick transfers\n",
2197 dep->name);
2198 }
2199
2200 break;
2201 case DWC3_DEPEVT_STREAMEVT:
2202 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2203 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2204 dep->name);
2205 return;
2206 }
2207
2208 switch (event->status) {
2209 case DEPEVT_STREAMEVT_FOUND:
2210 dwc3_trace(trace_dwc3_gadget,
2211 "Stream %d found and started",
2212 event->parameters);
2213
2214 break;
2215 case DEPEVT_STREAMEVT_NOTFOUND:
2216 /* FALLTHROUGH */
2217 default:
2218 dwc3_trace(trace_dwc3_gadget,
2219 "unable to find suitable stream\n");
2220 }
2221 break;
2222 case DWC3_DEPEVT_RXTXFIFOEVT:
2223 dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun\n", dep->name);
2224 break;
2225 case DWC3_DEPEVT_EPCMDCMPLT:
2226 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
2227 break;
2228 }
2229 }
2230
2231 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2232 {
2233 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2234 spin_unlock(&dwc->lock);
2235 dwc->gadget_driver->disconnect(&dwc->gadget);
2236 spin_lock(&dwc->lock);
2237 }
2238 }
2239
2240 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2241 {
2242 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2243 spin_unlock(&dwc->lock);
2244 dwc->gadget_driver->suspend(&dwc->gadget);
2245 spin_lock(&dwc->lock);
2246 }
2247 }
2248
2249 static void dwc3_resume_gadget(struct dwc3 *dwc)
2250 {
2251 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2252 spin_unlock(&dwc->lock);
2253 dwc->gadget_driver->resume(&dwc->gadget);
2254 spin_lock(&dwc->lock);
2255 }
2256 }
2257
2258 static void dwc3_reset_gadget(struct dwc3 *dwc)
2259 {
2260 if (!dwc->gadget_driver)
2261 return;
2262
2263 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2264 spin_unlock(&dwc->lock);
2265 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2266 spin_lock(&dwc->lock);
2267 }
2268 }
2269
2270 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2271 {
2272 struct dwc3_ep *dep;
2273 struct dwc3_gadget_ep_cmd_params params;
2274 u32 cmd;
2275 int ret;
2276
2277 dep = dwc->eps[epnum];
2278
2279 if (!dep->resource_index)
2280 return;
2281
2282 /*
2283 * NOTICE: We are violating what the Databook says about the
2284 * EndTransfer command. Ideally we would _always_ wait for the
2285 * EndTransfer Command Completion IRQ, but that's causing too
2286 * much trouble synchronizing between us and gadget driver.
2287 *
2288 * We have discussed this with the IP Provider and it was
2289 * suggested to giveback all requests here, but give HW some
2290 * extra time to synchronize with the interconnect. We're using
2291 * an arbitrary 100us delay for that.
2292 *
2293 * Note also that a similar handling was tested by Synopsys
2294 * (thanks a lot Paul) and nothing bad has come out of it.
2295 * In short, what we're doing is:
2296 *
2297 * - Issue EndTransfer WITH CMDIOC bit set
2298 * - Wait 100us
2299 */
2300
2301 cmd = DWC3_DEPCMD_ENDTRANSFER;
2302 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2303 cmd |= DWC3_DEPCMD_CMDIOC;
2304 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2305 memset(&params, 0, sizeof(params));
2306 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
2307 WARN_ON_ONCE(ret);
2308 dep->resource_index = 0;
2309 dep->flags &= ~DWC3_EP_BUSY;
2310 udelay(100);
2311 }
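/*
 * A typical caller is dwc3_cleanup_done_reqs() above, which ends an
 * active isochronous transfer with force = true when the started
 * list drains while requests are still pending, so that the next
 * XferNotReady restarts the stream:
 *
 *	dwc3_stop_active_transfer(dwc, dep->number, true);
 *	dep->flags = DWC3_EP_ENABLED;
 */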
2312
2313 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2314 {
2315 u32 epnum;
2316
2317 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2318 struct dwc3_ep *dep;
2319
2320 dep = dwc->eps[epnum];
2321 if (!dep)
2322 continue;
2323
2324 if (!(dep->flags & DWC3_EP_ENABLED))
2325 continue;
2326
2327 dwc3_remove_requests(dwc, dep);
2328 }
2329 }
2330
2331 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2332 {
2333 u32 epnum;
2334
2335 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2336 struct dwc3_ep *dep;
2337 int ret;
2338
2339 dep = dwc->eps[epnum];
2340 if (!dep)
2341 continue;
2342
2343 if (!(dep->flags & DWC3_EP_STALL))
2344 continue;
2345
2346 dep->flags &= ~DWC3_EP_STALL;
2347
2348 ret = dwc3_send_clear_stall_ep_cmd(dep);
2349 WARN_ON_ONCE(ret);
2350 }
2351 }
2352
2353 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2354 {
2355 u32 reg;
2356
2357 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2358 reg &= ~DWC3_DCTL_INITU1ENA;
2359 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2360
2361 reg &= ~DWC3_DCTL_INITU2ENA;
2362 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2363
2364 dwc3_disconnect_gadget(dwc);
2365
2366 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2367 dwc->setup_packet_pending = false;
2368 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2369
2370 dwc->connected = false;
2371 }
2372
2373 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2374 {
2375 u32 reg;
2376
2377 dwc->connected = true;
2378
2379 /*
2380 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2381 * would cause a missing Disconnect Event if there's a
2382 * pending Setup Packet in the FIFO.
2383 *
2384 * There's no suggested workaround on the official Bug
2385 * report, which states that "unless the driver/application
2386 * is doing any special handling of a disconnect event,
2387 * there is no functional issue".
2388 *
2389 * Unfortunately, it turns out that we _do_ some special
2390 * handling of a disconnect event, namely complete all
2391 * pending transfers, notify gadget driver of the
2392 * disconnection, and so on.
2393 *
2394 * Our suggested workaround is to follow the Disconnect
2395 * Event steps here, instead, based on a setup_packet_pending
2396 * flag. Such flag gets set whenever we have a SETUP_PENDING
2397 * status for EP0 TRBs and gets cleared on XferComplete for the
2398 * same endpoint.
2399 *
2400 * Refers to:
2401 *
2402 * STAR#9000466709: RTL: Device : Disconnect event not
2403 * generated if setup packet pending in FIFO
2404 */
2405 if (dwc->revision < DWC3_REVISION_188A) {
2406 if (dwc->setup_packet_pending)
2407 dwc3_gadget_disconnect_interrupt(dwc);
2408 }
2409
2410 dwc3_reset_gadget(dwc);
2411
2412 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2413 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2414 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2415 dwc->test_mode = false;
2416
2417 dwc3_stop_active_transfers(dwc);
2418 dwc3_clear_stall_all_ep(dwc);
2419
2420 /* Reset device address to zero */
2421 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2422 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2423 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2424 }
2425
2426 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2427 {
2428 u32 reg;
2429 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2430
2431 /*
2432 * We change the clock only at SS, though it is not clear why we would
2433 * want to do this. Maybe it becomes part of the power saving plan.
2434 */
2435
2436 if ((speed != DWC3_DSTS_SUPERSPEED) &&
2437 (speed != DWC3_DSTS_SUPERSPEED_PLUS))
2438 return;
2439
2440 /*
2441 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2442 * each time on Connect Done.
2443 */
2444 if (!usb30_clock)
2445 return;
2446
2447 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2448 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2449 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2450 }
2451
2452 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2453 {
2454 struct dwc3_ep *dep;
2455 int ret;
2456 u32 reg;
2457 u8 speed;
2458
2459 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2460 speed = reg & DWC3_DSTS_CONNECTSPD;
2461 dwc->speed = speed;
2462
2463 dwc3_update_ram_clk_sel(dwc, speed);
2464
2465 switch (speed) {
2466 case DWC3_DCFG_SUPERSPEED_PLUS:
2467 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2468 dwc->gadget.ep0->maxpacket = 512;
2469 dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
2470 break;
2471 case DWC3_DCFG_SUPERSPEED:
2472 /*
2473 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2474 * would cause a missing USB3 Reset event.
2475 *
2476 * In such situations, we should force a USB3 Reset
2477 * event by calling our dwc3_gadget_reset_interrupt()
2478 * routine.
2479 *
2480 * Refers to:
2481 *
2482 * STAR#9000483510: RTL: SS : USB3 reset event may
2483 * not be generated always when the link enters poll
2484 */
2485 if (dwc->revision < DWC3_REVISION_190A)
2486 dwc3_gadget_reset_interrupt(dwc);
2487
2488 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2489 dwc->gadget.ep0->maxpacket = 512;
2490 dwc->gadget.speed = USB_SPEED_SUPER;
2491 break;
2492 case DWC3_DCFG_HIGHSPEED:
2493 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2494 dwc->gadget.ep0->maxpacket = 64;
2495 dwc->gadget.speed = USB_SPEED_HIGH;
2496 break;
2497 case DWC3_DCFG_FULLSPEED2:
2498 case DWC3_DCFG_FULLSPEED1:
2499 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2500 dwc->gadget.ep0->maxpacket = 64;
2501 dwc->gadget.speed = USB_SPEED_FULL;
2502 break;
2503 case DWC3_DCFG_LOWSPEED:
2504 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2505 dwc->gadget.ep0->maxpacket = 8;
2506 dwc->gadget.speed = USB_SPEED_LOW;
2507 break;
2508 }
2509
2510 /* Enable USB2 LPM Capability */
2511
2512 if ((dwc->revision > DWC3_REVISION_194A) &&
2513 (speed != DWC3_DCFG_SUPERSPEED) &&
2514 (speed != DWC3_DCFG_SUPERSPEED_PLUS)) {
2515 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2516 reg |= DWC3_DCFG_LPM_CAP;
2517 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2518
2519 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2520 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2521
2522 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2523
2524 /*
2525 * When dwc3 revisions >= 2.40a, the LPM Erratum is enabled, and
2526 * DCFG.LPMCap is set, the core responds with an ACK if the
2527 * BESL value in the LPM token is less than or equal to the LPM
2528 * NYET threshold.
2529 */
2530 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2531 && dwc->has_lpm_erratum,
2532 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2533
2534 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2535 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2536
2537 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2538 } else {
2539 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2540 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2541 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2542 }
2543
2544 dep = dwc->eps[0];
2545 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2546 false);
2547 if (ret) {
2548 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2549 return;
2550 }
2551
2552 dep = dwc->eps[1];
2553 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2554 false);
2555 if (ret) {
2556 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2557 return;
2558 }
2559
2560 /*
2561 * Configure PHY via GUSB3PIPECTLn if required.
2562 *
2563 * Update GTXFIFOSIZn
2564 *
2565 * In both cases reset values should be sufficient.
2566 */
2567 }
2568
2569 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2570 {
2571 /*
2572 * TODO take core out of low power mode when that's
2573 * implemented.
2574 */
2575
2576 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2577 spin_unlock(&dwc->lock);
2578 dwc->gadget_driver->resume(&dwc->gadget);
2579 spin_lock(&dwc->lock);
2580 }
2581 }
2582
2583 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2584 unsigned int evtinfo)
2585 {
2586 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2587 unsigned int pwropt;
2588
2589 /*
2590 * WORKAROUND: DWC3 revisions < 2.50a have an issue when configured
2591 * without Hibernation mode enabled which shows up when the device
2592 * detects a host-initiated U3 exit.
2593 *
2594 * In that case, the device will generate a Link State Change Interrupt
2595 * from U3 to RESUME which is only necessary if Hibernation is
2596 * configured in.
2597 *
2598 * There are no functional changes due to such spurious event and we
2599 * just need to ignore it.
2600 *
2601 * Refers to:
2602 *
2603 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2604 * operational mode
2605 */
2606 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2607 if ((dwc->revision < DWC3_REVISION_250A) &&
2608 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2609 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2610 (next == DWC3_LINK_STATE_RESUME)) {
2611 dwc3_trace(trace_dwc3_gadget,
2612 "ignoring transition U3 -> Resume");
2613 return;
2614 }
2615 }
2616
2617 /*
2618 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2619 * on the link partner, the USB session might do multiple entry/exit
2620 * of low power states before a transfer takes place.
2621 *
2622 * Due to this problem, we might experience lower throughput. The
2623 * suggested workaround is to disable DCTL[12:9] bits if we're
2624 * transitioning from U1/U2 to U0 and enable those bits again
2625 * after a transfer completes and there are no pending transfers
2626 * on any of the enabled endpoints.
2627 *
2628 * This is the first half of that workaround.
2629 *
2630 * Refers to:
2631 *
2632 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2633 * core send LGO_Ux entering U0
2634 */
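/*
 * The DCTL[12:9] bits mentioned above correspond to the
 * DWC3_DCTL_INITU2ENA, DWC3_DCTL_ACCEPTU2ENA, DWC3_DCTL_INITU1ENA
 * and DWC3_DCTL_ACCEPTU1ENA masks used below; dwc->u1u2 caches
 * whichever of them were set so that
 * dwc3_endpoint_transfer_complete() can restore them later.
 */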
2635 if (dwc->revision < DWC3_REVISION_183A) {
2636 if (next == DWC3_LINK_STATE_U0) {
2637 u32 u1u2;
2638 u32 reg;
2639
2640 switch (dwc->link_state) {
2641 case DWC3_LINK_STATE_U1:
2642 case DWC3_LINK_STATE_U2:
2643 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2644 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2645 | DWC3_DCTL_ACCEPTU2ENA
2646 | DWC3_DCTL_INITU1ENA
2647 | DWC3_DCTL_ACCEPTU1ENA);
2648
2649 if (!dwc->u1u2)
2650 dwc->u1u2 = reg & u1u2;
2651
2652 reg &= ~u1u2;
2653
2654 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2655 break;
2656 default:
2657 /* do nothing */
2658 break;
2659 }
2660 }
2661 }
2662
2663 switch (next) {
2664 case DWC3_LINK_STATE_U1:
2665 if (dwc->speed == USB_SPEED_SUPER)
2666 dwc3_suspend_gadget(dwc);
2667 break;
2668 case DWC3_LINK_STATE_U2:
2669 case DWC3_LINK_STATE_U3:
2670 dwc3_suspend_gadget(dwc);
2671 break;
2672 case DWC3_LINK_STATE_RESUME:
2673 dwc3_resume_gadget(dwc);
2674 break;
2675 default:
2676 /* do nothing */
2677 break;
2678 }
2679
2680 dwc->link_state = next;
2681 }
2682
2683 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2684 unsigned int evtinfo)
2685 {
2686 unsigned int is_ss = evtinfo & BIT(4);
2687
2688 /*
2689 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2690 * has a known issue which can cause USB CV TD.9.23 to fail
2691 * randomly.
2692 *
2693 * Because of this issue, the core could generate bogus hibernation
2694 * events which SW needs to ignore.
2695 *
2696 * Refers to:
2697 *
2698 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2699 * Device Fallback from SuperSpeed
2700 */
2701 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2702 return;
2703
2704 /* enter hibernation here */
2705 }
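/*
 * The XOR above filters the bogus events described in the comment:
 * a hibernation event flagged as SuperSpeed while the device is not
 * actually at SuperSpeed (or vice versa) is ignored; only when the
 * event's speed bit agrees with dwc->speed would we proceed to enter
 * hibernation (still unimplemented here).
 */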
2706
2707 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2708 const struct dwc3_event_devt *event)
2709 {
2710 switch (event->type) {
2711 case DWC3_DEVICE_EVENT_DISCONNECT:
2712 dwc3_gadget_disconnect_interrupt(dwc);
2713 break;
2714 case DWC3_DEVICE_EVENT_RESET:
2715 dwc3_gadget_reset_interrupt(dwc);
2716 break;
2717 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2718 dwc3_gadget_conndone_interrupt(dwc);
2719 break;
2720 case DWC3_DEVICE_EVENT_WAKEUP:
2721 dwc3_gadget_wakeup_interrupt(dwc);
2722 break;
2723 case DWC3_DEVICE_EVENT_HIBER_REQ:
2724 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2725 "unexpected hibernation event\n"))
2726 break;
2727
2728 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2729 break;
2730 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2731 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2732 break;
2733 case DWC3_DEVICE_EVENT_EOPF:
2734 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
2735 break;
2736 case DWC3_DEVICE_EVENT_SOF:
2737 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
2738 break;
2739 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2740 dwc3_trace(trace_dwc3_gadget, "Erratic Error");
2741 break;
2742 case DWC3_DEVICE_EVENT_CMD_CMPL:
2743 dwc3_trace(trace_dwc3_gadget, "Command Complete");
2744 break;
2745 case DWC3_DEVICE_EVENT_OVERFLOW:
2746 dwc3_trace(trace_dwc3_gadget, "Overflow");
2747 break;
2748 default:
2749 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2750 }
2751 }
2752
2753 static void dwc3_process_event_entry(struct dwc3 *dwc,
2754 const union dwc3_event *event)
2755 {
2756 trace_dwc3_event(event->raw);
2757
2758 /* Endpoint IRQ, handle it and return early */
2759 if (event->type.is_devspec == 0) {
2760 /* depevt */
2761 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2762 }
2763
2764 switch (event->type.type) {
2765 case DWC3_EVENT_TYPE_DEV:
2766 dwc3_gadget_interrupt(dwc, &event->devt);
2767 break;
2768 /* REVISIT what to do with Carkit and I2C events ? */
2769 default:
2770 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2771 }
2772 }
2773
2774 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
2775 {
2776 struct dwc3 *dwc = evt->dwc;
2777 irqreturn_t ret = IRQ_NONE;
2778 int left;
2779 u32 reg;
2780
2781 left = evt->count;
2782
2783 if (!(evt->flags & DWC3_EVENT_PENDING))
2784 return IRQ_NONE;
2785
2786 while (left > 0) {
2787 union dwc3_event event;
2788
2789 event.raw = *(u32 *) (evt->buf + evt->lpos);
2790
2791 dwc3_process_event_entry(dwc, &event);
2792
2793 /*
2794 * FIXME we wrap around correctly to the next entry as
2795 * almost all entries are 4 bytes in size. There is one
2796 * entry which has 12 bytes: a regular entry
2797 * followed by 8 bytes of data. At the moment it is not clear how
2798 * things are organized if we get close to the buffer
2799 * boundary, so we will worry about that once we try to handle
2800 * it.
2801 */
2802 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2803 left -= 4;
2804
2805 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
2806 }
2807
2808 evt->count = 0;
2809 evt->flags &= ~DWC3_EVENT_PENDING;
2810 ret = IRQ_HANDLED;
2811
2812 /* Unmask interrupt */
2813 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2814 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2815 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2816
2817 return ret;
2818 }
2819
2820 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
2821 {
2822 struct dwc3_event_buffer *evt = _evt;
2823 struct dwc3 *dwc = evt->dwc;
2824 unsigned long flags;
2825 irqreturn_t ret = IRQ_NONE;
2826
2827 spin_lock_irqsave(&dwc->lock, flags);
2828 ret = dwc3_process_event_buf(evt);
2829 spin_unlock_irqrestore(&dwc->lock, flags);
2830
2831 return ret;
2832 }
2833
2834 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
2835 {
2836 struct dwc3 *dwc = evt->dwc;
2837 u32 count;
2838 u32 reg;
2839
2840 if (pm_runtime_suspended(dwc->dev)) {
2841 pm_runtime_get(dwc->dev);
2842 disable_irq_nosync(dwc->irq_gadget);
2843 dwc->pending_events = true;
2844 return IRQ_HANDLED;
2845 }
2846
2847 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
2848 count &= DWC3_GEVNTCOUNT_MASK;
2849 if (!count)
2850 return IRQ_NONE;
2851
2852 evt->count = count;
2853 evt->flags |= DWC3_EVENT_PENDING;
2854
2855 /* Mask interrupt */
2856 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2857 reg |= DWC3_GEVNTSIZ_INTMASK;
2858 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2859
2860 return IRQ_WAKE_THREAD;
2861 }
2862
2863 static irqreturn_t dwc3_interrupt(int irq, void *_evt)
2864 {
2865 struct dwc3_event_buffer *evt = _evt;
2866
2867 return dwc3_check_event_buf(evt);
2868 }
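/*
 * Interrupt handling is split in two: dwc3_interrupt() runs in hard
 * IRQ context, latches the event count, masks the event interrupt
 * via GEVNTSIZ and returns IRQ_WAKE_THREAD, while
 * dwc3_thread_interrupt() drains the event buffer under dwc->lock
 * and unmasks the interrupt again once evt->count is consumed.
 */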
2869
2870 /**
2871 * dwc3_gadget_init - Initializes gadget related registers
2872 * @dwc: pointer to our controller context structure
2873 *
2874 * Returns 0 on success otherwise negative errno.
2875 */
2876 int dwc3_gadget_init(struct dwc3 *dwc)
2877 {
2878 int ret;
2879
2880 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2881 &dwc->ctrl_req_addr, GFP_KERNEL);
2882 if (!dwc->ctrl_req) {
2883 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2884 ret = -ENOMEM;
2885 goto err0;
2886 }
2887
2888 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2889 &dwc->ep0_trb_addr, GFP_KERNEL);
2890 if (!dwc->ep0_trb) {
2891 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2892 ret = -ENOMEM;
2893 goto err1;
2894 }
2895
2896 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2897 if (!dwc->setup_buf) {
2898 ret = -ENOMEM;
2899 goto err2;
2900 }
2901
2902 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2903 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2904 GFP_KERNEL);
2905 if (!dwc->ep0_bounce) {
2906 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2907 ret = -ENOMEM;
2908 goto err3;
2909 }
2910
2911 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
2912 if (!dwc->zlp_buf) {
2913 ret = -ENOMEM;
2914 goto err4;
2915 }
2916
2917 dwc->gadget.ops = &dwc3_gadget_ops;
2918 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2919 dwc->gadget.sg_supported = true;
2920 dwc->gadget.name = "dwc3-gadget";
2921 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
2922
2923 /*
2924 * FIXME We might be setting max_speed to <SUPER, however versions
2925 * <2.20a of dwc3 have an issue with metastability (documented
2926 * elsewhere in this driver) which tells us we can't set max speed to
2927 * anything lower than SUPER.
2928 *
2929 * Because gadget.max_speed is only used by composite.c and function
2930 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
2931 * to happen so we avoid sending SuperSpeed Capability descriptor
2932 * together with our BOS descriptor as that could confuse the host into
2933 * thinking we can handle super speed.
2934 *
2935 * Note that, in fact, we won't even support GetBOS requests when speed
2936 * is less than super speed because we don't have means, yet, to tell
2937 * composite.c that we are USB 2.0 + LPM ECN.
2938 */
2939 if (dwc->revision < DWC3_REVISION_220A)
2940 dwc3_trace(trace_dwc3_gadget,
2941 "Changing max_speed on rev %08x\n",
2942 dwc->revision);
2943
2944 dwc->gadget.max_speed = dwc->maximum_speed;
2945
2946 /*
2947 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2948 * on ep out.
2949 */
2950 dwc->gadget.quirk_ep_out_aligned_size = true;
2951
2952 /*
2953 * REVISIT: Here we should clear all pending IRQs to be
2954 * sure we're starting from a well known location.
2955 */
2956
2957 ret = dwc3_gadget_init_endpoints(dwc);
2958 if (ret)
2959 goto err5;
2960
2961 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2962 if (ret) {
2963 dev_err(dwc->dev, "failed to register udc\n");
2964 goto err5;
2965 }
2966
2967 return 0;
2968
2969 err5:
2970 kfree(dwc->zlp_buf);
2971
2972 err4:
2973 dwc3_gadget_free_endpoints(dwc);
2974 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2975 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2976
2977 err3:
2978 kfree(dwc->setup_buf);
2979
2980 err2:
2981 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2982 dwc->ep0_trb, dwc->ep0_trb_addr);
2983
2984 err1:
2985 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2986 dwc->ctrl_req, dwc->ctrl_req_addr);
2987
2988 err0:
2989 return ret;
2990 }
2991
2992 /* -------------------------------------------------------------------------- */
2993
2994 void dwc3_gadget_exit(struct dwc3 *dwc)
2995 {
2996 usb_del_gadget_udc(&dwc->gadget);
2997
2998 dwc3_gadget_free_endpoints(dwc);
2999
3000 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3001 dwc->ep0_bounce, dwc->ep0_bounce_addr);
3002
3003 kfree(dwc->setup_buf);
3004 kfree(dwc->zlp_buf);
3005
3006 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3007 dwc->ep0_trb, dwc->ep0_trb_addr);
3008
3009 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3010 dwc->ctrl_req, dwc->ctrl_req_addr);
3011 }
3012
3013 int dwc3_gadget_suspend(struct dwc3 *dwc)
3014 {
3015 int ret;
3016
3017 if (!dwc->gadget_driver)
3018 return 0;
3019
3020 ret = dwc3_gadget_run_stop(dwc, false, false);
3021 if (ret < 0)
3022 return ret;
3023
3024 dwc3_disconnect_gadget(dwc);
3025 __dwc3_gadget_stop(dwc);
3026
3027 return 0;
3028 }
3029
3030 int dwc3_gadget_resume(struct dwc3 *dwc)
3031 {
3032 int ret;
3033
3034 if (!dwc->gadget_driver)
3035 return 0;
3036
3037 ret = __dwc3_gadget_start(dwc);
3038 if (ret < 0)
3039 goto err0;
3040
3041 ret = dwc3_gadget_run_stop(dwc, true, false);
3042 if (ret < 0)
3043 goto err1;
3044
3045 return 0;
3046
3047 err1:
3048 __dwc3_gadget_stop(dwc);
3049
3050 err0:
3051 return ret;
3052 }
3053
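/*
 * This pairs with dwc3_check_event_buf(): when an interrupt arrives
 * while the device is runtime-suspended, the hard IRQ handler takes a
 * pm_runtime reference, disables the gadget IRQ and records
 * dwc->pending_events; once the core has resumed, the helper below
 * replays the interrupt through dwc3_interrupt() and re-enables the
 * IRQ line.
 */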
3054 void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
3055 {
3056 if (dwc->pending_events) {
3057 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
3058 dwc->pending_events = false;
3059 enable_irq(dwc->irq_gadget);
3060 }
3061 }