usb: dwc3: clean TRB if STARTTRANSFER fail
1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37
38 /**
39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40 * @dwc: pointer to our context structure
41 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
42 *
43 * Caller should take care of locking. This function will
44 * return 0 on success or -EINVAL if wrong Test Selector
45 * is passed
46 */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69 }
70
71 /**
72 * dwc3_gadget_get_link_state - Gets current state of USB Link
73 * @dwc: pointer to our context structure
74 *
75 * Caller should take care of locking. This function will
76 * return the current link state.
77 */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85 }
86
87 /**
88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89 * @dwc: pointer to our context structure
90 * @state: the state to put link into
91 *
92 * Caller should take care of locking. This function will
93 * return 0 on success or -ETIMEDOUT.
94 */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97 int retries = 10000;
98 u32 reg;
99
100 /*
101 * Wait until device controller is ready. Only applies to 1.94a and
102 * later RTL.
103 */
104 if (dwc->revision >= DWC3_REVISION_194A) {
105 while (--retries) {
106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 if (reg & DWC3_DSTS_DCNRD)
108 udelay(5);
109 else
110 break;
111 }
112
113 if (retries <= 0)
114 return -ETIMEDOUT;
115 }
116
117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120 /* set requested state */
121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
124 /*
125 * The following code is racy when called from dwc3_gadget_wakeup,
126 * and is not needed, at least on newer versions
127 */
128 if (dwc->revision >= DWC3_REVISION_194A)
129 return 0;
130
131 /* wait for a change in DSTS */
132 retries = 10000;
133 while (--retries) {
134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
136 if (DWC3_DSTS_USBLNKST(reg) == state)
137 return 0;
138
139 udelay(5);
140 }
141
142 return -ETIMEDOUT;
143 }
144
145 /**
146 * dwc3_ep_inc_trb() - Increment a TRB index.
147 * @index: Pointer to the TRB index to increment.
148 *
149 * The index should never point to the link TRB. After incrementing,
150 * if it points to the link TRB, wrap around to the beginning. The
151 * link TRB is always at the last TRB entry.
152 */
153 static void dwc3_ep_inc_trb(u8 *index)
154 {
155 (*index)++;
156 if (*index == (DWC3_TRB_NUM - 1))
157 *index = 0;
158 }
159
160 static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
161 {
162 dwc3_ep_inc_trb(&dep->trb_enqueue);
163 }
164
165 static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
166 {
167 dwc3_ep_inc_trb(&dep->trb_dequeue);
168 }
169
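/**
 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
 * @dep: The endpoint to whom the request belongs to
 * @req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with the controller's lock held and interrupts disabled.
 * This function will unmap @req and call its ->complete() callback to notify
 * upper layers that it has completed.
 */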
170 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
171 int status)
172 {
173 struct dwc3 *dwc = dep->dwc;
174
175 req->started = false;
176 list_del(&req->list);
177 req->trb = NULL;
178 req->remaining = 0;
179
180 if (req->request.status == -EINPROGRESS)
181 req->request.status = status;
182
183 if (dwc->ep0_bounced && dep->number == 0)
184 dwc->ep0_bounced = false;
185 else
186 usb_gadget_unmap_request(&dwc->gadget, &req->request,
187 req->direction);
188
189 trace_dwc3_gadget_giveback(req);
190
191 spin_unlock(&dwc->lock);
192 usb_gadget_giveback_request(&dep->endpoint, &req->request);
193 spin_lock(&dwc->lock);
194
195 if (dep->number > 1)
196 pm_runtime_put(dwc->dev);
197 }
198
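/**
 * dwc3_send_gadget_generic_command - issue a generic device command
 * @dwc: pointer to our controller context structure
 * @cmd: the command to be issued
 * @param: command parameter
 *
 * Caller should take care of locking. Issues @cmd with the given @param to
 * @dwc and waits for its completion.
 */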
199 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
200 {
201 u32 timeout = 500;
202 int status = 0;
203 int ret = 0;
204 u32 reg;
205
206 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
207 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
208
209 do {
210 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
211 if (!(reg & DWC3_DGCMD_CMDACT)) {
212 status = DWC3_DGCMD_STATUS(reg);
213 if (status)
214 ret = -EINVAL;
215 break;
216 }
217 	} while (--timeout);
218
219 if (!timeout) {
220 ret = -ETIMEDOUT;
221 status = -ETIMEDOUT;
222 }
223
224 trace_dwc3_gadget_generic_cmd(cmd, param, status);
225
226 return ret;
227 }
228
229 static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
230
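/**
 * dwc3_send_gadget_ep_cmd - issue an endpoint command
 * @dep: the endpoint to which the command is going to be issued
 * @cmd: the command to be issued
 * @params: parameters to the command
 *
 * Caller should handle locking. This function will issue @cmd with the given
 * @params to @dep and wait for its completion.
 */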
231 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
232 struct dwc3_gadget_ep_cmd_params *params)
233 {
234 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
235 struct dwc3 *dwc = dep->dwc;
236 u32 timeout = 500;
237 u32 reg;
238
239 int cmd_status = 0;
240 int susphy = false;
241 int ret = -EINVAL;
242
243 /*
244 * Synopsys Databook 2.60a states, in section 6.3.2.5.[1-8], that if
245 * we're issuing an endpoint command, we must check if
246 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
247 *
248 * We will also restore the SUSPHY bit to its original value before
249 * returning, as stated in the same section of the Synopsys databook.
250 */
251 if (dwc->gadget.speed <= USB_SPEED_HIGH) {
252 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
253 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
254 susphy = true;
255 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
256 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
257 }
258 }
259
260 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
261 int needs_wakeup;
262
263 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
264 dwc->link_state == DWC3_LINK_STATE_U2 ||
265 dwc->link_state == DWC3_LINK_STATE_U3);
266
267 if (unlikely(needs_wakeup)) {
268 ret = __dwc3_gadget_wakeup(dwc);
269 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
270 ret);
271 }
272 }
273
274 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
275 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
276 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
277
278 /*
279 * Synopsys Databook 2.60a states, in section 6.3.2.5.6, that if we're
280 * not relying on XferNotReady, we can make use of a special "No
281 * Response Update Transfer" command where we should clear both CmdAct
282 * and CmdIOC bits.
283 *
284 * With this, we don't need to wait for command completion and can
285 * straight away issue further commands to the endpoint.
286 *
287 * NOTICE: We're making an assumption that control endpoints will never
288 * make use of Update Transfer command. This is a safe assumption
289 * because we can never have more than one request at a time with
290 * Control Endpoints. If anybody changes that assumption, this chunk
291 * needs to be updated accordingly.
292 */
293 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
294 !usb_endpoint_xfer_isoc(desc))
295 cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
296 else
297 cmd |= DWC3_DEPCMD_CMDACT;
298
299 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
300 do {
301 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
302 if (!(reg & DWC3_DEPCMD_CMDACT)) {
303 cmd_status = DWC3_DEPCMD_STATUS(reg);
304
305 switch (cmd_status) {
306 case 0:
307 ret = 0;
308 break;
309 case DEPEVT_TRANSFER_NO_RESOURCE:
310 ret = -EINVAL;
311 break;
312 case DEPEVT_TRANSFER_BUS_EXPIRY:
313 /*
314 * SW issues START TRANSFER command to
315 * isochronous ep with a future frame interval. If
316 * the future interval time has already passed when
317 * the core receives the command, it will respond
318 * with an error status of 'Bus Expiry'.
319 *
320 * Instead of always returning -EINVAL, let's
321 * give a hint to the gadget driver that this is
322 * the case by returning -EAGAIN.
323 */
324 ret = -EAGAIN;
325 break;
326 default:
327 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
328 }
329
330 break;
331 }
332 } while (--timeout);
333
334 if (timeout == 0) {
335 ret = -ETIMEDOUT;
336 cmd_status = -ETIMEDOUT;
337 }
338
339 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
340
341 if (ret == 0) {
342 switch (DWC3_DEPCMD_CMD(cmd)) {
343 case DWC3_DEPCMD_STARTTRANSFER:
344 dep->flags |= DWC3_EP_TRANSFER_STARTED;
345 break;
346 case DWC3_DEPCMD_ENDTRANSFER:
347 dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
348 break;
349 default:
350 /* nothing */
351 break;
352 }
353 }
354
355 if (unlikely(susphy)) {
356 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
357 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
358 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
359 }
360
361 return ret;
362 }
363
364 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
365 {
366 struct dwc3 *dwc = dep->dwc;
367 struct dwc3_gadget_ep_cmd_params params;
368 u32 cmd = DWC3_DEPCMD_CLEARSTALL;
369
370 /*
371 * As of core revision 2.60a the recommended programming model
372 * is to set the ClearPendIN bit when issuing a Clear Stall EP
373 * command for IN endpoints. This is to prevent an issue where
374 * some (non-compliant) hosts may not send ACK TPs for pending
375 * IN transfers due to a mishandled error condition. Synopsys
376 * STAR 9000614252.
377 */
378 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
379 (dwc->gadget.speed >= USB_SPEED_SUPER))
380 cmd |= DWC3_DEPCMD_CLEARPENDIN;
381
382 memset(&params, 0, sizeof(params));
383
384 return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
385 }
386
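/* Returns the bus (DMA) address of @trb within @dep's coherent TRB pool */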
387 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
388 struct dwc3_trb *trb)
389 {
390 u32 offset = (char *) trb - (char *) dep->trb_pool;
391
392 return dep->trb_pool_dma + offset;
393 }
394
395 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
396 {
397 struct dwc3 *dwc = dep->dwc;
398
399 if (dep->trb_pool)
400 return 0;
401
402 dep->trb_pool = dma_alloc_coherent(dwc->dev,
403 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
404 &dep->trb_pool_dma, GFP_KERNEL);
405 if (!dep->trb_pool) {
406 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
407 dep->name);
408 return -ENOMEM;
409 }
410
411 return 0;
412 }
413
414 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
415 {
416 struct dwc3 *dwc = dep->dwc;
417
418 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
419 dep->trb_pool, dep->trb_pool_dma);
420
421 dep->trb_pool = NULL;
422 dep->trb_pool_dma = 0;
423 }
424
425 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
426
427 /**
428 * dwc3_gadget_start_config - Configure EP resources
429 * @dwc: pointer to our controller context structure
430 * @dep: endpoint that is being enabled
431 *
432 * The assignment of transfer resources cannot perfectly follow the
433 * data book due to the fact that the controller driver does not have
434 * all knowledge of the configuration in advance. It is given this
435 * information piecemeal by the composite gadget framework after every
436 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
437 * programming model in this scenario can cause errors. For two
438 * reasons:
439 *
440 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
441 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
442 * multiple interfaces.
443 *
444 * 2) The databook does not mention doing more DEPXFERCFG for new
445 * endpoint on alt setting (8.1.6).
446 *
447 * The following simplified method is used instead:
448 *
449 * All hardware endpoints can be assigned a transfer resource and this
450 * setting will stay persistent until either a core reset or
451 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
452 * do DEPXFERCFG for every hardware endpoint as well. We are
453 * guaranteed that there are as many transfer resources as endpoints.
454 *
455 * This function is called for each endpoint when it is being enabled
456 * but is triggered only when called for EP0-out, which always happens
457 * first, and which should only happen in one of the above conditions.
458 */
459 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
460 {
461 struct dwc3_gadget_ep_cmd_params params;
462 u32 cmd;
463 int i;
464 int ret;
465
466 if (dep->number)
467 return 0;
468
469 memset(&params, 0x00, sizeof(params));
470 cmd = DWC3_DEPCMD_DEPSTARTCFG;
471
472 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
473 if (ret)
474 return ret;
475
476 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
477 struct dwc3_ep *dep = dwc->eps[i];
478
479 if (!dep)
480 continue;
481
482 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
483 if (ret)
484 return ret;
485 }
486
487 return 0;
488 }
489
490 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
491 const struct usb_endpoint_descriptor *desc,
492 const struct usb_ss_ep_comp_descriptor *comp_desc,
493 bool modify, bool restore)
494 {
495 struct dwc3_gadget_ep_cmd_params params;
496
497 if (dev_WARN_ONCE(dwc->dev, modify && restore,
498 "Can't modify and restore\n"))
499 return -EINVAL;
500
501 memset(&params, 0x00, sizeof(params));
502
503 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
504 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
505
506 /* Burst size is only needed in SuperSpeed mode */
507 if (dwc->gadget.speed >= USB_SPEED_SUPER) {
508 u32 burst = dep->endpoint.maxburst;
509 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
510 }
511
512 if (modify) {
513 params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
514 } else if (restore) {
515 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
516 params.param2 |= dep->saved_state;
517 } else {
518 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
519 }
520
521 if (usb_endpoint_xfer_control(desc))
522 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
523
524 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
525 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
526
527 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
528 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
529 | DWC3_DEPCFG_STREAM_EVENT_EN;
530 dep->stream_capable = true;
531 }
532
533 if (!usb_endpoint_xfer_control(desc))
534 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
535
536 /*
537 * We are doing 1:1 mapping for endpoints, meaning
538 * Physical Endpoint 2 maps to Logical Endpoint 2 and
539 * so on. We consider the direction bit as part of the physical
540 * endpoint number, so USB endpoint 0x81 maps to physical endpoint 3.
541 */
542 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
543
544 /*
545 * We must use the lower 16 TX FIFOs even though
546 * HW might have more
547 */
548 if (dep->direction)
549 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
550
551 if (desc->bInterval) {
552 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
553 dep->interval = 1 << (desc->bInterval - 1);
554 }
555
556 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
557 }
558
559 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
560 {
561 struct dwc3_gadget_ep_cmd_params params;
562
563 memset(&params, 0x00, sizeof(params));
564
565 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
566
567 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
568 &params);
569 }
570
571 /**
572 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
573 * @dep: endpoint to be initialized
574 * @desc: USB Endpoint Descriptor
 * @comp_desc: USB SuperSpeed Endpoint Companion Descriptor
 * @modify: true when modifying the config of an already-configured endpoint
 * @restore: true when restoring saved endpoint state (e.g. after hibernation)
575 *
576 * Caller should take care of locking
577 */
578 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
579 const struct usb_endpoint_descriptor *desc,
580 const struct usb_ss_ep_comp_descriptor *comp_desc,
581 bool modify, bool restore)
582 {
583 struct dwc3 *dwc = dep->dwc;
584 u32 reg;
585 int ret;
586
587 if (!(dep->flags & DWC3_EP_ENABLED)) {
588 ret = dwc3_gadget_start_config(dwc, dep);
589 if (ret)
590 return ret;
591 }
592
593 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
594 restore);
595 if (ret)
596 return ret;
597
598 if (!(dep->flags & DWC3_EP_ENABLED)) {
599 struct dwc3_trb *trb_st_hw;
600 struct dwc3_trb *trb_link;
601
602 dep->endpoint.desc = desc;
603 dep->comp_desc = comp_desc;
604 dep->type = usb_endpoint_type(desc);
605 dep->flags |= DWC3_EP_ENABLED;
606 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
607
608 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
609 reg |= DWC3_DALEPENA_EP(dep->number);
610 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
611
612 init_waitqueue_head(&dep->wait_end_transfer);
613
614 if (usb_endpoint_xfer_control(desc))
615 goto out;
616
617 /* Initialize the TRB ring */
618 dep->trb_dequeue = 0;
619 dep->trb_enqueue = 0;
620 memset(dep->trb_pool, 0,
621 sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
622
623 /* Link TRB. The HWO bit is never reset */
624 trb_st_hw = &dep->trb_pool[0];
625
626 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
627 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
628 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
629 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
630 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
631 }
632
633 /*
634 * Issue StartTransfer here with no-op TRB so we can always rely on No
635 * Response Update Transfer command.
636 */
637 if (usb_endpoint_xfer_bulk(desc)) {
638 struct dwc3_gadget_ep_cmd_params params;
639 struct dwc3_trb *trb;
640 dma_addr_t trb_dma;
641 u32 cmd;
642
643 memset(&params, 0, sizeof(params));
644 trb = &dep->trb_pool[0];
645 trb_dma = dwc3_trb_dma_offset(dep, trb);
646
647 params.param0 = upper_32_bits(trb_dma);
648 params.param1 = lower_32_bits(trb_dma);
649
650 cmd = DWC3_DEPCMD_STARTTRANSFER;
651
652 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
653 if (ret < 0)
654 return ret;
655
656 dep->flags |= DWC3_EP_BUSY;
657
658 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
659 WARN_ON_ONCE(!dep->resource_index);
660 }
661
662
663 out:
664 trace_dwc3_gadget_ep_enable(dep);
665
666 return 0;
667 }
668
669 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
670 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
671 {
672 struct dwc3_request *req;
673
674 dwc3_stop_active_transfer(dwc, dep->number, true);
675
676 	/* giveback all requests to the gadget driver */
677 while (!list_empty(&dep->started_list)) {
678 req = next_request(&dep->started_list);
679
680 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
681 }
682
683 while (!list_empty(&dep->pending_list)) {
684 req = next_request(&dep->pending_list);
685
686 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
687 }
688 }
689
690 /**
691 * __dwc3_gadget_ep_disable - Disables a HW endpoint
692 * @dep: the endpoint to disable
693 *
694 * This function also removes requests which are currently processed by the
695 * hardware and those which are not yet scheduled.
696 * Caller should take care of locking.
697 */
698 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
699 {
700 struct dwc3 *dwc = dep->dwc;
701 u32 reg;
702
703 trace_dwc3_gadget_ep_disable(dep);
704
705 dwc3_remove_requests(dwc, dep);
706
707 /* make sure HW endpoint isn't stalled */
708 if (dep->flags & DWC3_EP_STALL)
709 __dwc3_gadget_ep_set_halt(dep, 0, false);
710
711 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
712 reg &= ~DWC3_DALEPENA_EP(dep->number);
713 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
714
715 dep->stream_capable = false;
716 dep->endpoint.desc = NULL;
717 dep->comp_desc = NULL;
718 dep->type = 0;
719 dep->flags &= DWC3_EP_END_TRANSFER_PENDING;
720
721 return 0;
722 }
723
724 /* -------------------------------------------------------------------------- */
725
726 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
727 const struct usb_endpoint_descriptor *desc)
728 {
729 return -EINVAL;
730 }
731
732 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
733 {
734 return -EINVAL;
735 }
736
737 /* -------------------------------------------------------------------------- */
738
739 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
740 const struct usb_endpoint_descriptor *desc)
741 {
742 struct dwc3_ep *dep;
743 struct dwc3 *dwc;
744 unsigned long flags;
745 int ret;
746
747 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
748 pr_debug("dwc3: invalid parameters\n");
749 return -EINVAL;
750 }
751
752 if (!desc->wMaxPacketSize) {
753 pr_debug("dwc3: missing wMaxPacketSize\n");
754 return -EINVAL;
755 }
756
757 dep = to_dwc3_ep(ep);
758 dwc = dep->dwc;
759
760 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
761 "%s is already enabled\n",
762 dep->name))
763 return 0;
764
765 spin_lock_irqsave(&dwc->lock, flags);
766 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
767 spin_unlock_irqrestore(&dwc->lock, flags);
768
769 return ret;
770 }
771
772 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
773 {
774 struct dwc3_ep *dep;
775 struct dwc3 *dwc;
776 unsigned long flags;
777 int ret;
778
779 if (!ep) {
780 pr_debug("dwc3: invalid parameters\n");
781 return -EINVAL;
782 }
783
784 dep = to_dwc3_ep(ep);
785 dwc = dep->dwc;
786
787 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
788 "%s is already disabled\n",
789 dep->name))
790 return 0;
791
792 spin_lock_irqsave(&dwc->lock, flags);
793 ret = __dwc3_gadget_ep_disable(dep);
794 spin_unlock_irqrestore(&dwc->lock, flags);
795
796 return ret;
797 }
798
799 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
800 gfp_t gfp_flags)
801 {
802 struct dwc3_request *req;
803 struct dwc3_ep *dep = to_dwc3_ep(ep);
804
805 req = kzalloc(sizeof(*req), gfp_flags);
806 if (!req)
807 return NULL;
808
809 req->epnum = dep->number;
810 req->dep = dep;
811
812 dep->allocated_requests++;
813
814 trace_dwc3_alloc_request(req);
815
816 return &req->request;
817 }
818
819 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
820 struct usb_request *request)
821 {
822 struct dwc3_request *req = to_dwc3_request(request);
823 struct dwc3_ep *dep = to_dwc3_ep(ep);
824
825 dep->allocated_requests--;
826 trace_dwc3_free_request(req);
827 kfree(req);
828 }
829
830 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep);
831
832 /**
833 * dwc3_prepare_one_trb - setup one TRB from one request
834 * @dep: endpoint for which this request is prepared
835 * @req: dwc3_request pointer
 * @dma: DMA address of the buffer (or buffer segment) to be transferred
 * @length: length of the buffer, in bytes
 * @chain: true when this TRB chains to the next one
 * @node: index of this TRB within the request (0 means the first TRB)
836 */
837 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
838 struct dwc3_request *req, dma_addr_t dma,
839 unsigned length, unsigned chain, unsigned node)
840 {
841 struct dwc3_trb *trb;
842 struct dwc3 *dwc = dep->dwc;
843 struct usb_gadget *gadget = &dwc->gadget;
844 enum usb_device_speed speed = gadget->speed;
845
846 trb = &dep->trb_pool[dep->trb_enqueue];
847
848 if (!req->trb) {
849 dwc3_gadget_move_started_request(req);
850 req->trb = trb;
851 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
852 dep->queued_requests++;
853 }
854
855 dwc3_ep_inc_enq(dep);
856
857 trb->size = DWC3_TRB_SIZE_LENGTH(length);
858 trb->bpl = lower_32_bits(dma);
859 trb->bph = upper_32_bits(dma);
860
861 switch (usb_endpoint_type(dep->endpoint.desc)) {
862 case USB_ENDPOINT_XFER_CONTROL:
863 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
864 break;
865
866 case USB_ENDPOINT_XFER_ISOC:
867 if (!node) {
868 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
869
870 if (speed == USB_SPEED_HIGH) {
871 struct usb_ep *ep = &dep->endpoint;
872 trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
873 }
874 } else {
875 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
876 }
877
878 /* always enable Interrupt on Missed ISOC */
879 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
880 break;
881
882 case USB_ENDPOINT_XFER_BULK:
883 case USB_ENDPOINT_XFER_INT:
884 trb->ctrl = DWC3_TRBCTL_NORMAL;
885 break;
886 default:
887 /*
888 * This is only possible with faulty memory because we
889 * checked it already :)
890 */
891 dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
892 usb_endpoint_type(dep->endpoint.desc));
893 }
894
895 /* always enable Continue on Short Packet */
896 if (usb_endpoint_dir_out(dep->endpoint.desc)) {
897 trb->ctrl |= DWC3_TRB_CTRL_CSP;
898
899 if (req->request.short_not_ok)
900 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
901 }
902
903 if ((!req->request.no_interrupt && !chain) ||
904 (dwc3_calc_trbs_left(dep) == 0))
905 trb->ctrl |= DWC3_TRB_CTRL_IOC;
906
907 if (chain)
908 trb->ctrl |= DWC3_TRB_CTRL_CHN;
909
910 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
911 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
912
913 trb->ctrl |= DWC3_TRB_CTRL_HWO;
914
915 trace_dwc3_prepare_trb(dep, trb);
916 }
917
918 /**
919 * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
920 * @dep: The endpoint with the TRB ring
921 * @index: The index of the current TRB in the ring
922 *
923 * Returns the TRB prior to the one pointed to by the index. If the
924 * index is 0, we will wrap backwards, skip the link TRB, and return
925 * the one just before that.
926 */
927 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
928 {
929 u8 tmp = index;
930
931 if (!tmp)
932 tmp = DWC3_TRB_NUM - 1;
933
934 return &dep->trb_pool[tmp - 1];
935 }
936
937 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
938 {
939 struct dwc3_trb *tmp;
940 u8 trbs_left;
941
942 /*
943 * If enqueue & dequeue are equal then it is either full or empty.
944 *
945 * One way to know for sure is if the TRB right before us has HWO bit
946 * set or not. If it has, then we're definitely full and can't fit any
947 * more transfers in our ring.
948 */
949 if (dep->trb_enqueue == dep->trb_dequeue) {
950 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
951 if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
952 return 0;
953
954 return DWC3_TRB_NUM - 1;
955 }
956
957 trbs_left = dep->trb_dequeue - dep->trb_enqueue;
958 trbs_left &= (DWC3_TRB_NUM - 1);
959
960 if (dep->trb_dequeue < dep->trb_enqueue)
961 trbs_left--;
962
963 return trbs_left;
964 }
965
966 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
967 struct dwc3_request *req)
968 {
969 struct scatterlist *sg = req->sg;
970 struct scatterlist *s;
971 unsigned int length;
972 dma_addr_t dma;
973 int i;
974
975 for_each_sg(sg, s, req->num_pending_sgs, i) {
976 unsigned chain = true;
977
978 length = sg_dma_len(s);
979 dma = sg_dma_address(s);
980
981 if (sg_is_last(s))
982 chain = false;
983
984 dwc3_prepare_one_trb(dep, req, dma, length,
985 chain, i);
986
987 if (!dwc3_calc_trbs_left(dep))
988 break;
989 }
990 }
991
992 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
993 struct dwc3_request *req)
994 {
995 unsigned int length;
996 dma_addr_t dma;
997
998 dma = req->request.dma;
999 length = req->request.length;
1000
1001 dwc3_prepare_one_trb(dep, req, dma, length,
1002 false, 0);
1003 }
1004
1005 /*
1006 * dwc3_prepare_trbs - setup TRBs from requests
1007 * @dep: endpoint for which requests are being prepared
1008 *
1009 * The function goes through the requests list and sets up TRBs for the
1010 * transfers. The function returns once there are no more TRBs available or
1011 * it runs out of requests.
1012 */
1013 static void dwc3_prepare_trbs(struct dwc3_ep *dep)
1014 {
1015 struct dwc3_request *req, *n;
1016
1017 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
1018
1019 if (!dwc3_calc_trbs_left(dep))
1020 return;
1021
1022 /*
1023 * We can get in a situation where there's a request in the started list
1024 * but there weren't enough TRBs to fully kick it in the first time
1025 * around, so it has been waiting for more TRBs to be freed up.
1026 *
1027 * In that case, we should check if we have a request with pending_sgs
1028 * in the started list and prepare TRBs for that request first,
1029 * otherwise we will prepare TRBs completely out of order and that will
1030 * break things.
1031 */
1032 list_for_each_entry(req, &dep->started_list, list) {
1033 if (req->num_pending_sgs > 0)
1034 dwc3_prepare_one_trb_sg(dep, req);
1035
1036 if (!dwc3_calc_trbs_left(dep))
1037 return;
1038 }
1039
1040 list_for_each_entry_safe(req, n, &dep->pending_list, list) {
1041 if (req->num_pending_sgs > 0)
1042 dwc3_prepare_one_trb_sg(dep, req);
1043 else
1044 dwc3_prepare_one_trb_linear(dep, req);
1045
1046 if (!dwc3_calc_trbs_left(dep))
1047 return;
1048 }
1049 }
1050
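/*
 * __dwc3_gadget_kick_transfer - map pending requests to TRBs and (re)start the
 * transfer on @dep. If no transfer is in flight yet, a StartTransfer command
 * is issued with @cmd_param (used as the start (micro-)frame for isochronous
 * endpoints); otherwise an UpdateTransfer command is issued against the
 * current transfer resource. Caller should take care of locking.
 */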
1051 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
1052 {
1053 struct dwc3_gadget_ep_cmd_params params;
1054 struct dwc3_request *req;
1055 int starting;
1056 int ret;
1057 u32 cmd;
1058
1059 starting = !(dep->flags & DWC3_EP_BUSY);
1060
1061 dwc3_prepare_trbs(dep);
1062 req = next_request(&dep->started_list);
1063 if (!req) {
1064 dep->flags |= DWC3_EP_PENDING_REQUEST;
1065 return 0;
1066 }
1067
1068 memset(&params, 0, sizeof(params));
1069
1070 if (starting) {
1071 params.param0 = upper_32_bits(req->trb_dma);
1072 params.param1 = lower_32_bits(req->trb_dma);
1073 cmd = DWC3_DEPCMD_STARTTRANSFER |
1074 DWC3_DEPCMD_PARAM(cmd_param);
1075 } else {
1076 cmd = DWC3_DEPCMD_UPDATETRANSFER |
1077 DWC3_DEPCMD_PARAM(dep->resource_index);
1078 }
1079
1080 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1081 if (ret < 0) {
1082 /*
1083 * FIXME we need to iterate over the list of requests
1084 * here and stop, unmap, free and del each of the linked
1085 * requests instead of what we do now.
1086 */
1087 if (req->trb)
1088 memset(req->trb, 0, sizeof(struct dwc3_trb));
1089 dep->queued_requests--;
1090 dwc3_gadget_giveback(dep, req, ret);
1091 return ret;
1092 }
1093
1094 dep->flags |= DWC3_EP_BUSY;
1095
1096 if (starting) {
1097 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
1098 WARN_ON_ONCE(!dep->resource_index);
1099 }
1100
1101 return 0;
1102 }
1103
1104 static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
1105 {
1106 u32 reg;
1107
1108 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1109 return DWC3_DSTS_SOFFN(reg);
1110 }
1111
1112 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1113 struct dwc3_ep *dep, u32 cur_uf)
1114 {
1115 u32 uf;
1116
1117 if (list_empty(&dep->pending_list)) {
1118 dev_info(dwc->dev, "%s: ran out of requests\n",
1119 dep->name);
1120 dep->flags |= DWC3_EP_PENDING_REQUEST;
1121 return;
1122 }
1123
1124 /* 4 micro frames in the future */
1125 uf = cur_uf + dep->interval * 4;
1126
1127 __dwc3_gadget_kick_transfer(dep, uf);
1128 }
1129
1130 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1131 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1132 {
1133 u32 cur_uf, mask;
1134
1135 mask = ~(dep->interval - 1);
1136 cur_uf = event->parameters & mask;
1137
1138 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1139 }
1140
1141 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1142 {
1143 struct dwc3 *dwc = dep->dwc;
1144 int ret;
1145
1146 if (!dep->endpoint.desc) {
1147 dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
1148 dep->name);
1149 return -ESHUTDOWN;
1150 }
1151
1152 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1153 &req->request, req->dep->name)) {
1154 dev_err(dwc->dev, "%s: request %p belongs to '%s'\n",
1155 dep->name, &req->request, req->dep->name);
1156 return -EINVAL;
1157 }
1158
1159 pm_runtime_get(dwc->dev);
1160
1161 req->request.actual = 0;
1162 req->request.status = -EINPROGRESS;
1163 req->direction = dep->direction;
1164 req->epnum = dep->number;
1165
1166 trace_dwc3_ep_queue(req);
1167
1168 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1169 dep->direction);
1170 if (ret)
1171 return ret;
1172
1173 req->sg = req->request.sg;
1174 req->num_pending_sgs = req->request.num_mapped_sgs;
1175
1176 list_add_tail(&req->list, &dep->pending_list);
1177
1178 /*
1179 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
1180 * wait for an XferNotReady event so we will know what the current
1181 * (micro-)frame number is.
1182 *
1183 * Without this trick, we are very, very likely gonna get Bus Expiry
1184 * errors which will force us to issue the EndTransfer command.
1185 */
1186 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1187 if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
1188 if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
1189 dwc3_stop_active_transfer(dwc, dep->number, true);
1190 dep->flags = DWC3_EP_ENABLED;
1191 } else {
1192 u32 cur_uf;
1193
1194 cur_uf = __dwc3_gadget_get_frame(dwc);
1195 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1196 }
1197 }
1198 return 0;
1199 }
1200
1201 if (!dwc3_calc_trbs_left(dep))
1202 return 0;
1203
1204 ret = __dwc3_gadget_kick_transfer(dep, 0);
1205 if (ret == -EBUSY)
1206 ret = 0;
1207
1208 return ret;
1209 }
1210
1211 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
1212 struct usb_request *request)
1213 {
1214 dwc3_gadget_ep_free_request(ep, request);
1215 }
1216
1217 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
1218 {
1219 struct dwc3_request *req;
1220 struct usb_request *request;
1221 struct usb_ep *ep = &dep->endpoint;
1222
1223 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
1224 if (!request)
1225 return -ENOMEM;
1226
1227 request->length = 0;
1228 request->buf = dwc->zlp_buf;
1229 request->complete = __dwc3_gadget_ep_zlp_complete;
1230
1231 req = to_dwc3_request(request);
1232
1233 return __dwc3_gadget_ep_queue(dep, req);
1234 }
1235
1236 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1237 gfp_t gfp_flags)
1238 {
1239 struct dwc3_request *req = to_dwc3_request(request);
1240 struct dwc3_ep *dep = to_dwc3_ep(ep);
1241 struct dwc3 *dwc = dep->dwc;
1242
1243 unsigned long flags;
1244
1245 int ret;
1246
1247 spin_lock_irqsave(&dwc->lock, flags);
1248 ret = __dwc3_gadget_ep_queue(dep, req);
1249
1250 /*
1251 * Okay, here's the thing: if the gadget driver has requested a ZLP by
1252 * setting request->zero, instead of doing magic, we will just queue an
1253 * extra usb_request ourselves so that it gets handled the same way as
1254 * any other request.
1255 */
1256 if (ret == 0 && request->zero && request->length &&
1257 (request->length % ep->maxpacket == 0))
1258 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
1259
1260 spin_unlock_irqrestore(&dwc->lock, flags);
1261
1262 return ret;
1263 }
1264
1265 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1266 struct usb_request *request)
1267 {
1268 struct dwc3_request *req = to_dwc3_request(request);
1269 struct dwc3_request *r = NULL;
1270
1271 struct dwc3_ep *dep = to_dwc3_ep(ep);
1272 struct dwc3 *dwc = dep->dwc;
1273
1274 unsigned long flags;
1275 int ret = 0;
1276
1277 trace_dwc3_ep_dequeue(req);
1278
1279 spin_lock_irqsave(&dwc->lock, flags);
1280
1281 list_for_each_entry(r, &dep->pending_list, list) {
1282 if (r == req)
1283 break;
1284 }
1285
1286 if (r != req) {
1287 list_for_each_entry(r, &dep->started_list, list) {
1288 if (r == req)
1289 break;
1290 }
1291 if (r == req) {
1292 /* wait until it is processed */
1293 dwc3_stop_active_transfer(dwc, dep->number, true);
1294 goto out1;
1295 }
1296 dev_err(dwc->dev, "request %p was not queued to %s\n",
1297 request, ep->name);
1298 ret = -EINVAL;
1299 goto out0;
1300 }
1301
1302 out1:
1303 /* giveback the request */
1304 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1305
1306 out0:
1307 spin_unlock_irqrestore(&dwc->lock, flags);
1308
1309 return ret;
1310 }
1311
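/*
 * __dwc3_gadget_ep_set_halt - set or clear the STALL condition on @dep
 * @value: 1 to stall the endpoint, 0 to clear the stall
 * @protocol: true for protocol stalls; when false, the stall is deferred with
 *	-EAGAIN if a transfer is already in flight on the endpoint
 *
 * Caller should take care of locking. Isochronous endpoints cannot be halted.
 */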
1312 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1313 {
1314 struct dwc3_gadget_ep_cmd_params params;
1315 struct dwc3 *dwc = dep->dwc;
1316 int ret;
1317
1318 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1319 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1320 return -EINVAL;
1321 }
1322
1323 memset(&params, 0x00, sizeof(params));
1324
1325 if (value) {
1326 struct dwc3_trb *trb;
1327
1328 unsigned transfer_in_flight;
1329 unsigned started;
1330
1331 if (dep->number > 1)
1332 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
1333 else
1334 trb = &dwc->ep0_trb[dep->trb_enqueue];
1335
1336 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
1337 started = !list_empty(&dep->started_list);
1338
1339 if (!protocol && ((dep->direction && transfer_in_flight) ||
1340 (!dep->direction && started))) {
1341 return -EAGAIN;
1342 }
1343
1344 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
1345 &params);
1346 if (ret)
1347 dev_err(dwc->dev, "failed to set STALL on %s\n",
1348 dep->name);
1349 else
1350 dep->flags |= DWC3_EP_STALL;
1351 } else {
1352
1353 ret = dwc3_send_clear_stall_ep_cmd(dep);
1354 if (ret)
1355 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1356 dep->name);
1357 else
1358 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1359 }
1360
1361 return ret;
1362 }
1363
1364 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1365 {
1366 struct dwc3_ep *dep = to_dwc3_ep(ep);
1367 struct dwc3 *dwc = dep->dwc;
1368
1369 unsigned long flags;
1370
1371 int ret;
1372
1373 spin_lock_irqsave(&dwc->lock, flags);
1374 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1375 spin_unlock_irqrestore(&dwc->lock, flags);
1376
1377 return ret;
1378 }
1379
1380 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1381 {
1382 struct dwc3_ep *dep = to_dwc3_ep(ep);
1383 struct dwc3 *dwc = dep->dwc;
1384 unsigned long flags;
1385 int ret;
1386
1387 spin_lock_irqsave(&dwc->lock, flags);
1388 dep->flags |= DWC3_EP_WEDGE;
1389
1390 if (dep->number == 0 || dep->number == 1)
1391 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1392 else
1393 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1394 spin_unlock_irqrestore(&dwc->lock, flags);
1395
1396 return ret;
1397 }
1398
1399 /* -------------------------------------------------------------------------- */
1400
1401 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1402 .bLength = USB_DT_ENDPOINT_SIZE,
1403 .bDescriptorType = USB_DT_ENDPOINT,
1404 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1405 };
1406
1407 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1408 .enable = dwc3_gadget_ep0_enable,
1409 .disable = dwc3_gadget_ep0_disable,
1410 .alloc_request = dwc3_gadget_ep_alloc_request,
1411 .free_request = dwc3_gadget_ep_free_request,
1412 .queue = dwc3_gadget_ep0_queue,
1413 .dequeue = dwc3_gadget_ep_dequeue,
1414 .set_halt = dwc3_gadget_ep0_set_halt,
1415 .set_wedge = dwc3_gadget_ep_set_wedge,
1416 };
1417
1418 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1419 .enable = dwc3_gadget_ep_enable,
1420 .disable = dwc3_gadget_ep_disable,
1421 .alloc_request = dwc3_gadget_ep_alloc_request,
1422 .free_request = dwc3_gadget_ep_free_request,
1423 .queue = dwc3_gadget_ep_queue,
1424 .dequeue = dwc3_gadget_ep_dequeue,
1425 .set_halt = dwc3_gadget_ep_set_halt,
1426 .set_wedge = dwc3_gadget_ep_set_wedge,
1427 };
1428
1429 /* -------------------------------------------------------------------------- */
1430
1431 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1432 {
1433 struct dwc3 *dwc = gadget_to_dwc(g);
1434
1435 return __dwc3_gadget_get_frame(dwc);
1436 }
1437
1438 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1439 {
1440 int retries;
1441
1442 int ret;
1443 u32 reg;
1444
1445 u8 link_state;
1446 u8 speed;
1447
1448 /*
1449 * According to the Databook, a remote wakeup request should
1450 * be issued only when the device is in the early suspend state.
1451 *
1452 * We can check that via USB Link State bits in DSTS register.
1453 */
1454 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1455
1456 speed = reg & DWC3_DSTS_CONNECTSPD;
1457 if ((speed == DWC3_DSTS_SUPERSPEED) ||
1458 (speed == DWC3_DSTS_SUPERSPEED_PLUS))
1459 return 0;
1460
1461 link_state = DWC3_DSTS_USBLNKST(reg);
1462
1463 switch (link_state) {
1464 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1465 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1466 break;
1467 default:
1468 return -EINVAL;
1469 }
1470
1471 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1472 if (ret < 0) {
1473 dev_err(dwc->dev, "failed to put link in Recovery\n");
1474 return ret;
1475 }
1476
1477 /* Recent versions do this automatically */
1478 if (dwc->revision < DWC3_REVISION_194A) {
1479 /* write zeroes to Link Change Request */
1480 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1481 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1482 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1483 }
1484
1485 /* poll until Link State changes to ON */
1486 retries = 20000;
1487
1488 while (retries--) {
1489 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1490
1491 /* in HS, means ON */
1492 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1493 break;
1494 }
1495
1496 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1497 dev_err(dwc->dev, "failed to send remote wakeup\n");
1498 return -EINVAL;
1499 }
1500
1501 return 0;
1502 }
1503
1504 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1505 {
1506 struct dwc3 *dwc = gadget_to_dwc(g);
1507 unsigned long flags;
1508 int ret;
1509
1510 spin_lock_irqsave(&dwc->lock, flags);
1511 ret = __dwc3_gadget_wakeup(dwc);
1512 spin_unlock_irqrestore(&dwc->lock, flags);
1513
1514 return ret;
1515 }
1516
1517 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1518 int is_selfpowered)
1519 {
1520 struct dwc3 *dwc = gadget_to_dwc(g);
1521 unsigned long flags;
1522
1523 spin_lock_irqsave(&dwc->lock, flags);
1524 g->is_selfpowered = !!is_selfpowered;
1525 spin_unlock_irqrestore(&dwc->lock, flags);
1526
1527 return 0;
1528 }
1529
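/*
 * dwc3_gadget_run_stop - set or clear the DCTL.RunStop bit
 * @is_on: true to start the controller, false to stop it
 * @suspend: true when stopping on the way into hibernation, in which case the
 *	KEEP_CONNECT bit is not cleared
 *
 * Polls DSTS.DevCtrlHlt until the controller reflects the requested state,
 * returning -ETIMEDOUT if it never does.
 */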
1530 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1531 {
1532 u32 reg;
1533 u32 timeout = 500;
1534
1535 if (pm_runtime_suspended(dwc->dev))
1536 return 0;
1537
1538 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1539 if (is_on) {
1540 if (dwc->revision <= DWC3_REVISION_187A) {
1541 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1542 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1543 }
1544
1545 if (dwc->revision >= DWC3_REVISION_194A)
1546 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1547 reg |= DWC3_DCTL_RUN_STOP;
1548
1549 if (dwc->has_hibernation)
1550 reg |= DWC3_DCTL_KEEP_CONNECT;
1551
1552 dwc->pullups_connected = true;
1553 } else {
1554 reg &= ~DWC3_DCTL_RUN_STOP;
1555
1556 if (dwc->has_hibernation && !suspend)
1557 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1558
1559 dwc->pullups_connected = false;
1560 }
1561
1562 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1563
1564 do {
1565 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1566 reg &= DWC3_DSTS_DEVCTRLHLT;
1567 } while (--timeout && !(!is_on ^ !reg));
1568
1569 if (!timeout)
1570 return -ETIMEDOUT;
1571
1572 return 0;
1573 }
1574
1575 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1576 {
1577 struct dwc3 *dwc = gadget_to_dwc(g);
1578 unsigned long flags;
1579 int ret;
1580
1581 is_on = !!is_on;
1582
1583 /*
1584 * Per databook, when we want to stop the gadget, if a control transfer
1585 * is still in process, complete it and get the core into setup phase.
1586 */
1587 if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
1588 reinit_completion(&dwc->ep0_in_setup);
1589
1590 ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
1591 msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
1592 if (ret == 0) {
1593 dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
1594 return -ETIMEDOUT;
1595 }
1596 }
1597
1598 spin_lock_irqsave(&dwc->lock, flags);
1599 ret = dwc3_gadget_run_stop(dwc, is_on, false);
1600 spin_unlock_irqrestore(&dwc->lock, flags);
1601
1602 return ret;
1603 }
1604
1605 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1606 {
1607 u32 reg;
1608
1609 /* Enable all but Start and End of Frame IRQs */
1610 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1611 DWC3_DEVTEN_EVNTOVERFLOWEN |
1612 DWC3_DEVTEN_CMDCMPLTEN |
1613 DWC3_DEVTEN_ERRTICERREN |
1614 DWC3_DEVTEN_WKUPEVTEN |
1615 DWC3_DEVTEN_CONNECTDONEEN |
1616 DWC3_DEVTEN_USBRSTEN |
1617 DWC3_DEVTEN_DISCONNEVTEN);
1618
1619 if (dwc->revision < DWC3_REVISION_250A)
1620 reg |= DWC3_DEVTEN_ULSTCNGEN;
1621
1622 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1623 }
1624
1625 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1626 {
1627 /* mask all interrupts */
1628 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1629 }
1630
1631 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1632 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1633
1634 /**
1635 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
1636 * @dwc: pointer to our context structure
1637 *
1638 * The following looks complex but it's actually very simple. In order to
1639 * calculate the number of packets we can burst at once on OUT transfers, we're
1640 * gonna use RxFIFO size.
1641 *
1642 * To calculate RxFIFO size we need two numbers:
1643 * MDWIDTH = size, in bits, of the internal memory bus
1644 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
1645 *
1646 * Given these two numbers, the formula is simple:
1647 *
1648 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
1649 *
1650 * 24 bytes is for 3x SETUP packets
1651 * 16 bytes is a clock domain crossing tolerance
1652 *
1653 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
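 *
 * For example, on a hypothetical configuration with MDWIDTH = 64 bits and
 * RAM2_DEPTH = 288, RxFIFO Size = (288 * 64 / 8) - 24 - 16 = 2264 bytes,
 * so NUMP = 2264 / 1024 = 2.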
1654 */
1655 static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
1656 {
1657 u32 ram2_depth;
1658 u32 mdwidth;
1659 u32 nump;
1660 u32 reg;
1661
1662 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
1663 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
1664
1665 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
1666 nump = min_t(u32, nump, 16);
1667
1668 /* update NumP */
1669 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1670 reg &= ~DWC3_DCFG_NUMP_MASK;
1671 reg |= nump << DWC3_DCFG_NUMP_SHIFT;
1672 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1673 }
1674
1675 static int __dwc3_gadget_start(struct dwc3 *dwc)
1676 {
1677 struct dwc3_ep *dep;
1678 int ret = 0;
1679 u32 reg;
1680
1681 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1682 reg &= ~(DWC3_DCFG_SPEED_MASK);
1683
1684 /**
1685 * WORKAROUND: DWC3 revision < 2.20a have an issue
1686 * which would cause metastability state on Run/Stop
1687 * bit if we try to force the IP to USB2-only mode.
1688 *
1689 * Because of that, we cannot configure the IP to any
1690 * speed other than SuperSpeed.
1691 *
1692 * Refers to:
1693 *
1694 * STAR#9000525659: Clock Domain Crossing on DCTL in
1695 * USB 2.0 Mode
1696 */
1697 if (dwc->revision < DWC3_REVISION_220A) {
1698 reg |= DWC3_DCFG_SUPERSPEED;
1699 } else {
1700 switch (dwc->maximum_speed) {
1701 case USB_SPEED_LOW:
1702 reg |= DWC3_DCFG_LOWSPEED;
1703 break;
1704 case USB_SPEED_FULL:
1705 reg |= DWC3_DCFG_FULLSPEED1;
1706 break;
1707 case USB_SPEED_HIGH:
1708 reg |= DWC3_DCFG_HIGHSPEED;
1709 break;
1710 case USB_SPEED_SUPER_PLUS:
1711 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
1712 break;
1713 default:
1714 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
1715 dwc->maximum_speed);
1716 /* fall through */
1717 case USB_SPEED_SUPER:
1718 reg |= DWC3_DCFG_SUPERSPEED;
1719 break;
1720 }
1721 }
1722 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1723
1724 /*
1725 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
1726 * field instead of letting dwc3 itself calculate that automatically.
1727 *
1728 * This way, we maximize the chances that we'll be able to get several
1729 * bursts of data without going through any sort of endpoint throttling.
1730 */
1731 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1732 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
1733 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1734
1735 dwc3_gadget_setup_nump(dwc);
1736
1737 /* Start with SuperSpeed Default */
1738 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1739
1740 dep = dwc->eps[0];
1741 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1742 false);
1743 if (ret) {
1744 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1745 goto err0;
1746 }
1747
1748 dep = dwc->eps[1];
1749 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1750 false);
1751 if (ret) {
1752 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1753 goto err1;
1754 }
1755
1756 /* begin to receive SETUP packets */
1757 dwc->ep0state = EP0_SETUP_PHASE;
1758 dwc3_ep0_out_start(dwc);
1759
1760 dwc3_gadget_enable_irq(dwc);
1761
1762 return 0;
1763
1764 err1:
1765 __dwc3_gadget_ep_disable(dwc->eps[0]);
1766
1767 err0:
1768 return ret;
1769 }
1770
1771 static int dwc3_gadget_start(struct usb_gadget *g,
1772 struct usb_gadget_driver *driver)
1773 {
1774 struct dwc3 *dwc = gadget_to_dwc(g);
1775 unsigned long flags;
1776 int ret = 0;
1777 int irq;
1778
1779 irq = dwc->irq_gadget;
1780 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1781 IRQF_SHARED, "dwc3", dwc->ev_buf);
1782 if (ret) {
1783 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1784 irq, ret);
1785 goto err0;
1786 }
1787
1788 spin_lock_irqsave(&dwc->lock, flags);
1789 if (dwc->gadget_driver) {
1790 dev_err(dwc->dev, "%s is already bound to %s\n",
1791 dwc->gadget.name,
1792 dwc->gadget_driver->driver.name);
1793 ret = -EBUSY;
1794 goto err1;
1795 }
1796
1797 dwc->gadget_driver = driver;
1798
1799 if (pm_runtime_active(dwc->dev))
1800 __dwc3_gadget_start(dwc);
1801
1802 spin_unlock_irqrestore(&dwc->lock, flags);
1803
1804 return 0;
1805
1806 err1:
1807 spin_unlock_irqrestore(&dwc->lock, flags);
1808 free_irq(irq, dwc);
1809
1810 err0:
1811 return ret;
1812 }
1813
1814 static void __dwc3_gadget_stop(struct dwc3 *dwc)
1815 {
1816 dwc3_gadget_disable_irq(dwc);
1817 __dwc3_gadget_ep_disable(dwc->eps[0]);
1818 __dwc3_gadget_ep_disable(dwc->eps[1]);
1819 }
1820
1821 static int dwc3_gadget_stop(struct usb_gadget *g)
1822 {
1823 struct dwc3 *dwc = gadget_to_dwc(g);
1824 unsigned long flags;
1825 int epnum;
1826
1827 spin_lock_irqsave(&dwc->lock, flags);
1828
1829 if (pm_runtime_suspended(dwc->dev))
1830 goto out;
1831
1832 __dwc3_gadget_stop(dwc);
1833
1834 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1835 struct dwc3_ep *dep = dwc->eps[epnum];
1836
1837 if (!dep)
1838 continue;
1839
1840 if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
1841 continue;
1842
1843 wait_event_lock_irq(dep->wait_end_transfer,
1844 !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
1845 dwc->lock);
1846 }
1847
1848 out:
1849 dwc->gadget_driver = NULL;
1850 spin_unlock_irqrestore(&dwc->lock, flags);
1851
1852 free_irq(dwc->irq_gadget, dwc->ev_buf);
1853
1854 return 0;
1855 }
1856
1857 static const struct usb_gadget_ops dwc3_gadget_ops = {
1858 .get_frame = dwc3_gadget_get_frame,
1859 .wakeup = dwc3_gadget_wakeup,
1860 .set_selfpowered = dwc3_gadget_set_selfpowered,
1861 .pullup = dwc3_gadget_pullup,
1862 .udc_start = dwc3_gadget_start,
1863 .udc_stop = dwc3_gadget_stop,
1864 };
1865
1866 /* -------------------------------------------------------------------------- */
1867
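/*
 * dwc3_gadget_init_hw_endpoints - allocate one struct dwc3_ep per hardware
 * endpoint of the given @direction (0 = OUT, 1 = IN). Physical endpoint
 * numbers are even for OUT and odd for IN; endpoints other than 0/1 also get
 * a TRB pool and are added to the gadget's ep_list.
 */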
1868 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1869 u8 num, u32 direction)
1870 {
1871 struct dwc3_ep *dep;
1872 u8 i;
1873
1874 for (i = 0; i < num; i++) {
1875 u8 epnum = (i << 1) | (direction ? 1 : 0);
1876
1877 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1878 if (!dep)
1879 return -ENOMEM;
1880
1881 dep->dwc = dwc;
1882 dep->number = epnum;
1883 dep->direction = !!direction;
1884 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
1885 dwc->eps[epnum] = dep;
1886
1887 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1888 (epnum & 1) ? "in" : "out");
1889
1890 dep->endpoint.name = dep->name;
1891 spin_lock_init(&dep->lock);
1892
1893 if (epnum == 0 || epnum == 1) {
1894 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1895 dep->endpoint.maxburst = 1;
1896 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1897 if (!epnum)
1898 dwc->gadget.ep0 = &dep->endpoint;
1899 } else {
1900 int ret;
1901
1902 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1903 dep->endpoint.max_streams = 15;
1904 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1905 list_add_tail(&dep->endpoint.ep_list,
1906 &dwc->gadget.ep_list);
1907
1908 ret = dwc3_alloc_trb_pool(dep);
1909 if (ret)
1910 return ret;
1911 }
1912
1913 if (epnum == 0 || epnum == 1) {
1914 dep->endpoint.caps.type_control = true;
1915 } else {
1916 dep->endpoint.caps.type_iso = true;
1917 dep->endpoint.caps.type_bulk = true;
1918 dep->endpoint.caps.type_int = true;
1919 }
1920
1921 dep->endpoint.caps.dir_in = !!direction;
1922 dep->endpoint.caps.dir_out = !direction;
1923
1924 INIT_LIST_HEAD(&dep->pending_list);
1925 INIT_LIST_HEAD(&dep->started_list);
1926 }
1927
1928 return 0;
1929 }
1930
1931 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1932 {
1933 int ret;
1934
1935 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1936
1937 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1938 if (ret < 0) {
1939 dev_err(dwc->dev, "failed to initialize OUT endpoints\n");
1940 return ret;
1941 }
1942
1943 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1944 if (ret < 0) {
1945 dev_err(dwc->dev, "failed to initialize IN endpoints\n");
1946 return ret;
1947 }
1948
1949 return 0;
1950 }
1951
1952 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1953 {
1954 struct dwc3_ep *dep;
1955 u8 epnum;
1956
1957 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1958 dep = dwc->eps[epnum];
1959 if (!dep)
1960 continue;
1961 /*
1962 * Physical endpoints 0 and 1 are special; they form the
1963 * bi-directional USB endpoint 0.
1964 *
1965 * For those two physical endpoints, we don't allocate a TRB
1966 * pool nor do we add them to the endpoints list. Due to that, we
1967 * shouldn't do these two operations otherwise we would end up
1968 * with all sorts of bugs when removing dwc3.ko.
1969 */
1970 if (epnum != 0 && epnum != 1) {
1971 dwc3_free_trb_pool(dep);
1972 list_del(&dep->endpoint.ep_list);
1973 }
1974
1975 kfree(dep);
1976 }
1977 }
1978
1979 /* -------------------------------------------------------------------------- */
1980
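/*
 * Process a single completed TRB belonging to @req. Returns 1 when the caller
 * should stop walking this request's TRBs (HWO still owned by hardware, short
 * packet on the last TRB, or an IOC completion), 0 to keep going.
 */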
1981 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1982 struct dwc3_request *req, struct dwc3_trb *trb,
1983 const struct dwc3_event_depevt *event, int status,
1984 int chain)
1985 {
1986 unsigned int count;
1987 unsigned int s_pkt = 0;
1988 unsigned int trb_status;
1989
1990 dwc3_ep_inc_deq(dep);
1991
1992 if (req->trb == trb)
1993 dep->queued_requests--;
1994
1995 trace_dwc3_complete_trb(dep, trb);
1996
1997 /*
1998 * If we're in the middle of series of chained TRBs and we
1999 * receive a short transfer along the way, DWC3 will skip
2000 * through all TRBs including the last TRB in the chain (the one
2001 * where the CHN bit is zero). DWC3 will also avoid clearing the HWO
2002 * bit and SW has to do it manually.
2003 *
2004 * We're going to do that here to avoid problems of HW trying
2005 * to use bogus TRBs for transfers.
2006 */
2007 if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
2008 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
2009
2010 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
2011 return 1;
2012
2013 count = trb->size & DWC3_TRB_SIZE_MASK;
2014 req->remaining += count;
2015
2016 if (dep->direction) {
2017 if (count) {
2018 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
2019 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
2020 /*
2021 * If missed isoc occurred and there is
2022 * no request queued then issue END
2023 * TRANSFER, so that core generates
2024 * next xfernotready and we will issue
2025 * a fresh START TRANSFER.
2026 * If there are still queued requests
2027 * then wait, do not issue either END
2028 * or UPDATE TRANSFER, just attach the next
2029 * request in pending_list during
2030 * giveback. If any future queued request
2031 * is successfully transferred then we
2032 * will issue UPDATE TRANSFER for all
2033 * requests in the pending_list.
2034 */
2035 dep->flags |= DWC3_EP_MISSED_ISOC;
2036 } else {
2037 dev_err(dwc->dev, "incomplete IN transfer %s\n",
2038 dep->name);
2039 status = -ECONNRESET;
2040 }
2041 } else {
2042 dep->flags &= ~DWC3_EP_MISSED_ISOC;
2043 }
2044 } else {
2045 if (count && (event->status & DEPEVT_STATUS_SHORT))
2046 s_pkt = 1;
2047 }
2048
2049 if (s_pkt && !chain)
2050 return 1;
2051
2052 if ((event->status & DEPEVT_STATUS_IOC) &&
2053 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2054 return 1;
2055
2056 return 0;
2057 }
2058
2059 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
2060 const struct dwc3_event_depevt *event, int status)
2061 {
2062 struct dwc3_request *req, *n;
2063 struct dwc3_trb *trb;
2064 bool ioc = false;
2065 int ret = 0;
2066
2067 list_for_each_entry_safe(req, n, &dep->started_list, list) {
2068 unsigned length;
2069 int chain;
2070
2071 length = req->request.length;
2072 chain = req->num_pending_sgs > 0;
2073 if (chain) {
2074 struct scatterlist *sg = req->sg;
2075 struct scatterlist *s;
2076 unsigned int pending = req->num_pending_sgs;
2077 unsigned int i;
2078
2079 for_each_sg(sg, s, pending, i) {
2080 trb = &dep->trb_pool[dep->trb_dequeue];
2081
2082 if (trb->ctrl & DWC3_TRB_CTRL_HWO)
2083 break;
2084
2085 req->sg = sg_next(s);
2086 req->num_pending_sgs--;
2087
2088 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2089 event, status, chain);
2090 if (ret)
2091 break;
2092 }
2093 } else {
2094 trb = &dep->trb_pool[dep->trb_dequeue];
2095 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2096 event, status, chain);
2097 }
2098
2099 req->request.actual = length - req->remaining;
2100
2101 if ((req->request.actual < length) && req->num_pending_sgs)
2102 return __dwc3_gadget_kick_transfer(dep, 0);
2103
2104 dwc3_gadget_giveback(dep, req, status);
2105
2106 if (ret) {
2107 if ((event->status & DEPEVT_STATUS_IOC) &&
2108 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2109 ioc = true;
2110 break;
2111 }
2112 }
2113
2114 /*
2115 * Our endpoint might get disabled by another thread during
2116 * dwc3_gadget_giveback(). If that happens, we're just going to return 1
2117 * early on so the DWC3_EP_BUSY flag gets cleared.
2118 */
2119 if (!dep->endpoint.desc)
2120 return 1;
2121
2122 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
2123 list_empty(&dep->started_list)) {
2124 if (list_empty(&dep->pending_list)) {
2125 /*
2126 * If there is no entry in request list then do
2127 * not issue END TRANSFER now. Just set PENDING
2128 * flag, so that END TRANSFER is issued when an
2129 * entry is added into request list.
2130 */
2131 dep->flags = DWC3_EP_PENDING_REQUEST;
2132 } else {
2133 dwc3_stop_active_transfer(dwc, dep->number, true);
2134 dep->flags = DWC3_EP_ENABLED;
2135 }
2136 return 1;
2137 }
2138
2139 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && ioc)
2140 return 0;
2141
2142 return 1;
2143 }
2144
2145 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
2146 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
2147 {
2148 unsigned status = 0;
2149 int clean_busy;
2150 u32 is_xfer_complete;
2151
2152 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
2153
2154 if (event->status & DEPEVT_STATUS_BUSERR)
2155 status = -ECONNRESET;
2156
2157 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
2158 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
2159 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
2160 dep->flags &= ~DWC3_EP_BUSY;
2161
2162 /*
2163 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
2164 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
2165 */
2166 if (dwc->revision < DWC3_REVISION_183A) {
2167 u32 reg;
2168 int i;
2169
2170 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
2171 dep = dwc->eps[i];
2172
2173 if (!(dep->flags & DWC3_EP_ENABLED))
2174 continue;
2175
2176 if (!list_empty(&dep->started_list))
2177 return;
2178 }
2179
2180 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2181 reg |= dwc->u1u2;
2182 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2183
2184 dwc->u1u2 = 0;
2185 }
2186
2187 /*
2188 * Our endpoint might get disabled by another thread during
2189 * dwc3_gadget_giveback(). If that happens, we're just going to return
2190 * early so the DWC3_EP_BUSY flag gets cleared.
2191 */
2192 if (!dep->endpoint.desc)
2193 return;
2194
2195 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2196 int ret;
2197
2198 ret = __dwc3_gadget_kick_transfer(dep, 0);
2199 if (!ret || ret == -EBUSY)
2200 return;
2201 }
2202 }
2203
2204 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2205 const struct dwc3_event_depevt *event)
2206 {
2207 struct dwc3_ep *dep;
2208 u8 epnum = event->endpoint_number;
2209 u8 cmd;
2210
2211 dep = dwc->eps[epnum];
2212
2213 if (!(dep->flags & DWC3_EP_ENABLED) &&
2214 !(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
2215 return;
2216
2217 if (epnum == 0 || epnum == 1) {
2218 dwc3_ep0_interrupt(dwc, event);
2219 return;
2220 }
2221
2222 switch (event->endpoint_event) {
2223 case DWC3_DEPEVT_XFERCOMPLETE:
2224 dep->resource_index = 0;
2225
2226 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2227 dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n");
2228 return;
2229 }
2230
2231 dwc3_endpoint_transfer_complete(dwc, dep, event);
2232 break;
2233 case DWC3_DEPEVT_XFERINPROGRESS:
2234 dwc3_endpoint_transfer_complete(dwc, dep, event);
2235 break;
2236 case DWC3_DEPEVT_XFERNOTREADY:
2237 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2238 dwc3_gadget_start_isoc(dwc, dep, event);
2239 } else {
2240 int ret;
2241
2242 ret = __dwc3_gadget_kick_transfer(dep, 0);
2243 if (!ret || ret == -EBUSY)
2244 return;
2245 }
2246
2247 break;
2248 case DWC3_DEPEVT_STREAMEVT:
2249 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2250 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2251 dep->name);
2252 return;
2253 }
2254 break;
2255 case DWC3_DEPEVT_EPCMDCMPLT:
2256 cmd = DEPEVT_PARAMETER_CMD(event->parameters);
2257
2258 if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
2259 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
2260 wake_up(&dep->wait_end_transfer);
2261 }
2262 break;
2263 case DWC3_DEPEVT_RXTXFIFOEVT:
2264 break;
2265 }
2266 }
2267
2268 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2269 {
2270 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2271 spin_unlock(&dwc->lock);
2272 dwc->gadget_driver->disconnect(&dwc->gadget);
2273 spin_lock(&dwc->lock);
2274 }
2275 }
2276
2277 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2278 {
2279 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2280 spin_unlock(&dwc->lock);
2281 dwc->gadget_driver->suspend(&dwc->gadget);
2282 spin_lock(&dwc->lock);
2283 }
2284 }
2285
2286 static void dwc3_resume_gadget(struct dwc3 *dwc)
2287 {
2288 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2289 spin_unlock(&dwc->lock);
2290 dwc->gadget_driver->resume(&dwc->gadget);
2291 spin_lock(&dwc->lock);
2292 }
2293 }
2294
2295 static void dwc3_reset_gadget(struct dwc3 *dwc)
2296 {
2297 if (!dwc->gadget_driver)
2298 return;
2299
2300 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2301 spin_unlock(&dwc->lock);
2302 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2303 spin_lock(&dwc->lock);
2304 }
2305 }
2306
2307 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2308 {
2309 struct dwc3_ep *dep;
2310 struct dwc3_gadget_ep_cmd_params params;
2311 u32 cmd;
2312 int ret;
2313
2314 dep = dwc->eps[epnum];
2315
2316 if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
2317 !dep->resource_index)
2318 return;
2319
2320 /*
2321 * NOTICE: We are violating what the Databook says about the
2322 * EndTransfer command. Ideally we would _always_ wait for the
2323 * EndTransfer Command Completion IRQ, but that's causing too
2324 * much trouble synchronizing between us and gadget driver.
2325 *
2326 * We have discussed this with the IP Provider and it was
2327 * suggested to giveback all requests here, but give HW some
2328 * extra time to synchronize with the interconnect. We're using
2329 * an arbitrary 100us delay for that.
2330 *
2331 * Note also that a similar handling was tested by Synopsys
2332 * (thanks a lot Paul) and nothing bad has come out of it.
2333 * In short, what we're doing is:
2334 *
2335 * - Issue EndTransfer WITH CMDIOC bit set
2336 * - Wait 100us
2337 *
2338 * As of IP version 3.10a of the DWC_usb3 IP, the controller
2339 * supports a mode to work around the above limitation. The
2340 * software can poll the CMDACT bit in the DEPCMD register
2341 * after issuing a EndTransfer command. This mode is enabled
2342 * by writing GUCTL2[14]. This polling is already done in the
2343 * dwc3_send_gadget_ep_cmd() function so if the mode is
2344 * enabled, the EndTransfer command will have completed upon
2345 * returning from this function and we don't need to delay for
2346 * 100us.
2347 *
2348 * This mode is NOT available on the DWC_usb31 IP.
2349 */
2350
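/*
 * Illustrative sketch only (not executed here; the register helper and the
 * bit-14 define are assumed from core.h): enabling the GUCTL2[14] polling
 * mode mentioned above would look roughly like
 *
 *	reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
 *	reg |= DWC3_GUCTL2_RST_ACTBITLATER;
 *	dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
 *
 * With that bit set, dwc3_send_gadget_ep_cmd() polls CMDACT and the 100us
 * delay below becomes unnecessary.
 */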
2351 cmd = DWC3_DEPCMD_ENDTRANSFER;
2352 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2353 cmd |= DWC3_DEPCMD_CMDIOC;
2354 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2355 memset(&params, 0, sizeof(params));
2356 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
2357 WARN_ON_ONCE(ret);
2358 dep->resource_index = 0;
2359 dep->flags &= ~DWC3_EP_BUSY;
2360
2361 if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) {
2362 dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
2363 udelay(100);
2364 }
2365 }
2366
2367 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2368 {
2369 u32 epnum;
2370
2371 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2372 struct dwc3_ep *dep;
2373 int ret;
2374
2375 dep = dwc->eps[epnum];
2376 if (!dep)
2377 continue;
2378
2379 if (!(dep->flags & DWC3_EP_STALL))
2380 continue;
2381
2382 dep->flags &= ~DWC3_EP_STALL;
2383
2384 ret = dwc3_send_clear_stall_ep_cmd(dep);
2385 WARN_ON_ONCE(ret);
2386 }
2387 }
2388
2389 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2390 {
2391 int reg;
2392
2393 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2394 reg &= ~DWC3_DCTL_INITU1ENA;
2395 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2396
2397 reg &= ~DWC3_DCTL_INITU2ENA;
2398 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2399
2400 dwc3_disconnect_gadget(dwc);
2401
2402 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2403 dwc->setup_packet_pending = false;
2404 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2405
2406 dwc->connected = false;
2407 }
2408
2409 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2410 {
2411 u32 reg;
2412
2413 dwc->connected = true;
2414
2415 /*
2416 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2417 * would cause a missing Disconnect Event if there's a
2418 * pending Setup Packet in the FIFO.
2419 *
2420 * There's no suggested workaround on the official Bug
2421 * report, which states that "unless the driver/application
2422 * is doing any special handling of a disconnect event,
2423 * there is no functional issue".
2424 *
2425 * Unfortunately, it turns out that we _do_ some special
2426 * handling of a disconnect event, namely complete all
2427 * pending transfers, notify gadget driver of the
2428 * disconnection, and so on.
2429 *
2430 * Our suggested workaround is to follow the Disconnect
2431 * Event steps here, instead, based on a setup_packet_pending
2432 * flag. Such a flag gets set whenever we have a SETUP_PENDING
2433 * status for EP0 TRBs and gets cleared on XferComplete for the
2434 * same endpoint.
2435 *
2436 * Refers to:
2437 *
2438 * STAR#9000466709: RTL: Device : Disconnect event not
2439 * generated if setup packet pending in FIFO
2440 */
2441 if (dwc->revision < DWC3_REVISION_188A) {
2442 if (dwc->setup_packet_pending)
2443 dwc3_gadget_disconnect_interrupt(dwc);
2444 }
2445
2446 dwc3_reset_gadget(dwc);
2447
2448 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2449 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2450 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2451 dwc->test_mode = false;
2452 dwc3_clear_stall_all_ep(dwc);
2453
2454 /* Reset device address to zero */
2455 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2456 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2457 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2458 }
2459
2460 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2461 {
2462 u32 reg;
2463 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2464
2465 /*
2466 * We change the clock only at SS, but it is not clear why we would
2467 * want to do this. Maybe it becomes part of the power saving plan.
2468 */
2469
2470 if ((speed != DWC3_DSTS_SUPERSPEED) &&
2471 (speed != DWC3_DSTS_SUPERSPEED_PLUS))
2472 return;
2473
2474 /*
2475 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2476 * each time on Connect Done.
2477 */
2478 if (!usb30_clock)
2479 return;
2480
2481 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2482 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2483 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2484 }
2485
2486 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2487 {
2488 struct dwc3_ep *dep;
2489 int ret;
2490 u32 reg;
2491 u8 speed;
2492
2493 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2494 speed = reg & DWC3_DSTS_CONNECTSPD;
2495 dwc->speed = speed;
2496
2497 dwc3_update_ram_clk_sel(dwc, speed);
2498
2499 switch (speed) {
2500 case DWC3_DSTS_SUPERSPEED_PLUS:
2501 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2502 dwc->gadget.ep0->maxpacket = 512;
2503 dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
2504 break;
2505 case DWC3_DSTS_SUPERSPEED:
2506 /*
2507 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2508 * would cause a missing USB3 Reset event.
2509 *
2510 * In such situations, we should force a USB3 Reset
2511 * event by calling our dwc3_gadget_reset_interrupt()
2512 * routine.
2513 *
2514 * Refers to:
2515 *
2516 * STAR#9000483510: RTL: SS : USB3 reset event may
2517 * not be generated always when the link enters poll
2518 */
2519 if (dwc->revision < DWC3_REVISION_190A)
2520 dwc3_gadget_reset_interrupt(dwc);
2521
2522 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2523 dwc->gadget.ep0->maxpacket = 512;
2524 dwc->gadget.speed = USB_SPEED_SUPER;
2525 break;
2526 case DWC3_DSTS_HIGHSPEED:
2527 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2528 dwc->gadget.ep0->maxpacket = 64;
2529 dwc->gadget.speed = USB_SPEED_HIGH;
2530 break;
2531 case DWC3_DSTS_FULLSPEED2:
2532 case DWC3_DSTS_FULLSPEED1:
2533 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2534 dwc->gadget.ep0->maxpacket = 64;
2535 dwc->gadget.speed = USB_SPEED_FULL;
2536 break;
2537 case DWC3_DSTS_LOWSPEED:
2538 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2539 dwc->gadget.ep0->maxpacket = 8;
2540 dwc->gadget.speed = USB_SPEED_LOW;
2541 break;
2542 }
2543
2544 /* Enable USB2 LPM Capability */
2545
2546 if ((dwc->revision > DWC3_REVISION_194A) &&
2547 (speed != DWC3_DSTS_SUPERSPEED) &&
2548 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
2549 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2550 reg |= DWC3_DCFG_LPM_CAP;
2551 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2552
2553 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2554 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2555
2556 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2557
2558 /*
2559 * When the dwc3 revision is >= 2.40a, the LPM Erratum is enabled and
2560 * DCFG.LPMCap is set, the core responds with an ACK if the
2561 * BESL value in the LPM token is less than or equal to the LPM
2562 * NYET threshold.
2563 */
2564 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2565 && dwc->has_lpm_erratum,
2566 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2567
2568 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2569 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2570
2571 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2572 } else {
2573 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2574 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2575 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2576 }
2577
2578 dep = dwc->eps[0];
2579 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2580 false);
2581 if (ret) {
2582 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2583 return;
2584 }
2585
2586 dep = dwc->eps[1];
2587 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2588 false);
2589 if (ret) {
2590 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2591 return;
2592 }
2593
2594 /*
2595 * Configure PHY via GUSB3PIPECTLn if required.
2596 *
2597 * Update GTXFIFOSIZn
2598 *
2599 * In both cases reset values should be sufficient.
2600 */
2601 }
2602
2603 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2604 {
2605 /*
2606 * TODO take core out of low power mode when that's
2607 * implemented.
2608 */
2609
2610 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2611 spin_unlock(&dwc->lock);
2612 dwc->gadget_driver->resume(&dwc->gadget);
2613 spin_lock(&dwc->lock);
2614 }
2615 }
2616
2617 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2618 unsigned int evtinfo)
2619 {
2620 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2621 unsigned int pwropt;
2622
2623 /*
2624 * WORKAROUND: DWC3 revisions < 2.50a have an issue when configured
2625 * without Hibernation mode enabled, which shows up when the device
2626 * detects a host-initiated U3 exit.
2627 *
2628 * In that case, the device will generate a Link State Change Interrupt
2629 * from U3 to RESUME, which is only necessary if Hibernation is
2630 * configured in.
2631 *
2632 * There are no functional changes due to such spurious event and we
2633 * just need to ignore it.
2634 *
2635 * Refers to:
2636 *
2637 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2638 * operational mode
2639 */
2640 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2641 if ((dwc->revision < DWC3_REVISION_250A) &&
2642 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2643 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2644 (next == DWC3_LINK_STATE_RESUME)) {
2645 return;
2646 }
2647 }
2648
2649 /*
2650 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2651 * on the link partner, the USB session might do multiple entry/exit
2652 * of low power states before a transfer takes place.
2653 *
2654 * Due to this problem, we might experience lower throughput. The
2655 * suggested workaround is to disable DCTL[12:9] bits if we're
2656 * transitioning from U1/U2 to U0 and enable those bits again
2657 * after a transfer completes and there are no pending transfers
2658 * on any of the enabled endpoints.
2659 *
2660 * This is the first half of that workaround.
2661 *
2662 * Refers to:
2663 *
2664 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2665 * core send LGO_Ux entering U0
2666 */
2667 if (dwc->revision < DWC3_REVISION_183A) {
2668 if (next == DWC3_LINK_STATE_U0) {
2669 u32 u1u2;
2670 u32 reg;
2671
2672 switch (dwc->link_state) {
2673 case DWC3_LINK_STATE_U1:
2674 case DWC3_LINK_STATE_U2:
2675 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2676 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2677 | DWC3_DCTL_ACCEPTU2ENA
2678 | DWC3_DCTL_INITU1ENA
2679 | DWC3_DCTL_ACCEPTU1ENA);
2680
2681 if (!dwc->u1u2)
2682 dwc->u1u2 = reg & u1u2;
2683
2684 reg &= ~u1u2;
2685
2686 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2687 break;
2688 default:
2689 /* do nothing */
2690 break;
2691 }
2692 }
2693 }
2694
2695 switch (next) {
2696 case DWC3_LINK_STATE_U1:
2697 if (dwc->speed == USB_SPEED_SUPER)
2698 dwc3_suspend_gadget(dwc);
2699 break;
2700 case DWC3_LINK_STATE_U2:
2701 case DWC3_LINK_STATE_U3:
2702 dwc3_suspend_gadget(dwc);
2703 break;
2704 case DWC3_LINK_STATE_RESUME:
2705 dwc3_resume_gadget(dwc);
2706 break;
2707 default:
2708 /* do nothing */
2709 break;
2710 }
2711
2712 dwc->link_state = next;
2713 }
2714
2715 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
2716 unsigned int evtinfo)
2717 {
2718 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2719
2720 if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
2721 dwc3_suspend_gadget(dwc);
2722
2723 dwc->link_state = next;
2724 }
2725
2726 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2727 unsigned int evtinfo)
2728 {
2729 unsigned int is_ss = evtinfo & BIT(4);
2730
2731 /*
2732 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2733 * has a known issue which can cause USB CV TD.9.23 to fail
2734 * randomly.
2735 *
2736 * Because of this issue, the core could generate bogus hibernation
2737 * events which SW needs to ignore.
2738 *
2739 * Refers to:
2740 *
2741 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2742 * Device Fallback from SuperSpeed
2743 */
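/*
 * The check below is meant to ignore hibernation events whose SuperSpeed
 * indication (BIT(4) of the event info) does not agree with the speed we
 * are currently connected at, treating them as the bogus events described
 * above.
 */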
2744 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2745 return;
2746
2747 /* enter hibernation here */
2748 }
2749
2750 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2751 const struct dwc3_event_devt *event)
2752 {
2753 switch (event->type) {
2754 case DWC3_DEVICE_EVENT_DISCONNECT:
2755 dwc3_gadget_disconnect_interrupt(dwc);
2756 break;
2757 case DWC3_DEVICE_EVENT_RESET:
2758 dwc3_gadget_reset_interrupt(dwc);
2759 break;
2760 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2761 dwc3_gadget_conndone_interrupt(dwc);
2762 break;
2763 case DWC3_DEVICE_EVENT_WAKEUP:
2764 dwc3_gadget_wakeup_interrupt(dwc);
2765 break;
2766 case DWC3_DEVICE_EVENT_HIBER_REQ:
2767 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2768 "unexpected hibernation event\n"))
2769 break;
2770
2771 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2772 break;
2773 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2774 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2775 break;
2776 case DWC3_DEVICE_EVENT_EOPF:
2777 /* This became the Suspend event for version 2.30a and above */
2778 if (dwc->revision >= DWC3_REVISION_230A) {
2779 /*
2780 * Ignore suspend event until the gadget enters into
2781 * USB_STATE_CONFIGURED state.
2782 */
2783 if (dwc->gadget.state >= USB_STATE_CONFIGURED)
2784 dwc3_gadget_suspend_interrupt(dwc,
2785 event->event_info);
2786 }
2787 break;
2788 case DWC3_DEVICE_EVENT_SOF:
2789 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2790 case DWC3_DEVICE_EVENT_CMD_CMPL:
2791 case DWC3_DEVICE_EVENT_OVERFLOW:
2792 break;
2793 default:
2794 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2795 }
2796 }
2797
2798 static void dwc3_process_event_entry(struct dwc3 *dwc,
2799 const union dwc3_event *event)
2800 {
2801 trace_dwc3_event(event->raw, dwc);
2802
2803 /* Endpoint IRQ, handle it and return early */
2804 if (event->type.is_devspec == 0) {
2805 /* depevt */
2806 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2807 }
2808
2809 switch (event->type.type) {
2810 case DWC3_EVENT_TYPE_DEV:
2811 dwc3_gadget_interrupt(dwc, &event->devt);
2812 break;
2813 /* REVISIT what to do with Carkit and I2C events ? */
2814 default:
2815 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2816 }
2817 }
2818
2819 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
2820 {
2821 struct dwc3 *dwc = evt->dwc;
2822 irqreturn_t ret = IRQ_NONE;
2823 int left;
2824 u32 reg;
2825
2826 left = evt->count;
2827
2828 if (!(evt->flags & DWC3_EVENT_PENDING))
2829 return IRQ_NONE;
2830
2831 while (left > 0) {
2832 union dwc3_event event;
2833
2834 event.raw = *(u32 *) (evt->buf + evt->lpos);
2835
2836 dwc3_process_event_entry(dwc, &event);
2837
2838 /*
2839 * FIXME we wrap around correctly to the next entry as
2840 * almost all entries are 4 bytes in size. There is one
2841 * entry which has 12 bytes: a regular entry
2842 * followed by 8 bytes of data. ATM I don't know how
2843 * things are organized if we get next to a
2844 * boundary, so I'll worry about that once we try to handle
2845 * that.
2846 */
2847 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2848 left -= 4;
2849
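/*
 * Writing 4 to GEVNTCOUNT tells the controller we have consumed 4 bytes
 * of the event buffer, so that space can be reused for new events.
 */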
2850 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
2851 }
2852
2853 evt->count = 0;
2854 evt->flags &= ~DWC3_EVENT_PENDING;
2855 ret = IRQ_HANDLED;
2856
2857 /* Unmask interrupt */
2858 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2859 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2860 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2861
2862 return ret;
2863 }
2864
2865 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
2866 {
2867 struct dwc3_event_buffer *evt = _evt;
2868 struct dwc3 *dwc = evt->dwc;
2869 unsigned long flags;
2870 irqreturn_t ret = IRQ_NONE;
2871
2872 spin_lock_irqsave(&dwc->lock, flags);
2873 ret = dwc3_process_event_buf(evt);
2874 spin_unlock_irqrestore(&dwc->lock, flags);
2875
2876 return ret;
2877 }
2878
2879 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
2880 {
2881 struct dwc3 *dwc = evt->dwc;
2882 u32 count;
2883 u32 reg;
2884
2885 if (pm_runtime_suspended(dwc->dev)) {
2886 pm_runtime_get(dwc->dev);
2887 disable_irq_nosync(dwc->irq_gadget);
2888 dwc->pending_events = true;
2889 return IRQ_HANDLED;
2890 }
2891
2892 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
2893 count &= DWC3_GEVNTCOUNT_MASK;
2894 if (!count)
2895 return IRQ_NONE;
2896
2897 evt->count = count;
2898 evt->flags |= DWC3_EVENT_PENDING;
2899
2900 /* Mask interrupt */
2901 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2902 reg |= DWC3_GEVNTSIZ_INTMASK;
2903 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2904
2905 return IRQ_WAKE_THREAD;
2906 }
2907
2908 static irqreturn_t dwc3_interrupt(int irq, void *_evt)
2909 {
2910 struct dwc3_event_buffer *evt = _evt;
2911
2912 return dwc3_check_event_buf(evt);
2913 }
2914
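/*
 * Look up the peripheral IRQ: try the "peripheral" resource name first,
 * then the legacy "dwc_usb3" name, and finally fall back to the first IRQ
 * of the platform device. -EPROBE_DEFER is propagated as-is at every step.
 */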
2915 static int dwc3_gadget_get_irq(struct dwc3 *dwc)
2916 {
2917 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
2918 int irq;
2919
2920 irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
2921 if (irq > 0)
2922 goto out;
2923
2924 if (irq == -EPROBE_DEFER)
2925 goto out;
2926
2927 irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
2928 if (irq > 0)
2929 goto out;
2930
2931 if (irq == -EPROBE_DEFER)
2932 goto out;
2933
2934 irq = platform_get_irq(dwc3_pdev, 0);
2935 if (irq > 0)
2936 goto out;
2937
2938 if (irq != -EPROBE_DEFER)
2939 dev_err(dwc->dev, "missing peripheral IRQ\n");
2940
2941 if (!irq)
2942 irq = -EINVAL;
2943
2944 out:
2945 return irq;
2946 }
2947
2948 /**
2949 * dwc3_gadget_init - Initializes gadget related registers
2950 * @dwc: pointer to our controller context structure
2951 *
2952 * Returns 0 on success, otherwise negative errno.
2953 */
2954 int dwc3_gadget_init(struct dwc3 *dwc)
2955 {
2956 int ret;
2957 int irq;
2958
2959 irq = dwc3_gadget_get_irq(dwc);
2960 if (irq < 0) {
2961 ret = irq;
2962 goto err0;
2963 }
2964
2965 dwc->irq_gadget = irq;
2966
2967 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2968 &dwc->ctrl_req_addr, GFP_KERNEL);
2969 if (!dwc->ctrl_req) {
2970 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2971 ret = -ENOMEM;
2972 goto err0;
2973 }
2974
2975 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2976 &dwc->ep0_trb_addr, GFP_KERNEL);
2977 if (!dwc->ep0_trb) {
2978 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2979 ret = -ENOMEM;
2980 goto err1;
2981 }
2982
2983 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2984 if (!dwc->setup_buf) {
2985 ret = -ENOMEM;
2986 goto err2;
2987 }
2988
2989 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2990 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2991 GFP_KERNEL);
2992 if (!dwc->ep0_bounce) {
2993 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2994 ret = -ENOMEM;
2995 goto err3;
2996 }
2997
2998 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
2999 if (!dwc->zlp_buf) {
3000 ret = -ENOMEM;
3001 goto err4;
3002 }
3003
3004 init_completion(&dwc->ep0_in_setup);
3005
3006 dwc->gadget.ops = &dwc3_gadget_ops;
3007 dwc->gadget.speed = USB_SPEED_UNKNOWN;
3008 dwc->gadget.sg_supported = true;
3009 dwc->gadget.name = "dwc3-gadget";
3010 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
3011
3012 /*
3013 * FIXME We might be setting max_speed to <SUPER, however versions
3014 * <2.20a of dwc3 have an issue with metastability (documented
3015 * elsewhere in this driver) which tells us we can't set max speed to
3016 * anything lower than SUPER.
3017 *
3018 * Because gadget.max_speed is only used by composite.c and function
3019 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
3020 * to happen so we avoid sending SuperSpeed Capability descriptor
3021 * together with our BOS descriptor as that could confuse the host into
3022 * thinking we can handle super speed.
3023 *
3024 * Note that, in fact, we won't even support GetBOS requests when speed
3025 * is less than super speed because we don't have means, yet, to tell
3026 * composite.c that we are USB 2.0 + LPM ECN.
3027 */
3028 if (dwc->revision < DWC3_REVISION_220A)
3029 dev_info(dwc->dev, "changing max_speed on rev %08x\n",
3030 dwc->revision);
3031
3032 dwc->gadget.max_speed = dwc->maximum_speed;
3033
3034 /*
3035 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
3036 * on ep out.
3037 */
3038 dwc->gadget.quirk_ep_out_aligned_size = true;
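/*
 * For illustration (behaviour of the gadget core helper, assumed rather
 * than enforced here): with this quirk set, a function driver calling
 * usb_ep_align_maybe() will have its OUT buffer length rounded up to a
 * multiple of wMaxPacketSize, e.g. a 600-byte request on a 512-byte bulk
 * OUT endpoint becomes 1024 bytes.
 */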
3039
3040 /*
3041 * REVISIT: Here we should clear all pending IRQs to be
3042 * sure we're starting from a well known location.
3043 */
3044
3045 ret = dwc3_gadget_init_endpoints(dwc);
3046 if (ret)
3047 goto err5;
3048
3049 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
3050 if (ret) {
3051 dev_err(dwc->dev, "failed to register udc\n");
3052 goto err5;
3053 }
3054
3055 return 0;
3056
3057 err5:
3058 kfree(dwc->zlp_buf);
3059
3060 err4:
3061 dwc3_gadget_free_endpoints(dwc);
3062 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3063 dwc->ep0_bounce, dwc->ep0_bounce_addr);
3064
3065 err3:
3066 kfree(dwc->setup_buf);
3067
3068 err2:
3069 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3070 dwc->ep0_trb, dwc->ep0_trb_addr);
3071
3072 err1:
3073 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3074 dwc->ctrl_req, dwc->ctrl_req_addr);
3075
3076 err0:
3077 return ret;
3078 }
3079
3080 /* -------------------------------------------------------------------------- */
3081
3082 void dwc3_gadget_exit(struct dwc3 *dwc)
3083 {
3084 usb_del_gadget_udc(&dwc->gadget);
3085
3086 dwc3_gadget_free_endpoints(dwc);
3087
3088 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3089 dwc->ep0_bounce, dwc->ep0_bounce_addr);
3090
3091 kfree(dwc->setup_buf);
3092 kfree(dwc->zlp_buf);
3093
3094 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3095 dwc->ep0_trb, dwc->ep0_trb_addr);
3096
3097 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3098 dwc->ctrl_req, dwc->ctrl_req_addr);
3099 }
3100
3101 int dwc3_gadget_suspend(struct dwc3 *dwc)
3102 {
3103 int ret;
3104
3105 if (!dwc->gadget_driver)
3106 return 0;
3107
3108 ret = dwc3_gadget_run_stop(dwc, false, false);
3109 if (ret < 0)
3110 return ret;
3111
3112 dwc3_disconnect_gadget(dwc);
3113 __dwc3_gadget_stop(dwc);
3114
3115 return 0;
3116 }
3117
3118 int dwc3_gadget_resume(struct dwc3 *dwc)
3119 {
3120 int ret;
3121
3122 if (!dwc->gadget_driver)
3123 return 0;
3124
3125 ret = __dwc3_gadget_start(dwc);
3126 if (ret < 0)
3127 goto err0;
3128
3129 ret = dwc3_gadget_run_stop(dwc, true, false);
3130 if (ret < 0)
3131 goto err1;
3132
3133 return 0;
3134
3135 err1:
3136 __dwc3_gadget_stop(dwc);
3137
3138 err0:
3139 return ret;
3140 }
3141
3142 void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
3143 {
3144 if (dwc->pending_events) {
3145 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
3146 dwc->pending_events = false;
3147 enable_irq(dwc->irq_gadget);
3148 }
3149 }