[mirror_ubuntu-artful-kernel.git] drivers/usb/dwc3/gadget.c
1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37
38 /**
39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40 * @dwc: pointer to our context structure
41 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
42 *
43 * Caller should take care of locking. This function will
44 * return 0 on success or -EINVAL if an invalid Test Selector
45 * is passed.
46 */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69 }
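/*
 * Example usage (illustrative only; the caller is assumed to already hold
 * dwc->lock, e.g. from a debugfs or SetFeature(TEST_MODE) handler):
 *
 *	ret = dwc3_gadget_set_test_mode(dwc, TEST_PACKET);
 *	if (ret)
 *		dev_warn(dwc->dev, "invalid test selector\n");
 */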
70
71 /**
72 * dwc3_gadget_get_link_state - Gets current state of USB Link
73 * @dwc: pointer to our context structure
74 *
75 * Caller should take care of locking. This function will
76 * return the link state on success (>= 0) or -ETIMEDOUT.
77 */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85 }
86
87 /**
88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89 * @dwc: pointer to our context structure
90 * @state: the state to put link into
91 *
92 * Caller should take care of locking. This function will
93 * return 0 on success or -ETIMEDOUT.
94 */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97 int retries = 10000;
98 u32 reg;
99
100 /*
101 * Wait until device controller is ready. Only applies to 1.94a and
102 * later RTL.
103 */
104 if (dwc->revision >= DWC3_REVISION_194A) {
105 while (--retries) {
106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 if (reg & DWC3_DSTS_DCNRD)
108 udelay(5);
109 else
110 break;
111 }
112
113 if (retries <= 0)
114 return -ETIMEDOUT;
115 }
116
117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120 /* set requested state */
121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
124 /*
125 * The following code is racy when called from dwc3_gadget_wakeup,
126 * and is not needed, at least on newer versions
127 */
128 if (dwc->revision >= DWC3_REVISION_194A)
129 return 0;
130
131 /* wait for a change in DSTS */
132 retries = 10000;
133 while (--retries) {
134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
136 if (DWC3_DSTS_USBLNKST(reg) == state)
137 return 0;
138
139 udelay(5);
140 }
141
142 dwc3_trace(trace_dwc3_gadget,
143 "link state change request timed out");
144
145 return -ETIMEDOUT;
146 }
147
148 /**
149 * dwc3_ep_inc_trb() - Increment a TRB index.
150 * @index: Pointer to the TRB index to increment.
151 *
152 * The index should never point to the link TRB. After incrementing,
153 * if it points to the link TRB, wrap around to the beginning. The
154 * link TRB is always at the last TRB entry.
155 */
156 static void dwc3_ep_inc_trb(u8 *index)
157 {
158 (*index)++;
159 if (*index == (DWC3_TRB_NUM - 1))
160 *index = 0;
161 }
162
163 static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
164 {
165 dwc3_ep_inc_trb(&dep->trb_enqueue);
166 }
167
168 static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
169 {
170 dwc3_ep_inc_trb(&dep->trb_dequeue);
171 }
172
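/**
 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
 * @dep: The endpoint to whom the request belongs to
 * @req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with dwc->lock held and interrupts disabled. Removes @req
 * from the endpoint's list, unmaps it (except for the bounced ep0 request)
 * and calls its ->complete() callback with the lock temporarily dropped.
 */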
173 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
174 int status)
175 {
176 struct dwc3 *dwc = dep->dwc;
177
178 req->started = false;
179 list_del(&req->list);
180 req->trb = NULL;
181
182 if (req->request.status == -EINPROGRESS)
183 req->request.status = status;
184
185 if (dwc->ep0_bounced && dep->number == 0)
186 dwc->ep0_bounced = false;
187 else
188 usb_gadget_unmap_request(&dwc->gadget, &req->request,
189 req->direction);
190
191 trace_dwc3_gadget_giveback(req);
192
193 spin_unlock(&dwc->lock);
194 usb_gadget_giveback_request(&dep->endpoint, &req->request);
195 spin_lock(&dwc->lock);
196
197 if (dep->number > 1)
198 pm_runtime_put(dwc->dev);
199 }
200
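/**
 * dwc3_send_gadget_generic_command - issue a generic command to the controller
 * @dwc: pointer to our controller context structure
 * @cmd: the command to be issued
 * @param: command parameter
 *
 * Caller should take care of locking. Writes @param and @cmd to the DGCMD
 * registers, then polls DGCMD.CMDACT until the command completes or the
 * polling loop expires, in which case -ETIMEDOUT is returned.
 */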
201 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
202 {
203 u32 timeout = 500;
204 int status = 0;
205 int ret = 0;
206 u32 reg;
207
208 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
209 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
210
211 do {
212 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
213 if (!(reg & DWC3_DGCMD_CMDACT)) {
214 status = DWC3_DGCMD_STATUS(reg);
215 if (status)
216 ret = -EINVAL;
217 break;
218 }
219 } while (timeout--);
220
221 if (!timeout) {
222 ret = -ETIMEDOUT;
223 status = -ETIMEDOUT;
224 }
225
226 trace_dwc3_gadget_generic_cmd(cmd, param, status);
227
228 return ret;
229 }
230
231 static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
232
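/**
 * dwc3_send_gadget_ep_cmd - issue an endpoint command
 * @dep: the endpoint to which the command is going to be issued
 * @cmd: the command to be issued
 * @params: parameters to the command
 *
 * Caller should handle locking. Issues @cmd with the given @params to @dep,
 * polls DEPCMD.CMDACT until completion and maps the resulting command status
 * to 0, -EINVAL, -EAGAIN (Bus Expiry) or -ETIMEDOUT.
 */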
233 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
234 struct dwc3_gadget_ep_cmd_params *params)
235 {
236 struct dwc3 *dwc = dep->dwc;
237 u32 timeout = 500;
238 u32 reg;
239
240 int cmd_status = 0;
241 int susphy = false;
242 int ret = -EINVAL;
243
244 /*
245 * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
246 * we're issuing an endpoint command, we must check if
247 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
248 *
249 * We will also set SUSPHY bit to what it was before returning as stated
250 * by the same section on Synopsys databook.
251 */
252 if (dwc->gadget.speed <= USB_SPEED_HIGH) {
253 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
254 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
255 susphy = true;
256 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
257 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
258 }
259 }
260
261 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
262 int needs_wakeup;
263
264 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
265 dwc->link_state == DWC3_LINK_STATE_U2 ||
266 dwc->link_state == DWC3_LINK_STATE_U3);
267
268 if (unlikely(needs_wakeup)) {
269 ret = __dwc3_gadget_wakeup(dwc);
270 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
271 ret);
272 }
273 }
274
275 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
276 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
277 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
278
279 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
280 do {
281 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
282 if (!(reg & DWC3_DEPCMD_CMDACT)) {
283 cmd_status = DWC3_DEPCMD_STATUS(reg);
284
285 switch (cmd_status) {
286 case 0:
287 ret = 0;
288 break;
289 case DEPEVT_TRANSFER_NO_RESOURCE:
290 ret = -EINVAL;
291 break;
292 case DEPEVT_TRANSFER_BUS_EXPIRY:
293 /*
294 * SW issues START TRANSFER command to
295 * isochronous ep with future frame interval. If
296 * future interval time has already passed when
297 * core receives the command, it will respond
298 * with an error status of 'Bus Expiry'.
299 *
300 * Instead of always returning -EINVAL, let's
301 * give a hint to the gadget driver that this is
302 * the case by returning -EAGAIN.
303 */
304 ret = -EAGAIN;
305 break;
306 default:
307 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
308 }
309
310 break;
311 }
312 } while (--timeout);
313
314 if (timeout == 0) {
315 ret = -ETIMEDOUT;
316 cmd_status = -ETIMEDOUT;
317 }
318
319 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
320
321 if (unlikely(susphy)) {
322 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
323 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
324 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
325 }
326
327 return ret;
328 }
329
330 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
331 {
332 struct dwc3 *dwc = dep->dwc;
333 struct dwc3_gadget_ep_cmd_params params;
334 u32 cmd = DWC3_DEPCMD_CLEARSTALL;
335
336 /*
337 * As of core revision 2.60a the recommended programming model
338 * is to set the ClearPendIN bit when issuing a Clear Stall EP
339 * command for IN endpoints. This is to prevent an issue where
340 * some (non-compliant) hosts may not send ACK TPs for pending
341 * IN transfers due to a mishandled error condition. Synopsys
342 * STAR 9000614252.
343 */
344 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
345 (dwc->gadget.speed >= USB_SPEED_SUPER))
346 cmd |= DWC3_DEPCMD_CLEARPENDIN;
347
348 memset(&params, 0, sizeof(params));
349
350 return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
351 }
352
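/* Returns the bus (DMA) address of @trb within @dep's coherent TRB pool. */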
353 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
354 struct dwc3_trb *trb)
355 {
356 u32 offset = (char *) trb - (char *) dep->trb_pool;
357
358 return dep->trb_pool_dma + offset;
359 }
360
361 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
362 {
363 struct dwc3 *dwc = dep->dwc;
364
365 if (dep->trb_pool)
366 return 0;
367
368 dep->trb_pool = dma_alloc_coherent(dwc->dev,
369 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
370 &dep->trb_pool_dma, GFP_KERNEL);
371 if (!dep->trb_pool) {
372 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
373 dep->name);
374 return -ENOMEM;
375 }
376
377 return 0;
378 }
379
380 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
381 {
382 struct dwc3 *dwc = dep->dwc;
383
384 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
385 dep->trb_pool, dep->trb_pool_dma);
386
387 dep->trb_pool = NULL;
388 dep->trb_pool_dma = 0;
389 }
390
391 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
392
393 /**
394 * dwc3_gadget_start_config - Configure EP resources
395 * @dwc: pointer to our controller context structure
396 * @dep: endpoint that is being enabled
397 *
398 * The assignment of transfer resources cannot perfectly follow the
399 * data book due to the fact that the controller driver does not have
400 * all knowledge of the configuration in advance. It is given this
401 * information piecemeal by the composite gadget framework after every
402 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
403 * programming model in this scenario can cause errors. For two
404 * reasons:
405 *
406 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
407 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
408 * multiple interfaces.
409 *
410 * 2) The databook does not mention doing more DEPXFERCFG for new
411 * endpoint on alt setting (8.1.6).
412 *
413 * The following simplified method is used instead:
414 *
415 * All hardware endpoints can be assigned a transfer resource and this
416 * setting will stay persistent until either a core reset or
417 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
418 * do DEPXFERCFG for every hardware endpoint as well. We are
419 * guaranteed that there are as many transfer resources as endpoints.
420 *
421 * This function is called for each endpoint when it is being enabled
422 * but is triggered only when called for EP0-out, which always happens
423 * first, and which should only happen in one of the above conditions.
424 */
425 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
426 {
427 struct dwc3_gadget_ep_cmd_params params;
428 u32 cmd;
429 int i;
430 int ret;
431
432 if (dep->number)
433 return 0;
434
435 memset(&params, 0x00, sizeof(params));
436 cmd = DWC3_DEPCMD_DEPSTARTCFG;
437
438 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
439 if (ret)
440 return ret;
441
442 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
443 struct dwc3_ep *dep = dwc->eps[i];
444
445 if (!dep)
446 continue;
447
448 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
449 if (ret)
450 return ret;
451 }
452
453 return 0;
454 }
455
456 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
457 const struct usb_endpoint_descriptor *desc,
458 const struct usb_ss_ep_comp_descriptor *comp_desc,
459 bool modify, bool restore)
460 {
461 struct dwc3_gadget_ep_cmd_params params;
462
463 if (dev_WARN_ONCE(dwc->dev, modify && restore,
464 "Can't modify and restore\n"))
465 return -EINVAL;
466
467 memset(&params, 0x00, sizeof(params));
468
469 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
470 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
471
472 /* Burst size is only needed in SuperSpeed mode */
473 if (dwc->gadget.speed >= USB_SPEED_SUPER) {
474 u32 burst = dep->endpoint.maxburst;
475 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
476 }
477
478 if (modify) {
479 params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
480 } else if (restore) {
481 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
482 params.param2 |= dep->saved_state;
483 } else {
484 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
485 }
486
487 if (usb_endpoint_xfer_control(desc))
488 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
489
490 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
491 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
492
493 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
494 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
495 | DWC3_DEPCFG_STREAM_EVENT_EN;
496 dep->stream_capable = true;
497 }
498
499 if (!usb_endpoint_xfer_control(desc))
500 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
501
502 /*
503 * We are doing 1:1 mapping for endpoints, meaning
504 * Physical Endpoint 2 maps to Logical Endpoint 2 and
505 * so on. We consider the direction bit as part of the physical
506 * endpoint number. So USB endpoint 0x81 is 0x03.
507 */
508 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
509
510 /*
511 * We must use the lower 16 TX FIFOs even though
512 * HW might have more
513 */
514 if (dep->direction)
515 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
516
517 if (desc->bInterval) {
518 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
519 dep->interval = 1 << (desc->bInterval - 1);
520 }
521
522 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
523 }
524
525 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
526 {
527 struct dwc3_gadget_ep_cmd_params params;
528
529 memset(&params, 0x00, sizeof(params));
530
531 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
532
533 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
534 &params);
535 }
536
537 /**
538 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
539 * @dep: endpoint to be initialized
540 * @desc: USB Endpoint Descriptor
541 *
542 * Caller should take care of locking
543 */
544 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
545 const struct usb_endpoint_descriptor *desc,
546 const struct usb_ss_ep_comp_descriptor *comp_desc,
547 bool modify, bool restore)
548 {
549 struct dwc3 *dwc = dep->dwc;
550 u32 reg;
551 int ret;
552
553 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
554
555 if (!(dep->flags & DWC3_EP_ENABLED)) {
556 ret = dwc3_gadget_start_config(dwc, dep);
557 if (ret)
558 return ret;
559 }
560
561 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
562 restore);
563 if (ret)
564 return ret;
565
566 if (!(dep->flags & DWC3_EP_ENABLED)) {
567 struct dwc3_trb *trb_st_hw;
568 struct dwc3_trb *trb_link;
569
570 dep->endpoint.desc = desc;
571 dep->comp_desc = comp_desc;
572 dep->type = usb_endpoint_type(desc);
573 dep->flags |= DWC3_EP_ENABLED;
574
575 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
576 reg |= DWC3_DALEPENA_EP(dep->number);
577 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
578
579 if (usb_endpoint_xfer_control(desc))
580 return 0;
581
582 /* Initialize the TRB ring */
583 dep->trb_dequeue = 0;
584 dep->trb_enqueue = 0;
585 memset(dep->trb_pool, 0,
586 sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
587
588 /* Link TRB. The HWO bit is never reset */
589 trb_st_hw = &dep->trb_pool[0];
590
591 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
592 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
593 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
594 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
595 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
596 }
597
598 return 0;
599 }
600
601 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
602 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
603 {
604 struct dwc3_request *req;
605
606 dwc3_stop_active_transfer(dwc, dep->number, true);
607
608 /* - giveback all requests to gadget driver */
609 while (!list_empty(&dep->started_list)) {
610 req = next_request(&dep->started_list);
611
612 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
613 }
614
615 while (!list_empty(&dep->pending_list)) {
616 req = next_request(&dep->pending_list);
617
618 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
619 }
620 }
621
622 /**
623 * __dwc3_gadget_ep_disable - Disables a HW endpoint
624 * @dep: the endpoint to disable
625 *
626 * This function also removes requests which are currently processed by the
627 * hardware and those which are not yet scheduled.
628 * Caller should take care of locking.
629 */
630 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
631 {
632 struct dwc3 *dwc = dep->dwc;
633 u32 reg;
634
635 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
636
637 dwc3_remove_requests(dwc, dep);
638
639 /* make sure HW endpoint isn't stalled */
640 if (dep->flags & DWC3_EP_STALL)
641 __dwc3_gadget_ep_set_halt(dep, 0, false);
642
643 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
644 reg &= ~DWC3_DALEPENA_EP(dep->number);
645 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
646
647 dep->stream_capable = false;
648 dep->endpoint.desc = NULL;
649 dep->comp_desc = NULL;
650 dep->type = 0;
651 dep->flags = 0;
652
653 return 0;
654 }
655
656 /* -------------------------------------------------------------------------- */
657
658 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
659 const struct usb_endpoint_descriptor *desc)
660 {
661 return -EINVAL;
662 }
663
664 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
665 {
666 return -EINVAL;
667 }
668
669 /* -------------------------------------------------------------------------- */
670
671 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
672 const struct usb_endpoint_descriptor *desc)
673 {
674 struct dwc3_ep *dep;
675 struct dwc3 *dwc;
676 unsigned long flags;
677 int ret;
678
679 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
680 pr_debug("dwc3: invalid parameters\n");
681 return -EINVAL;
682 }
683
684 if (!desc->wMaxPacketSize) {
685 pr_debug("dwc3: missing wMaxPacketSize\n");
686 return -EINVAL;
687 }
688
689 dep = to_dwc3_ep(ep);
690 dwc = dep->dwc;
691
692 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
693 "%s is already enabled\n",
694 dep->name))
695 return 0;
696
697 spin_lock_irqsave(&dwc->lock, flags);
698 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
699 spin_unlock_irqrestore(&dwc->lock, flags);
700
701 return ret;
702 }
703
704 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
705 {
706 struct dwc3_ep *dep;
707 struct dwc3 *dwc;
708 unsigned long flags;
709 int ret;
710
711 if (!ep) {
712 pr_debug("dwc3: invalid parameters\n");
713 return -EINVAL;
714 }
715
716 dep = to_dwc3_ep(ep);
717 dwc = dep->dwc;
718
719 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
720 "%s is already disabled\n",
721 dep->name))
722 return 0;
723
724 spin_lock_irqsave(&dwc->lock, flags);
725 ret = __dwc3_gadget_ep_disable(dep);
726 spin_unlock_irqrestore(&dwc->lock, flags);
727
728 return ret;
729 }
730
731 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
732 gfp_t gfp_flags)
733 {
734 struct dwc3_request *req;
735 struct dwc3_ep *dep = to_dwc3_ep(ep);
736
737 req = kzalloc(sizeof(*req), gfp_flags);
738 if (!req)
739 return NULL;
740
741 req->epnum = dep->number;
742 req->dep = dep;
743
744 dep->allocated_requests++;
745
746 trace_dwc3_alloc_request(req);
747
748 return &req->request;
749 }
750
751 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
752 struct usb_request *request)
753 {
754 struct dwc3_request *req = to_dwc3_request(request);
755 struct dwc3_ep *dep = to_dwc3_ep(ep);
756
757 dep->allocated_requests--;
758 trace_dwc3_free_request(req);
759 kfree(req);
760 }
761
762 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep);
763
764 /**
765 * dwc3_prepare_one_trb - setup one TRB from one request
766 * @dep: endpoint for which this request is prepared
767 * @req: dwc3_request pointer
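 * @dma: DMA address of the buffer described by this TRB
 * @length: length, in bytes, of the buffer
 * @chain: true when this TRB is chained to the one that follows it
 * @node: index of this TRB within the request (0 for the first TRB)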
768 */
769 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
770 struct dwc3_request *req, dma_addr_t dma,
771 unsigned length, unsigned chain, unsigned node)
772 {
773 struct dwc3_trb *trb;
774 struct dwc3 *dwc = dep->dwc;
775 struct usb_gadget *gadget = &dwc->gadget;
776 enum usb_device_speed speed = gadget->speed;
777
778 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s",
779 dep->name, req, (unsigned long long) dma,
780 length, chain ? " chain" : "");
781
782 trb = &dep->trb_pool[dep->trb_enqueue];
783
784 if (!req->trb) {
785 dwc3_gadget_move_started_request(req);
786 req->trb = trb;
787 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
788 req->first_trb_index = dep->trb_enqueue;
789 dep->queued_requests++;
790 }
791
792 dwc3_ep_inc_enq(dep);
793
794 trb->size = DWC3_TRB_SIZE_LENGTH(length);
795 trb->bpl = lower_32_bits(dma);
796 trb->bph = upper_32_bits(dma);
797
798 switch (usb_endpoint_type(dep->endpoint.desc)) {
799 case USB_ENDPOINT_XFER_CONTROL:
800 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
801 break;
802
803 case USB_ENDPOINT_XFER_ISOC:
804 if (!node) {
805 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
806
807 if (speed == USB_SPEED_HIGH) {
808 struct usb_ep *ep = &dep->endpoint;
809 trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
810 }
811 } else {
812 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
813 }
814
815 /* always enable Interrupt on Missed ISOC */
816 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
817 break;
818
819 case USB_ENDPOINT_XFER_BULK:
820 case USB_ENDPOINT_XFER_INT:
821 trb->ctrl = DWC3_TRBCTL_NORMAL;
822 break;
823 default:
824 /*
825 * This is only possible with faulty memory because we
826 * checked it already :)
827 */
828 BUG();
829 }
830
831 /* always enable Continue on Short Packet */
832 trb->ctrl |= DWC3_TRB_CTRL_CSP;
833
834 if ((!req->request.no_interrupt && !chain) ||
835 (dwc3_calc_trbs_left(dep) == 0))
836 trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
837
838 if (chain)
839 trb->ctrl |= DWC3_TRB_CTRL_CHN;
840
841 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
842 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
843
844 trb->ctrl |= DWC3_TRB_CTRL_HWO;
845
846 trace_dwc3_prepare_trb(dep, trb);
847 }
848
849 /**
850 * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
851 * @dep: The endpoint with the TRB ring
852 * @index: The index of the current TRB in the ring
853 *
854 * Returns the TRB prior to the one pointed to by the index. If the
855 * index is 0, we will wrap backwards, skip the link TRB, and return
856 * the one just before that.
857 */
858 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
859 {
860 u8 tmp = index;
861
862 if (!tmp)
863 tmp = DWC3_TRB_NUM - 1;
864
865 return &dep->trb_pool[tmp - 1];
866 }
867
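/*
 * dwc3_calc_trbs_left - number of free TRB slots left in @dep's ring, not
 * counting the link TRB at the end of the pool.
 */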
868 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
869 {
870 struct dwc3_trb *tmp;
871 u8 trbs_left;
872
873 /*
874 * If enqueue & dequeue are equal than it is either full or empty.
875 *
876 * One way to know for sure is if the TRB right before us has HWO bit
877 * set or not. If it has, then we're definitely full and can't fit any
878 * more transfers in our ring.
879 */
880 if (dep->trb_enqueue == dep->trb_dequeue) {
881 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
882 if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
883 return 0;
884
885 return DWC3_TRB_NUM - 1;
886 }
887
888 trbs_left = dep->trb_dequeue - dep->trb_enqueue;
889 trbs_left &= (DWC3_TRB_NUM - 1);
890
891 if (dep->trb_dequeue < dep->trb_enqueue)
892 trbs_left--;
893
894 return trbs_left;
895 }
896
897 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
898 struct dwc3_request *req)
899 {
900 struct scatterlist *sg = req->sg;
901 struct scatterlist *s;
902 unsigned int length;
903 dma_addr_t dma;
904 int i;
905
906 for_each_sg(sg, s, req->num_pending_sgs, i) {
907 unsigned chain = true;
908
909 length = sg_dma_len(s);
910 dma = sg_dma_address(s);
911
912 if (sg_is_last(s))
913 chain = false;
914
915 dwc3_prepare_one_trb(dep, req, dma, length,
916 chain, i);
917
918 if (!dwc3_calc_trbs_left(dep))
919 break;
920 }
921 }
922
923 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
924 struct dwc3_request *req)
925 {
926 unsigned int length;
927 dma_addr_t dma;
928
929 dma = req->request.dma;
930 length = req->request.length;
931
932 dwc3_prepare_one_trb(dep, req, dma, length,
933 false, 0);
934 }
935
936 /*
937 * dwc3_prepare_trbs - setup TRBs from requests
938 * @dep: endpoint for which requests are being prepared
939 *
940 * The function goes through the requests list and sets up TRBs for the
941 * transfers. The function returns once there are no more TRBs available or
942 * it runs out of requests.
943 */
944 static void dwc3_prepare_trbs(struct dwc3_ep *dep)
945 {
946 struct dwc3_request *req, *n;
947
948 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
949
950 if (!dwc3_calc_trbs_left(dep))
951 return;
952
953 list_for_each_entry_safe(req, n, &dep->pending_list, list) {
954 if (req->num_pending_sgs > 0)
955 dwc3_prepare_one_trb_sg(dep, req);
956 else
957 dwc3_prepare_one_trb_linear(dep, req);
958
959 if (!dwc3_calc_trbs_left(dep))
960 return;
961 }
962 }
963
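/*
 * __dwc3_gadget_kick_transfer - map pending requests onto TRBs and kick the
 * hardware. Issues a Start Transfer command when the endpoint is not yet
 * busy, or an Update Transfer command for an already running transfer.
 * @cmd_param carries the (micro-)frame number used by isochronous Start
 * Transfer commands.
 */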
964 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
965 {
966 struct dwc3_gadget_ep_cmd_params params;
967 struct dwc3_request *req;
968 struct dwc3 *dwc = dep->dwc;
969 int starting;
970 int ret;
971 u32 cmd;
972
973 starting = !(dep->flags & DWC3_EP_BUSY);
974
975 dwc3_prepare_trbs(dep);
976 req = next_request(&dep->started_list);
977 if (!req) {
978 dep->flags |= DWC3_EP_PENDING_REQUEST;
979 return 0;
980 }
981
982 memset(&params, 0, sizeof(params));
983
984 if (starting) {
985 params.param0 = upper_32_bits(req->trb_dma);
986 params.param1 = lower_32_bits(req->trb_dma);
987 cmd = DWC3_DEPCMD_STARTTRANSFER |
988 DWC3_DEPCMD_PARAM(cmd_param);
989 } else {
990 cmd = DWC3_DEPCMD_UPDATETRANSFER |
991 DWC3_DEPCMD_PARAM(dep->resource_index);
992 }
993
994 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
995 if (ret < 0) {
996 /*
997 * FIXME we need to iterate over the list of requests
998 * here and stop, unmap, free and del each of the linked
999 * requests instead of what we do now.
1000 */
1001 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1002 req->direction);
1003 list_del(&req->list);
1004 return ret;
1005 }
1006
1007 dep->flags |= DWC3_EP_BUSY;
1008
1009 if (starting) {
1010 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
1011 WARN_ON_ONCE(!dep->resource_index);
1012 }
1013
1014 return 0;
1015 }
1016
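/*
 * __dwc3_gadget_start_isoc - kick an isochronous transfer, scheduled four
 * service intervals after @cur_uf so the core has time to start it.
 */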
1017 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1018 struct dwc3_ep *dep, u32 cur_uf)
1019 {
1020 u32 uf;
1021
1022 if (list_empty(&dep->pending_list)) {
1023 dwc3_trace(trace_dwc3_gadget,
1024 "ISOC ep %s run out for requests",
1025 dep->name);
1026 dep->flags |= DWC3_EP_PENDING_REQUEST;
1027 return;
1028 }
1029
1030 /* 4 micro frames in the future */
1031 uf = cur_uf + dep->interval * 4;
1032
1033 __dwc3_gadget_kick_transfer(dep, uf);
1034 }
1035
1036 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1037 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1038 {
1039 u32 cur_uf, mask;
1040
1041 mask = ~(dep->interval - 1);
1042 cur_uf = event->parameters & mask;
1043
1044 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1045 }
1046
1047 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1048 {
1049 struct dwc3 *dwc = dep->dwc;
1050 int ret;
1051
1052 if (!dep->endpoint.desc) {
1053 dwc3_trace(trace_dwc3_gadget,
1054 "trying to queue request %p to disabled %s",
1055 &req->request, dep->endpoint.name);
1056 return -ESHUTDOWN;
1057 }
1058
1059 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1060 &req->request, req->dep->name)) {
1061 dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
1062 &req->request, req->dep->name);
1063 return -EINVAL;
1064 }
1065
1066 pm_runtime_get(dwc->dev);
1067
1068 req->request.actual = 0;
1069 req->request.status = -EINPROGRESS;
1070 req->direction = dep->direction;
1071 req->epnum = dep->number;
1072
1073 trace_dwc3_ep_queue(req);
1074
1075 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1076 dep->direction);
1077 if (ret)
1078 return ret;
1079
1080 req->sg = req->request.sg;
1081 req->num_pending_sgs = req->request.num_mapped_sgs;
1082
1083 list_add_tail(&req->list, &dep->pending_list);
1084
1085 /*
1086 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
1087 * wait for an XferNotReady event so we know what the current
1088 * (micro-)frame number is.
1089 *
1090 * Without this trick, we are very, very likely gonna get Bus Expiry
1091 * errors which will force us to issue an EndTransfer command.
1092 */
1093 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1094 if ((dep->flags & DWC3_EP_PENDING_REQUEST) &&
1095 list_empty(&dep->started_list)) {
1096 dwc3_stop_active_transfer(dwc, dep->number, true);
1097 dep->flags = DWC3_EP_ENABLED;
1098 }
1099 return 0;
1100 }
1101
1102 if (!dwc3_calc_trbs_left(dep))
1103 return 0;
1104
1105 ret = __dwc3_gadget_kick_transfer(dep, 0);
1106 if (ret && ret != -EBUSY)
1107 dwc3_trace(trace_dwc3_gadget,
1108 "%s: failed to kick transfers",
1109 dep->name);
1110 if (ret == -EBUSY)
1111 ret = 0;
1112
1113 return ret;
1114 }
1115
1116 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
1117 struct usb_request *request)
1118 {
1119 dwc3_gadget_ep_free_request(ep, request);
1120 }
1121
1122 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
1123 {
1124 struct dwc3_request *req;
1125 struct usb_request *request;
1126 struct usb_ep *ep = &dep->endpoint;
1127
1128 dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
1129 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
1130 if (!request)
1131 return -ENOMEM;
1132
1133 request->length = 0;
1134 request->buf = dwc->zlp_buf;
1135 request->complete = __dwc3_gadget_ep_zlp_complete;
1136
1137 req = to_dwc3_request(request);
1138
1139 return __dwc3_gadget_ep_queue(dep, req);
1140 }
1141
1142 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1143 gfp_t gfp_flags)
1144 {
1145 struct dwc3_request *req = to_dwc3_request(request);
1146 struct dwc3_ep *dep = to_dwc3_ep(ep);
1147 struct dwc3 *dwc = dep->dwc;
1148
1149 unsigned long flags;
1150
1151 int ret;
1152
1153 spin_lock_irqsave(&dwc->lock, flags);
1154 ret = __dwc3_gadget_ep_queue(dep, req);
1155
1156 /*
1157 * Okay, here's the thing, if the gadget driver has requested a ZLP by
1158 * setting request->zero, instead of doing magic, we will just queue an
1159 * extra usb_request ourselves so that it gets handled the same way as
1160 * any other request.
1161 */
1162 if (ret == 0 && request->zero && request->length &&
1163 (request->length % ep->maxpacket == 0))
1164 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
1165
1166 spin_unlock_irqrestore(&dwc->lock, flags);
1167
1168 return ret;
1169 }
1170
1171 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1172 struct usb_request *request)
1173 {
1174 struct dwc3_request *req = to_dwc3_request(request);
1175 struct dwc3_request *r = NULL;
1176
1177 struct dwc3_ep *dep = to_dwc3_ep(ep);
1178 struct dwc3 *dwc = dep->dwc;
1179
1180 unsigned long flags;
1181 int ret = 0;
1182
1183 trace_dwc3_ep_dequeue(req);
1184
1185 spin_lock_irqsave(&dwc->lock, flags);
1186
1187 list_for_each_entry(r, &dep->pending_list, list) {
1188 if (r == req)
1189 break;
1190 }
1191
1192 if (r != req) {
1193 list_for_each_entry(r, &dep->started_list, list) {
1194 if (r == req)
1195 break;
1196 }
1197 if (r == req) {
1198 /* wait until it is processed */
1199 dwc3_stop_active_transfer(dwc, dep->number, true);
1200 goto out1;
1201 }
1202 dev_err(dwc->dev, "request %p was not queued to %s\n",
1203 request, ep->name);
1204 ret = -EINVAL;
1205 goto out0;
1206 }
1207
1208 out1:
1209 /* giveback the request */
1210 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1211
1212 out0:
1213 spin_unlock_irqrestore(&dwc->lock, flags);
1214
1215 return ret;
1216 }
1217
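/*
 * __dwc3_gadget_ep_set_halt - set or clear the STALL condition on @dep.
 * @value: non-zero to SetStall, zero to ClearStall.
 * @protocol: true when the halt was requested by the host via a protocol
 * (ep0) command; in that case a transfer already in flight does not prevent
 * the stall. Caller must hold dwc->lock.
 */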
1218 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1219 {
1220 struct dwc3_gadget_ep_cmd_params params;
1221 struct dwc3 *dwc = dep->dwc;
1222 int ret;
1223
1224 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1225 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1226 return -EINVAL;
1227 }
1228
1229 memset(&params, 0x00, sizeof(params));
1230
1231 if (value) {
1232 struct dwc3_trb *trb;
1233
1234 unsigned transfer_in_flight;
1235 unsigned started;
1236
1237 if (dep->number > 1)
1238 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
1239 else
1240 trb = &dwc->ep0_trb[dep->trb_enqueue];
1241
1242 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
1243 started = !list_empty(&dep->started_list);
1244
1245 if (!protocol && ((dep->direction && transfer_in_flight) ||
1246 (!dep->direction && started))) {
1247 dwc3_trace(trace_dwc3_gadget,
1248 "%s: pending request, cannot halt",
1249 dep->name);
1250 return -EAGAIN;
1251 }
1252
1253 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
1254 &params);
1255 if (ret)
1256 dev_err(dwc->dev, "failed to set STALL on %s\n",
1257 dep->name);
1258 else
1259 dep->flags |= DWC3_EP_STALL;
1260 } else {
1261
1262 ret = dwc3_send_clear_stall_ep_cmd(dep);
1263 if (ret)
1264 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1265 dep->name);
1266 else
1267 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1268 }
1269
1270 return ret;
1271 }
1272
1273 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1274 {
1275 struct dwc3_ep *dep = to_dwc3_ep(ep);
1276 struct dwc3 *dwc = dep->dwc;
1277
1278 unsigned long flags;
1279
1280 int ret;
1281
1282 spin_lock_irqsave(&dwc->lock, flags);
1283 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1284 spin_unlock_irqrestore(&dwc->lock, flags);
1285
1286 return ret;
1287 }
1288
1289 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1290 {
1291 struct dwc3_ep *dep = to_dwc3_ep(ep);
1292 struct dwc3 *dwc = dep->dwc;
1293 unsigned long flags;
1294 int ret;
1295
1296 spin_lock_irqsave(&dwc->lock, flags);
1297 dep->flags |= DWC3_EP_WEDGE;
1298
1299 if (dep->number == 0 || dep->number == 1)
1300 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1301 else
1302 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1303 spin_unlock_irqrestore(&dwc->lock, flags);
1304
1305 return ret;
1306 }
1307
1308 /* -------------------------------------------------------------------------- */
1309
1310 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1311 .bLength = USB_DT_ENDPOINT_SIZE,
1312 .bDescriptorType = USB_DT_ENDPOINT,
1313 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1314 };
1315
1316 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1317 .enable = dwc3_gadget_ep0_enable,
1318 .disable = dwc3_gadget_ep0_disable,
1319 .alloc_request = dwc3_gadget_ep_alloc_request,
1320 .free_request = dwc3_gadget_ep_free_request,
1321 .queue = dwc3_gadget_ep0_queue,
1322 .dequeue = dwc3_gadget_ep_dequeue,
1323 .set_halt = dwc3_gadget_ep0_set_halt,
1324 .set_wedge = dwc3_gadget_ep_set_wedge,
1325 };
1326
1327 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1328 .enable = dwc3_gadget_ep_enable,
1329 .disable = dwc3_gadget_ep_disable,
1330 .alloc_request = dwc3_gadget_ep_alloc_request,
1331 .free_request = dwc3_gadget_ep_free_request,
1332 .queue = dwc3_gadget_ep_queue,
1333 .dequeue = dwc3_gadget_ep_dequeue,
1334 .set_halt = dwc3_gadget_ep_set_halt,
1335 .set_wedge = dwc3_gadget_ep_set_wedge,
1336 };
1337
1338 /* -------------------------------------------------------------------------- */
1339
1340 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1341 {
1342 struct dwc3 *dwc = gadget_to_dwc(g);
1343 u32 reg;
1344
1345 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1346 return DWC3_DSTS_SOFFN(reg);
1347 }
1348
1349 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1350 {
1351 int retries;
1352
1353 int ret;
1354 u32 reg;
1355
1356 u8 link_state;
1357 u8 speed;
1358
1359 /*
1360 * According to the Databook, a Remote Wakeup request should
1361 * be issued only when the device is in the Early Suspend state.
1362 *
1363 * We can check that via USB Link State bits in DSTS register.
1364 */
1365 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1366
1367 speed = reg & DWC3_DSTS_CONNECTSPD;
1368 if ((speed == DWC3_DSTS_SUPERSPEED) ||
1369 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
1370 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
1371 return 0;
1372 }
1373
1374 link_state = DWC3_DSTS_USBLNKST(reg);
1375
1376 switch (link_state) {
1377 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1378 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1379 break;
1380 default:
1381 dwc3_trace(trace_dwc3_gadget,
1382 "can't wakeup from '%s'",
1383 dwc3_gadget_link_string(link_state));
1384 return -EINVAL;
1385 }
1386
1387 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1388 if (ret < 0) {
1389 dev_err(dwc->dev, "failed to put link in Recovery\n");
1390 return ret;
1391 }
1392
1393 /* Recent versions do this automatically */
1394 if (dwc->revision < DWC3_REVISION_194A) {
1395 /* write zeroes to Link Change Request */
1396 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1397 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1398 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1399 }
1400
1401 /* poll until Link State changes to ON */
1402 retries = 20000;
1403
1404 while (retries--) {
1405 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1406
1407 /* in HS, means ON */
1408 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1409 break;
1410 }
1411
1412 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1413 dev_err(dwc->dev, "failed to send remote wakeup\n");
1414 return -EINVAL;
1415 }
1416
1417 return 0;
1418 }
1419
1420 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1421 {
1422 struct dwc3 *dwc = gadget_to_dwc(g);
1423 unsigned long flags;
1424 int ret;
1425
1426 spin_lock_irqsave(&dwc->lock, flags);
1427 ret = __dwc3_gadget_wakeup(dwc);
1428 spin_unlock_irqrestore(&dwc->lock, flags);
1429
1430 return ret;
1431 }
1432
1433 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1434 int is_selfpowered)
1435 {
1436 struct dwc3 *dwc = gadget_to_dwc(g);
1437 unsigned long flags;
1438
1439 spin_lock_irqsave(&dwc->lock, flags);
1440 g->is_selfpowered = !!is_selfpowered;
1441 spin_unlock_irqrestore(&dwc->lock, flags);
1442
1443 return 0;
1444 }
1445
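/*
 * dwc3_gadget_run_stop - set or clear DCTL.RUN_STOP (soft connect/disconnect).
 *
 * Called with dwc->lock held. After updating the bit, polls DSTS.DEVCTRLHLT
 * until the core reports the matching state, or returns -ETIMEDOUT.
 */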
1446 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1447 {
1448 u32 reg;
1449 u32 timeout = 500;
1450
1451 if (pm_runtime_suspended(dwc->dev))
1452 return 0;
1453
1454 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1455 if (is_on) {
1456 if (dwc->revision <= DWC3_REVISION_187A) {
1457 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1458 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1459 }
1460
1461 if (dwc->revision >= DWC3_REVISION_194A)
1462 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1463 reg |= DWC3_DCTL_RUN_STOP;
1464
1465 if (dwc->has_hibernation)
1466 reg |= DWC3_DCTL_KEEP_CONNECT;
1467
1468 dwc->pullups_connected = true;
1469 } else {
1470 reg &= ~DWC3_DCTL_RUN_STOP;
1471
1472 if (dwc->has_hibernation && !suspend)
1473 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1474
1475 dwc->pullups_connected = false;
1476 }
1477
1478 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1479
1480 do {
1481 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1482 reg &= DWC3_DSTS_DEVCTRLHLT;
1483 } while (--timeout && !(!is_on ^ !reg));
1484
1485 if (!timeout)
1486 return -ETIMEDOUT;
1487
1488 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
1489 dwc->gadget_driver
1490 ? dwc->gadget_driver->function : "no-function",
1491 is_on ? "connect" : "disconnect");
1492
1493 return 0;
1494 }
1495
1496 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1497 {
1498 struct dwc3 *dwc = gadget_to_dwc(g);
1499 unsigned long flags;
1500 int ret;
1501
1502 is_on = !!is_on;
1503
1504 spin_lock_irqsave(&dwc->lock, flags);
1505 ret = dwc3_gadget_run_stop(dwc, is_on, false);
1506 spin_unlock_irqrestore(&dwc->lock, flags);
1507
1508 return ret;
1509 }
1510
1511 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1512 {
1513 u32 reg;
1514
1515 /* Enable all but Start and End of Frame IRQs */
1516 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1517 DWC3_DEVTEN_EVNTOVERFLOWEN |
1518 DWC3_DEVTEN_CMDCMPLTEN |
1519 DWC3_DEVTEN_ERRTICERREN |
1520 DWC3_DEVTEN_WKUPEVTEN |
1521 DWC3_DEVTEN_ULSTCNGEN |
1522 DWC3_DEVTEN_CONNECTDONEEN |
1523 DWC3_DEVTEN_USBRSTEN |
1524 DWC3_DEVTEN_DISCONNEVTEN);
1525
1526 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1527 }
1528
1529 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1530 {
1531 /* mask all interrupts */
1532 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1533 }
1534
1535 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1536 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1537
1538 /**
1539 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
1540 * @dwc: pointer to our context structure
1541 *
1542 * The following looks complex but it's actually very simple. In order to
1543 * calculate the number of packets we can burst at once on OUT transfers, we're
1544 * gonna use RxFIFO size.
1545 *
1546 * To calculate RxFIFO size we need two numbers:
1547 * MDWIDTH = size, in bits, of the internal memory bus
1548 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
1549 *
1550 * Given these two numbers, the formula is simple:
1551 *
1552 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
1553 *
1554 * 24 bytes is for 3x SETUP packets
1555 * 16 bytes is a clock domain crossing tolerance
1556 *
1557 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
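 *
 * For example, with hypothetical values MDWIDTH = 64 bits and
 * RAM2_DEPTH = 1024, RxFIFO Size = (1024 * 64 / 8) - 24 - 16 = 8152 bytes,
 * so NUMP = 8152 / 1024 = 7, well below the cap of 16 applied below.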
1558 */
1559 static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
1560 {
1561 u32 ram2_depth;
1562 u32 mdwidth;
1563 u32 nump;
1564 u32 reg;
1565
1566 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
1567 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
1568
1569 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
1570 nump = min_t(u32, nump, 16);
1571
1572 /* update NumP */
1573 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1574 reg &= ~DWC3_DCFG_NUMP_MASK;
1575 reg |= nump << DWC3_DCFG_NUMP_SHIFT;
1576 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1577 }
1578
1579 static int __dwc3_gadget_start(struct dwc3 *dwc)
1580 {
1581 struct dwc3_ep *dep;
1582 int ret = 0;
1583 u32 reg;
1584
1585 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1586 reg &= ~(DWC3_DCFG_SPEED_MASK);
1587
1588 /**
1589 * WORKAROUND: DWC3 revisions < 2.20a have an issue
1590 * which would cause a metastability state on the Run/Stop
1591 * bit if we try to force the IP to USB2-only mode.
1592 *
1593 * Because of that, we cannot configure the IP to any
1594 * speed other than SuperSpeed.
1595 *
1596 * Refers to:
1597 *
1598 * STAR#9000525659: Clock Domain Crossing on DCTL in
1599 * USB 2.0 Mode
1600 */
1601 if (dwc->revision < DWC3_REVISION_220A) {
1602 reg |= DWC3_DCFG_SUPERSPEED;
1603 } else {
1604 switch (dwc->maximum_speed) {
1605 case USB_SPEED_LOW:
1606 reg |= DWC3_DCFG_LOWSPEED;
1607 break;
1608 case USB_SPEED_FULL:
1609 reg |= DWC3_DCFG_FULLSPEED1;
1610 break;
1611 case USB_SPEED_HIGH:
1612 reg |= DWC3_DCFG_HIGHSPEED;
1613 break;
1614 case USB_SPEED_SUPER_PLUS:
1615 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
1616 break;
1617 default:
1618 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
1619 dwc->maximum_speed);
1620 /* fall through */
1621 case USB_SPEED_SUPER:
1622 reg |= DWC3_DCFG_SUPERSPEED;
1623 break;
1624 }
1625 }
1626 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1627
1628 /*
1629 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
1630 * field instead of letting dwc3 itself calculate that automatically.
1631 *
1632 * This way, we maximize the chances that we'll be able to get several
1633 * bursts of data without going through any sort of endpoint throttling.
1634 */
1635 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1636 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
1637 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1638
1639 dwc3_gadget_setup_nump(dwc);
1640
1641 /* Start with SuperSpeed Default */
1642 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1643
1644 dep = dwc->eps[0];
1645 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1646 false);
1647 if (ret) {
1648 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1649 goto err0;
1650 }
1651
1652 dep = dwc->eps[1];
1653 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1654 false);
1655 if (ret) {
1656 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1657 goto err1;
1658 }
1659
1660 /* begin to receive SETUP packets */
1661 dwc->ep0state = EP0_SETUP_PHASE;
1662 dwc3_ep0_out_start(dwc);
1663
1664 dwc3_gadget_enable_irq(dwc);
1665
1666 return 0;
1667
1668 err1:
1669 __dwc3_gadget_ep_disable(dwc->eps[0]);
1670
1671 err0:
1672 return ret;
1673 }
1674
1675 static int dwc3_gadget_start(struct usb_gadget *g,
1676 struct usb_gadget_driver *driver)
1677 {
1678 struct dwc3 *dwc = gadget_to_dwc(g);
1679 unsigned long flags;
1680 int ret = 0;
1681 int irq;
1682
1683 irq = dwc->irq_gadget;
1684 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1685 IRQF_SHARED, "dwc3", dwc->ev_buf);
1686 if (ret) {
1687 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1688 irq, ret);
1689 goto err0;
1690 }
1691
1692 spin_lock_irqsave(&dwc->lock, flags);
1693 if (dwc->gadget_driver) {
1694 dev_err(dwc->dev, "%s is already bound to %s\n",
1695 dwc->gadget.name,
1696 dwc->gadget_driver->driver.name);
1697 ret = -EBUSY;
1698 goto err1;
1699 }
1700
1701 dwc->gadget_driver = driver;
1702
1703 if (pm_runtime_active(dwc->dev))
1704 __dwc3_gadget_start(dwc);
1705
1706 spin_unlock_irqrestore(&dwc->lock, flags);
1707
1708 return 0;
1709
1710 err1:
1711 spin_unlock_irqrestore(&dwc->lock, flags);
1712 free_irq(irq, dwc);
1713
1714 err0:
1715 return ret;
1716 }
1717
1718 static void __dwc3_gadget_stop(struct dwc3 *dwc)
1719 {
1720 if (pm_runtime_suspended(dwc->dev))
1721 return;
1722
1723 dwc3_gadget_disable_irq(dwc);
1724 __dwc3_gadget_ep_disable(dwc->eps[0]);
1725 __dwc3_gadget_ep_disable(dwc->eps[1]);
1726 }
1727
1728 static int dwc3_gadget_stop(struct usb_gadget *g)
1729 {
1730 struct dwc3 *dwc = gadget_to_dwc(g);
1731 unsigned long flags;
1732
1733 spin_lock_irqsave(&dwc->lock, flags);
1734 __dwc3_gadget_stop(dwc);
1735 dwc->gadget_driver = NULL;
1736 spin_unlock_irqrestore(&dwc->lock, flags);
1737
1738 free_irq(dwc->irq_gadget, dwc->ev_buf);
1739
1740 return 0;
1741 }
1742
1743 static const struct usb_gadget_ops dwc3_gadget_ops = {
1744 .get_frame = dwc3_gadget_get_frame,
1745 .wakeup = dwc3_gadget_wakeup,
1746 .set_selfpowered = dwc3_gadget_set_selfpowered,
1747 .pullup = dwc3_gadget_pullup,
1748 .udc_start = dwc3_gadget_start,
1749 .udc_stop = dwc3_gadget_stop,
1750 };
1751
1752 /* -------------------------------------------------------------------------- */
1753
1754 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1755 u8 num, u32 direction)
1756 {
1757 struct dwc3_ep *dep;
1758 u8 i;
1759
1760 for (i = 0; i < num; i++) {
1761 u8 epnum = (i << 1) | (direction ? 1 : 0);
1762
1763 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1764 if (!dep)
1765 return -ENOMEM;
1766
1767 dep->dwc = dwc;
1768 dep->number = epnum;
1769 dep->direction = !!direction;
1770 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
1771 dwc->eps[epnum] = dep;
1772
1773 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1774 (epnum & 1) ? "in" : "out");
1775
1776 dep->endpoint.name = dep->name;
1777 spin_lock_init(&dep->lock);
1778
1779 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
1780
1781 if (epnum == 0 || epnum == 1) {
1782 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1783 dep->endpoint.maxburst = 1;
1784 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1785 if (!epnum)
1786 dwc->gadget.ep0 = &dep->endpoint;
1787 } else {
1788 int ret;
1789
1790 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1791 dep->endpoint.max_streams = 15;
1792 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1793 list_add_tail(&dep->endpoint.ep_list,
1794 &dwc->gadget.ep_list);
1795
1796 ret = dwc3_alloc_trb_pool(dep);
1797 if (ret)
1798 return ret;
1799 }
1800
1801 if (epnum == 0 || epnum == 1) {
1802 dep->endpoint.caps.type_control = true;
1803 } else {
1804 dep->endpoint.caps.type_iso = true;
1805 dep->endpoint.caps.type_bulk = true;
1806 dep->endpoint.caps.type_int = true;
1807 }
1808
1809 dep->endpoint.caps.dir_in = !!direction;
1810 dep->endpoint.caps.dir_out = !direction;
1811
1812 INIT_LIST_HEAD(&dep->pending_list);
1813 INIT_LIST_HEAD(&dep->started_list);
1814 }
1815
1816 return 0;
1817 }
1818
1819 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1820 {
1821 int ret;
1822
1823 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1824
1825 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1826 if (ret < 0) {
1827 dwc3_trace(trace_dwc3_gadget,
1828 "failed to allocate OUT endpoints");
1829 return ret;
1830 }
1831
1832 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1833 if (ret < 0) {
1834 dwc3_trace(trace_dwc3_gadget,
1835 "failed to allocate IN endpoints");
1836 return ret;
1837 }
1838
1839 return 0;
1840 }
1841
1842 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1843 {
1844 struct dwc3_ep *dep;
1845 u8 epnum;
1846
1847 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1848 dep = dwc->eps[epnum];
1849 if (!dep)
1850 continue;
1851 /*
1852 * Physical endpoints 0 and 1 are special; they form the
1853 * bi-directional USB endpoint 0.
1854 *
1855 * For those two physical endpoints, we don't allocate a TRB
1856 * pool nor do we add them to the endpoints list. Due to that, we
1857 * shouldn't do these two operations on them either; otherwise we would end up
1858 * with all sorts of bugs when removing dwc3.ko.
1859 */
1860 if (epnum != 0 && epnum != 1) {
1861 dwc3_free_trb_pool(dep);
1862 list_del(&dep->endpoint.ep_list);
1863 }
1864
1865 kfree(dep);
1866 }
1867 }
1868
1869 /* -------------------------------------------------------------------------- */
1870
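/*
 * __dwc3_cleanup_done_trbs - reclaim one completed TRB belonging to @req.
 *
 * Advances the dequeue pointer, accumulates the TRB's remaining byte count
 * into req->request.actual (the caller later converts this into the actual
 * transferred length) and returns 1 when processing of the current request
 * should stop (TRB still owned by HW, short packet on a non-chained TRB, or
 * IOC), 0 otherwise.
 */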
1871 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1872 struct dwc3_request *req, struct dwc3_trb *trb,
1873 const struct dwc3_event_depevt *event, int status,
1874 int chain)
1875 {
1876 unsigned int count;
1877 unsigned int s_pkt = 0;
1878 unsigned int trb_status;
1879
1880 dwc3_ep_inc_deq(dep);
1881
1882 if (req->trb == trb)
1883 dep->queued_requests--;
1884
1885 trace_dwc3_complete_trb(dep, trb);
1886
1887 /*
1888 * If we're in the middle of a series of chained TRBs and we
1889 * receive a short transfer along the way, DWC3 will skip
1890 * through all TRBs including the last TRB in the chain (the
1891 * one where the CHN bit is zero). DWC3 will also avoid clearing the HWO
1892 * bit, and SW has to do it manually.
1893 *
1894 * We're going to do that here to avoid problems of HW trying
1895 * to use bogus TRBs for transfers.
1896 */
1897 if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
1898 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1899
1900 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1901 return 1;
1902
1903 count = trb->size & DWC3_TRB_SIZE_MASK;
1904 req->request.actual += count;
1905
1906 if (dep->direction) {
1907 if (count) {
1908 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1909 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1910 dwc3_trace(trace_dwc3_gadget,
1911 "%s: incomplete IN transfer",
1912 dep->name);
1913 /*
1914 * If missed isoc occurred and there is
1915 * no request queued then issue END
1916 * TRANSFER, so that core generates
1917 * next xfernotready and we will issue
1918 * a fresh START TRANSFER.
1919 * If there are still queued requests
1920 * then wait, do not issue either END
1921 * or UPDATE TRANSFER, just attach the next
1922 * request in pending_list during
1923 * giveback. If any future queued request
1924 * is successfully transferred then we
1925 * will issue UPDATE TRANSFER for all
1926 * requests in the pending_list.
1927 */
1928 dep->flags |= DWC3_EP_MISSED_ISOC;
1929 } else {
1930 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1931 dep->name);
1932 status = -ECONNRESET;
1933 }
1934 } else {
1935 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1936 }
1937 } else {
1938 if (count && (event->status & DEPEVT_STATUS_SHORT))
1939 s_pkt = 1;
1940 }
1941
1942 if (s_pkt && !chain)
1943 return 1;
1944
1945 if ((event->status & DEPEVT_STATUS_IOC) &&
1946 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1947 return 1;
1948
1949 return 0;
1950 }
1951
1952 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1953 const struct dwc3_event_depevt *event, int status)
1954 {
1955 struct dwc3_request *req, *n;
1956 struct dwc3_trb *trb;
1957 bool ioc = false;
1958 int ret;
1959
1960 list_for_each_entry_safe(req, n, &dep->started_list, list) {
1961 unsigned length;
1962 unsigned actual;
1963 int chain;
1964
1965 length = req->request.length;
1966 chain = req->num_pending_sgs > 0;
1967 if (chain) {
1968 struct scatterlist *sg = req->sg;
1969 struct scatterlist *s;
1970 unsigned int pending = req->num_pending_sgs;
1971 unsigned int i;
1972
1973 for_each_sg(sg, s, pending, i) {
1974 trb = &dep->trb_pool[dep->trb_dequeue];
1975
1976 req->sg = sg_next(s);
1977 req->num_pending_sgs--;
1978
1979 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1980 event, status, chain);
1981 if (ret)
1982 break;
1983 }
1984 } else {
1985 trb = &dep->trb_pool[dep->trb_dequeue];
1986 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1987 event, status, chain);
1988 }
1989
1990 /*
1991 * We assume here we will always receive the entire data block
1992 * which we should receive. Meaning, if we program RX to
1993 * receive 4K but we receive only 2K, we assume that's all we
1994 * should receive and we simply bounce the request back to the
1995 * gadget driver for further processing.
1996 */
1997 actual = length - req->request.actual;
1998 req->request.actual = actual;
1999
2000 if (ret && chain && (actual < length) && req->num_pending_sgs)
2001 return __dwc3_gadget_kick_transfer(dep, 0);
2002
2003 dwc3_gadget_giveback(dep, req, status);
2004
2005 if (ret) {
2006 if ((event->status & DEPEVT_STATUS_IOC) &&
2007 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2008 ioc = true;
2009 break;
2010 }
2011 }
2012
2013 /*
2014 * Our endpoint might get disabled by another thread during
2015 * dwc3_gadget_giveback(). If that happens, we're just gonna return 1
2016 * early on so DWC3_EP_BUSY flag gets cleared
2017 */
2018 if (!dep->endpoint.desc)
2019 return 1;
2020
2021 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
2022 list_empty(&dep->started_list)) {
2023 if (list_empty(&dep->pending_list)) {
2024 /*
2025 * If there is no entry in request list then do
2026 * not issue END TRANSFER now. Just set PENDING
2027 * flag, so that END TRANSFER is issued when an
2028 * entry is added into request list.
2029 */
2030 dep->flags = DWC3_EP_PENDING_REQUEST;
2031 } else {
2032 dwc3_stop_active_transfer(dwc, dep->number, true);
2033 dep->flags = DWC3_EP_ENABLED;
2034 }
2035 return 1;
2036 }
2037
2038 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && ioc)
2039 return 0;
2040
2041 return 1;
2042 }
2043
2044 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
2045 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
2046 {
2047 unsigned status = 0;
2048 int clean_busy;
2049 u32 is_xfer_complete;
2050
2051 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
2052
2053 if (event->status & DEPEVT_STATUS_BUSERR)
2054 status = -ECONNRESET;
2055
2056 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
2057 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
2058 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
2059 dep->flags &= ~DWC3_EP_BUSY;
2060
2061 /*
2062 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
2063 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
2064 */
2065 if (dwc->revision < DWC3_REVISION_183A) {
2066 u32 reg;
2067 int i;
2068
2069 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
2070 dep = dwc->eps[i];
2071
2072 if (!(dep->flags & DWC3_EP_ENABLED))
2073 continue;
2074
2075 if (!list_empty(&dep->started_list))
2076 return;
2077 }
2078
2079 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2080 reg |= dwc->u1u2;
2081 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2082
2083 dwc->u1u2 = 0;
2084 }
2085
2086 /*
2087 * Our endpoint might get disabled by another thread during
2088 * dwc3_gadget_giveback(). If that happens, we simply return early,
2089 * since there is nothing left to do for a disabled endpoint.
2090 */
2091 if (!dep->endpoint.desc)
2092 return;
2093
2094 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2095 int ret;
2096
2097 ret = __dwc3_gadget_kick_transfer(dep, 0);
2098 if (!ret || ret == -EBUSY)
2099 return;
2100 }
2101 }
2102
2103 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2104 const struct dwc3_event_depevt *event)
2105 {
2106 struct dwc3_ep *dep;
2107 u8 epnum = event->endpoint_number;
2108
2109 dep = dwc->eps[epnum];
2110
2111 if (!(dep->flags & DWC3_EP_ENABLED))
2112 return;
2113
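	/*
	 * Physical endpoints 0 and 1 carry ep0 out/in events; route
	 * them to the dedicated ep0 handler.
	 */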
2114 if (epnum == 0 || epnum == 1) {
2115 dwc3_ep0_interrupt(dwc, event);
2116 return;
2117 }
2118
2119 switch (event->endpoint_event) {
2120 case DWC3_DEPEVT_XFERCOMPLETE:
2121 dep->resource_index = 0;
2122
2123 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2124 dwc3_trace(trace_dwc3_gadget,
2125 "%s is an Isochronous endpoint",
2126 dep->name);
2127 return;
2128 }
2129
2130 dwc3_endpoint_transfer_complete(dwc, dep, event);
2131 break;
2132 case DWC3_DEPEVT_XFERINPROGRESS:
2133 dwc3_endpoint_transfer_complete(dwc, dep, event);
2134 break;
2135 case DWC3_DEPEVT_XFERNOTREADY:
2136 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2137 dwc3_gadget_start_isoc(dwc, dep, event);
2138 } else {
2139 int active;
2140 int ret;
2141
2142 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
2143
2144 dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
2145 dep->name, active ? "Transfer Active"
2146 : "Transfer Not Active");
2147
2148 ret = __dwc3_gadget_kick_transfer(dep, 0);
2149 if (!ret || ret == -EBUSY)
2150 return;
2151
2152 dwc3_trace(trace_dwc3_gadget,
2153 "%s: failed to kick transfers",
2154 dep->name);
2155 }
2156
2157 break;
2158 case DWC3_DEPEVT_STREAMEVT:
2159 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2160 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2161 dep->name);
2162 return;
2163 }
2164
2165 switch (event->status) {
2166 case DEPEVT_STREAMEVT_FOUND:
2167 dwc3_trace(trace_dwc3_gadget,
2168 "Stream %d found and started",
2169 event->parameters);
2170
2171 break;
2172 case DEPEVT_STREAMEVT_NOTFOUND:
2173 /* FALLTHROUGH */
2174 default:
2175 dwc3_trace(trace_dwc3_gadget,
2176 "unable to find suitable stream");
2177 }
2178 break;
2179 case DWC3_DEPEVT_RXTXFIFOEVT:
2180 dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
2181 break;
2182 case DWC3_DEPEVT_EPCMDCMPLT:
2183 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
2184 break;
2185 }
2186 }
2187
2188 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2189 {
2190 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2191 spin_unlock(&dwc->lock);
2192 dwc->gadget_driver->disconnect(&dwc->gadget);
2193 spin_lock(&dwc->lock);
2194 }
2195 }
2196
2197 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2198 {
2199 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2200 spin_unlock(&dwc->lock);
2201 dwc->gadget_driver->suspend(&dwc->gadget);
2202 spin_lock(&dwc->lock);
2203 }
2204 }
2205
2206 static void dwc3_resume_gadget(struct dwc3 *dwc)
2207 {
2208 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2209 spin_unlock(&dwc->lock);
2210 dwc->gadget_driver->resume(&dwc->gadget);
2211 spin_lock(&dwc->lock);
2212 }
2213 }
2214
2215 static void dwc3_reset_gadget(struct dwc3 *dwc)
2216 {
2217 if (!dwc->gadget_driver)
2218 return;
2219
2220 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2221 spin_unlock(&dwc->lock);
2222 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2223 spin_lock(&dwc->lock);
2224 }
2225 }
2226
2227 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2228 {
2229 struct dwc3_ep *dep;
2230 struct dwc3_gadget_ep_cmd_params params;
2231 u32 cmd;
2232 int ret;
2233
2234 dep = dwc->eps[epnum];
2235
2236 if (!dep->resource_index)
2237 return;
2238
2239 /*
2240 * NOTICE: We are violating what the Databook says about the
2241 * EndTransfer command. Ideally we would _always_ wait for the
2242 * EndTransfer Command Completion IRQ, but that's causing too
2243 * much trouble synchronizing between us and the gadget driver.
2244 *
2245 * We have discussed this with the IP Provider and it was
2246 * suggested to giveback all requests here, but give HW some
2247 * extra time to synchronize with the interconnect. We're using
2248 * an arbitrary 100us delay for that.
2249 *
2250 * Note also that a similar handling was tested by Synopsys
2251 * (thanks a lot Paul) and nothing bad has come out of it.
2252 * In short, what we're doing is:
2253 *
2254 * - Issue EndTransfer WITH CMDIOC bit set
2255 * - Wait 100us
2256 *
2257 * As of IP version 3.10a of the DWC_usb3 IP, the controller
2258 * supports a mode to work around the above limitation. The
2259 * software can poll the CMDACT bit in the DEPCMD register
2260 * after issuing an EndTransfer command. This mode is enabled
2261 * by writing GUCTL2[14]. This polling is already done in the
2262 * dwc3_send_gadget_ep_cmd() function so if the mode is
2263 * enabled, the EndTransfer command will have completed upon
2264 * returning from this function and we don't need to delay for
2265 * 100us.
2266 *
2267 * This mode is NOT available on the DWC_usb31 IP.
2268 */
2269
2270 cmd = DWC3_DEPCMD_ENDTRANSFER;
2271 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2272 cmd |= DWC3_DEPCMD_CMDIOC;
2273 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2274 memset(&params, 0, sizeof(params));
2275 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
2276 WARN_ON_ONCE(ret);
2277 dep->resource_index = 0;
2278 dep->flags &= ~DWC3_EP_BUSY;
2279
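	/*
	 * As described above, the extra 100us delay is only needed when the
	 * GUCTL2-based polling mode is unavailable, i.e. on DWC_usb31 or on
	 * DWC_usb3 versions older than 3.10a.
	 */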
2280 if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
2281 udelay(100);
2282 }
2283
2284 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2285 {
2286 u32 epnum;
2287
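	/* Endpoints 0 and 1 are ep0 out/in and are handled separately. */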
2288 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2289 struct dwc3_ep *dep;
2290
2291 dep = dwc->eps[epnum];
2292 if (!dep)
2293 continue;
2294
2295 if (!(dep->flags & DWC3_EP_ENABLED))
2296 continue;
2297
2298 dwc3_remove_requests(dwc, dep);
2299 }
2300 }
2301
2302 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2303 {
2304 u32 epnum;
2305
2306 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2307 struct dwc3_ep *dep;
2308 int ret;
2309
2310 dep = dwc->eps[epnum];
2311 if (!dep)
2312 continue;
2313
2314 if (!(dep->flags & DWC3_EP_STALL))
2315 continue;
2316
2317 dep->flags &= ~DWC3_EP_STALL;
2318
2319 ret = dwc3_send_clear_stall_ep_cmd(dep);
2320 WARN_ON_ONCE(ret);
2321 }
2322 }
2323
2324 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2325 {
2326 u32 reg;
2327
2328 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2329 reg &= ~DWC3_DCTL_INITU1ENA;
2330 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2331
2332 reg &= ~DWC3_DCTL_INITU2ENA;
2333 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2334
2335 dwc3_disconnect_gadget(dwc);
2336
2337 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2338 dwc->setup_packet_pending = false;
2339 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2340
2341 dwc->connected = false;
2342 }
2343
2344 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2345 {
2346 u32 reg;
2347
2348 dwc->connected = true;
2349
2350 /*
2351 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2352 * would cause a missing Disconnect Event if there's a
2353 * pending Setup Packet in the FIFO.
2354 *
2355 * There's no suggested workaround on the official Bug
2356 * report, which states that "unless the driver/application
2357 * is doing any special handling of a disconnect event,
2358 * there is no functional issue".
2359 *
2360 * Unfortunately, it turns out that we _do_ some special
2361 * handling of a disconnect event, namely completing all
2362 * pending transfers, notifying the gadget driver of the
2363 * disconnection, and so on.
2364 *
2365 * Our suggested workaround is to follow the Disconnect
2366 * Event steps here, instead, based on a setup_packet_pending
2367 * flag. That flag gets set whenever we have a SETUP_PENDING
2368 * status for EP0 TRBs and gets cleared on XferComplete for the
2369 * same endpoint.
2370 *
2371 * Refers to:
2372 *
2373 * STAR#9000466709: RTL: Device : Disconnect event not
2374 * generated if setup packet pending in FIFO
2375 */
2376 if (dwc->revision < DWC3_REVISION_188A) {
2377 if (dwc->setup_packet_pending)
2378 dwc3_gadget_disconnect_interrupt(dwc);
2379 }
2380
2381 dwc3_reset_gadget(dwc);
2382
2383 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2384 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2385 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2386 dwc->test_mode = false;
2387
2388 dwc3_stop_active_transfers(dwc);
2389 dwc3_clear_stall_all_ep(dwc);
2390
2391 /* Reset device address to zero */
2392 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2393 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2394 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2395 }
2396
2397 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2398 {
2399 u32 reg;
2400 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2401
2402 /*
2403 * We change the clock only at SuperSpeed, although it is not clear
2404 * why this is needed. It may become part of the power saving plan.
2405 */
2406
2407 if ((speed != DWC3_DSTS_SUPERSPEED) &&
2408 (speed != DWC3_DSTS_SUPERSPEED_PLUS))
2409 return;
2410
2411 /*
2412 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2413 * each time on Connect Done.
2414 */
2415 if (!usb30_clock)
2416 return;
2417
2418 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2419 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2420 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2421 }
2422
2423 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2424 {
2425 struct dwc3_ep *dep;
2426 int ret;
2427 u32 reg;
2428 u8 speed;
2429
2430 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2431 speed = reg & DWC3_DSTS_CONNECTSPD;
2432 dwc->speed = speed;
2433
2434 dwc3_update_ram_clk_sel(dwc, speed);
2435
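	/*
	 * Program ep0's wMaxPacketSize and the gadget's reported speed to
	 * match the negotiated connection speed.
	 */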
2436 switch (speed) {
2437 case DWC3_DSTS_SUPERSPEED_PLUS:
2438 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2439 dwc->gadget.ep0->maxpacket = 512;
2440 dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
2441 break;
2442 case DWC3_DSTS_SUPERSPEED:
2443 /*
2444 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2445 * would cause a missing USB3 Reset event.
2446 *
2447 * In such situations, we should force a USB3 Reset
2448 * event by calling our dwc3_gadget_reset_interrupt()
2449 * routine.
2450 *
2451 * Refers to:
2452 *
2453 * STAR#9000483510: RTL: SS : USB3 reset event may
2454 * not be generated always when the link enters poll
2455 */
2456 if (dwc->revision < DWC3_REVISION_190A)
2457 dwc3_gadget_reset_interrupt(dwc);
2458
2459 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2460 dwc->gadget.ep0->maxpacket = 512;
2461 dwc->gadget.speed = USB_SPEED_SUPER;
2462 break;
2463 case DWC3_DSTS_HIGHSPEED:
2464 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2465 dwc->gadget.ep0->maxpacket = 64;
2466 dwc->gadget.speed = USB_SPEED_HIGH;
2467 break;
2468 case DWC3_DSTS_FULLSPEED2:
2469 case DWC3_DSTS_FULLSPEED1:
2470 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2471 dwc->gadget.ep0->maxpacket = 64;
2472 dwc->gadget.speed = USB_SPEED_FULL;
2473 break;
2474 case DWC3_DSTS_LOWSPEED:
2475 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2476 dwc->gadget.ep0->maxpacket = 8;
2477 dwc->gadget.speed = USB_SPEED_LOW;
2478 break;
2479 }
2480
2481 /* Enable USB2 LPM Capability */
2482
2483 if ((dwc->revision > DWC3_REVISION_194A) &&
2484 (speed != DWC3_DSTS_SUPERSPEED) &&
2485 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
2486 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2487 reg |= DWC3_DCFG_LPM_CAP;
2488 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2489
2490 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2491 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2492
2493 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2494
2495 /*
2496 * When, on dwc3 revisions >= 2.40a, the LPM Erratum handling is
2497 * enabled and DCFG.LPMCap is set, the core responds with an ACK
2498 * when the BESL value in the LPM token is less than or equal to
2499 * the LPM NYET threshold.
2500 */
2501 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2502 && dwc->has_lpm_erratum,
2503 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2504
2505 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2506 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2507
2508 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2509 } else {
2510 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2511 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2512 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2513 }
2514
2515 dep = dwc->eps[0];
2516 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2517 false);
2518 if (ret) {
2519 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2520 return;
2521 }
2522
2523 dep = dwc->eps[1];
2524 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2525 false);
2526 if (ret) {
2527 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2528 return;
2529 }
2530
2531 /*
2532 * Configure PHY via GUSB3PIPECTLn if required.
2533 *
2534 * Update GTXFIFOSIZn
2535 *
2536 * In both cases reset values should be sufficient.
2537 */
2538 }
2539
2540 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2541 {
2542 /*
2543 * TODO take core out of low power mode when that's
2544 * implemented.
2545 */
2546
2547 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2548 spin_unlock(&dwc->lock);
2549 dwc->gadget_driver->resume(&dwc->gadget);
2550 spin_lock(&dwc->lock);
2551 }
2552 }
2553
2554 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2555 unsigned int evtinfo)
2556 {
2557 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2558 unsigned int pwropt;
2559
2560 /*
2561 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2562 * Hibernation mode enabled which would show up when device detects
2563 * host-initiated U3 exit.
2564 *
2565 * In that case, device will generate a Link State Change Interrupt
2566 * from U3 to RESUME which is only necessary if Hibernation is
2567 * configured in.
2568 *
2569 * There are no functional changes due to such spurious event and we
2570 * just need to ignore it.
2571 *
2572 * Refers to:
2573 *
2574 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2575 * operational mode
2576 */
2577 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2578 if ((dwc->revision < DWC3_REVISION_250A) &&
2579 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2580 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2581 (next == DWC3_LINK_STATE_RESUME)) {
2582 dwc3_trace(trace_dwc3_gadget,
2583 "ignoring transition U3 -> Resume");
2584 return;
2585 }
2586 }
2587
2588 /*
2589 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
2590 * on the link partner, the USB session might do multiple entry/exit
2591 * of low power states before a transfer takes place.
2592 *
2593 * Due to this problem, we might experience lower throughput. The
2594 * suggested workaround is to disable DCTL[12:9] bits if we're
2595 * transitioning from U1/U2 to U0 and enable those bits again
2596 * after a transfer completes and there are no pending transfers
2597 * on any of the enabled endpoints.
2598 *
2599 * This is the first half of that workaround.
2600 *
2601 * Refers to:
2602 *
2603 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2604 * core send LGO_Ux entering U0
2605 */
2606 if (dwc->revision < DWC3_REVISION_183A) {
2607 if (next == DWC3_LINK_STATE_U0) {
2608 u32 u1u2;
2609 u32 reg;
2610
2611 switch (dwc->link_state) {
2612 case DWC3_LINK_STATE_U1:
2613 case DWC3_LINK_STATE_U2:
2614 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2615 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2616 | DWC3_DCTL_ACCEPTU2ENA
2617 | DWC3_DCTL_INITU1ENA
2618 | DWC3_DCTL_ACCEPTU1ENA);
2619
2620 if (!dwc->u1u2)
2621 dwc->u1u2 = reg & u1u2;
2622
2623 reg &= ~u1u2;
2624
2625 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2626 break;
2627 default:
2628 /* do nothing */
2629 break;
2630 }
2631 }
2632 }
2633
2634 switch (next) {
2635 case DWC3_LINK_STATE_U1:
2636 if (dwc->speed == USB_SPEED_SUPER)
2637 dwc3_suspend_gadget(dwc);
2638 break;
2639 case DWC3_LINK_STATE_U2:
2640 case DWC3_LINK_STATE_U3:
2641 dwc3_suspend_gadget(dwc);
2642 break;
2643 case DWC3_LINK_STATE_RESUME:
2644 dwc3_resume_gadget(dwc);
2645 break;
2646 default:
2647 /* do nothing */
2648 break;
2649 }
2650
2651 dwc->link_state = next;
2652 }
2653
2654 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
2655 unsigned int evtinfo)
2656 {
2657 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2658
2659 if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
2660 dwc3_suspend_gadget(dwc);
2661
2662 dwc->link_state = next;
2663 }
2664
2665 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2666 unsigned int evtinfo)
2667 {
2668 unsigned int is_ss = evtinfo & BIT(4);
2669
2670 /*
2671 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2672 * has a known issue which can cause USB CV TD.9.23 to fail
2673 * randomly.
2674 *
2675 * Because of this issue, core could generate bogus hibernation
2676 * events which SW needs to ignore.
2677 *
2678 * Refers to:
2679 *
2680 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2681 * Device Fallback from SuperSpeed
2682 */
2683 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2684 return;
2685
2686 /* enter hibernation here */
2687 }
2688
2689 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2690 const struct dwc3_event_devt *event)
2691 {
2692 switch (event->type) {
2693 case DWC3_DEVICE_EVENT_DISCONNECT:
2694 dwc3_gadget_disconnect_interrupt(dwc);
2695 break;
2696 case DWC3_DEVICE_EVENT_RESET:
2697 dwc3_gadget_reset_interrupt(dwc);
2698 break;
2699 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2700 dwc3_gadget_conndone_interrupt(dwc);
2701 break;
2702 case DWC3_DEVICE_EVENT_WAKEUP:
2703 dwc3_gadget_wakeup_interrupt(dwc);
2704 break;
2705 case DWC3_DEVICE_EVENT_HIBER_REQ:
2706 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2707 "unexpected hibernation event\n"))
2708 break;
2709
2710 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2711 break;
2712 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2713 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2714 break;
2715 case DWC3_DEVICE_EVENT_EOPF:
2716 /* This became the Suspend event for version 2.30a and above */
2717 if (dwc->revision < DWC3_REVISION_230A) {
2718 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
2719 } else {
2720 dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
2721
2722 /*
2723 * Ignore the suspend event until the gadget enters the
2724 * USB_STATE_CONFIGURED state.
2725 */
2726 if (dwc->gadget.state >= USB_STATE_CONFIGURED)
2727 dwc3_gadget_suspend_interrupt(dwc,
2728 event->event_info);
2729 }
2730 break;
2731 case DWC3_DEVICE_EVENT_SOF:
2732 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
2733 break;
2734 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2735 dwc3_trace(trace_dwc3_gadget, "Erratic Error");
2736 break;
2737 case DWC3_DEVICE_EVENT_CMD_CMPL:
2738 dwc3_trace(trace_dwc3_gadget, "Command Complete");
2739 break;
2740 case DWC3_DEVICE_EVENT_OVERFLOW:
2741 dwc3_trace(trace_dwc3_gadget, "Overflow");
2742 break;
2743 default:
2744 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2745 }
2746 }
2747
2748 static void dwc3_process_event_entry(struct dwc3 *dwc,
2749 const union dwc3_event *event)
2750 {
2751 trace_dwc3_event(event->raw);
2752
2753 /* Endpoint IRQ, handle it and return early */
2754 if (event->type.is_devspec == 0) {
2755 /* depevt */
2756 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2757 }
2758
2759 switch (event->type.type) {
2760 case DWC3_EVENT_TYPE_DEV:
2761 dwc3_gadget_interrupt(dwc, &event->devt);
2762 break;
2763 /* REVISIT what to do with Carkit and I2C events ? */
2764 default:
2765 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2766 }
2767 }
2768
2769 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
2770 {
2771 struct dwc3 *dwc = evt->dwc;
2772 irqreturn_t ret = IRQ_NONE;
2773 int left;
2774 u32 reg;
2775
2776 left = evt->count;
2777
2778 if (!(evt->flags & DWC3_EVENT_PENDING))
2779 return IRQ_NONE;
2780
2781 while (left > 0) {
2782 union dwc3_event event;
2783
2784 event.raw = *(u32 *) (evt->buf + evt->lpos);
2785
2786 dwc3_process_event_entry(dwc, &event);
2787
2788 /*
2789 * FIXME: we wrap around correctly to the next entry because
2790 * almost all entries are 4 bytes in size. There is one entry
2791 * type of 12 bytes: a regular entry followed by 8 bytes of
2792 * data. It is not yet clear how such an entry is laid out
2793 * when it lands near the end of the buffer, so that case
2794 * will need to be handled once we actually start processing
2795 * those entries.
2796 */
2797 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2798 left -= 4;
2799
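		/*
		 * Writing the number of bytes just handled to GEVNTCOUNT
		 * tells the controller that this part of the event buffer
		 * may be reused.
		 */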
2800 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
2801 }
2802
2803 evt->count = 0;
2804 evt->flags &= ~DWC3_EVENT_PENDING;
2805 ret = IRQ_HANDLED;
2806
2807 /* Unmask interrupt */
2808 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2809 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2810 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2811
2812 return ret;
2813 }
2814
2815 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
2816 {
2817 struct dwc3_event_buffer *evt = _evt;
2818 struct dwc3 *dwc = evt->dwc;
2819 unsigned long flags;
2820 irqreturn_t ret = IRQ_NONE;
2821
2822 spin_lock_irqsave(&dwc->lock, flags);
2823 ret = dwc3_process_event_buf(evt);
2824 spin_unlock_irqrestore(&dwc->lock, flags);
2825
2826 return ret;
2827 }
2828
2829 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
2830 {
2831 struct dwc3 *dwc = evt->dwc;
2832 u32 count;
2833 u32 reg;
2834
2835 if (pm_runtime_suspended(dwc->dev)) {
2836 pm_runtime_get(dwc->dev);
2837 disable_irq_nosync(dwc->irq_gadget);
2838 dwc->pending_events = true;
2839 return IRQ_HANDLED;
2840 }
2841
2842 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
2843 count &= DWC3_GEVNTCOUNT_MASK;
2844 if (!count)
2845 return IRQ_NONE;
2846
2847 evt->count = count;
2848 evt->flags |= DWC3_EVENT_PENDING;
2849
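	/*
	 * Defer the actual event processing to the threaded handler;
	 * dwc3_process_event_buf() will unmask this interrupt again once
	 * the whole buffer has been drained.
	 */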
2850 /* Mask interrupt */
2851 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2852 reg |= DWC3_GEVNTSIZ_INTMASK;
2853 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2854
2855 return IRQ_WAKE_THREAD;
2856 }
2857
2858 static irqreturn_t dwc3_interrupt(int irq, void *_evt)
2859 {
2860 struct dwc3_event_buffer *evt = _evt;
2861
2862 return dwc3_check_event_buf(evt);
2863 }
2864
2865 /**
2866 * dwc3_gadget_init - Initializes gadget related registers
2867 * @dwc: pointer to our controller context structure
2868 *
2869 * Returns 0 on success otherwise negative errno.
2870 */
2871 int dwc3_gadget_init(struct dwc3 *dwc)
2872 {
2873 int ret, irq;
2874 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
2875
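	/*
	 * Look up the peripheral interrupt by its current name first, then
	 * fall back to the legacy "dwc_usb3" name and finally to the first
	 * IRQ resource, so that older bindings keep working.
	 */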
2876 irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
2877 if (irq == -EPROBE_DEFER)
2878 return irq;
2879
2880 if (irq <= 0) {
2881 irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
2882 if (irq == -EPROBE_DEFER)
2883 return irq;
2884
2885 if (irq <= 0) {
2886 irq = platform_get_irq(dwc3_pdev, 0);
2887 if (irq <= 0) {
2888 if (irq != -EPROBE_DEFER) {
2889 dev_err(dwc->dev,
2890 "missing peripheral IRQ\n");
2891 }
2892 if (!irq)
2893 irq = -EINVAL;
2894 return irq;
2895 }
2896 }
2897 }
2898
2899 dwc->irq_gadget = irq;
2900
2901 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2902 &dwc->ctrl_req_addr, GFP_KERNEL);
2903 if (!dwc->ctrl_req) {
2904 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2905 ret = -ENOMEM;
2906 goto err0;
2907 }
2908
2909 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2910 &dwc->ep0_trb_addr, GFP_KERNEL);
2911 if (!dwc->ep0_trb) {
2912 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2913 ret = -ENOMEM;
2914 goto err1;
2915 }
2916
2917 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2918 if (!dwc->setup_buf) {
2919 ret = -ENOMEM;
2920 goto err2;
2921 }
2922
2923 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2924 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2925 GFP_KERNEL);
2926 if (!dwc->ep0_bounce) {
2927 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2928 ret = -ENOMEM;
2929 goto err3;
2930 }
2931
2932 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
2933 if (!dwc->zlp_buf) {
2934 ret = -ENOMEM;
2935 goto err4;
2936 }
2937
2938 dwc->gadget.ops = &dwc3_gadget_ops;
2939 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2940 dwc->gadget.sg_supported = true;
2941 dwc->gadget.name = "dwc3-gadget";
2942 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
2943
2944 /*
2945 * FIXME: we might be setting max_speed to a value below SUPER; however,
2946 * dwc3 versions <2.20a have an issue with metastability (documented
2947 * elsewhere in this driver) which means we cannot set the maximum speed
2948 * to anything lower than SUPER on those versions.
2949 *
2950 * Because gadget.max_speed is only used by composite.c and function
2951 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
2952 * to happen so we avoid sending the SuperSpeed Capability descriptor
2953 * together with our BOS descriptor, as that could confuse the host into
2954 * thinking we can handle super speed.
2955 *
2956 * Note that, in fact, we won't even support GetBOS requests when speed
2957 * is less than super speed because we don't have means, yet, to tell
2958 * composite.c that we are USB 2.0 + LPM ECN.
2959 */
2960 if (dwc->revision < DWC3_REVISION_220A)
2961 dwc3_trace(trace_dwc3_gadget,
2962 "Changing max_speed on rev %08x",
2963 dwc->revision);
2964
2965 dwc->gadget.max_speed = dwc->maximum_speed;
2966
2967 /*
2968 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2969 * on ep out.
2970 */
2971 dwc->gadget.quirk_ep_out_aligned_size = true;
2972
2973 /*
2974 * REVISIT: Here we should clear all pending IRQs to be
2975 * sure we're starting from a well known location.
2976 */
2977
2978 ret = dwc3_gadget_init_endpoints(dwc);
2979 if (ret)
2980 goto err5;
2981
2982 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2983 if (ret) {
2984 dev_err(dwc->dev, "failed to register udc\n");
2985 goto err5;
2986 }
2987
2988 return 0;
2989
2990 err5:
2991 kfree(dwc->zlp_buf);
2992
2993 err4:
2994 dwc3_gadget_free_endpoints(dwc);
2995 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2996 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2997
2998 err3:
2999 kfree(dwc->setup_buf);
3000
3001 err2:
3002 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3003 dwc->ep0_trb, dwc->ep0_trb_addr);
3004
3005 err1:
3006 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3007 dwc->ctrl_req, dwc->ctrl_req_addr);
3008
3009 err0:
3010 return ret;
3011 }
3012
3013 /* -------------------------------------------------------------------------- */
3014
3015 void dwc3_gadget_exit(struct dwc3 *dwc)
3016 {
3017 usb_del_gadget_udc(&dwc->gadget);
3018
3019 dwc3_gadget_free_endpoints(dwc);
3020
3021 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3022 dwc->ep0_bounce, dwc->ep0_bounce_addr);
3023
3024 kfree(dwc->setup_buf);
3025 kfree(dwc->zlp_buf);
3026
3027 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3028 dwc->ep0_trb, dwc->ep0_trb_addr);
3029
3030 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3031 dwc->ctrl_req, dwc->ctrl_req_addr);
3032 }
3033
3034 int dwc3_gadget_suspend(struct dwc3 *dwc)
3035 {
3036 int ret;
3037
3038 if (!dwc->gadget_driver)
3039 return 0;
3040
3041 ret = dwc3_gadget_run_stop(dwc, false, false);
3042 if (ret < 0)
3043 return ret;
3044
3045 dwc3_disconnect_gadget(dwc);
3046 __dwc3_gadget_stop(dwc);
3047
3048 return 0;
3049 }
3050
3051 int dwc3_gadget_resume(struct dwc3 *dwc)
3052 {
3053 int ret;
3054
3055 if (!dwc->gadget_driver)
3056 return 0;
3057
3058 ret = __dwc3_gadget_start(dwc);
3059 if (ret < 0)
3060 goto err0;
3061
3062 ret = dwc3_gadget_run_stop(dwc, true, false);
3063 if (ret < 0)
3064 goto err1;
3065
3066 return 0;
3067
3068 err1:
3069 __dwc3_gadget_stop(dwc);
3070
3071 err0:
3072 return ret;
3073 }
3074
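/*
 * Replay the interrupt that dwc3_check_event_buf() deferred while the
 * device was runtime suspended, then re-enable the gadget IRQ.
 */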
3075 void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
3076 {
3077 if (dwc->pending_events) {
3078 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
3079 dwc->pending_events = false;
3080 enable_irq(dwc->irq_gadget);
3081 }
3082 }