]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/usb/dwc3/gadget.c
usb: dwc3: gadget: dynamically re-size TxFifos
[mirror_ubuntu-bionic-kernel.git] / drivers / usb / dwc3 / gadget.c
CommitLineData
72246da4
FB
1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
72246da4
FB
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "core.h"
#include "gadget.h"
#include "io.h"
56
57#define DMA_ADDR_INVALID (~(dma_addr_t)0)
58
04a9bfcd
FB
59/**
60 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
61 * @dwc: pointer to our context structure
62 * @mode: the mode to set (J, K SE0 NAK, Force Enable)
63 *
64 * Caller should take care of locking. This function will
65 * return 0 on success or -EINVAL if wrong Test Selector
66 * is passed
67 */
68int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
69{
70 u32 reg;
71
72 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
73 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
74
75 switch (mode) {
76 case TEST_J:
77 case TEST_K:
78 case TEST_SE0_NAK:
79 case TEST_PACKET:
80 case TEST_FORCE_EN:
81 reg |= mode << 1;
82 break;
83 default:
84 return -EINVAL;
85 }
86
87 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
88
89 return 0;
90}
91
8598bde7
FB
/**
 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT if the link did not
 * reach the requested state within the polling budget.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 100;
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* wait for a change in DSTS; poll up to 100 * 500us = 50ms */
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(500);
	}

	dev_vdbg(dwc->dev, "link state change request timed out\n");

	return -ETIMEDOUT;
}
127
457e84b6
FB
/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will do a best effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the amount of endpoints enabled on coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In the ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int last_fifo_depth = 0;
	int ram1_depth;
	int fifo_size;
	int mdwidth;
	int num;

	if (!dwc->needs_fifo_resize)
		return 0;

	/*
	 * NOTE(review): ram1_depth is read but not yet used to bound the
	 * allocation below -- TODO confirm whether a cap against it is
	 * intended in a follow-up.
	 */
	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
		struct dwc3_ep *dep = dwc->eps[num];
		int fifo_number = dep->number >> 1;
		int tmp;

		/* even physical endpoint numbers are OUT: no TxFIFO needed */
		if (!(dep->number & 1))
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		/* one max packet plus two MDWIDTH words of headroom */
		tmp = dep->endpoint.maxpacket;
		tmp += mdwidth;
		tmp += mdwidth;

		/* GTXFIFOSIZ layout: depth in [15:0], start address in [31:16] */
		fifo_size = DIV_ROUND_UP(tmp, mdwidth);
		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
				fifo_size);

		/* next FIFO starts right after this one */
		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}
201
72246da4
FB
202void dwc3_map_buffer_to_dma(struct dwc3_request *req)
203{
204 struct dwc3 *dwc = req->dep->dwc;
205
78c58a53
SAS
206 if (req->request.length == 0) {
207 /* req->request.dma = dwc->setup_buf_addr; */
208 return;
209 }
210
eeb720fb
FB
211 if (req->request.num_sgs) {
212 int mapped;
213
214 mapped = dma_map_sg(dwc->dev, req->request.sg,
215 req->request.num_sgs,
216 req->direction ? DMA_TO_DEVICE
217 : DMA_FROM_DEVICE);
218 if (mapped < 0) {
219 dev_err(dwc->dev, "failed to map SGs\n");
220 return;
221 }
222
223 req->request.num_mapped_sgs = mapped;
224 return;
225 }
226
72246da4
FB
227 if (req->request.dma == DMA_ADDR_INVALID) {
228 req->request.dma = dma_map_single(dwc->dev, req->request.buf,
229 req->request.length, req->direction
230 ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
231 req->mapped = true;
72246da4
FB
232 }
233}
234
/**
 * dwc3_unmap_buffer_from_dma - undo dwc3_map_buffer_to_dma()
 * @req: request whose DMA mapping should be released
 *
 * Mirrors the three cases of the mapping path: zero-length (never
 * mapped), scatterlist, and linear buffer mapped by us.
 */
void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
{
	struct dwc3 *dwc = req->dep->dwc;

	/* zero-length requests were never mapped; just reset the handle */
	if (req->request.length == 0) {
		req->request.dma = DMA_ADDR_INVALID;
		return;
	}

	if (req->request.num_mapped_sgs) {
		req->request.dma = DMA_ADDR_INVALID;
		dma_unmap_sg(dwc->dev, req->request.sg,
				req->request.num_mapped_sgs,
				req->direction ? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);

		req->request.num_mapped_sgs = 0;
		return;
	}

	/* only unmap what we mapped ourselves in dwc3_map_buffer_to_dma() */
	if (req->mapped) {
		dma_unmap_single(dwc->dev, req->request.dma,
				req->request.length, req->direction
				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 0;
		req->request.dma = DMA_ADDR_INVALID;
	}
}
263
/**
 * dwc3_gadget_giveback - hand a finished request back to the gadget driver
 * @dep: endpoint the request belonged to
 * @req: request being completed
 * @status: completion status to report (0, -ESHUTDOWN, -ECONNRESET, ...)
 *
 * Advances the endpoint's busy_slot accounting for requests that made it
 * to the hardware, unmaps the buffer, and invokes the gadget driver's
 * completion callback. Caller must hold dwc->lock; it is dropped around
 * the callback because ->complete() may re-enter and queue new requests.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	if (req->queued) {
		/* consume one busy slot per TRB that was used */
		if (req->request.num_mapped_sgs)
			dep->busy_slot += req->request.num_mapped_sgs;
		else
			dep->busy_slot++;

		/*
		 * Skip LINK TRB. We can't use req->trb and check for
		 * DWC3_TRBCTL_LINK_TRB because it points the TRB we just
		 * completed (not the LINK TRB).
		 */
		if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->desc))
			dep->busy_slot++;
	}
	list_del(&req->list);
	req->trb = NULL;

	/* don't overwrite a status already set by an earlier path */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	dwc3_unmap_buffer_from_dma(req);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	/* drop the lock: the completion callback may queue new requests */
	spin_unlock(&dwc->lock);
	req->request.complete(&req->dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}
300
301static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
302{
303 switch (cmd) {
304 case DWC3_DEPCMD_DEPSTARTCFG:
305 return "Start New Configuration";
306 case DWC3_DEPCMD_ENDTRANSFER:
307 return "End Transfer";
308 case DWC3_DEPCMD_UPDATETRANSFER:
309 return "Update Transfer";
310 case DWC3_DEPCMD_STARTTRANSFER:
311 return "Start Transfer";
312 case DWC3_DEPCMD_CLEARSTALL:
313 return "Clear Stall";
314 case DWC3_DEPCMD_SETSTALL:
315 return "Set Stall";
316 case DWC3_DEPCMD_GETSEQNUMBER:
317 return "Get Data Sequence Number";
318 case DWC3_DEPCMD_SETTRANSFRESOURCE:
319 return "Set Endpoint Transfer Resource";
320 case DWC3_DEPCMD_SETEPCONFIG:
321 return "Set Endpoint Configuration";
322 default:
323 return "UNKNOWN command";
324 }
325}
326
/**
 * dwc3_send_gadget_ep_cmd - issue an endpoint command and wait for completion
 * @dwc: pointer to our context structure
 * @ep: physical endpoint number the command is addressed to
 * @cmd: command to send (DWC3_DEPCMD_*)
 * @params: parameters written to DEPCMDPAR0..2 before the command
 *
 * Busy-waits for the CMDACT bit to clear (up to ~500us in 1us steps).
 * May be called from interrupt context, hence udelay() and no sleeping.
 * Returns 0 when the command completes, -ETIMEDOUT otherwise.
 */
int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep *dep = dwc->eps[ep];
	u32 timeout = 500;
	u32 reg;

	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
			dep->name,
			dwc3_gadget_ep_cmd_string(cmd), params->param0,
			params->param1, params->param2);

	/* parameters must be in place before the command is triggered */
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	/* CMDACT tells the core to start executing the command */
	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}
363
364static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
365 struct dwc3_trb_hw *trb)
366{
c439ef87 367 u32 offset = (char *) trb - (char *) dep->trb_pool;
72246da4
FB
368
369 return dep->trb_pool_dma + offset;
370}
371
372static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
373{
374 struct dwc3 *dwc = dep->dwc;
375
376 if (dep->trb_pool)
377 return 0;
378
379 if (dep->number == 0 || dep->number == 1)
380 return 0;
381
382 dep->trb_pool = dma_alloc_coherent(dwc->dev,
383 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
384 &dep->trb_pool_dma, GFP_KERNEL);
385 if (!dep->trb_pool) {
386 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
387 dep->name);
388 return -ENOMEM;
389 }
390
391 return 0;
392}
393
394static void dwc3_free_trb_pool(struct dwc3_ep *dep)
395{
396 struct dwc3 *dwc = dep->dwc;
397
398 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
399 dep->trb_pool, dep->trb_pool_dma);
400
401 dep->trb_pool = NULL;
402 dep->trb_pool_dma = 0;
403}
404
405static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
406{
407 struct dwc3_gadget_ep_cmd_params params;
408 u32 cmd;
409
410 memset(&params, 0x00, sizeof(params));
411
412 if (dep->number != 1) {
413 cmd = DWC3_DEPCMD_DEPSTARTCFG;
414 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
b23c8439
PZ
415 if (dep->number > 1) {
416 if (dwc->start_config_issued)
417 return 0;
418 dwc->start_config_issued = true;
72246da4 419 cmd |= DWC3_DEPCMD_PARAM(2);
b23c8439 420 }
72246da4
FB
421
422 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
423 }
424
425 return 0;
426}
427
/*
 * dwc3_gadget_set_ep_config - program endpoint configuration into the core
 * @dwc: our controller context
 * @dep: endpoint being configured
 * @desc: USB endpoint descriptor for the current altsetting
 * @comp_desc: SuperSpeed companion descriptor (may be NULL)
 *
 * Builds the DEPCFG parameter words (type, maxpacket, burst, interrupt
 * enables, FIFO number, bInterval) and issues DWC3_DEPCMD_SETEPCONFIG.
 */
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
		| DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	/* bulk endpoints with a stream count advertise stream support */
	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoints 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		/* hardware wants bInterval - 1; cache interval in frames */
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}
475
476static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
477{
478 struct dwc3_gadget_ep_cmd_params params;
479
480 memset(&params, 0x00, sizeof(params));
481
dc1c70a7 482 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
72246da4
FB
483
484 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
485 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
486}
487
/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 * @comp_desc: SuperSpeed companion descriptor (may be NULL)
 *
 * Caller should take care of locking. Issues the Start Config and Set
 * Endpoint Config commands, assigns a transfer resource, enables the
 * endpoint in DALEPENA and, for isochronous endpoints, installs a Link
 * TRB that turns the TRB pool into a ring.
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;
	int ret = -ENOMEM;

	/* first enable since disable: issue Start Config */
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb_hw *trb_st_hw;
		struct dwc3_trb_hw *trb_link_hw;
		struct dwc3_trb trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		/* set this endpoint's bit in the 'active endpoints' mask */
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		memset(&trb_link, 0, sizeof(trb_link));

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		/* last pool entry points back to the first: a ring buffer */
		trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw);
		trb_link.trbctl = DWC3_TRBCTL_LINK_TRB;
		trb_link.hwo = true;

		trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1];
		dwc3_trb_to_hw(&trb_link, trb_link_hw);
	}

	return 0;
}
549
624407f9
SAS
static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);

/*
 * Fail all requests pending on @dep. Requests already started on the
 * hardware trigger an End Transfer first; requests still waiting on
 * request_list are given back with -ESHUTDOWN.
 *
 * NOTE(review): entries on req_queued appear to be completed via the
 * End Transfer / stop-active-transfer path rather than here -- confirm
 * against dwc3_stop_active_transfer()'s implementation.
 */
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request *req;

	if (!list_empty(&dep->req_queued))
		dwc3_stop_active_transfer(dwc, dep->number);

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}
564
/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_remove_requests(dwc, dep);

	/* clear this endpoint's bit in the 'active endpoints' mask */
	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	/* reset software state; flags = 0 also clears DWC3_EP_ENABLED */
	dep->stream_capable = false;
	dep->desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}
592
593/* -------------------------------------------------------------------------- */
594
/* ep0 is managed by the core; gadget drivers must never enable it. */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}
600
/* ep0 is managed by the core; gadget drivers must never disable it. */
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}
605
606/* -------------------------------------------------------------------------- */
607
608static int dwc3_gadget_ep_enable(struct usb_ep *ep,
609 const struct usb_endpoint_descriptor *desc)
610{
611 struct dwc3_ep *dep;
612 struct dwc3 *dwc;
613 unsigned long flags;
614 int ret;
615
616 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
617 pr_debug("dwc3: invalid parameters\n");
618 return -EINVAL;
619 }
620
621 if (!desc->wMaxPacketSize) {
622 pr_debug("dwc3: missing wMaxPacketSize\n");
623 return -EINVAL;
624 }
625
626 dep = to_dwc3_ep(ep);
627 dwc = dep->dwc;
628
629 switch (usb_endpoint_type(desc)) {
630 case USB_ENDPOINT_XFER_CONTROL:
631 strncat(dep->name, "-control", sizeof(dep->name));
632 break;
633 case USB_ENDPOINT_XFER_ISOC:
634 strncat(dep->name, "-isoc", sizeof(dep->name));
635 break;
636 case USB_ENDPOINT_XFER_BULK:
637 strncat(dep->name, "-bulk", sizeof(dep->name));
638 break;
639 case USB_ENDPOINT_XFER_INT:
640 strncat(dep->name, "-int", sizeof(dep->name));
641 break;
642 default:
643 dev_err(dwc->dev, "invalid endpoint transfer type\n");
644 }
645
646 if (dep->flags & DWC3_EP_ENABLED) {
647 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
648 dep->name);
649 return 0;
650 }
651
652 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
653
654 spin_lock_irqsave(&dwc->lock, flags);
c90bfaec 655 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
72246da4
FB
656 spin_unlock_irqrestore(&dwc->lock, flags);
657
658 return ret;
659}
660
/*
 * dwc3_gadget_ep_disable - usb_ep_ops.disable callback
 * @ep: the endpoint to disable
 *
 * Restores the endpoint's base name (removing the "-bulk"/"-isoc"/...
 * suffix added at enable time) and disables the hardware endpoint
 * under the controller lock.
 */
static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	/* rebuild the plain "ep<num><dir>" name, dropping the type suffix */
	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
692
693static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
694 gfp_t gfp_flags)
695{
696 struct dwc3_request *req;
697 struct dwc3_ep *dep = to_dwc3_ep(ep);
698 struct dwc3 *dwc = dep->dwc;
699
700 req = kzalloc(sizeof(*req), gfp_flags);
701 if (!req) {
702 dev_err(dwc->dev, "not enough memory\n");
703 return NULL;
704 }
705
706 req->epnum = dep->number;
707 req->dep = dep;
708 req->request.dma = DMA_ADDR_INVALID;
709
710 return &req->request;
711}
712
/* Free a request allocated by dwc3_gadget_ep_alloc_request(). */
static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	kfree(to_dwc3_request(request));
}
720
c71fc37c
FB
/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @dma: DMA address of the buffer (or scatterlist fragment)
 * @length: number of bytes this TRB transfers
 * @last: nonzero if this is the transfer's final TRB (sets LST)
 * @chain: nonzero if more TRBs of the same request follow (sets CHN)
 *
 * Claims the next free slot in the endpoint's TRB pool, fills in a TRB
 * for the given buffer fragment and hands it to the hardware (HWO=1).
 * The first TRB of a request also moves the request to the queued list.
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_trb_hw *trb_hw;
	struct dwc3_trb trb;

	unsigned int cur_slot;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	/* claim the next ring slot; mask keeps the index inside the pool */
	trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
	cur_slot = dep->free_slot;
	dep->free_slot++;

	/* Skip the LINK-TRB on ISOC */
	if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->desc))
		return;

	memset(&trb, 0, sizeof(trb));
	if (!req->trb) {
		/* first TRB of this request: record it and mark req queued */
		dwc3_gadget_move_request_queued(req);
		req->trb = trb_hw;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
	}

	if (usb_endpoint_xfer_isoc(dep->desc)) {
		/* ISOC uses the ring buffer: no LST/CHN, but ISP/IMI + CSP */
		trb.isp_imi = true;
		trb.csp = true;
	} else {
		trb.chn = chain;
		trb.lst = last;
	}

	if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
		trb.sid_sofn = req->request.stream_id;

	switch (usb_endpoint_type(dep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

		/* IOC every DWC3_TRB_NUM / 4 so we can refill */
		if (!(cur_slot % (DWC3_TRB_NUM / 4)))
			trb.ioc = last;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb.trbctl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	trb.length = length;
	trb.bplh = dma;
	/* HWO last: ownership passes to the hardware */
	trb.hwo = true;

	dwc3_trb_to_hw(&trb, trb_hw);
}
799
72246da4
FB
/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. It returns once there are no more TRBs available or it
 * runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request *req, *n;
	u32 trbs_left;
	unsigned int last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/*
	 * If busy and free slot are equal then the ring is either full or
	 * empty. If we are starting to process requests then we are empty.
	 * Otherwise we are full and don't do anything.
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an
		 * interrupt after the first request so we start at slot 1
		 * and have 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
		return;

	/*
	 * NOTE(review): once last_one becomes true inside the sg loop it is
	 * never reset for subsequent requests of the outer loop -- confirm
	 * whether that is intended for multi-request scatterlist queues.
	 */
	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned length;
		dma_addr_t dma;

		if (req->request.num_mapped_sgs > 0) {
			/* scatter/gather: one TRB per mapped sg entry */
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				/* final sg entry ends the chain */
				if (i == (request->num_mapped_sgs - 1)
						|| sg_is_last(s)) {
					last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain);

				if (last_one)
					break;
			}
		} else {
			/* linear buffer: a single TRB for the whole request */
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false);

			if (last_one)
				break;
		}
	}
}
908
/*
 * __dwc3_gadget_kick_transfer - start or update a transfer on @dep
 * @dep: endpoint to kick
 * @cmd_param: command parameter (transfer resource index for updates)
 * @start_new: nonzero to issue Start Transfer, zero for Update Transfer
 *
 * Prepares TRBs from the pending request list and issues the matching
 * DEPCMD. Returns 0 on success, -EBUSY if a new transfer was requested
 * while the endpoint is busy, or the command's error code. Caller must
 * hold dwc->lock.
 */
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;
	int ret;
	u32 cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed
		 * from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		/* nothing queued yet: remember to kick on the next queue() */
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(req->trb_dma);
	params.param1 = lower_32_bits(req->trb_dma);

	if (start_new)
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	else
		cmd = DWC3_DEPCMD_UPDATETRANSFER;

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of we do now.
		 */
		dwc3_unmap_buffer_from_dma(req);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	/* remember the transfer resource index for later End/Update */
	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);

	WARN_ON_ONCE(!dep->res_trans_idx);

	return 0;
}
980
981static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
982{
983 req->request.actual = 0;
984 req->request.status = -EINPROGRESS;
985 req->direction = dep->direction;
986 req->epnum = dep->number;
987
988 /*
989 * We only add to our list of requests now and
990 * start consuming the list once we get XferNotReady
991 * IRQ.
992 *
993 * That way, we avoid doing anything that we don't need
994 * to do now and defer it until the point we receive a
995 * particular token from the Host side.
996 *
997 * This will also avoid Host cancelling URBs due to too
998 * many NACKs.
999 */
1000 dwc3_map_buffer_to_dma(req);
1001 list_add_tail(&req->list, &dep->request_list);
1002
1003 /*
1004 * There is one special case: XferNotReady with
1005 * empty list of requests. We need to kick the
1006 * transfer here in that situation, otherwise
1007 * we will be NAKing forever.
1008 *
1009 * If we get XferNotReady before gadget driver
1010 * has a chance to queue a request, we will ACK
1011 * the IRQ but won't be able to receive the data
1012 * until the next request is queued. The following
1013 * code is handling exactly that.
1014 */
1015 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1016 int ret;
1017 int start_trans;
1018
1019 start_trans = 1;
7b7dd025 1020 if (usb_endpoint_xfer_isoc(dep->desc) &&
72246da4
FB
1021 dep->flags & DWC3_EP_BUSY)
1022 start_trans = 0;
1023
1024 ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
1025 if (ret && ret != -EBUSY) {
1026 struct dwc3 *dwc = dep->dwc;
1027
1028 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1029 dep->name);
1030 }
1031 };
1032
1033 return 0;
1034}
1035
1036static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1037 gfp_t gfp_flags)
1038{
1039 struct dwc3_request *req = to_dwc3_request(request);
1040 struct dwc3_ep *dep = to_dwc3_ep(ep);
1041 struct dwc3 *dwc = dep->dwc;
1042
1043 unsigned long flags;
1044
1045 int ret;
1046
1047 if (!dep->desc) {
1048 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1049 request, ep->name);
1050 return -ESHUTDOWN;
1051 }
1052
1053 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
1054 request, ep->name, request->length);
1055
1056 spin_lock_irqsave(&dwc->lock, flags);
1057 ret = __dwc3_gadget_ep_queue(dep, req);
1058 spin_unlock_irqrestore(&dwc->lock, flags);
1059
1060 return ret;
1061}
1062
/*
 * dwc3_gadget_ep_dequeue - usb_ep_ops.dequeue callback
 * @ep: endpoint the request was queued to
 * @request: the request to cancel
 *
 * Pending requests are given back immediately with -ECONNRESET.
 * Requests already handed to the hardware trigger an End Transfer and
 * complete through that path. Returns -EINVAL if the request was never
 * queued on @ep.
 */
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	/* first look among requests not yet started on the hardware */
	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		/* not pending: check the requests already on the hardware */
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out0;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
1106
1107int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1108{
1109 struct dwc3_gadget_ep_cmd_params params;
1110 struct dwc3 *dwc = dep->dwc;
1111 int ret;
1112
1113 memset(&params, 0x00, sizeof(params));
1114
1115 if (value) {
0b7836a9
FB
1116 if (dep->number == 0 || dep->number == 1) {
1117 /*
1118 * Whenever EP0 is stalled, we will restart
1119 * the state machine, thus moving back to
1120 * Setup Phase
1121 */
1122 dwc->ep0state = EP0_SETUP_PHASE;
1123 }
72246da4
FB
1124
1125 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1126 DWC3_DEPCMD_SETSTALL, &params);
1127 if (ret)
1128 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1129 value ? "set" : "clear",
1130 dep->name);
1131 else
1132 dep->flags |= DWC3_EP_STALL;
1133 } else {
5275455a
PZ
1134 if (dep->flags & DWC3_EP_WEDGE)
1135 return 0;
1136
72246da4
FB
1137 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1138 DWC3_DEPCMD_CLEARSTALL, &params);
1139 if (ret)
1140 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1141 value ? "set" : "clear",
1142 dep->name);
1143 else
1144 dep->flags &= ~DWC3_EP_STALL;
1145 }
5275455a 1146
72246da4
FB
1147 return ret;
1148}
1149
1150static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1151{
1152 struct dwc3_ep *dep = to_dwc3_ep(ep);
1153 struct dwc3 *dwc = dep->dwc;
1154
1155 unsigned long flags;
1156
1157 int ret;
1158
1159 spin_lock_irqsave(&dwc->lock, flags);
1160
1161 if (usb_endpoint_xfer_isoc(dep->desc)) {
1162 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1163 ret = -EINVAL;
1164 goto out;
1165 }
1166
1167 ret = __dwc3_gadget_ep_set_halt(dep, value);
1168out:
1169 spin_unlock_irqrestore(&dwc->lock, flags);
1170
1171 return ret;
1172}
1173
1174static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1175{
1176 struct dwc3_ep *dep = to_dwc3_ep(ep);
1177
1178 dep->flags |= DWC3_EP_WEDGE;
1179
5275455a 1180 return dwc3_gadget_ep_set_halt(ep, 1);
72246da4
FB
1181}
1182
1183/* -------------------------------------------------------------------------- */
1184
1185static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1186 .bLength = USB_DT_ENDPOINT_SIZE,
1187 .bDescriptorType = USB_DT_ENDPOINT,
1188 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1189};
1190
1191static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1192 .enable = dwc3_gadget_ep0_enable,
1193 .disable = dwc3_gadget_ep0_disable,
1194 .alloc_request = dwc3_gadget_ep_alloc_request,
1195 .free_request = dwc3_gadget_ep_free_request,
1196 .queue = dwc3_gadget_ep0_queue,
1197 .dequeue = dwc3_gadget_ep_dequeue,
1198 .set_halt = dwc3_gadget_ep_set_halt,
1199 .set_wedge = dwc3_gadget_ep_set_wedge,
1200};
1201
1202static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1203 .enable = dwc3_gadget_ep_enable,
1204 .disable = dwc3_gadget_ep_disable,
1205 .alloc_request = dwc3_gadget_ep_alloc_request,
1206 .free_request = dwc3_gadget_ep_free_request,
1207 .queue = dwc3_gadget_ep_queue,
1208 .dequeue = dwc3_gadget_ep_dequeue,
1209 .set_halt = dwc3_gadget_ep_set_halt,
1210 .set_wedge = dwc3_gadget_ep_set_wedge,
1211};
1212
1213/* -------------------------------------------------------------------------- */
1214
1215static int dwc3_gadget_get_frame(struct usb_gadget *g)
1216{
1217 struct dwc3 *dwc = gadget_to_dwc(g);
1218 u32 reg;
1219
1220 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1221 return DWC3_DSTS_SOFFN(reg);
1222}
1223
1224static int dwc3_gadget_wakeup(struct usb_gadget *g)
1225{
1226 struct dwc3 *dwc = gadget_to_dwc(g);
1227
1228 unsigned long timeout;
1229 unsigned long flags;
1230
1231 u32 reg;
1232
1233 int ret = 0;
1234
1235 u8 link_state;
1236 u8 speed;
1237
1238 spin_lock_irqsave(&dwc->lock, flags);
1239
1240 /*
1241 * According to the Databook Remote wakeup request should
1242 * be issued only when the device is in early suspend state.
1243 *
1244 * We can check that via USB Link State bits in DSTS register.
1245 */
1246 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1247
1248 speed = reg & DWC3_DSTS_CONNECTSPD;
1249 if (speed == DWC3_DSTS_SUPERSPEED) {
1250 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1251 ret = -EINVAL;
1252 goto out;
1253 }
1254
1255 link_state = DWC3_DSTS_USBLNKST(reg);
1256
1257 switch (link_state) {
1258 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1259 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1260 break;
1261 default:
1262 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1263 link_state);
1264 ret = -EINVAL;
1265 goto out;
1266 }
1267
8598bde7
FB
1268 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1269 if (ret < 0) {
1270 dev_err(dwc->dev, "failed to put link in Recovery\n");
1271 goto out;
1272 }
72246da4
FB
1273
1274 /* write zeroes to Link Change Request */
1275 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1276 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1277
1278 /* pool until Link State change to ON */
1279 timeout = jiffies + msecs_to_jiffies(100);
1280
1281 while (!(time_after(jiffies, timeout))) {
1282 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1283
1284 /* in HS, means ON */
1285 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1286 break;
1287 }
1288
1289 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1290 dev_err(dwc->dev, "failed to send remote wakeup\n");
1291 ret = -EINVAL;
1292 }
1293
1294out:
1295 spin_unlock_irqrestore(&dwc->lock, flags);
1296
1297 return ret;
1298}
1299
1300static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1301 int is_selfpowered)
1302{
1303 struct dwc3 *dwc = gadget_to_dwc(g);
1304
1305 dwc->is_selfpowered = !!is_selfpowered;
1306
1307 return 0;
1308}
1309
1310static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1311{
1312 u32 reg;
61d58242 1313 u32 timeout = 500;
72246da4
FB
1314
1315 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1316 if (is_on)
1317 reg |= DWC3_DCTL_RUN_STOP;
1318 else
1319 reg &= ~DWC3_DCTL_RUN_STOP;
1320
1321 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1322
1323 do {
1324 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1325 if (is_on) {
1326 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1327 break;
1328 } else {
1329 if (reg & DWC3_DSTS_DEVCTRLHLT)
1330 break;
1331 }
72246da4
FB
1332 timeout--;
1333 if (!timeout)
1334 break;
61d58242 1335 udelay(1);
72246da4
FB
1336 } while (1);
1337
1338 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1339 dwc->gadget_driver
1340 ? dwc->gadget_driver->function : "no-function",
1341 is_on ? "connect" : "disconnect");
1342}
1343
1344static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1345{
1346 struct dwc3 *dwc = gadget_to_dwc(g);
1347 unsigned long flags;
1348
1349 is_on = !!is_on;
1350
1351 spin_lock_irqsave(&dwc->lock, flags);
1352 dwc3_gadget_run_stop(dwc, is_on);
1353 spin_unlock_irqrestore(&dwc->lock, flags);
1354
1355 return 0;
1356}
1357
1358static int dwc3_gadget_start(struct usb_gadget *g,
1359 struct usb_gadget_driver *driver)
1360{
1361 struct dwc3 *dwc = gadget_to_dwc(g);
1362 struct dwc3_ep *dep;
1363 unsigned long flags;
1364 int ret = 0;
1365 u32 reg;
1366
1367 spin_lock_irqsave(&dwc->lock, flags);
1368
1369 if (dwc->gadget_driver) {
1370 dev_err(dwc->dev, "%s is already bound to %s\n",
1371 dwc->gadget.name,
1372 dwc->gadget_driver->driver.name);
1373 ret = -EBUSY;
1374 goto err0;
1375 }
1376
1377 dwc->gadget_driver = driver;
1378 dwc->gadget.dev.driver = &driver->driver;
1379
72246da4
FB
1380 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1381 reg &= ~(DWC3_DCFG_SPEED_MASK);
6c167fc9 1382 reg |= dwc->maximum_speed;
72246da4
FB
1383 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1384
b23c8439
PZ
1385 dwc->start_config_issued = false;
1386
72246da4
FB
1387 /* Start with SuperSpeed Default */
1388 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1389
1390 dep = dwc->eps[0];
c90bfaec 1391 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
72246da4
FB
1392 if (ret) {
1393 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1394 goto err0;
1395 }
1396
1397 dep = dwc->eps[1];
c90bfaec 1398 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
72246da4
FB
1399 if (ret) {
1400 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1401 goto err1;
1402 }
1403
1404 /* begin to receive SETUP packets */
c7fcdeb2 1405 dwc->ep0state = EP0_SETUP_PHASE;
72246da4
FB
1406 dwc3_ep0_out_start(dwc);
1407
1408 spin_unlock_irqrestore(&dwc->lock, flags);
1409
1410 return 0;
1411
1412err1:
1413 __dwc3_gadget_ep_disable(dwc->eps[0]);
1414
1415err0:
1416 spin_unlock_irqrestore(&dwc->lock, flags);
1417
1418 return ret;
1419}
1420
1421static int dwc3_gadget_stop(struct usb_gadget *g,
1422 struct usb_gadget_driver *driver)
1423{
1424 struct dwc3 *dwc = gadget_to_dwc(g);
1425 unsigned long flags;
1426
1427 spin_lock_irqsave(&dwc->lock, flags);
1428
1429 __dwc3_gadget_ep_disable(dwc->eps[0]);
1430 __dwc3_gadget_ep_disable(dwc->eps[1]);
1431
1432 dwc->gadget_driver = NULL;
1433 dwc->gadget.dev.driver = NULL;
1434
1435 spin_unlock_irqrestore(&dwc->lock, flags);
1436
1437 return 0;
1438}
1439static const struct usb_gadget_ops dwc3_gadget_ops = {
1440 .get_frame = dwc3_gadget_get_frame,
1441 .wakeup = dwc3_gadget_wakeup,
1442 .set_selfpowered = dwc3_gadget_set_selfpowered,
1443 .pullup = dwc3_gadget_pullup,
1444 .udc_start = dwc3_gadget_start,
1445 .udc_stop = dwc3_gadget_stop,
1446};
1447
1448/* -------------------------------------------------------------------------- */
1449
1450static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1451{
1452 struct dwc3_ep *dep;
1453 u8 epnum;
1454
1455 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1456
1457 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1458 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1459 if (!dep) {
1460 dev_err(dwc->dev, "can't allocate endpoint %d\n",
1461 epnum);
1462 return -ENOMEM;
1463 }
1464
1465 dep->dwc = dwc;
1466 dep->number = epnum;
1467 dwc->eps[epnum] = dep;
1468
1469 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1470 (epnum & 1) ? "in" : "out");
1471 dep->endpoint.name = dep->name;
1472 dep->direction = (epnum & 1);
1473
1474 if (epnum == 0 || epnum == 1) {
1475 dep->endpoint.maxpacket = 512;
1476 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1477 if (!epnum)
1478 dwc->gadget.ep0 = &dep->endpoint;
1479 } else {
1480 int ret;
1481
1482 dep->endpoint.maxpacket = 1024;
12d36c16 1483 dep->endpoint.max_streams = 15;
72246da4
FB
1484 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1485 list_add_tail(&dep->endpoint.ep_list,
1486 &dwc->gadget.ep_list);
1487
1488 ret = dwc3_alloc_trb_pool(dep);
25b8ff68 1489 if (ret)
72246da4 1490 return ret;
72246da4 1491 }
25b8ff68 1492
72246da4
FB
1493 INIT_LIST_HEAD(&dep->request_list);
1494 INIT_LIST_HEAD(&dep->req_queued);
1495 }
1496
1497 return 0;
1498}
1499
1500static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1501{
1502 struct dwc3_ep *dep;
1503 u8 epnum;
1504
1505 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1506 dep = dwc->eps[epnum];
1507 dwc3_free_trb_pool(dep);
1508
1509 if (epnum != 0 && epnum != 1)
1510 list_del(&dep->endpoint.ep_list);
1511
1512 kfree(dep);
1513 }
1514}
1515
/* device ->release callback for the gadget device; nothing to free */
static void dwc3_gadget_release(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
}
1520
1521/* -------------------------------------------------------------------------- */
1522static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1523 const struct dwc3_event_depevt *event, int status)
1524{
1525 struct dwc3_request *req;
1526 struct dwc3_trb trb;
1527 unsigned int count;
1528 unsigned int s_pkt = 0;
1529
1530 do {
1531 req = next_request(&dep->req_queued);
d39ee7be
SAS
1532 if (!req) {
1533 WARN_ON_ONCE(1);
1534 return 1;
1535 }
72246da4
FB
1536
1537 dwc3_trb_to_nat(req->trb, &trb);
1538
0d2f4758
SAS
1539 if (trb.hwo && status != -ESHUTDOWN)
1540 /*
1541 * We continue despite the error. There is not much we
1542 * can do. If we don't clean in up we loop for ever. If
1543 * we skip the TRB than it gets overwritten reused after
1544 * a while since we use them in a ring buffer. a BUG()
1545 * would help. Lets hope that if this occures, someone
1546 * fixes the root cause instead of looking away :)
1547 */
72246da4
FB
1548 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1549 dep->name, req->trb);
72246da4
FB
1550 count = trb.length;
1551
1552 if (dep->direction) {
1553 if (count) {
1554 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1555 dep->name);
1556 status = -ECONNRESET;
1557 }
1558 } else {
1559 if (count && (event->status & DEPEVT_STATUS_SHORT))
1560 s_pkt = 1;
1561 }
1562
1563 /*
1564 * We assume here we will always receive the entire data block
1565 * which we should receive. Meaning, if we program RX to
1566 * receive 4K but we receive only 2K, we assume that's all we
1567 * should receive and we simply bounce the request back to the
1568 * gadget driver for further processing.
1569 */
1570 req->request.actual += req->request.length - count;
1571 dwc3_gadget_giveback(dep, req, status);
1572 if (s_pkt)
1573 break;
1574 if ((event->status & DEPEVT_STATUS_LST) && trb.lst)
1575 break;
1576 if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
1577 break;
1578 } while (1);
1579
1580 if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
1581 return 0;
1582 return 1;
1583}
1584
1585static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1586 struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1587 int start_new)
1588{
1589 unsigned status = 0;
1590 int clean_busy;
1591
1592 if (event->status & DEPEVT_STATUS_BUSERR)
1593 status = -ECONNRESET;
1594
1595 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
a1ae9be5 1596 if (clean_busy) {
72246da4 1597 dep->flags &= ~DWC3_EP_BUSY;
a1ae9be5
SAS
1598 dep->res_trans_idx = 0;
1599 }
fae2b904
FB
1600
1601 /*
1602 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1603 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1604 */
1605 if (dwc->revision < DWC3_REVISION_183A) {
1606 u32 reg;
1607 int i;
1608
1609 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1610 struct dwc3_ep *dep = dwc->eps[i];
1611
1612 if (!(dep->flags & DWC3_EP_ENABLED))
1613 continue;
1614
1615 if (!list_empty(&dep->req_queued))
1616 return;
1617 }
1618
1619 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1620 reg |= dwc->u1u2;
1621 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1622
1623 dwc->u1u2 = 0;
1624 }
72246da4
FB
1625}
1626
1627static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1628 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1629{
1630 u32 uf;
1631
1632 if (list_empty(&dep->request_list)) {
1633 dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
1634 dep->name);
1635 return;
1636 }
1637
1638 if (event->parameters) {
1639 u32 mask;
1640
1641 mask = ~(dep->interval - 1);
1642 uf = event->parameters & mask;
1643 /* 4 micro frames in the future */
1644 uf += dep->interval * 4;
1645 } else {
1646 uf = 0;
1647 }
1648
1649 __dwc3_gadget_kick_transfer(dep, uf, 1);
1650}
1651
1652static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
1653 const struct dwc3_event_depevt *event)
1654{
1655 struct dwc3 *dwc = dep->dwc;
1656 struct dwc3_event_depevt mod_ev = *event;
1657
1658 /*
1659 * We were asked to remove one requests. It is possible that this
1660 * request and a few other were started together and have the same
1661 * transfer index. Since we stopped the complete endpoint we don't
1662 * know how many requests were already completed (and not yet)
1663 * reported and how could be done (later). We purge them all until
1664 * the end of the list.
1665 */
1666 mod_ev.status = DEPEVT_STATUS_LST;
1667 dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
1668 dep->flags &= ~DWC3_EP_BUSY;
1669 /* pending requets are ignored and are queued on XferNotReady */
72246da4
FB
1670}
1671
1672static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
1673 const struct dwc3_event_depevt *event)
1674{
1675 u32 param = event->parameters;
1676 u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
1677
1678 switch (cmd_type) {
1679 case DWC3_DEPCMD_ENDTRANSFER:
1680 dwc3_process_ep_cmd_complete(dep, event);
1681 break;
1682 case DWC3_DEPCMD_STARTTRANSFER:
1683 dep->res_trans_idx = param & 0x7f;
1684 break;
1685 default:
1686 printk(KERN_ERR "%s() unknown /unexpected type: %d\n",
1687 __func__, cmd_type);
1688 break;
1689 };
1690}
1691
1692static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1693 const struct dwc3_event_depevt *event)
1694{
1695 struct dwc3_ep *dep;
1696 u8 epnum = event->endpoint_number;
1697
1698 dep = dwc->eps[epnum];
1699
1700 dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1701 dwc3_ep_event_string(event->endpoint_event));
1702
1703 if (epnum == 0 || epnum == 1) {
1704 dwc3_ep0_interrupt(dwc, event);
1705 return;
1706 }
1707
1708 switch (event->endpoint_event) {
1709 case DWC3_DEPEVT_XFERCOMPLETE:
1710 if (usb_endpoint_xfer_isoc(dep->desc)) {
1711 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1712 dep->name);
1713 return;
1714 }
1715
1716 dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1717 break;
1718 case DWC3_DEPEVT_XFERINPROGRESS:
1719 if (!usb_endpoint_xfer_isoc(dep->desc)) {
1720 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1721 dep->name);
1722 return;
1723 }
1724
1725 dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1726 break;
1727 case DWC3_DEPEVT_XFERNOTREADY:
1728 if (usb_endpoint_xfer_isoc(dep->desc)) {
1729 dwc3_gadget_start_isoc(dwc, dep, event);
1730 } else {
1731 int ret;
1732
1733 dev_vdbg(dwc->dev, "%s: reason %s\n",
40aa41fb
FB
1734 dep->name, event->status &
1735 DEPEVT_STATUS_TRANSFER_ACTIVE
72246da4
FB
1736 ? "Transfer Active"
1737 : "Transfer Not Active");
1738
1739 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1740 if (!ret || ret == -EBUSY)
1741 return;
1742
1743 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1744 dep->name);
1745 }
1746
879631aa
FB
1747 break;
1748 case DWC3_DEPEVT_STREAMEVT:
1749 if (!usb_endpoint_xfer_bulk(dep->desc)) {
1750 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1751 dep->name);
1752 return;
1753 }
1754
1755 switch (event->status) {
1756 case DEPEVT_STREAMEVT_FOUND:
1757 dev_vdbg(dwc->dev, "Stream %d found and started\n",
1758 event->parameters);
1759
1760 break;
1761 case DEPEVT_STREAMEVT_NOTFOUND:
1762 /* FALLTHROUGH */
1763 default:
1764 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1765 }
72246da4
FB
1766 break;
1767 case DWC3_DEPEVT_RXTXFIFOEVT:
1768 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1769 break;
72246da4
FB
1770 case DWC3_DEPEVT_EPCMDCMPLT:
1771 dwc3_ep_cmd_compl(dep, event);
1772 break;
1773 }
1774}
1775
1776static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1777{
1778 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1779 spin_unlock(&dwc->lock);
1780 dwc->gadget_driver->disconnect(&dwc->gadget);
1781 spin_lock(&dwc->lock);
1782 }
1783}
1784
1785static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1786{
1787 struct dwc3_ep *dep;
1788 struct dwc3_gadget_ep_cmd_params params;
1789 u32 cmd;
1790 int ret;
1791
1792 dep = dwc->eps[epnum];
1793
624407f9 1794 WARN_ON(!dep->res_trans_idx);
72246da4
FB
1795 if (dep->res_trans_idx) {
1796 cmd = DWC3_DEPCMD_ENDTRANSFER;
1797 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
1798 cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
1799 memset(&params, 0, sizeof(params));
1800 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1801 WARN_ON_ONCE(ret);
a1ae9be5 1802 dep->res_trans_idx = 0;
72246da4
FB
1803 }
1804}
1805
1806static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1807{
1808 u32 epnum;
1809
1810 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1811 struct dwc3_ep *dep;
1812
1813 dep = dwc->eps[epnum];
1814 if (!(dep->flags & DWC3_EP_ENABLED))
1815 continue;
1816
624407f9 1817 dwc3_remove_requests(dwc, dep);
72246da4
FB
1818 }
1819}
1820
1821static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1822{
1823 u32 epnum;
1824
1825 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1826 struct dwc3_ep *dep;
1827 struct dwc3_gadget_ep_cmd_params params;
1828 int ret;
1829
1830 dep = dwc->eps[epnum];
1831
1832 if (!(dep->flags & DWC3_EP_STALL))
1833 continue;
1834
1835 dep->flags &= ~DWC3_EP_STALL;
1836
1837 memset(&params, 0, sizeof(params));
1838 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1839 DWC3_DEPCMD_CLEARSTALL, &params);
1840 WARN_ON_ONCE(ret);
1841 }
1842}
1843
1844static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
1845{
1846 dev_vdbg(dwc->dev, "%s\n", __func__);
1847#if 0
1848 XXX
1849 U1/U2 is powersave optimization. Skip it for now. Anyway we need to
1850 enable it before we can disable it.
1851
1852 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1853 reg &= ~DWC3_DCTL_INITU1ENA;
1854 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1855
1856 reg &= ~DWC3_DCTL_INITU2ENA;
1857 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1858#endif
1859
1860 dwc3_stop_active_transfers(dwc);
1861 dwc3_disconnect_gadget(dwc);
b23c8439 1862 dwc->start_config_issued = false;
72246da4
FB
1863
1864 dwc->gadget.speed = USB_SPEED_UNKNOWN;
df62df56 1865 dwc->setup_packet_pending = false;
72246da4
FB
1866}
1867
1868static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
1869{
1870 u32 reg;
1871
1872 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1873
1874 if (on)
1875 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
1876 else
1877 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1878
1879 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1880}
1881
1882static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
1883{
1884 u32 reg;
1885
1886 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1887
1888 if (on)
1889 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
1890 else
1891 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1892
1893 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1894}
1895
1896static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
1897{
1898 u32 reg;
1899
1900 dev_vdbg(dwc->dev, "%s\n", __func__);
1901
df62df56
FB
1902 /*
1903 * WORKAROUND: DWC3 revisions <1.88a have an issue which
1904 * would cause a missing Disconnect Event if there's a
1905 * pending Setup Packet in the FIFO.
1906 *
1907 * There's no suggested workaround on the official Bug
1908 * report, which states that "unless the driver/application
1909 * is doing any special handling of a disconnect event,
1910 * there is no functional issue".
1911 *
1912 * Unfortunately, it turns out that we _do_ some special
1913 * handling of a disconnect event, namely complete all
1914 * pending transfers, notify gadget driver of the
1915 * disconnection, and so on.
1916 *
1917 * Our suggested workaround is to follow the Disconnect
1918 * Event steps here, instead, based on a setup_packet_pending
1919 * flag. Such flag gets set whenever we have a XferNotReady
1920 * event on EP0 and gets cleared on XferComplete for the
1921 * same endpoint.
1922 *
1923 * Refers to:
1924 *
1925 * STAR#9000466709: RTL: Device : Disconnect event not
1926 * generated if setup packet pending in FIFO
1927 */
1928 if (dwc->revision < DWC3_REVISION_188A) {
1929 if (dwc->setup_packet_pending)
1930 dwc3_gadget_disconnect_interrupt(dwc);
1931 }
1932
961906ed
FB
1933 /* after reset -> Default State */
1934 dwc->dev_state = DWC3_DEFAULT_STATE;
1935
72246da4
FB
1936 /* Enable PHYs */
1937 dwc3_gadget_usb2_phy_power(dwc, true);
1938 dwc3_gadget_usb3_phy_power(dwc, true);
1939
1940 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
1941 dwc3_disconnect_gadget(dwc);
1942
1943 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1944 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
1945 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1946
1947 dwc3_stop_active_transfers(dwc);
1948 dwc3_clear_stall_all_ep(dwc);
b23c8439 1949 dwc->start_config_issued = false;
72246da4
FB
1950
1951 /* Reset device address to zero */
1952 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1953 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
1954 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
72246da4
FB
1955}
1956
1957static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
1958{
1959 u32 reg;
1960 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
1961
1962 /*
1963 * We change the clock only at SS but I dunno why I would want to do
1964 * this. Maybe it becomes part of the power saving plan.
1965 */
1966
1967 if (speed != DWC3_DSTS_SUPERSPEED)
1968 return;
1969
1970 /*
1971 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
1972 * each time on Connect Done.
1973 */
1974 if (!usb30_clock)
1975 return;
1976
1977 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1978 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
1979 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1980}
1981
1982static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
1983{
1984 switch (speed) {
1985 case USB_SPEED_SUPER:
1986 dwc3_gadget_usb2_phy_power(dwc, false);
1987 break;
1988 case USB_SPEED_HIGH:
1989 case USB_SPEED_FULL:
1990 case USB_SPEED_LOW:
1991 dwc3_gadget_usb3_phy_power(dwc, false);
1992 break;
1993 }
1994}
1995
1996static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
1997{
1998 struct dwc3_gadget_ep_cmd_params params;
1999 struct dwc3_ep *dep;
2000 int ret;
2001 u32 reg;
2002 u8 speed;
2003
2004 dev_vdbg(dwc->dev, "%s\n", __func__);
2005
2006 memset(&params, 0x00, sizeof(params));
2007
72246da4
FB
2008 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2009 speed = reg & DWC3_DSTS_CONNECTSPD;
2010 dwc->speed = speed;
2011
2012 dwc3_update_ram_clk_sel(dwc, speed);
2013
2014 switch (speed) {
2015 case DWC3_DCFG_SUPERSPEED:
05870c5b
FB
2016 /*
2017 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2018 * would cause a missing USB3 Reset event.
2019 *
2020 * In such situations, we should force a USB3 Reset
2021 * event by calling our dwc3_gadget_reset_interrupt()
2022 * routine.
2023 *
2024 * Refers to:
2025 *
2026 * STAR#9000483510: RTL: SS : USB3 reset event may
2027 * not be generated always when the link enters poll
2028 */
2029 if (dwc->revision < DWC3_REVISION_190A)
2030 dwc3_gadget_reset_interrupt(dwc);
2031
72246da4
FB
2032 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2033 dwc->gadget.ep0->maxpacket = 512;
2034 dwc->gadget.speed = USB_SPEED_SUPER;
2035 break;
2036 case DWC3_DCFG_HIGHSPEED:
2037 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2038 dwc->gadget.ep0->maxpacket = 64;
2039 dwc->gadget.speed = USB_SPEED_HIGH;
2040 break;
2041 case DWC3_DCFG_FULLSPEED2:
2042 case DWC3_DCFG_FULLSPEED1:
2043 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2044 dwc->gadget.ep0->maxpacket = 64;
2045 dwc->gadget.speed = USB_SPEED_FULL;
2046 break;
2047 case DWC3_DCFG_LOWSPEED:
2048 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2049 dwc->gadget.ep0->maxpacket = 8;
2050 dwc->gadget.speed = USB_SPEED_LOW;
2051 break;
2052 }
2053
2054 /* Disable unneded PHY */
2055 dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
2056
2057 dep = dwc->eps[0];
c90bfaec 2058 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
72246da4
FB
2059 if (ret) {
2060 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2061 return;
2062 }
2063
2064 dep = dwc->eps[1];
c90bfaec 2065 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
72246da4
FB
2066 if (ret) {
2067 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2068 return;
2069 }
2070
2071 /*
2072 * Configure PHY via GUSB3PIPECTLn if required.
2073 *
2074 * Update GTXFIFOSIZn
2075 *
2076 * In both cases reset values should be sufficient.
2077 */
2078}
2079
2080static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2081{
2082 dev_vdbg(dwc->dev, "%s\n", __func__);
2083
2084 /*
2085 * TODO take core out of low power mode when that's
2086 * implemented.
2087 */
2088
2089 dwc->gadget_driver->resume(&dwc->gadget);
2090}
2091
2092static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2093 unsigned int evtinfo)
2094{
fae2b904
FB
2095 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2096
2097 /*
2098 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
2099 * on the link partner, the USB session might do multiple entry/exit
2100 * of low power states before a transfer takes place.
2101 *
2102 * Due to this problem, we might experience lower throughput. The
2103 * suggested workaround is to disable DCTL[12:9] bits if we're
2104 * transitioning from U1/U2 to U0 and enable those bits again
2105 * after a transfer completes and there are no pending transfers
2106 * on any of the enabled endpoints.
2107 *
2108 * This is the first half of that workaround.
2109 *
2110 * Refers to:
2111 *
2112 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2113 * core send LGO_Ux entering U0
2114 */
2115 if (dwc->revision < DWC3_REVISION_183A) {
2116 if (next == DWC3_LINK_STATE_U0) {
2117 u32 u1u2;
2118 u32 reg;
2119
2120 switch (dwc->link_state) {
2121 case DWC3_LINK_STATE_U1:
2122 case DWC3_LINK_STATE_U2:
2123 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2124 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2125 | DWC3_DCTL_ACCEPTU2ENA
2126 | DWC3_DCTL_INITU1ENA
2127 | DWC3_DCTL_ACCEPTU1ENA);
2128
2129 if (!dwc->u1u2)
2130 dwc->u1u2 = reg & u1u2;
2131
2132 reg &= ~u1u2;
2133
2134 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2135 break;
2136 default:
2137 /* do nothing */
2138 break;
2139 }
2140 }
2141 }
2142
2143 dwc->link_state = next;
019ac832
FB
2144
2145 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
72246da4
FB
2146}
2147
2148static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2149 const struct dwc3_event_devt *event)
2150{
2151 switch (event->type) {
2152 case DWC3_DEVICE_EVENT_DISCONNECT:
2153 dwc3_gadget_disconnect_interrupt(dwc);
2154 break;
2155 case DWC3_DEVICE_EVENT_RESET:
2156 dwc3_gadget_reset_interrupt(dwc);
2157 break;
2158 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2159 dwc3_gadget_conndone_interrupt(dwc);
2160 break;
2161 case DWC3_DEVICE_EVENT_WAKEUP:
2162 dwc3_gadget_wakeup_interrupt(dwc);
2163 break;
2164 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2165 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2166 break;
2167 case DWC3_DEVICE_EVENT_EOPF:
2168 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2169 break;
2170 case DWC3_DEVICE_EVENT_SOF:
2171 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2172 break;
2173 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2174 dev_vdbg(dwc->dev, "Erratic Error\n");
2175 break;
2176 case DWC3_DEVICE_EVENT_CMD_CMPL:
2177 dev_vdbg(dwc->dev, "Command Complete\n");
2178 break;
2179 case DWC3_DEVICE_EVENT_OVERFLOW:
2180 dev_vdbg(dwc->dev, "Overflow\n");
2181 break;
2182 default:
2183 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2184 }
2185}
2186
2187static void dwc3_process_event_entry(struct dwc3 *dwc,
2188 const union dwc3_event *event)
2189{
2190 /* Endpoint IRQ, handle it and return early */
2191 if (event->type.is_devspec == 0) {
2192 /* depevt */
2193 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2194 }
2195
2196 switch (event->type.type) {
2197 case DWC3_EVENT_TYPE_DEV:
2198 dwc3_gadget_interrupt(dwc, &event->devt);
2199 break;
2200 /* REVISIT what to do with Carkit and I2C events ? */
2201 default:
2202 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2203 }
2204}
2205
/*
 * Drain one hardware event buffer.
 *
 * Reads the pending-byte count from GEVNTCOUNT, processes each 4-byte
 * event entry in turn, and acknowledges every consumed entry by writing
 * its size back to GEVNTCOUNT so the controller can reuse the space.
 *
 * Returns IRQ_HANDLED if at least one event was pending, IRQ_NONE
 * otherwise (lets the shared-IRQ core know this device did not fire).
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
	struct dwc3_event_buffer *evt;
	int left;
	u32 count;

	/* GEVNTCOUNT holds the number of valid bytes in this buffer */
	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt = dwc->ev_buffs[buf];
	left = count;

	while (left > 0) {
		union dwc3_event event;

		/* copy out of the DMA ring before the hardware can reuse it */
		memcpy(&event.raw, (evt->buf + evt->lpos), sizeof(event.raw));
		dwc3_process_event_entry(dwc, &event);
		/*
		 * XXX we wrap around correctly to the next entry as almost all
		 * entries are 4 bytes in size. There is one entry which has 12
		 * bytes which is a regular entry followed by 8 bytes data. ATM
		 * I don't know how things are organized if were get next to the
		 * a boundary so I worry about that once we try to handle that.
		 */
		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
		left -= 4;

		/* ack the 4 bytes we just consumed back to the controller */
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
	}

	return IRQ_HANDLED;
}
2240
2241static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2242{
2243 struct dwc3 *dwc = _dwc;
2244 int i;
2245 irqreturn_t ret = IRQ_NONE;
2246
2247 spin_lock(&dwc->lock);
2248
9f622b2a 2249 for (i = 0; i < dwc->num_event_buffers; i++) {
72246da4
FB
2250 irqreturn_t status;
2251
2252 status = dwc3_process_event_buf(dwc, i);
2253 if (status == IRQ_HANDLED)
2254 ret = status;
2255 }
2256
2257 spin_unlock(&dwc->lock);
2258
2259 return ret;
2260}
2261
2262/**
2263 * dwc3_gadget_init - Initializes gadget related registers
2264 * @dwc: Pointer to out controller context structure
2265 *
2266 * Returns 0 on success otherwise negative errno.
2267 */
2268int __devinit dwc3_gadget_init(struct dwc3 *dwc)
2269{
2270 u32 reg;
2271 int ret;
2272 int irq;
2273
2274 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2275 &dwc->ctrl_req_addr, GFP_KERNEL);
2276 if (!dwc->ctrl_req) {
2277 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2278 ret = -ENOMEM;
2279 goto err0;
2280 }
2281
2282 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2283 &dwc->ep0_trb_addr, GFP_KERNEL);
2284 if (!dwc->ep0_trb) {
2285 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2286 ret = -ENOMEM;
2287 goto err1;
2288 }
2289
2290 dwc->setup_buf = dma_alloc_coherent(dwc->dev,
2291 sizeof(*dwc->setup_buf) * 2,
2292 &dwc->setup_buf_addr, GFP_KERNEL);
2293 if (!dwc->setup_buf) {
2294 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2295 ret = -ENOMEM;
2296 goto err2;
2297 }
2298
5812b1c2
FB
2299 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2300 512, &dwc->ep0_bounce_addr, GFP_KERNEL);
2301 if (!dwc->ep0_bounce) {
2302 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2303 ret = -ENOMEM;
2304 goto err3;
2305 }
2306
72246da4
FB
2307 dev_set_name(&dwc->gadget.dev, "gadget");
2308
2309 dwc->gadget.ops = &dwc3_gadget_ops;
d327ab5b 2310 dwc->gadget.max_speed = USB_SPEED_SUPER;
72246da4
FB
2311 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2312 dwc->gadget.dev.parent = dwc->dev;
eeb720fb 2313 dwc->gadget.sg_supported = true;
72246da4
FB
2314
2315 dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
2316
2317 dwc->gadget.dev.dma_parms = dwc->dev->dma_parms;
2318 dwc->gadget.dev.dma_mask = dwc->dev->dma_mask;
2319 dwc->gadget.dev.release = dwc3_gadget_release;
2320 dwc->gadget.name = "dwc3-gadget";
2321
2322 /*
2323 * REVISIT: Here we should clear all pending IRQs to be
2324 * sure we're starting from a well known location.
2325 */
2326
2327 ret = dwc3_gadget_init_endpoints(dwc);
2328 if (ret)
5812b1c2 2329 goto err4;
72246da4
FB
2330
2331 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2332
2333 ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
2334 "dwc3", dwc);
2335 if (ret) {
2336 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2337 irq, ret);
5812b1c2 2338 goto err5;
72246da4
FB
2339 }
2340
2341 /* Enable all but Start and End of Frame IRQs */
2342 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
2343 DWC3_DEVTEN_EVNTOVERFLOWEN |
2344 DWC3_DEVTEN_CMDCMPLTEN |
2345 DWC3_DEVTEN_ERRTICERREN |
2346 DWC3_DEVTEN_WKUPEVTEN |
2347 DWC3_DEVTEN_ULSTCNGEN |
2348 DWC3_DEVTEN_CONNECTDONEEN |
2349 DWC3_DEVTEN_USBRSTEN |
2350 DWC3_DEVTEN_DISCONNEVTEN);
2351 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2352
2353 ret = device_register(&dwc->gadget.dev);
2354 if (ret) {
2355 dev_err(dwc->dev, "failed to register gadget device\n");
2356 put_device(&dwc->gadget.dev);
5812b1c2 2357 goto err6;
72246da4
FB
2358 }
2359
2360 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2361 if (ret) {
2362 dev_err(dwc->dev, "failed to register udc\n");
5812b1c2 2363 goto err7;
72246da4
FB
2364 }
2365
2366 return 0;
2367
5812b1c2 2368err7:
72246da4
FB
2369 device_unregister(&dwc->gadget.dev);
2370
5812b1c2 2371err6:
72246da4
FB
2372 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2373 free_irq(irq, dwc);
2374
5812b1c2 2375err5:
72246da4
FB
2376 dwc3_gadget_free_endpoints(dwc);
2377
5812b1c2
FB
2378err4:
2379 dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2380 dwc->ep0_bounce_addr);
2381
72246da4
FB
2382err3:
2383 dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2384 dwc->setup_buf, dwc->setup_buf_addr);
2385
2386err2:
2387 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2388 dwc->ep0_trb, dwc->ep0_trb_addr);
2389
2390err1:
2391 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2392 dwc->ctrl_req, dwc->ctrl_req_addr);
2393
2394err0:
2395 return ret;
2396}
2397
/*
 * dwc3_gadget_exit - tear down everything dwc3_gadget_init() set up,
 * in reverse order: remove the UDC, mask and release the IRQ, free the
 * endpoints, free the coherent DMA buffers, then drop the gadget device.
 */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	int irq;

	usb_del_gadget_udc(&dwc->gadget);
	/* same IRQ index init used; init succeeded, so this is valid here */
	irq = platform_get_irq(to_platform_device(dwc->dev), 0);

	/* mask all device events before giving the IRQ back */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
	free_irq(irq, dwc);

	dwc3_gadget_free_endpoints(dwc);

	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
			dwc->ep0_bounce_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
			dwc->setup_buf, dwc->setup_buf_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
			dwc->ep0_trb, dwc->ep0_trb_addr);

	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
			dwc->ctrl_req, dwc->ctrl_req_addr);

	device_unregister(&dwc->gadget.dev);
}