]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/usb/dwc2/gadget.c
usb: dwc2: gadget: register gadget handle to the phy
[mirror_ubuntu-zesty-kernel.git] / drivers / usb / dwc2 / gadget.c
CommitLineData
8b9bc460 1/**
dfbc6fa3
AT
2 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
5b7d70c6
BD
4 *
5 * Copyright 2008 Openmoko, Inc.
6 * Copyright 2008 Simtec Electronics
7 * Ben Dooks <ben@simtec.co.uk>
8 * http://armlinux.simtec.co.uk/
9 *
10 * S3C USB2.0 High-speed / OtG driver
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
8b9bc460 15 */
5b7d70c6
BD
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/spinlock.h>
20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22#include <linux/dma-mapping.h>
23#include <linux/debugfs.h>
7ad8096e 24#include <linux/mutex.h>
5b7d70c6
BD
25#include <linux/seq_file.h>
26#include <linux/delay.h>
27#include <linux/io.h>
5a0e3ad6 28#include <linux/slab.h>
e50bf385 29#include <linux/clk.h>
fc9a731e 30#include <linux/regulator/consumer.h>
c50f056c 31#include <linux/of_platform.h>
74084844 32#include <linux/phy/phy.h>
5b7d70c6
BD
33
34#include <linux/usb/ch9.h>
35#include <linux/usb/gadget.h>
b2e587db 36#include <linux/usb/phy.h>
126625e1 37#include <linux/platform_data/s3c-hsotg.h>
5b7d70c6 38
f7c0b143 39#include "core.h"
941fcce4 40#include "hw.h"
5b7d70c6
BD
41
42/* conversion functions */
43static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
44{
45 return container_of(req, struct s3c_hsotg_req, req);
46}
47
48static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
49{
50 return container_of(ep, struct s3c_hsotg_ep, ep);
51}
52
941fcce4 53static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
5b7d70c6 54{
941fcce4 55 return container_of(gadget, struct dwc2_hsotg, gadget);
5b7d70c6
BD
56}
57
58static inline void __orr32(void __iomem *ptr, u32 val)
59{
60 writel(readl(ptr) | val, ptr);
61}
62
63static inline void __bic32(void __iomem *ptr, u32 val)
64{
65 writel(readl(ptr) & ~val, ptr);
66}
67
997f4f81 68/* forward declaration of functions */
941fcce4 69static void s3c_hsotg_dump(struct dwc2_hsotg *hsotg);
5b7d70c6
BD
70
71/**
72 * using_dma - return the DMA status of the driver.
73 * @hsotg: The driver state.
74 *
75 * Return true if we're using DMA.
76 *
77 * Currently, we have the DMA support code worked into everywhere
78 * that needs it, but the AMBA DMA implementation in the hardware can
79 * only DMA from 32bit aligned addresses. This means that gadgets such
80 * as the CDC Ethernet cannot work as they often pass packets which are
81 * not 32bit aligned.
82 *
83 * Unfortunately the choice to use DMA or not is global to the controller
84 * and seems to be only settable when the controller is being put through
85 * a core reset. This means we either need to fix the gadgets to take
86 * account of DMA alignment, or add bounce buffers (yuerk).
87 *
88 * Until this issue is sorted out, we always return 'false'.
89 */
941fcce4 90static inline bool using_dma(struct dwc2_hsotg *hsotg)
5b7d70c6
BD
91{
92 return false; /* support is not complete */
93}
94
95/**
96 * s3c_hsotg_en_gsint - enable one or more of the general interrupt
97 * @hsotg: The device state
98 * @ints: A bitmask of the interrupts to enable
99 */
941fcce4 100static void s3c_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
5b7d70c6 101{
94cb8fd6 102 u32 gsintmsk = readl(hsotg->regs + GINTMSK);
5b7d70c6
BD
103 u32 new_gsintmsk;
104
105 new_gsintmsk = gsintmsk | ints;
106
107 if (new_gsintmsk != gsintmsk) {
108 dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
94cb8fd6 109 writel(new_gsintmsk, hsotg->regs + GINTMSK);
5b7d70c6
BD
110 }
111}
112
113/**
114 * s3c_hsotg_disable_gsint - disable one or more of the general interrupt
115 * @hsotg: The device state
116 * @ints: A bitmask of the interrupts to enable
117 */
941fcce4 118static void s3c_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
5b7d70c6 119{
94cb8fd6 120 u32 gsintmsk = readl(hsotg->regs + GINTMSK);
5b7d70c6
BD
121 u32 new_gsintmsk;
122
123 new_gsintmsk = gsintmsk & ~ints;
124
125 if (new_gsintmsk != gsintmsk)
94cb8fd6 126 writel(new_gsintmsk, hsotg->regs + GINTMSK);
5b7d70c6
BD
127}
128
129/**
130 * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq
131 * @hsotg: The device state
132 * @ep: The endpoint index
133 * @dir_in: True if direction is in.
134 * @en: The enable value, true to enable
135 *
136 * Set or clear the mask for an individual endpoint's interrupt
137 * request.
138 */
941fcce4 139static void s3c_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
5b7d70c6
BD
140 unsigned int ep, unsigned int dir_in,
141 unsigned int en)
142{
143 unsigned long flags;
144 u32 bit = 1 << ep;
145 u32 daint;
146
147 if (!dir_in)
148 bit <<= 16;
149
150 local_irq_save(flags);
94cb8fd6 151 daint = readl(hsotg->regs + DAINTMSK);
5b7d70c6
BD
152 if (en)
153 daint |= bit;
154 else
155 daint &= ~bit;
94cb8fd6 156 writel(daint, hsotg->regs + DAINTMSK);
5b7d70c6
BD
157 local_irq_restore(flags);
158}
159
160/**
161 * s3c_hsotg_init_fifo - initialise non-periodic FIFOs
162 * @hsotg: The device instance.
163 */
941fcce4 164static void s3c_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
5b7d70c6 165{
0f002d20
BD
166 unsigned int ep;
167 unsigned int addr;
168 unsigned int size;
1703a6d3 169 int timeout;
0f002d20
BD
170 u32 val;
171
6d091ee7 172 /* set FIFO sizes to 2048/1024 */
5b7d70c6 173
94cb8fd6 174 writel(2048, hsotg->regs + GRXFSIZ);
47a1685f
DN
175 writel((2048 << FIFOSIZE_STARTADDR_SHIFT) |
176 (1024 << FIFOSIZE_DEPTH_SHIFT), hsotg->regs + GNPTXFSIZ);
0f002d20 177
8b9bc460
LM
178 /*
179 * arange all the rest of the TX FIFOs, as some versions of this
0f002d20
BD
180 * block have overlapping default addresses. This also ensures
181 * that if the settings have been changed, then they are set to
8b9bc460
LM
182 * known values.
183 */
0f002d20
BD
184
185 /* start at the end of the GNPTXFSIZ, rounded up */
186 addr = 2048 + 1024;
0f002d20 187
8b9bc460 188 /*
b203d0a2
RB
189 * Because we have not enough memory to have each TX FIFO of size at
190 * least 3072 bytes (the maximum single packet size), we create four
191 * FIFOs of lenght 1024, and four of length 3072 bytes, and assing
192 * them to endpoints dynamically according to maxpacket size value of
193 * given endpoint.
8b9bc460 194 */
0f002d20 195
b203d0a2
RB
196 /* 256*4=1024 bytes FIFO length */
197 size = 256;
198 for (ep = 1; ep <= 4; ep++) {
199 val = addr;
200 val |= size << FIFOSIZE_DEPTH_SHIFT;
201 WARN_ONCE(addr + size > hsotg->fifo_mem,
202 "insufficient fifo memory");
203 addr += size;
204
205 writel(val, hsotg->regs + DPTXFSIZN(ep));
206 }
207 /* 768*4=3072 bytes FIFO length */
208 size = 768;
209 for (ep = 5; ep <= 8; ep++) {
0f002d20 210 val = addr;
47a1685f 211 val |= size << FIFOSIZE_DEPTH_SHIFT;
cff9eb75
MS
212 WARN_ONCE(addr + size > hsotg->fifo_mem,
213 "insufficient fifo memory");
0f002d20
BD
214 addr += size;
215
47a1685f 216 writel(val, hsotg->regs + DPTXFSIZN(ep));
0f002d20 217 }
1703a6d3 218
8b9bc460
LM
219 /*
220 * according to p428 of the design guide, we need to ensure that
221 * all fifos are flushed before continuing
222 */
1703a6d3 223
47a1685f
DN
224 writel(GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
225 GRSTCTL_RXFFLSH, hsotg->regs + GRSTCTL);
1703a6d3
BD
226
227 /* wait until the fifos are both flushed */
228 timeout = 100;
229 while (1) {
94cb8fd6 230 val = readl(hsotg->regs + GRSTCTL);
1703a6d3 231
47a1685f 232 if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
1703a6d3
BD
233 break;
234
235 if (--timeout == 0) {
236 dev_err(hsotg->dev,
237 "%s: timeout flushing fifos (GRSTCTL=%08x)\n",
238 __func__, val);
239 }
240
241 udelay(1);
242 }
243
244 dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
5b7d70c6
BD
245}
246
247/**
248 * @ep: USB endpoint to allocate request for.
249 * @flags: Allocation flags
250 *
251 * Allocate a new USB request structure appropriate for the specified endpoint
252 */
0978f8c5
MB
253static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
254 gfp_t flags)
5b7d70c6
BD
255{
256 struct s3c_hsotg_req *req;
257
258 req = kzalloc(sizeof(struct s3c_hsotg_req), flags);
259 if (!req)
260 return NULL;
261
262 INIT_LIST_HEAD(&req->queue);
263
5b7d70c6
BD
264 return &req->req;
265}
266
267/**
268 * is_ep_periodic - return true if the endpoint is in periodic mode.
269 * @hs_ep: The endpoint to query.
270 *
271 * Returns true if the endpoint is in periodic mode, meaning it is being
272 * used for an Interrupt or ISO transfer.
273 */
274static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
275{
276 return hs_ep->periodic;
277}
278
279/**
280 * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request
281 * @hsotg: The device state.
282 * @hs_ep: The endpoint for the request
283 * @hs_req: The request being processed.
284 *
285 * This is the reverse of s3c_hsotg_map_dma(), called for the completion
286 * of a request to ensure the buffer is ready for access by the caller.
8b9bc460 287 */
941fcce4 288static void s3c_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
5b7d70c6
BD
289 struct s3c_hsotg_ep *hs_ep,
290 struct s3c_hsotg_req *hs_req)
291{
292 struct usb_request *req = &hs_req->req;
5b7d70c6
BD
293
294 /* ignore this if we're not moving any data */
295 if (hs_req->req.length == 0)
296 return;
297
17d966a3 298 usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
5b7d70c6
BD
299}
300
301/**
302 * s3c_hsotg_write_fifo - write packet Data to the TxFIFO
303 * @hsotg: The controller state.
304 * @hs_ep: The endpoint we're going to write for.
305 * @hs_req: The request to write data for.
306 *
307 * This is called when the TxFIFO has some space in it to hold a new
308 * transmission and we have something to give it. The actual setup of
309 * the data size is done elsewhere, so all we have to do is to actually
310 * write the data.
311 *
312 * The return value is zero if there is more space (or nothing was done)
313 * otherwise -ENOSPC is returned if the FIFO space was used up.
314 *
315 * This routine is only needed for PIO
8b9bc460 316 */
941fcce4 317static int s3c_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
5b7d70c6
BD
318 struct s3c_hsotg_ep *hs_ep,
319 struct s3c_hsotg_req *hs_req)
320{
321 bool periodic = is_ep_periodic(hs_ep);
94cb8fd6 322 u32 gnptxsts = readl(hsotg->regs + GNPTXSTS);
5b7d70c6
BD
323 int buf_pos = hs_req->req.actual;
324 int to_write = hs_ep->size_loaded;
325 void *data;
326 int can_write;
327 int pkt_round;
4fca54aa 328 int max_transfer;
5b7d70c6
BD
329
330 to_write -= (buf_pos - hs_ep->last_load);
331
332 /* if there's nothing to write, get out early */
333 if (to_write == 0)
334 return 0;
335
10aebc77 336 if (periodic && !hsotg->dedicated_fifos) {
94cb8fd6 337 u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
5b7d70c6
BD
338 int size_left;
339 int size_done;
340
8b9bc460
LM
341 /*
342 * work out how much data was loaded so we can calculate
343 * how much data is left in the fifo.
344 */
5b7d70c6 345
47a1685f 346 size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
5b7d70c6 347
8b9bc460
LM
348 /*
349 * if shared fifo, we cannot write anything until the
e7a9ff54
BD
350 * previous data has been completely sent.
351 */
352 if (hs_ep->fifo_load != 0) {
47a1685f 353 s3c_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
e7a9ff54
BD
354 return -ENOSPC;
355 }
356
5b7d70c6
BD
357 dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
358 __func__, size_left,
359 hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
360
361 /* how much of the data has moved */
362 size_done = hs_ep->size_loaded - size_left;
363
364 /* how much data is left in the fifo */
365 can_write = hs_ep->fifo_load - size_done;
366 dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
367 __func__, can_write);
368
369 can_write = hs_ep->fifo_size - can_write;
370 dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
371 __func__, can_write);
372
373 if (can_write <= 0) {
47a1685f 374 s3c_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
5b7d70c6
BD
375 return -ENOSPC;
376 }
10aebc77 377 } else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
94cb8fd6 378 can_write = readl(hsotg->regs + DTXFSTS(hs_ep->index));
10aebc77
BD
379
380 can_write &= 0xffff;
381 can_write *= 4;
5b7d70c6 382 } else {
47a1685f 383 if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
5b7d70c6
BD
384 dev_dbg(hsotg->dev,
385 "%s: no queue slots available (0x%08x)\n",
386 __func__, gnptxsts);
387
47a1685f 388 s3c_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
5b7d70c6
BD
389 return -ENOSPC;
390 }
391
47a1685f 392 can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
679f9b7c 393 can_write *= 4; /* fifo size is in 32bit quantities. */
5b7d70c6
BD
394 }
395
4fca54aa
RB
396 max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;
397
398 dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
399 __func__, gnptxsts, can_write, to_write, max_transfer);
5b7d70c6 400
8b9bc460
LM
401 /*
402 * limit to 512 bytes of data, it seems at least on the non-periodic
5b7d70c6
BD
403 * FIFO, requests of >512 cause the endpoint to get stuck with a
404 * fragment of the end of the transfer in it.
405 */
811f3303 406 if (can_write > 512 && !periodic)
5b7d70c6
BD
407 can_write = 512;
408
8b9bc460
LM
409 /*
410 * limit the write to one max-packet size worth of data, but allow
03e10e5a 411 * the transfer to return that it did not run out of fifo space
8b9bc460
LM
412 * doing it.
413 */
4fca54aa
RB
414 if (to_write > max_transfer) {
415 to_write = max_transfer;
03e10e5a 416
5cb2ff0c
RB
417 /* it's needed only when we do not use dedicated fifos */
418 if (!hsotg->dedicated_fifos)
419 s3c_hsotg_en_gsint(hsotg,
47a1685f
DN
420 periodic ? GINTSTS_PTXFEMP :
421 GINTSTS_NPTXFEMP);
03e10e5a
BD
422 }
423
5b7d70c6
BD
424 /* see if we can write data */
425
426 if (to_write > can_write) {
427 to_write = can_write;
4fca54aa 428 pkt_round = to_write % max_transfer;
5b7d70c6 429
8b9bc460
LM
430 /*
431 * Round the write down to an
5b7d70c6
BD
432 * exact number of packets.
433 *
434 * Note, we do not currently check to see if we can ever
435 * write a full packet or not to the FIFO.
436 */
437
438 if (pkt_round)
439 to_write -= pkt_round;
440
8b9bc460
LM
441 /*
442 * enable correct FIFO interrupt to alert us when there
443 * is more room left.
444 */
5b7d70c6 445
5cb2ff0c
RB
446 /* it's needed only when we do not use dedicated fifos */
447 if (!hsotg->dedicated_fifos)
448 s3c_hsotg_en_gsint(hsotg,
47a1685f
DN
449 periodic ? GINTSTS_PTXFEMP :
450 GINTSTS_NPTXFEMP);
5b7d70c6
BD
451 }
452
453 dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
454 to_write, hs_req->req.length, can_write, buf_pos);
455
456 if (to_write <= 0)
457 return -ENOSPC;
458
459 hs_req->req.actual = buf_pos + to_write;
460 hs_ep->total_data += to_write;
461
462 if (periodic)
463 hs_ep->fifo_load += to_write;
464
465 to_write = DIV_ROUND_UP(to_write, 4);
466 data = hs_req->req.buf + buf_pos;
467
1a7ed5be 468 iowrite32_rep(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);
5b7d70c6
BD
469
470 return (to_write >= can_write) ? -ENOSPC : 0;
471}
472
473/**
474 * get_ep_limit - get the maximum data legnth for this endpoint
475 * @hs_ep: The endpoint
476 *
477 * Return the maximum data that can be queued in one go on a given endpoint
478 * so that transfers that are too long can be split.
479 */
480static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
481{
482 int index = hs_ep->index;
483 unsigned maxsize;
484 unsigned maxpkt;
485
486 if (index != 0) {
47a1685f
DN
487 maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
488 maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
5b7d70c6 489 } else {
b05ca580 490 maxsize = 64+64;
66e5c643 491 if (hs_ep->dir_in)
47a1685f 492 maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
66e5c643 493 else
5b7d70c6 494 maxpkt = 2;
5b7d70c6
BD
495 }
496
497 /* we made the constant loading easier above by using +1 */
498 maxpkt--;
499 maxsize--;
500
8b9bc460
LM
501 /*
502 * constrain by packet count if maxpkts*pktsize is greater
503 * than the length register size.
504 */
5b7d70c6
BD
505
506 if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
507 maxsize = maxpkt * hs_ep->ep.maxpacket;
508
509 return maxsize;
510}
511
512/**
513 * s3c_hsotg_start_req - start a USB request from an endpoint's queue
514 * @hsotg: The controller state.
515 * @hs_ep: The endpoint to process a request for
516 * @hs_req: The request to start.
517 * @continuing: True if we are doing more for the current request.
518 *
519 * Start the given request running by setting the endpoint registers
520 * appropriately, and writing any data to the FIFOs.
521 */
941fcce4 522static void s3c_hsotg_start_req(struct dwc2_hsotg *hsotg,
5b7d70c6
BD
523 struct s3c_hsotg_ep *hs_ep,
524 struct s3c_hsotg_req *hs_req,
525 bool continuing)
526{
527 struct usb_request *ureq = &hs_req->req;
528 int index = hs_ep->index;
529 int dir_in = hs_ep->dir_in;
530 u32 epctrl_reg;
531 u32 epsize_reg;
532 u32 epsize;
533 u32 ctrl;
534 unsigned length;
535 unsigned packets;
536 unsigned maxreq;
537
538 if (index != 0) {
539 if (hs_ep->req && !continuing) {
540 dev_err(hsotg->dev, "%s: active request\n", __func__);
541 WARN_ON(1);
542 return;
543 } else if (hs_ep->req != hs_req && continuing) {
544 dev_err(hsotg->dev,
545 "%s: continue different req\n", __func__);
546 WARN_ON(1);
547 return;
548 }
549 }
550
94cb8fd6
LM
551 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
552 epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
5b7d70c6
BD
553
554 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
555 __func__, readl(hsotg->regs + epctrl_reg), index,
556 hs_ep->dir_in ? "in" : "out");
557
9c39ddc6
AT
558 /* If endpoint is stalled, we will restart request later */
559 ctrl = readl(hsotg->regs + epctrl_reg);
560
47a1685f 561 if (ctrl & DXEPCTL_STALL) {
9c39ddc6
AT
562 dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
563 return;
564 }
565
5b7d70c6 566 length = ureq->length - ureq->actual;
71225bee
LM
567 dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
568 ureq->length, ureq->actual);
5b7d70c6
BD
569 if (0)
570 dev_dbg(hsotg->dev,
0cc4cf6f 571 "REQ buf %p len %d dma %pad noi=%d zp=%d snok=%d\n",
8b3bc14f 572 ureq->buf, length, &ureq->dma,
5b7d70c6
BD
573 ureq->no_interrupt, ureq->zero, ureq->short_not_ok);
574
575 maxreq = get_ep_limit(hs_ep);
576 if (length > maxreq) {
577 int round = maxreq % hs_ep->ep.maxpacket;
578
579 dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
580 __func__, length, maxreq, round);
581
582 /* round down to multiple of packets */
583 if (round)
584 maxreq -= round;
585
586 length = maxreq;
587 }
588
589 if (length)
590 packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
591 else
592 packets = 1; /* send one packet if length is zero. */
593
4fca54aa
RB
594 if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
595 dev_err(hsotg->dev, "req length > maxpacket*mc\n");
596 return;
597 }
598
5b7d70c6 599 if (dir_in && index != 0)
4fca54aa 600 if (hs_ep->isochronous)
47a1685f 601 epsize = DXEPTSIZ_MC(packets);
4fca54aa 602 else
47a1685f 603 epsize = DXEPTSIZ_MC(1);
5b7d70c6
BD
604 else
605 epsize = 0;
606
607 if (index != 0 && ureq->zero) {
8b9bc460
LM
608 /*
609 * test for the packets being exactly right for the
610 * transfer
611 */
5b7d70c6
BD
612
613 if (length == (packets * hs_ep->ep.maxpacket))
614 packets++;
615 }
616
47a1685f
DN
617 epsize |= DXEPTSIZ_PKTCNT(packets);
618 epsize |= DXEPTSIZ_XFERSIZE(length);
5b7d70c6
BD
619
620 dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
621 __func__, packets, length, ureq->length, epsize, epsize_reg);
622
623 /* store the request as the current one we're doing */
624 hs_ep->req = hs_req;
625
626 /* write size / packets */
627 writel(epsize, hsotg->regs + epsize_reg);
628
db1d8ba3 629 if (using_dma(hsotg) && !continuing) {
5b7d70c6
BD
630 unsigned int dma_reg;
631
8b9bc460
LM
632 /*
633 * write DMA address to control register, buffer already
634 * synced by s3c_hsotg_ep_queue().
635 */
5b7d70c6 636
94cb8fd6 637 dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
5b7d70c6
BD
638 writel(ureq->dma, hsotg->regs + dma_reg);
639
0cc4cf6f 640 dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
8b3bc14f 641 __func__, &ureq->dma, dma_reg);
5b7d70c6
BD
642 }
643
47a1685f
DN
644 ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
645 ctrl |= DXEPCTL_USBACTEP;
71225bee
LM
646
647 dev_dbg(hsotg->dev, "setup req:%d\n", hsotg->setup);
648
649 /* For Setup request do not clear NAK */
650 if (hsotg->setup && index == 0)
651 hsotg->setup = 0;
652 else
47a1685f 653 ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
71225bee 654
5b7d70c6
BD
655
656 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
657 writel(ctrl, hsotg->regs + epctrl_reg);
658
8b9bc460
LM
659 /*
660 * set these, it seems that DMA support increments past the end
5b7d70c6 661 * of the packet buffer so we need to calculate the length from
8b9bc460
LM
662 * this information.
663 */
5b7d70c6
BD
664 hs_ep->size_loaded = length;
665 hs_ep->last_load = ureq->actual;
666
667 if (dir_in && !using_dma(hsotg)) {
668 /* set these anyway, we may need them for non-periodic in */
669 hs_ep->fifo_load = 0;
670
671 s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
672 }
673
8b9bc460
LM
674 /*
675 * clear the INTknTXFEmpMsk when we start request, more as a aide
676 * to debugging to see what is going on.
677 */
5b7d70c6 678 if (dir_in)
47a1685f 679 writel(DIEPMSK_INTKNTXFEMPMSK,
94cb8fd6 680 hsotg->regs + DIEPINT(index));
5b7d70c6 681
8b9bc460
LM
682 /*
683 * Note, trying to clear the NAK here causes problems with transmit
684 * on the S3C6400 ending up with the TXFIFO becoming full.
685 */
5b7d70c6
BD
686
687 /* check ep is enabled */
47a1685f 688 if (!(readl(hsotg->regs + epctrl_reg) & DXEPCTL_EPENA))
5b7d70c6 689 dev_warn(hsotg->dev,
47a1685f 690 "ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
5b7d70c6
BD
691 index, readl(hsotg->regs + epctrl_reg));
692
47a1685f 693 dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
5b7d70c6 694 __func__, readl(hsotg->regs + epctrl_reg));
afcf4169
RB
695
696 /* enable ep interrupts */
697 s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
5b7d70c6
BD
698}
699
700/**
701 * s3c_hsotg_map_dma - map the DMA memory being used for the request
702 * @hsotg: The device state.
703 * @hs_ep: The endpoint the request is on.
704 * @req: The request being processed.
705 *
706 * We've been asked to queue a request, so ensure that the memory buffer
707 * is correctly setup for DMA. If we've been passed an extant DMA address
708 * then ensure the buffer has been synced to memory. If our buffer has no
709 * DMA memory, then we map the memory and mark our request to allow us to
710 * cleanup on completion.
8b9bc460 711 */
941fcce4 712static int s3c_hsotg_map_dma(struct dwc2_hsotg *hsotg,
5b7d70c6
BD
713 struct s3c_hsotg_ep *hs_ep,
714 struct usb_request *req)
715{
5b7d70c6 716 struct s3c_hsotg_req *hs_req = our_req(req);
e58ebcd1 717 int ret;
5b7d70c6
BD
718
719 /* if the length is zero, ignore the DMA data */
720 if (hs_req->req.length == 0)
721 return 0;
722
e58ebcd1
FB
723 ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
724 if (ret)
725 goto dma_error;
5b7d70c6
BD
726
727 return 0;
728
729dma_error:
730 dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
731 __func__, req->buf, req->length);
732
733 return -EIO;
734}
735
736static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
737 gfp_t gfp_flags)
738{
739 struct s3c_hsotg_req *hs_req = our_req(req);
740 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
941fcce4 741 struct dwc2_hsotg *hs = hs_ep->parent;
5b7d70c6
BD
742 bool first;
743
744 dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
745 ep->name, req, req->length, req->buf, req->no_interrupt,
746 req->zero, req->short_not_ok);
747
748 /* initialise status of the request */
749 INIT_LIST_HEAD(&hs_req->queue);
750 req->actual = 0;
751 req->status = -EINPROGRESS;
752
753 /* if we're using DMA, sync the buffers as necessary */
754 if (using_dma(hs)) {
755 int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
756 if (ret)
757 return ret;
758 }
759
5b7d70c6
BD
760 first = list_empty(&hs_ep->queue);
761 list_add_tail(&hs_req->queue, &hs_ep->queue);
762
763 if (first)
764 s3c_hsotg_start_req(hs, hs_ep, hs_req, false);
765
5b7d70c6
BD
766 return 0;
767}
768
5ad1d316
LM
769static int s3c_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
770 gfp_t gfp_flags)
771{
772 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
941fcce4 773 struct dwc2_hsotg *hs = hs_ep->parent;
5ad1d316
LM
774 unsigned long flags = 0;
775 int ret = 0;
776
777 spin_lock_irqsave(&hs->lock, flags);
778 ret = s3c_hsotg_ep_queue(ep, req, gfp_flags);
779 spin_unlock_irqrestore(&hs->lock, flags);
780
781 return ret;
782}
783
5b7d70c6
BD
784static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
785 struct usb_request *req)
786{
787 struct s3c_hsotg_req *hs_req = our_req(req);
788
789 kfree(hs_req);
790}
791
792/**
793 * s3c_hsotg_complete_oursetup - setup completion callback
794 * @ep: The endpoint the request was on.
795 * @req: The request completed.
796 *
797 * Called on completion of any requests the driver itself
798 * submitted that need cleaning up.
799 */
800static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
801 struct usb_request *req)
802{
803 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
941fcce4 804 struct dwc2_hsotg *hsotg = hs_ep->parent;
5b7d70c6
BD
805
806 dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
807
808 s3c_hsotg_ep_free_request(ep, req);
809}
810
811/**
812 * ep_from_windex - convert control wIndex value to endpoint
813 * @hsotg: The driver state.
814 * @windex: The control request wIndex field (in host order).
815 *
816 * Convert the given wIndex into a pointer to an driver endpoint
817 * structure, or return NULL if it is not a valid endpoint.
8b9bc460 818 */
941fcce4 819static struct s3c_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
5b7d70c6
BD
820 u32 windex)
821{
822 struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
823 int dir = (windex & USB_DIR_IN) ? 1 : 0;
824 int idx = windex & 0x7F;
825
826 if (windex >= 0x100)
827 return NULL;
828
b3f489b2 829 if (idx > hsotg->num_of_eps)
5b7d70c6
BD
830 return NULL;
831
832 if (idx && ep->dir_in != dir)
833 return NULL;
834
835 return ep;
836}
837
838/**
839 * s3c_hsotg_send_reply - send reply to control request
840 * @hsotg: The device state
841 * @ep: Endpoint 0
842 * @buff: Buffer for request
843 * @length: Length of reply.
844 *
845 * Create a request and queue it on the given endpoint. This is useful as
846 * an internal method of sending replies to certain control requests, etc.
847 */
941fcce4 848static int s3c_hsotg_send_reply(struct dwc2_hsotg *hsotg,
5b7d70c6
BD
849 struct s3c_hsotg_ep *ep,
850 void *buff,
851 int length)
852{
853 struct usb_request *req;
854 int ret;
855
856 dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
857
858 req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
859 hsotg->ep0_reply = req;
860 if (!req) {
861 dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
862 return -ENOMEM;
863 }
864
865 req->buf = hsotg->ep0_buff;
866 req->length = length;
867 req->zero = 1; /* always do zero-length final transfer */
868 req->complete = s3c_hsotg_complete_oursetup;
869
870 if (length)
871 memcpy(req->buf, buff, length);
872 else
873 ep->sent_zlp = 1;
874
875 ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
876 if (ret) {
877 dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
878 return ret;
879 }
880
881 return 0;
882}
883
884/**
885 * s3c_hsotg_process_req_status - process request GET_STATUS
886 * @hsotg: The device state
887 * @ctrl: USB control request
888 */
941fcce4 889static int s3c_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
5b7d70c6
BD
890 struct usb_ctrlrequest *ctrl)
891{
892 struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
893 struct s3c_hsotg_ep *ep;
894 __le16 reply;
895 int ret;
896
897 dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
898
899 if (!ep0->dir_in) {
900 dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
901 return -EINVAL;
902 }
903
904 switch (ctrl->bRequestType & USB_RECIP_MASK) {
905 case USB_RECIP_DEVICE:
906 reply = cpu_to_le16(0); /* bit 0 => self powered,
907 * bit 1 => remote wakeup */
908 break;
909
910 case USB_RECIP_INTERFACE:
911 /* currently, the data result should be zero */
912 reply = cpu_to_le16(0);
913 break;
914
915 case USB_RECIP_ENDPOINT:
916 ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
917 if (!ep)
918 return -ENOENT;
919
920 reply = cpu_to_le16(ep->halted ? 1 : 0);
921 break;
922
923 default:
924 return 0;
925 }
926
927 if (le16_to_cpu(ctrl->wLength) != 2)
928 return -EINVAL;
929
930 ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);
931 if (ret) {
932 dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
933 return ret;
934 }
935
936 return 1;
937}
938
939static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);
940
9c39ddc6
AT
941/**
942 * get_ep_head - return the first request on the endpoint
943 * @hs_ep: The controller endpoint to get
944 *
945 * Get the first request on the endpoint.
946 */
947static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
948{
949 if (list_empty(&hs_ep->queue))
950 return NULL;
951
952 return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);
953}
954
5b7d70c6
BD
955/**
956 * s3c_hsotg_process_req_featire - process request {SET,CLEAR}_FEATURE
957 * @hsotg: The device state
958 * @ctrl: USB control request
959 */
941fcce4 960static int s3c_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
5b7d70c6
BD
961 struct usb_ctrlrequest *ctrl)
962{
26ab3d0c 963 struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
9c39ddc6
AT
964 struct s3c_hsotg_req *hs_req;
965 bool restart;
5b7d70c6
BD
966 bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
967 struct s3c_hsotg_ep *ep;
26ab3d0c 968 int ret;
bd9ef7bf 969 bool halted;
5b7d70c6
BD
970
971 dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
972 __func__, set ? "SET" : "CLEAR");
973
974 if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
975 ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
976 if (!ep) {
977 dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
978 __func__, le16_to_cpu(ctrl->wIndex));
979 return -ENOENT;
980 }
981
982 switch (le16_to_cpu(ctrl->wValue)) {
983 case USB_ENDPOINT_HALT:
bd9ef7bf
RB
984 halted = ep->halted;
985
5b7d70c6 986 s3c_hsotg_ep_sethalt(&ep->ep, set);
26ab3d0c
AT
987
988 ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
989 if (ret) {
990 dev_err(hsotg->dev,
991 "%s: failed to send reply\n", __func__);
992 return ret;
993 }
9c39ddc6 994
bd9ef7bf
RB
995 /*
996 * we have to complete all requests for ep if it was
997 * halted, and the halt was cleared by CLEAR_FEATURE
998 */
999
1000 if (!set && halted) {
9c39ddc6
AT
1001 /*
1002 * If we have request in progress,
1003 * then complete it
1004 */
1005 if (ep->req) {
1006 hs_req = ep->req;
1007 ep->req = NULL;
1008 list_del_init(&hs_req->queue);
304f7e5e
MS
1009 usb_gadget_giveback_request(&ep->ep,
1010 &hs_req->req);
9c39ddc6
AT
1011 }
1012
1013 /* If we have pending request, then start it */
1014 restart = !list_empty(&ep->queue);
1015 if (restart) {
1016 hs_req = get_ep_head(ep);
1017 s3c_hsotg_start_req(hsotg, ep,
1018 hs_req, false);
1019 }
1020 }
1021
5b7d70c6
BD
1022 break;
1023
1024 default:
1025 return -ENOENT;
1026 }
1027 } else
1028 return -ENOENT; /* currently only deal with endpoint */
1029
1030 return 1;
1031}
1032
941fcce4 1033static void s3c_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
ab93e014 1034
c9f721b2
RB
/**
 * s3c_hsotg_stall_ep0 - stall ep0
 * @hsotg: The device state
 *
 * Set stall for ep0 as response for setup request.
 */
static void s3c_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
{
	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
	u32 reg;
	u32 ctrl;

	dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
	/* pick the control register for whichever direction ep0 is in */
	reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;

	/*
	 * DxEPCTL_Stall will be cleared by EP once it has
	 * taken effect, so no need to clear later.
	 */

	ctrl = readl(hsotg->regs + reg);
	ctrl |= DXEPCTL_STALL;
	ctrl |= DXEPCTL_CNAK;	/* clear NAK so the stall handshake goes out */
	writel(ctrl, hsotg->regs + reg);

	dev_dbg(hsotg->dev,
		"written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
		ctrl, reg, readl(hsotg->regs + reg));

	/*
	 * complete won't be called, so we enqueue
	 * setup request here
	 */
	s3c_hsotg_enqueue_setup(hsotg);
}
1070
5b7d70c6
BD
/**
 * s3c_hsotg_process_control - process a control request
 * @hsotg: The device state
 * @ctrl: The control request received
 *
 * The controller has received the SETUP phase of a control request, and
 * needs to work out what to do next (and whether to pass it on to the
 * gadget driver).
 *
 * Called with hsotg->lock held (it is dropped around driver->setup()).
 */
static void s3c_hsotg_process_control(struct dwc2_hsotg *hsotg,
				      struct usb_ctrlrequest *ctrl)
{
	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
	int ret = 0;
	u32 dcfg;

	ep0->sent_zlp = 0;

	dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
		 ctrl->bRequest, ctrl->bRequestType,
		 ctrl->wValue, ctrl->wLength);

	/*
	 * record the direction of the request, for later use when enquing
	 * packets onto EP0.
	 */

	ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;
	dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);

	/*
	 * if we've no data with this request, then the last part of the
	 * transaction is going to implicitly be IN.
	 */
	if (ctrl->wLength == 0)
		ep0->dir_in = 1;

	/* standard requests we can handle in the driver itself */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_SET_ADDRESS:
			/* program the new device address into DCFG directly */
			dcfg = readl(hsotg->regs + DCFG);
			dcfg &= ~DCFG_DEVADDR_MASK;
			dcfg |= (le16_to_cpu(ctrl->wValue) <<
				 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
			writel(dcfg, hsotg->regs + DCFG);

			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);

			/* zero-length status stage acknowledges the request */
			ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
			return;

		case USB_REQ_GET_STATUS:
			ret = s3c_hsotg_process_req_status(hsotg, ctrl);
			break;

		case USB_REQ_CLEAR_FEATURE:
		case USB_REQ_SET_FEATURE:
			ret = s3c_hsotg_process_req_feature(hsotg, ctrl);
			break;
		}
	}

	/* as a fallback, try delivering it to the driver to deal with */

	if (ret == 0 && hsotg->driver) {
		/* drop the lock: the gadget callback may queue requests */
		spin_unlock(&hsotg->lock);
		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
		spin_lock(&hsotg->lock);
		if (ret < 0)
			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
	}

	/*
	 * the request is either unhandlable, or is not formatted correctly
	 * so respond with a STALL for the status stage to indicate failure.
	 */

	if (ret < 0)
		s3c_hsotg_stall_ep0(hsotg);
}
1151
5b7d70c6
BD
/**
 * s3c_hsotg_complete_setup - completion of a setup transfer
 * @ep: The endpoint the request was on.
 * @req: The request completed.
 *
 * Called on completion of any requests the driver itself submitted for
 * EP0 setup packets
 */
static void s3c_hsotg_complete_setup(struct usb_ep *ep,
				     struct usb_request *req)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;

	if (req->status < 0) {
		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
		return;
	}

	spin_lock(&hsotg->lock);
	if (req->actual == 0)
		/* empty transfer: just re-arm for the next SETUP packet */
		s3c_hsotg_enqueue_setup(hsotg);
	else
		/* buffer holds a received SETUP packet - decode it */
		s3c_hsotg_process_control(hsotg, req->buf);
	spin_unlock(&hsotg->lock);
}
1178
/**
 * s3c_hsotg_enqueue_setup - start a request for EP0 packets
 * @hsotg: The device state.
 *
 * Enqueue a request on EP0 if necessary to received any SETUP packets
 * received from the host.
 */
static void s3c_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
{
	struct usb_request *req = hsotg->ctrl_req;
	struct s3c_hsotg_req *hs_req = our_req(req);
	int ret;

	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);

	req->zero = 0;
	req->length = 8;	/* a USB SETUP packet is always 8 bytes */
	req->buf = hsotg->ctrl_buff;
	req->complete = s3c_hsotg_complete_setup;

	/* the single shared control request must not be queued twice */
	if (!list_empty(&hs_req->queue)) {
		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
		return;
	}

	/* SETUP packets arrive on the OUT side of ep0 */
	hsotg->eps[0].dir_in = 0;

	ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
	if (ret < 0) {
		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
		/*
		 * Don't think there's much we can do other than watch the
		 * driver fail.
		 */
	}
}
1215
5b7d70c6
BD
/**
 * s3c_hsotg_complete_request - complete a request given to us
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request was on.
 * @hs_req: The request to complete.
 * @result: The result code (0 => Ok, otherwise errno)
 *
 * The given request has finished, so call the necessary completion
 * if it has one and then look to see if we can start a new request
 * on the endpoint.
 *
 * Note, expects the ep to already be locked as appropriate.
 */
static void s3c_hsotg_complete_request(struct dwc2_hsotg *hsotg,
				       struct s3c_hsotg_ep *hs_ep,
				       struct s3c_hsotg_req *hs_req,
				       int result)
{
	bool restart;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
		return;
	}

	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);

	/*
	 * only replace the status if we've not already set an error
	 * from a previous transaction
	 */

	if (hs_req->req.status == -EINPROGRESS)
		hs_req->req.status = result;

	/* detach the request from the endpoint before calling back */
	hs_ep->req = NULL;
	list_del_init(&hs_req->queue);

	if (using_dma(hsotg))
		s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);

	/*
	 * call the complete request with the locks off, just in case the
	 * request tries to queue more work for this endpoint.
	 */

	if (hs_req->req.complete) {
		spin_unlock(&hsotg->lock);
		usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
		spin_lock(&hsotg->lock);
	}

	/*
	 * Look to see if there is anything else to do. Note, the completion
	 * of the previous request may have caused a new request to be started
	 * so be careful when doing this.
	 */

	if (!hs_ep->req && result >= 0) {
		restart = !list_empty(&hs_ep->queue);
		if (restart) {
			hs_req = get_ep_head(hs_ep);
			s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
		}
	}
}
1283
5b7d70c6
BD
/**
 * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
 * @hsotg: The device state.
 * @ep_idx: The endpoint index for the data
 * @size: The size of data in the fifo, in bytes
 *
 * The FIFO status shows there is data to read from the FIFO for a given
 * endpoint, so sort out whether we need to read the data into a request
 * that has been made for that endpoint.
 */
static void s3c_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	void __iomem *fifo = hsotg->regs + EPFIFO(ep_idx);
	int to_read;
	int max_req;
	int read_ptr;


	if (!hs_req) {
		u32 epctl = readl(hsotg->regs + DOEPCTL(ep_idx));
		int ptr;

		dev_dbg(hsotg->dev,
			 "%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n",
			 __func__, size, ep_idx, epctl);

		/* dump the data from the FIFO, we've nothing we can do */
		for (ptr = 0; ptr < size; ptr += 4)
			(void)readl(fifo);

		return;
	}

	to_read = size;
	read_ptr = hs_req->req.actual;	/* resume where the last read ended */
	max_req = hs_req->req.length - read_ptr;

	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
		__func__, to_read, max_req, read_ptr, hs_req->req.length);

	if (to_read > max_req) {
		/*
		 * more data appeared than we where willing
		 * to deal with in this request.
		 */

		/* currently we don't deal this */
		WARN_ON_ONCE(1);
	}

	hs_ep->total_data += to_read;
	hs_req->req.actual += to_read;
	/* FIFO reads are whole 32-bit words, so round the byte count up */
	to_read = DIV_ROUND_UP(to_read, 4);

	/*
	 * note, we might over-write the buffer end by 3 bytes depending on
	 * alignment of the data.
	 */
	ioread32_rep(fifo, hs_req->req.buf + read_ptr, to_read);
}
1346
/**
 * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
 * @hsotg: The device instance
 * @req: The request currently on this endpoint
 *
 * Generate a zero-length IN packet request for terminating a SETUP
 * transaction.
 *
 * Note, since we don't write any data to the TxFIFO, then it is
 * currently believed that we do not need to wait for any space in
 * the TxFIFO.
 */
static void s3c_hsotg_send_zlp(struct dwc2_hsotg *hsotg,
			       struct s3c_hsotg_req *req)
{
	u32 ctrl;

	if (!req) {
		dev_warn(hsotg->dev, "%s: no request?\n", __func__);
		return;
	}

	if (req->req.length == 0) {
		/* already a zero-length request: just re-arm for SETUP */
		hsotg->eps[0].sent_zlp = 1;
		s3c_hsotg_enqueue_setup(hsotg);
		return;
	}

	hsotg->eps[0].dir_in = 1;
	hsotg->eps[0].sent_zlp = 1;

	dev_dbg(hsotg->dev, "sending zero-length packet\n");

	/* issue a zero-sized packet to terminate this */
	writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
	       DXEPTSIZ_XFERSIZE(0), hsotg->regs + DIEPTSIZ(0));

	ctrl = readl(hsotg->regs + DIEPCTL0);
	ctrl |= DXEPCTL_CNAK;  /* clear NAK set by core */
	ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
	ctrl |= DXEPCTL_USBACTEP;
	writel(ctrl, hsotg->regs + DIEPCTL0);
}
1390
1391/**
1392 * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
1393 * @hsotg: The device instance
1394 * @epnum: The endpoint received from
1395 * @was_setup: Set if processing a SetupDone event.
1396 *
1397 * The RXFIFO has delivered an OutDone event, which means that the data
1398 * transfer for an OUT endpoint has been completed, either by a short
1399 * packet or by the finish of a transfer.
8b9bc460 1400 */
941fcce4 1401static void s3c_hsotg_handle_outdone(struct dwc2_hsotg *hsotg,
5b7d70c6
BD
1402 int epnum, bool was_setup)
1403{
94cb8fd6 1404 u32 epsize = readl(hsotg->regs + DOEPTSIZ(epnum));
5b7d70c6
BD
1405 struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
1406 struct s3c_hsotg_req *hs_req = hs_ep->req;
1407 struct usb_request *req = &hs_req->req;
47a1685f 1408 unsigned size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
5b7d70c6
BD
1409 int result = 0;
1410
1411 if (!hs_req) {
1412 dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
1413 return;
1414 }
1415
1416 if (using_dma(hsotg)) {
5b7d70c6 1417 unsigned size_done;
5b7d70c6 1418
8b9bc460
LM
1419 /*
1420 * Calculate the size of the transfer by checking how much
5b7d70c6
BD
1421 * is left in the endpoint size register and then working it
1422 * out from the amount we loaded for the transfer.
1423 *
1424 * We need to do this as DMA pointers are always 32bit aligned
1425 * so may overshoot/undershoot the transfer.
1426 */
1427
5b7d70c6
BD
1428 size_done = hs_ep->size_loaded - size_left;
1429 size_done += hs_ep->last_load;
1430
1431 req->actual = size_done;
1432 }
1433
a33e7136
BD
1434 /* if there is more request to do, schedule new transfer */
1435 if (req->actual < req->length && size_left == 0) {
1436 s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
1437 return;
71225bee
LM
1438 } else if (epnum == 0) {
1439 /*
1440 * After was_setup = 1 =>
1441 * set CNAK for non Setup requests
1442 */
1443 hsotg->setup = was_setup ? 0 : 1;
a33e7136
BD
1444 }
1445
5b7d70c6
BD
1446 if (req->actual < req->length && req->short_not_ok) {
1447 dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
1448 __func__, req->actual, req->length);
1449
8b9bc460
LM
1450 /*
1451 * todo - what should we return here? there's no one else
1452 * even bothering to check the status.
1453 */
5b7d70c6
BD
1454 }
1455
1456 if (epnum == 0) {
d3ca0259
LM
1457 /*
1458 * Condition req->complete != s3c_hsotg_complete_setup says:
1459 * send ZLP when we have an asynchronous request from gadget
1460 */
5b7d70c6
BD
1461 if (!was_setup && req->complete != s3c_hsotg_complete_setup)
1462 s3c_hsotg_send_zlp(hsotg, hs_req);
1463 }
1464
5ad1d316 1465 s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
5b7d70c6
BD
1466}
1467
1468/**
1469 * s3c_hsotg_read_frameno - read current frame number
1470 * @hsotg: The device instance
1471 *
1472 * Return the current frame number
8b9bc460 1473 */
941fcce4 1474static u32 s3c_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
5b7d70c6
BD
1475{
1476 u32 dsts;
1477
94cb8fd6
LM
1478 dsts = readl(hsotg->regs + DSTS);
1479 dsts &= DSTS_SOFFN_MASK;
1480 dsts >>= DSTS_SOFFN_SHIFT;
5b7d70c6
BD
1481
1482 return dsts;
1483}
1484
1485/**
1486 * s3c_hsotg_handle_rx - RX FIFO has data
1487 * @hsotg: The device instance
1488 *
1489 * The IRQ handler has detected that the RX FIFO has some data in it
1490 * that requires processing, so find out what is in there and do the
1491 * appropriate read.
1492 *
25985edc 1493 * The RXFIFO is a true FIFO, the packets coming out are still in packet
5b7d70c6
BD
1494 * chunks, so if you have x packets received on an endpoint you'll get x
1495 * FIFO events delivered, each with a packet's worth of data in it.
1496 *
1497 * When using DMA, we should not be processing events from the RXFIFO
1498 * as the actual data should be sent to the memory directly and we turn
1499 * on the completion interrupts to get notifications of transfer completion.
1500 */
941fcce4 1501static void s3c_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
5b7d70c6 1502{
94cb8fd6 1503 u32 grxstsr = readl(hsotg->regs + GRXSTSP);
5b7d70c6
BD
1504 u32 epnum, status, size;
1505
1506 WARN_ON(using_dma(hsotg));
1507
47a1685f
DN
1508 epnum = grxstsr & GRXSTS_EPNUM_MASK;
1509 status = grxstsr & GRXSTS_PKTSTS_MASK;
5b7d70c6 1510
47a1685f
DN
1511 size = grxstsr & GRXSTS_BYTECNT_MASK;
1512 size >>= GRXSTS_BYTECNT_SHIFT;
5b7d70c6
BD
1513
1514 if (1)
1515 dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
1516 __func__, grxstsr, size, epnum);
1517
47a1685f
DN
1518 switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
1519 case GRXSTS_PKTSTS_GLOBALOUTNAK:
1520 dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
5b7d70c6
BD
1521 break;
1522
47a1685f 1523 case GRXSTS_PKTSTS_OUTDONE:
5b7d70c6
BD
1524 dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
1525 s3c_hsotg_read_frameno(hsotg));
1526
1527 if (!using_dma(hsotg))
1528 s3c_hsotg_handle_outdone(hsotg, epnum, false);
1529 break;
1530
47a1685f 1531 case GRXSTS_PKTSTS_SETUPDONE:
5b7d70c6
BD
1532 dev_dbg(hsotg->dev,
1533 "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1534 s3c_hsotg_read_frameno(hsotg),
94cb8fd6 1535 readl(hsotg->regs + DOEPCTL(0)));
5b7d70c6
BD
1536
1537 s3c_hsotg_handle_outdone(hsotg, epnum, true);
1538 break;
1539
47a1685f 1540 case GRXSTS_PKTSTS_OUTRX:
5b7d70c6
BD
1541 s3c_hsotg_rx_data(hsotg, epnum, size);
1542 break;
1543
47a1685f 1544 case GRXSTS_PKTSTS_SETUPRX:
5b7d70c6
BD
1545 dev_dbg(hsotg->dev,
1546 "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1547 s3c_hsotg_read_frameno(hsotg),
94cb8fd6 1548 readl(hsotg->regs + DOEPCTL(0)));
5b7d70c6
BD
1549
1550 s3c_hsotg_rx_data(hsotg, epnum, size);
1551 break;
1552
1553 default:
1554 dev_warn(hsotg->dev, "%s: unknown status %08x\n",
1555 __func__, grxstsr);
1556
1557 s3c_hsotg_dump(hsotg);
1558 break;
1559 }
1560}
1561
1562/**
1563 * s3c_hsotg_ep0_mps - turn max packet size into register setting
1564 * @mps: The maximum packet size in bytes.
8b9bc460 1565 */
5b7d70c6
BD
1566static u32 s3c_hsotg_ep0_mps(unsigned int mps)
1567{
1568 switch (mps) {
1569 case 64:
94cb8fd6 1570 return D0EPCTL_MPS_64;
5b7d70c6 1571 case 32:
94cb8fd6 1572 return D0EPCTL_MPS_32;
5b7d70c6 1573 case 16:
94cb8fd6 1574 return D0EPCTL_MPS_16;
5b7d70c6 1575 case 8:
94cb8fd6 1576 return D0EPCTL_MPS_8;
5b7d70c6
BD
1577 }
1578
1579 /* bad max packet size, warn and return invalid result */
1580 WARN_ON(1);
1581 return (u32)-1;
1582}
1583
/**
 * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
 * @hsotg: The driver state.
 * @ep: The index number of the endpoint
 * @mps: The maximum packet size in bytes
 *
 * Configure the maximum packet size for the given endpoint, updating
 * the hardware control registers to reflect this.
 */
static void s3c_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
				       unsigned int ep, unsigned int mps)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
	void __iomem *regs = hsotg->regs;
	u32 mpsval;
	u32 mcval;
	u32 reg;

	if (ep == 0) {
		/* EP0 is a special case */
		mpsval = s3c_hsotg_ep0_mps(mps);
		if (mpsval > 3)
			goto bad_mps;
		hs_ep->ep.maxpacket = mps;
		hs_ep->mc = 1;	/* EP0 never does multiple packets/uframe */
	} else {
		mpsval = mps & DXEPCTL_MPS_MASK;
		if (mpsval > 1024)
			goto bad_mps;
		/*
		 * bits 12:11 of mps carry the additional-transactions count;
		 * mc (multi count) becomes 1..4 packets per microframe
		 */
		mcval = ((mps >> 11) & 0x3) + 1;
		hs_ep->mc = mcval;
		if (mcval > 3)
			goto bad_mps;
		hs_ep->ep.maxpacket = mpsval;
	}

	/*
	 * update both the in and out endpoint controldir_ registers, even
	 * if one of the directions may not be in use.
	 */

	reg = readl(regs + DIEPCTL(ep));
	reg &= ~DXEPCTL_MPS_MASK;
	reg |= mpsval;
	writel(reg, regs + DIEPCTL(ep));

	/* ep0's single control register was already written above */
	if (ep) {
		reg = readl(regs + DOEPCTL(ep));
		reg &= ~DXEPCTL_MPS_MASK;
		reg |= mpsval;
		writel(reg, regs + DOEPCTL(ep));
	}

	return;

bad_mps:
	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
}
1642
9c39ddc6
AT
1643/**
1644 * s3c_hsotg_txfifo_flush - flush Tx FIFO
1645 * @hsotg: The driver state
1646 * @idx: The index for the endpoint (0..15)
1647 */
941fcce4 1648static void s3c_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
9c39ddc6
AT
1649{
1650 int timeout;
1651 int val;
1652
47a1685f 1653 writel(GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
94cb8fd6 1654 hsotg->regs + GRSTCTL);
9c39ddc6
AT
1655
1656 /* wait until the fifo is flushed */
1657 timeout = 100;
1658
1659 while (1) {
94cb8fd6 1660 val = readl(hsotg->regs + GRSTCTL);
9c39ddc6 1661
47a1685f 1662 if ((val & (GRSTCTL_TXFFLSH)) == 0)
9c39ddc6
AT
1663 break;
1664
1665 if (--timeout == 0) {
1666 dev_err(hsotg->dev,
1667 "%s: timeout flushing fifo (GRSTCTL=%08x)\n",
1668 __func__, val);
e0cbe595 1669 break;
9c39ddc6
AT
1670 }
1671
1672 udelay(1);
1673 }
1674}
5b7d70c6
BD
1675
1676/**
1677 * s3c_hsotg_trytx - check to see if anything needs transmitting
1678 * @hsotg: The driver state
1679 * @hs_ep: The driver endpoint to check.
1680 *
1681 * Check to see if there is a request that has data to send, and if so
1682 * make an attempt to write data into the FIFO.
1683 */
941fcce4 1684static int s3c_hsotg_trytx(struct dwc2_hsotg *hsotg,
5b7d70c6
BD
1685 struct s3c_hsotg_ep *hs_ep)
1686{
1687 struct s3c_hsotg_req *hs_req = hs_ep->req;
1688
afcf4169
RB
1689 if (!hs_ep->dir_in || !hs_req) {
1690 /**
1691 * if request is not enqueued, we disable interrupts
1692 * for endpoints, excepting ep0
1693 */
1694 if (hs_ep->index != 0)
1695 s3c_hsotg_ctrl_epint(hsotg, hs_ep->index,
1696 hs_ep->dir_in, 0);
5b7d70c6 1697 return 0;
afcf4169 1698 }
5b7d70c6
BD
1699
1700 if (hs_req->req.actual < hs_req->req.length) {
1701 dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
1702 hs_ep->index);
1703 return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1704 }
1705
1706 return 0;
1707}
1708
/**
 * s3c_hsotg_complete_in - complete IN transfer
 * @hsotg: The device state.
 * @hs_ep: The endpoint that has just completed.
 *
 * An IN transfer has been completed, update the transfer's state and then
 * call the relevant completion routines.
 */
static void s3c_hsotg_complete_in(struct dwc2_hsotg *hsotg,
				  struct s3c_hsotg_ep *hs_ep)
{
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
	int size_left, size_done;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "XferCompl but no req\n");
		return;
	}

	/* Finish ZLP handling for IN EP0 transactions */
	if (hsotg->eps[0].sent_zlp) {
		dev_dbg(hsotg->dev, "zlp packet received\n");
		s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
		return;
	}

	/*
	 * Calculate the size of the transfer by checking how much is left
	 * in the endpoint size register and then working it out from
	 * the amount we loaded for the transfer.
	 *
	 * We do this even for DMA, as the transfer may have incremented
	 * past the end of the buffer (DMA transfers are always 32bit
	 * aligned).
	 */

	size_left = DXEPTSIZ_XFERSIZE_GET(epsize);

	size_done = hs_ep->size_loaded - size_left;
	size_done += hs_ep->last_load;

	if (hs_req->req.actual != size_done)
		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
			__func__, hs_req->req.actual, size_done);

	hs_req->req.actual = size_done;
	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);

	/*
	 * Check if dealing with Maximum Packet Size(MPS) IN transfer at EP0
	 * When sent data is a multiple MPS size (e.g. 64B ,128B ,192B
	 * ,256B ... ), after last MPS sized packet send IN ZLP packet to
	 * inform the host that no more data is available.
	 * The state of req.zero member is checked to be sure that the value to
	 * send is smaller than wValue expected from host.
	 * Check req.length to NOT send another ZLP when the current one is
	 * under completion (the one for which this completion has been called).
	 */
	if (hs_req->req.length && hs_ep->index == 0 && hs_req->req.zero &&
	    hs_req->req.length == hs_req->req.actual &&
	    !(hs_req->req.length % hs_ep->ep.maxpacket)) {

		dev_dbg(hsotg->dev, "ep0 zlp IN packet sent\n");
		s3c_hsotg_send_zlp(hsotg, hs_req);

		return;
	}

	/* transfer ended but the request wants more: restart it */
	if (!size_left && hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
	} else
		s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
}
1785
/**
 * s3c_hsotg_epint - handle an in/out endpoint interrupt
 * @hsotg: The driver state
 * @idx: The index for the endpoint (0..15)
 * @dir_in: Set if this is an IN endpoint
 *
 * Process and clear any interrupt pending for an individual endpoint
 */
static void s3c_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
			    int dir_in)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
	u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
	u32 ints;
	u32 ctrl;

	ints = readl(hsotg->regs + epint_reg);
	ctrl = readl(hsotg->regs + epctl_reg);

	/* Clear endpoint interrupts */
	writel(ints, hsotg->regs + epint_reg);

	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
		__func__, idx, dir_in ? "in" : "out", ints);

	if (ints & DXEPINT_XFERCOMPL) {
		/*
		 * for high-bandwidth isochronous (interval 1) keep the
		 * even/odd microframe toggling in step with the core
		 */
		if (hs_ep->isochronous && hs_ep->interval == 1) {
			if (ctrl & DXEPCTL_EOFRNUM)
				ctrl |= DXEPCTL_SETEVENFR;
			else
				ctrl |= DXEPCTL_SETODDFR;
			writel(ctrl, hsotg->regs + epctl_reg);
		}

		dev_dbg(hsotg->dev,
			"%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
			__func__, readl(hsotg->regs + epctl_reg),
			readl(hsotg->regs + epsiz_reg));

		/*
		 * we get OutDone from the FIFO, so we only need to look
		 * at completing IN requests here
		 */
		if (dir_in) {
			s3c_hsotg_complete_in(hsotg, hs_ep);

			/* ep0 idle again: re-arm to catch the next SETUP */
			if (idx == 0 && !hs_ep->req)
				s3c_hsotg_enqueue_setup(hsotg);
		} else if (using_dma(hsotg)) {
			/*
			 * We're using DMA, we need to fire an OutDone here
			 * as we ignore the RXFIFO.
			 */

			s3c_hsotg_handle_outdone(hsotg, idx, false);
		}
	}

	if (ints & DXEPINT_EPDISBLD) {
		dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);

		if (dir_in) {
			int epctl = readl(hsotg->regs + epctl_reg);

			s3c_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);

			/*
			 * stalled bulk IN: clear the global non-periodic
			 * IN NAK so traffic can resume
			 */
			if ((epctl & DXEPCTL_STALL) &&
			    (epctl & DXEPCTL_EPTYPE_BULK)) {
				int dctl = readl(hsotg->regs + DCTL);

				dctl |= DCTL_CGNPINNAK;
				writel(dctl, hsotg->regs + DCTL);
			}
		}
	}

	if (ints & DXEPINT_AHBERR)
		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);

	if (ints & DXEPINT_SETUP) {  /* Setup or Timeout */
		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);

		if (using_dma(hsotg) && idx == 0) {
			/*
			 * this is the notification we've received a
			 * setup packet. In non-DMA mode we'd get this
			 * from the RXFIFO, instead we need to process
			 * the setup here.
			 */

			if (dir_in)
				WARN_ON_ONCE(1);
			else
				s3c_hsotg_handle_outdone(hsotg, 0, true);
		}
	}

	if (ints & DXEPINT_BACK2BACKSETUP)
		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);

	if (dir_in && !hs_ep->isochronous) {
		/* not sure if this is important, but we'll clear it anyway */
		if (ints & DIEPMSK_INTKNTXFEMPMSK) {
			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
				__func__, idx);
		}

		/* this probably means something bad is happening */
		if (ints & DIEPMSK_INTKNEPMISMSK) {
			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
				 __func__, idx);
		}

		/* FIFO has space or is empty (see GAHBCFG) */
		if (hsotg->dedicated_fifos &&
		    ints & DIEPMSK_TXFIFOEMPTY) {
			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
				__func__, idx);
			if (!using_dma(hsotg))
				s3c_hsotg_trytx(hsotg, hs_ep);
		}
	}
}
1911
/**
 * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
 * @hsotg: The device state.
 *
 * Handle updating the device settings after the enumeration phase has
 * been completed.
 */
static void s3c_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
{
	u32 dsts = readl(hsotg->regs + DSTS);
	int ep0_mps = 0, ep_mps = 8;

	/*
	 * This should signal the finish of the enumeration phase
	 * of the USB handshaking, so we should now know what rate
	 * we connected at.
	 */

	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);

	/*
	 * note, since we're limited by the size of transfer on EP0, and
	 * it seems IN transfers must be a even number of packets we do
	 * not advertise a 64byte MPS on EP0.
	 */

	/* catch both EnumSpd_FS and EnumSpd_FS48 */
	switch (dsts & DSTS_ENUMSPD_MASK) {
	case DSTS_ENUMSPD_FS:
	case DSTS_ENUMSPD_FS48:
		hsotg->gadget.speed = USB_SPEED_FULL;
		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 1023;
		break;

	case DSTS_ENUMSPD_HS:
		hsotg->gadget.speed = USB_SPEED_HIGH;
		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 1024;
		break;

	case DSTS_ENUMSPD_LS:
		hsotg->gadget.speed = USB_SPEED_LOW;
		/*
		 * note, we don't actually support LS in this driver at the
		 * moment, and the documentation seems to imply that it isn't
		 * supported by the PHYs on some of the devices.
		 */
		break;
	}
	dev_info(hsotg->dev, "new device is %s\n",
		 usb_speed_string(hsotg->gadget.speed));

	/*
	 * we should now know the maximum packet size for an
	 * endpoint, so set the endpoints to a default value.
	 */

	if (ep0_mps) {
		int i;
		s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
		for (i = 1; i < hsotg->num_of_eps; i++)
			s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
	}

	/* ensure after enumeration our EP0 is active */

	s3c_hsotg_enqueue_setup(hsotg);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + DIEPCTL0),
		readl(hsotg->regs + DOEPCTL0));
}
1985
1986/**
1987 * kill_all_requests - remove all requests from the endpoint's queue
1988 * @hsotg: The device state.
1989 * @ep: The endpoint the requests may be on.
1990 * @result: The result code to use.
5b7d70c6
BD
1991 *
1992 * Go through the requests on the given endpoint and mark them
1993 * completed with the given result code.
1994 */
941fcce4 1995static void kill_all_requests(struct dwc2_hsotg *hsotg,
5b7d70c6 1996 struct s3c_hsotg_ep *ep,
6b448af4 1997 int result)
5b7d70c6
BD
1998{
1999 struct s3c_hsotg_req *req, *treq;
b203d0a2 2000 unsigned size;
5b7d70c6 2001
6b448af4 2002 ep->req = NULL;
5b7d70c6 2003
6b448af4 2004 list_for_each_entry_safe(req, treq, &ep->queue, queue)
5b7d70c6
BD
2005 s3c_hsotg_complete_request(hsotg, ep, req,
2006 result);
6b448af4 2007
b203d0a2
RB
2008 if (!hsotg->dedicated_fifos)
2009 return;
2010 size = (readl(hsotg->regs + DTXFSTS(ep->index)) & 0xffff) * 4;
2011 if (size < ep->fifo_size)
2012 s3c_hsotg_txfifo_flush(hsotg, ep->fifo_index);
5b7d70c6
BD
2013}
2014
5b7d70c6 2015/**
5e891342 2016 * s3c_hsotg_disconnect - disconnect service
5b7d70c6
BD
2017 * @hsotg: The device state.
2018 *
5e891342
LM
2019 * The device has been disconnected. Remove all current
2020 * transactions and signal the gadget driver that this
2021 * has happened.
8b9bc460 2022 */
4ace06e8 2023void s3c_hsotg_disconnect(struct dwc2_hsotg *hsotg)
5b7d70c6
BD
2024{
2025 unsigned ep;
2026
4ace06e8
MS
2027 if (!hsotg->connected)
2028 return;
2029
2030 hsotg->connected = 0;
b3f489b2 2031 for (ep = 0; ep < hsotg->num_of_eps; ep++)
6b448af4 2032 kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN);
5b7d70c6
BD
2033
2034 call_gadget(hsotg, disconnect);
2035}
4ace06e8 2036EXPORT_SYMBOL_GPL(s3c_hsotg_disconnect);
5b7d70c6
BD
2037
2038/**
2039 * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
2040 * @hsotg: The device state:
2041 * @periodic: True if this is a periodic FIFO interrupt
2042 */
941fcce4 2043static void s3c_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
5b7d70c6
BD
2044{
2045 struct s3c_hsotg_ep *ep;
2046 int epno, ret;
2047
2048 /* look through for any more data to transmit */
2049
b3f489b2 2050 for (epno = 0; epno < hsotg->num_of_eps; epno++) {
5b7d70c6
BD
2051 ep = &hsotg->eps[epno];
2052
2053 if (!ep->dir_in)
2054 continue;
2055
2056 if ((periodic && !ep->periodic) ||
2057 (!periodic && ep->periodic))
2058 continue;
2059
2060 ret = s3c_hsotg_trytx(hsotg, ep);
2061 if (ret < 0)
2062 break;
2063 }
2064}
2065
/*
 * IRQ flags which will trigger a retry around the IRQ loop: the FIFO
 * status bits double as "still not empty/serviced" indicators, so the
 * handler loops while any of them remain set.
 */
#define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
			GINTSTS_PTXFEMP | \
			GINTSTS_RXFLVL)
5b7d70c6 2070
308d734e
LM
2071/**
2072 * s3c_hsotg_corereset - issue softreset to the core
2073 * @hsotg: The device state
2074 *
2075 * Issue a soft reset to the core, and await the core finishing it.
8b9bc460 2076 */
941fcce4 2077static int s3c_hsotg_corereset(struct dwc2_hsotg *hsotg)
308d734e
LM
2078{
2079 int timeout;
2080 u32 grstctl;
2081
2082 dev_dbg(hsotg->dev, "resetting core\n");
2083
2084 /* issue soft reset */
47a1685f 2085 writel(GRSTCTL_CSFTRST, hsotg->regs + GRSTCTL);
308d734e 2086
2868fea2 2087 timeout = 10000;
308d734e 2088 do {
94cb8fd6 2089 grstctl = readl(hsotg->regs + GRSTCTL);
47a1685f 2090 } while ((grstctl & GRSTCTL_CSFTRST) && timeout-- > 0);
308d734e 2091
47a1685f 2092 if (grstctl & GRSTCTL_CSFTRST) {
308d734e
LM
2093 dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
2094 return -EINVAL;
2095 }
2096
2868fea2 2097 timeout = 10000;
308d734e
LM
2098
2099 while (1) {
94cb8fd6 2100 u32 grstctl = readl(hsotg->regs + GRSTCTL);
308d734e
LM
2101
2102 if (timeout-- < 0) {
2103 dev_info(hsotg->dev,
2104 "%s: reset failed, GRSTCTL=%08x\n",
2105 __func__, grstctl);
2106 return -ETIMEDOUT;
2107 }
2108
47a1685f 2109 if (!(grstctl & GRSTCTL_AHBIDLE))
308d734e
LM
2110 continue;
2111
2112 break; /* reset done */
2113 }
2114
2115 dev_dbg(hsotg->dev, "reset successful\n");
2116 return 0;
2117}
2118
/**
 * s3c_hsotg_core_init_disconnected - reset and reinitialise the core
 * @hsotg: The device state
 *
 * Issue a soft reset to the core, then reprogram the PHY/FIFO/interrupt
 * configuration and EP0, leaving the core in soft-disconnect state.
 * (Kernel-doc previously named this s3c_hsotg_core_init; fixed to match
 * the actual function name.)
 */
void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg)
{
	s3c_hsotg_corereset(hsotg);

	/*
	 * we must now enable ep0 ready for host detection and then
	 * set configuration.
	 */

	/* set the PLL on, remove the HNP/SRP and set the PHY */
	writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
	       (0x5 << 10), hsotg->regs + GUSBCFG);

	s3c_hsotg_init_fifo(hsotg);

	/* stay off the bus until s3c_hsotg_core_connect() clears this */
	__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);

	/*
	 * NOTE(review): bit 18 set alongside the high-speed device speed;
	 * presumably an EP-mismatch-count / NZ-STS-OUT handshake field in
	 * DCFG -- confirm against the DWC2 DCFG register layout.
	 */
	writel(1 << 18 | DCFG_DEVSPD_HS,  hsotg->regs + DCFG);

	/* Clear any pending OTG interrupts */
	writel(0xffffffff, hsotg->regs + GOTGINT);

	/* Clear any pending interrupts */
	writel(0xffffffff, hsotg->regs + GINTSTS);

	/* unmask the core-level interrupts this driver services */
	writel(GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
	       GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
	       GINTSTS_CONIDSTSCHNG | GINTSTS_USBRST |
	       GINTSTS_ENUMDONE | GINTSTS_OTGINT |
	       GINTSTS_USBSUSP | GINTSTS_WKUPINT,
	       hsotg->regs + GINTMSK);

	if (using_dma(hsotg))
		writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
		       GAHBCFG_HBSTLEN_INCR4,
		       hsotg->regs + GAHBCFG);
	else
		writel(((hsotg->dedicated_fifos) ? (GAHBCFG_NP_TXF_EMP_LVL |
						    GAHBCFG_P_TXF_EMP_LVL) : 0) |
		       GAHBCFG_GLBL_INTR_EN,
		       hsotg->regs + GAHBCFG);

	/*
	 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
	 * when we have no data to transfer. Otherwise we get being flooded by
	 * interrupts.
	 */

	writel(((hsotg->dedicated_fifos) ? DIEPMSK_TXFIFOEMPTY |
		DIEPMSK_INTKNTXFEMPMSK : 0) |
	       DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
	       DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
	       DIEPMSK_INTKNEPMISMSK,
	       hsotg->regs + DIEPMSK);

	/*
	 * don't need XferCompl, we get that from RXFIFO in slave mode. In
	 * DMA mode we may need this.
	 */
	writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
				    DIEPMSK_TIMEOUTMSK) : 0) |
	       DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
	       DOEPMSK_SETUPMSK,
	       hsotg->regs + DOEPMSK);

	/* no per-endpoint interrupts yet; enabled per-EP later */
	writel(0, hsotg->regs + DAINTMSK);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + DIEPCTL0),
		readl(hsotg->regs + DOEPCTL0));

	/* enable in and out endpoint interrupts */
	s3c_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);

	/*
	 * Enable the RXFIFO when in slave mode, as this is how we collect
	 * the data. In DMA mode, we get events from the FIFO but also
	 * things we cannot process, so do not use it.
	 */
	if (!using_dma(hsotg))
		s3c_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);

	/* Enable interrupts for EP0 in and out */
	s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
	s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);

	/* pulse the power-on programming done handshake */
	__orr32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
	udelay(10);  /* see openiboot */
	__bic32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);

	dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + DCTL));

	/*
	 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
	 * writing to the EPCTL register..
	 */

	/* set to read 1 8byte packet */
	writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
	       DXEPTSIZ_XFERSIZE(8), hsotg->regs + DOEPTSIZ0);

	writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
	       DXEPCTL_CNAK | DXEPCTL_EPENA |
	       DXEPCTL_USBACTEP,
	       hsotg->regs + DOEPCTL0);

	/* enable, but don't activate EP0in */
	writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
	       DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);

	s3c_hsotg_enqueue_setup(hsotg);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + DIEPCTL0),
		readl(hsotg->regs + DOEPCTL0));

	/* clear global NAKs */
	writel(DCTL_CGOUTNAK | DCTL_CGNPINNAK | DCTL_SFTDISCON,
	       hsotg->regs + DCTL);

	/* must be at-least 3ms to allow bus to see disconnect */
	mdelay(3);

	hsotg->last_rst = jiffies;
}
2250
941fcce4 2251static void s3c_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
ad38dc5d
MS
2252{
2253 /* set the soft-disconnect bit */
2254 __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
2255}
ac3c81f3 2256
510ffaa4 2257void s3c_hsotg_core_connect(struct dwc2_hsotg *hsotg)
ad38dc5d 2258{
308d734e 2259 /* remove the soft-disconnect and let's go */
47a1685f 2260 __bic32(hsotg->regs + DCTL, DCTL_SFTDISCON);
308d734e
LM
2261}
2262
/**
 * s3c_hsotg_irq - handle device interrupt
 * @irq: The IRQ number triggered
 * @pw: The pw value when registered the handler.
 *
 * Services enumeration-done, per-endpoint, reset, FIFO-empty,
 * RX-level, suspend and NAK-effective interrupts, looping up to
 * 8 times while any IRQ_RETRY_MASK bit remains set.
 */
static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
{
	struct dwc2_hsotg *hsotg = pw;
	int retry_count = 8;
	u32 gintsts;
	u32 gintmsk;

	spin_lock(&hsotg->lock);
irq_retry:
	gintsts = readl(hsotg->regs + GINTSTS);
	gintmsk = readl(hsotg->regs + GINTMSK);

	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);

	/* only service interrupts that are currently unmasked */
	gintsts &= gintmsk;

	if (gintsts & GINTSTS_ENUMDONE) {
		writel(GINTSTS_ENUMDONE, hsotg->regs + GINTSTS);

		s3c_hsotg_irq_enumdone(hsotg);
		hsotg->connected = 1;
	}

	if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
		u32 daint = readl(hsotg->regs + DAINT);
		u32 daintmsk = readl(hsotg->regs + DAINTMSK);
		u32 daint_out, daint_in;
		int ep;

		daint &= daintmsk;
		/* DAINT packs OUT endpoints in the high half, IN in the low */
		daint_out = daint >> DAINT_OUTEP_SHIFT;
		daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);

		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);

		for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
			if (daint_out & 1)
				s3c_hsotg_epint(hsotg, ep, 0);
		}

		for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
			if (daint_in & 1)
				s3c_hsotg_epint(hsotg, ep, 1);
		}
	}

	if (gintsts & GINTSTS_USBRST) {

		u32 usb_status = readl(hsotg->regs + GOTGCTL);

		dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
			readl(hsotg->regs + GNPTXSTS));

		writel(GINTSTS_USBRST, hsotg->regs + GINTSTS);

		/*
		 * only reinitialise if the B-session is valid and we have
		 * not just done so (debounce resets within 200ms)
		 */
		if (usb_status & GOTGCTL_BSESVLD) {
			if (time_after(jiffies, hsotg->last_rst +
				       msecs_to_jiffies(200))) {

				kill_all_requests(hsotg, &hsotg->eps[0],
						  -ECONNRESET);

				s3c_hsotg_core_init_disconnected(hsotg);
				s3c_hsotg_core_connect(hsotg);
			}
		}
	}

	/* check both FIFOs */

	if (gintsts & GINTSTS_NPTXFEMP) {
		dev_dbg(hsotg->dev, "NPTxFEmp\n");

		/*
		 * Disable the interrupt to stop it happening again
		 * unless one of these endpoint routines decides that
		 * it needs re-enabling
		 */

		s3c_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
		s3c_hsotg_irq_fifoempty(hsotg, false);
	}

	if (gintsts & GINTSTS_PTXFEMP) {
		dev_dbg(hsotg->dev, "PTxFEmp\n");

		/* See note in GINTSTS_NPTxFEmp */

		s3c_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
		s3c_hsotg_irq_fifoempty(hsotg, true);
	}

	if (gintsts & GINTSTS_RXFLVL) {
		/*
		 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
		 * we need to retry s3c_hsotg_handle_rx if this is still
		 * set.
		 */

		s3c_hsotg_handle_rx(hsotg);
	}

	if (gintsts & GINTSTS_ERLYSUSP) {
		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
		writel(GINTSTS_ERLYSUSP, hsotg->regs + GINTSTS);
	}

	/*
	 * these next two seem to crop-up occasionally causing the core
	 * to shutdown the USB transfer, so try clearing them and logging
	 * the occurrence.
	 */

	if (gintsts & GINTSTS_GOUTNAKEFF) {
		dev_info(hsotg->dev, "GOUTNakEff triggered\n");

		writel(DCTL_CGOUTNAK, hsotg->regs + DCTL);

		s3c_hsotg_dump(hsotg);
	}

	if (gintsts & GINTSTS_GINNAKEFF) {
		dev_info(hsotg->dev, "GINNakEff triggered\n");

		writel(DCTL_CGNPINNAK, hsotg->regs + DCTL);

		s3c_hsotg_dump(hsotg);
	}

	/*
	 * if we've had fifo events, we should try and go around the
	 * loop again to see if there's any point in returning yet.
	 */

	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
		goto irq_retry;

	spin_unlock(&hsotg->lock);

	return IRQ_HANDLED;
}
2411
2412/**
2413 * s3c_hsotg_ep_enable - enable the given endpoint
2414 * @ep: The USB endpint to configure
2415 * @desc: The USB endpoint descriptor to configure with.
2416 *
2417 * This is called from the USB gadget code's usb_ep_enable().
8b9bc460 2418 */
5b7d70c6
BD
2419static int s3c_hsotg_ep_enable(struct usb_ep *ep,
2420 const struct usb_endpoint_descriptor *desc)
2421{
2422 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
941fcce4 2423 struct dwc2_hsotg *hsotg = hs_ep->parent;
5b7d70c6
BD
2424 unsigned long flags;
2425 int index = hs_ep->index;
2426 u32 epctrl_reg;
2427 u32 epctrl;
2428 u32 mps;
2429 int dir_in;
b203d0a2 2430 int i, val, size;
19c190f9 2431 int ret = 0;
5b7d70c6
BD
2432
2433 dev_dbg(hsotg->dev,
2434 "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
2435 __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
2436 desc->wMaxPacketSize, desc->bInterval);
2437
2438 /* not to be called for EP0 */
2439 WARN_ON(index == 0);
2440
2441 dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
2442 if (dir_in != hs_ep->dir_in) {
2443 dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
2444 return -EINVAL;
2445 }
2446
29cc8897 2447 mps = usb_endpoint_maxp(desc);
5b7d70c6
BD
2448
2449 /* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */
2450
94cb8fd6 2451 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
5b7d70c6
BD
2452 epctrl = readl(hsotg->regs + epctrl_reg);
2453
2454 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
2455 __func__, epctrl, epctrl_reg);
2456
22258f49 2457 spin_lock_irqsave(&hsotg->lock, flags);
5b7d70c6 2458
47a1685f
DN
2459 epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
2460 epctrl |= DXEPCTL_MPS(mps);
5b7d70c6 2461
8b9bc460
LM
2462 /*
2463 * mark the endpoint as active, otherwise the core may ignore
2464 * transactions entirely for this endpoint
2465 */
47a1685f 2466 epctrl |= DXEPCTL_USBACTEP;
5b7d70c6 2467
8b9bc460
LM
2468 /*
2469 * set the NAK status on the endpoint, otherwise we might try and
5b7d70c6
BD
2470 * do something with data that we've yet got a request to process
2471 * since the RXFIFO will take data for an endpoint even if the
2472 * size register hasn't been set.
2473 */
2474
47a1685f 2475 epctrl |= DXEPCTL_SNAK;
5b7d70c6
BD
2476
2477 /* update the endpoint state */
e9edd199 2478 s3c_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps);
5b7d70c6
BD
2479
2480 /* default, set to non-periodic */
1479e841 2481 hs_ep->isochronous = 0;
5b7d70c6 2482 hs_ep->periodic = 0;
a18ed7b0 2483 hs_ep->halted = 0;
1479e841 2484 hs_ep->interval = desc->bInterval;
5b7d70c6 2485
4fca54aa
RB
2486 if (hs_ep->interval > 1 && hs_ep->mc > 1)
2487 dev_err(hsotg->dev, "MC > 1 when interval is not 1\n");
2488
5b7d70c6
BD
2489 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
2490 case USB_ENDPOINT_XFER_ISOC:
47a1685f
DN
2491 epctrl |= DXEPCTL_EPTYPE_ISO;
2492 epctrl |= DXEPCTL_SETEVENFR;
1479e841
RB
2493 hs_ep->isochronous = 1;
2494 if (dir_in)
2495 hs_ep->periodic = 1;
2496 break;
5b7d70c6
BD
2497
2498 case USB_ENDPOINT_XFER_BULK:
47a1685f 2499 epctrl |= DXEPCTL_EPTYPE_BULK;
5b7d70c6
BD
2500 break;
2501
2502 case USB_ENDPOINT_XFER_INT:
b203d0a2 2503 if (dir_in)
5b7d70c6 2504 hs_ep->periodic = 1;
5b7d70c6 2505
47a1685f 2506 epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
5b7d70c6
BD
2507 break;
2508
2509 case USB_ENDPOINT_XFER_CONTROL:
47a1685f 2510 epctrl |= DXEPCTL_EPTYPE_CONTROL;
5b7d70c6
BD
2511 break;
2512 }
2513
8b9bc460
LM
2514 /*
2515 * if the hardware has dedicated fifos, we must give each IN EP
10aebc77
BD
2516 * a unique tx-fifo even if it is non-periodic.
2517 */
b203d0a2
RB
2518 if (dir_in && hsotg->dedicated_fifos) {
2519 size = hs_ep->ep.maxpacket*hs_ep->mc;
2520 for (i = 1; i <= 8; ++i) {
2521 if (hsotg->fifo_map & (1<<i))
2522 continue;
2523 val = readl(hsotg->regs + DPTXFSIZN(i));
2524 val = (val >> FIFOSIZE_DEPTH_SHIFT)*4;
2525 if (val < size)
2526 continue;
2527 hsotg->fifo_map |= 1<<i;
2528
2529 epctrl |= DXEPCTL_TXFNUM(i);
2530 hs_ep->fifo_index = i;
2531 hs_ep->fifo_size = val;
2532 break;
2533 }
b585a48b
SM
2534 if (i == 8) {
2535 ret = -ENOMEM;
2536 goto error;
2537 }
b203d0a2 2538 }
10aebc77 2539
5b7d70c6
BD
2540 /* for non control endpoints, set PID to D0 */
2541 if (index)
47a1685f 2542 epctrl |= DXEPCTL_SETD0PID;
5b7d70c6
BD
2543
2544 dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
2545 __func__, epctrl);
2546
2547 writel(epctrl, hsotg->regs + epctrl_reg);
2548 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
2549 __func__, readl(hsotg->regs + epctrl_reg));
2550
2551 /* enable the endpoint interrupt */
2552 s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
2553
b585a48b 2554error:
22258f49 2555 spin_unlock_irqrestore(&hsotg->lock, flags);
19c190f9 2556 return ret;
5b7d70c6
BD
2557}
2558
8b9bc460
LM
2559/**
2560 * s3c_hsotg_ep_disable - disable given endpoint
2561 * @ep: The endpoint to disable.
2562 */
5b7d70c6
BD
2563static int s3c_hsotg_ep_disable(struct usb_ep *ep)
2564{
2565 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
941fcce4 2566 struct dwc2_hsotg *hsotg = hs_ep->parent;
5b7d70c6
BD
2567 int dir_in = hs_ep->dir_in;
2568 int index = hs_ep->index;
2569 unsigned long flags;
2570 u32 epctrl_reg;
2571 u32 ctrl;
2572
1e011293 2573 dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
5b7d70c6
BD
2574
2575 if (ep == &hsotg->eps[0].ep) {
2576 dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
2577 return -EINVAL;
2578 }
2579
94cb8fd6 2580 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
5b7d70c6 2581
5ad1d316 2582 spin_lock_irqsave(&hsotg->lock, flags);
5b7d70c6 2583 /* terminate all requests with shutdown */
6b448af4 2584 kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
5b7d70c6 2585
b203d0a2
RB
2586 hsotg->fifo_map &= ~(1<<hs_ep->fifo_index);
2587 hs_ep->fifo_index = 0;
2588 hs_ep->fifo_size = 0;
5b7d70c6
BD
2589
2590 ctrl = readl(hsotg->regs + epctrl_reg);
47a1685f
DN
2591 ctrl &= ~DXEPCTL_EPENA;
2592 ctrl &= ~DXEPCTL_USBACTEP;
2593 ctrl |= DXEPCTL_SNAK;
5b7d70c6
BD
2594
2595 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
2596 writel(ctrl, hsotg->regs + epctrl_reg);
2597
2598 /* disable endpoint interrupts */
2599 s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
2600
22258f49 2601 spin_unlock_irqrestore(&hsotg->lock, flags);
5b7d70c6
BD
2602 return 0;
2603}
2604
2605/**
2606 * on_list - check request is on the given endpoint
2607 * @ep: The endpoint to check.
2608 * @test: The request to test if it is on the endpoint.
8b9bc460 2609 */
5b7d70c6
BD
2610static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
2611{
2612 struct s3c_hsotg_req *req, *treq;
2613
2614 list_for_each_entry_safe(req, treq, &ep->queue, queue) {
2615 if (req == test)
2616 return true;
2617 }
2618
2619 return false;
2620}
2621
8b9bc460
LM
2622/**
2623 * s3c_hsotg_ep_dequeue - dequeue given endpoint
2624 * @ep: The endpoint to dequeue.
2625 * @req: The request to be removed from a queue.
2626 */
5b7d70c6
BD
2627static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2628{
2629 struct s3c_hsotg_req *hs_req = our_req(req);
2630 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
941fcce4 2631 struct dwc2_hsotg *hs = hs_ep->parent;
5b7d70c6
BD
2632 unsigned long flags;
2633
1e011293 2634 dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
5b7d70c6 2635
22258f49 2636 spin_lock_irqsave(&hs->lock, flags);
5b7d70c6
BD
2637
2638 if (!on_list(hs_ep, hs_req)) {
22258f49 2639 spin_unlock_irqrestore(&hs->lock, flags);
5b7d70c6
BD
2640 return -EINVAL;
2641 }
2642
2643 s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
22258f49 2644 spin_unlock_irqrestore(&hs->lock, flags);
5b7d70c6
BD
2645
2646 return 0;
2647}
2648
8b9bc460
LM
2649/**
2650 * s3c_hsotg_ep_sethalt - set halt on a given endpoint
2651 * @ep: The endpoint to set halt.
2652 * @value: Set or unset the halt.
2653 */
5b7d70c6
BD
2654static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
2655{
2656 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
941fcce4 2657 struct dwc2_hsotg *hs = hs_ep->parent;
5b7d70c6 2658 int index = hs_ep->index;
5b7d70c6
BD
2659 u32 epreg;
2660 u32 epctl;
9c39ddc6 2661 u32 xfertype;
5b7d70c6
BD
2662
2663 dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
2664
c9f721b2
RB
2665 if (index == 0) {
2666 if (value)
2667 s3c_hsotg_stall_ep0(hs);
2668 else
2669 dev_warn(hs->dev,
2670 "%s: can't clear halt on ep0\n", __func__);
2671 return 0;
2672 }
2673
5b7d70c6
BD
2674 /* write both IN and OUT control registers */
2675
94cb8fd6 2676 epreg = DIEPCTL(index);
5b7d70c6
BD
2677 epctl = readl(hs->regs + epreg);
2678
9c39ddc6 2679 if (value) {
47a1685f
DN
2680 epctl |= DXEPCTL_STALL + DXEPCTL_SNAK;
2681 if (epctl & DXEPCTL_EPENA)
2682 epctl |= DXEPCTL_EPDIS;
9c39ddc6 2683 } else {
47a1685f
DN
2684 epctl &= ~DXEPCTL_STALL;
2685 xfertype = epctl & DXEPCTL_EPTYPE_MASK;
2686 if (xfertype == DXEPCTL_EPTYPE_BULK ||
2687 xfertype == DXEPCTL_EPTYPE_INTERRUPT)
2688 epctl |= DXEPCTL_SETD0PID;
9c39ddc6 2689 }
5b7d70c6
BD
2690
2691 writel(epctl, hs->regs + epreg);
2692
94cb8fd6 2693 epreg = DOEPCTL(index);
5b7d70c6
BD
2694 epctl = readl(hs->regs + epreg);
2695
2696 if (value)
47a1685f 2697 epctl |= DXEPCTL_STALL;
9c39ddc6 2698 else {
47a1685f
DN
2699 epctl &= ~DXEPCTL_STALL;
2700 xfertype = epctl & DXEPCTL_EPTYPE_MASK;
2701 if (xfertype == DXEPCTL_EPTYPE_BULK ||
2702 xfertype == DXEPCTL_EPTYPE_INTERRUPT)
2703 epctl |= DXEPCTL_SETD0PID;
9c39ddc6 2704 }
5b7d70c6
BD
2705
2706 writel(epctl, hs->regs + epreg);
2707
a18ed7b0
RB
2708 hs_ep->halted = value;
2709
5b7d70c6
BD
2710 return 0;
2711}
2712
5ad1d316
LM
2713/**
2714 * s3c_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
2715 * @ep: The endpoint to set halt.
2716 * @value: Set or unset the halt.
2717 */
2718static int s3c_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
2719{
2720 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
941fcce4 2721 struct dwc2_hsotg *hs = hs_ep->parent;
5ad1d316
LM
2722 unsigned long flags = 0;
2723 int ret = 0;
2724
2725 spin_lock_irqsave(&hs->lock, flags);
2726 ret = s3c_hsotg_ep_sethalt(ep, value);
2727 spin_unlock_irqrestore(&hs->lock, flags);
2728
2729 return ret;
2730}
2731
5b7d70c6
BD
2732static struct usb_ep_ops s3c_hsotg_ep_ops = {
2733 .enable = s3c_hsotg_ep_enable,
2734 .disable = s3c_hsotg_ep_disable,
2735 .alloc_request = s3c_hsotg_ep_alloc_request,
2736 .free_request = s3c_hsotg_ep_free_request,
5ad1d316 2737 .queue = s3c_hsotg_ep_queue_lock,
5b7d70c6 2738 .dequeue = s3c_hsotg_ep_dequeue,
5ad1d316 2739 .set_halt = s3c_hsotg_ep_sethalt_lock,
25985edc 2740 /* note, don't believe we have any call for the fifo routines */
5b7d70c6
BD
2741};
2742
41188786
LM
2743/**
2744 * s3c_hsotg_phy_enable - enable platform phy dev
8b9bc460 2745 * @hsotg: The driver state
41188786
LM
2746 *
2747 * A wrapper for platform code responsible for controlling
2748 * low-level USB code
2749 */
941fcce4 2750static void s3c_hsotg_phy_enable(struct dwc2_hsotg *hsotg)
41188786
LM
2751{
2752 struct platform_device *pdev = to_platform_device(hsotg->dev);
2753
2754 dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);
b2e587db 2755
ca2c5ba8 2756 if (hsotg->uphy)
74084844 2757 usb_phy_init(hsotg->uphy);
ca2c5ba8 2758 else if (hsotg->plat && hsotg->plat->phy_init)
41188786 2759 hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
ca2c5ba8
KD
2760 else {
2761 phy_init(hsotg->phy);
2762 phy_power_on(hsotg->phy);
2763 }
41188786
LM
2764}
2765
2766/**
2767 * s3c_hsotg_phy_disable - disable platform phy dev
8b9bc460 2768 * @hsotg: The driver state
41188786
LM
2769 *
2770 * A wrapper for platform code responsible for controlling
2771 * low-level USB code
2772 */
941fcce4 2773static void s3c_hsotg_phy_disable(struct dwc2_hsotg *hsotg)
41188786
LM
2774{
2775 struct platform_device *pdev = to_platform_device(hsotg->dev);
2776
ca2c5ba8 2777 if (hsotg->uphy)
74084844 2778 usb_phy_shutdown(hsotg->uphy);
ca2c5ba8 2779 else if (hsotg->plat && hsotg->plat->phy_exit)
41188786 2780 hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
ca2c5ba8
KD
2781 else {
2782 phy_power_off(hsotg->phy);
2783 phy_exit(hsotg->phy);
2784 }
41188786
LM
2785}
2786
8b9bc460
LM
2787/**
2788 * s3c_hsotg_init - initalize the usb core
2789 * @hsotg: The driver state
2790 */
941fcce4 2791static void s3c_hsotg_init(struct dwc2_hsotg *hsotg)
b3f489b2
LM
2792{
2793 /* unmask subset of endpoint interrupts */
2794
47a1685f
DN
2795 writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
2796 DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
2797 hsotg->regs + DIEPMSK);
b3f489b2 2798
47a1685f
DN
2799 writel(DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
2800 DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
2801 hsotg->regs + DOEPMSK);
b3f489b2 2802
94cb8fd6 2803 writel(0, hsotg->regs + DAINTMSK);
b3f489b2
LM
2804
2805 /* Be in disconnected state until gadget is registered */
47a1685f 2806 __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
b3f489b2
LM
2807
2808 if (0) {
2809 /* post global nak until we're ready */
47a1685f 2810 writel(DCTL_SGNPINNAK | DCTL_SGOUTNAK,
94cb8fd6 2811 hsotg->regs + DCTL);
b3f489b2
LM
2812 }
2813
2814 /* setup fifos */
2815
2816 dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
94cb8fd6
LM
2817 readl(hsotg->regs + GRXFSIZ),
2818 readl(hsotg->regs + GNPTXFSIZ));
b3f489b2
LM
2819
2820 s3c_hsotg_init_fifo(hsotg);
2821
2822 /* set the PLL on, remove the HNP/SRP and set the PHY */
47a1685f 2823 writel(GUSBCFG_PHYIF16 | GUSBCFG_TOUTCAL(7) | (0x5 << 10),
94cb8fd6 2824 hsotg->regs + GUSBCFG);
b3f489b2 2825
47a1685f 2826 writel(using_dma(hsotg) ? GAHBCFG_DMA_EN : 0x0,
94cb8fd6 2827 hsotg->regs + GAHBCFG);
b3f489b2
LM
2828}
2829
8b9bc460
LM
2830/**
2831 * s3c_hsotg_udc_start - prepare the udc for work
2832 * @gadget: The usb gadget state
2833 * @driver: The usb gadget driver
2834 *
2835 * Perform initialization to prepare udc device and driver
2836 * to work.
2837 */
f65f0f10
LM
2838static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
2839 struct usb_gadget_driver *driver)
5b7d70c6 2840{
941fcce4 2841 struct dwc2_hsotg *hsotg = to_hsotg(gadget);
5b9451f8 2842 unsigned long flags;
5b7d70c6
BD
2843 int ret;
2844
2845 if (!hsotg) {
a023da33 2846 pr_err("%s: called with no device\n", __func__);
5b7d70c6
BD
2847 return -ENODEV;
2848 }
2849
2850 if (!driver) {
2851 dev_err(hsotg->dev, "%s: no driver\n", __func__);
2852 return -EINVAL;
2853 }
2854
7177aed4 2855 if (driver->max_speed < USB_SPEED_FULL)
5b7d70c6 2856 dev_err(hsotg->dev, "%s: bad speed\n", __func__);
5b7d70c6 2857
f65f0f10 2858 if (!driver->setup) {
5b7d70c6
BD
2859 dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
2860 return -EINVAL;
2861 }
2862
7ad8096e 2863 mutex_lock(&hsotg->init_mutex);
5b7d70c6
BD
2864 WARN_ON(hsotg->driver);
2865
2866 driver->driver.bus = NULL;
2867 hsotg->driver = driver;
7d7b2292 2868 hsotg->gadget.dev.of_node = hsotg->dev->of_node;
5b7d70c6
BD
2869 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
2870
d00b4142
RB
2871 clk_enable(hsotg->clk);
2872
f65f0f10
LM
2873 ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
2874 hsotg->supplies);
5b7d70c6 2875 if (ret) {
f65f0f10 2876 dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
5b7d70c6
BD
2877 goto err;
2878 }
2879
c816c47f 2880 s3c_hsotg_phy_enable(hsotg);
f6c01592
GH
2881 if (!IS_ERR_OR_NULL(hsotg->uphy))
2882 otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
c816c47f 2883
5b9451f8
MS
2884 spin_lock_irqsave(&hsotg->lock, flags);
2885 s3c_hsotg_init(hsotg);
2886 s3c_hsotg_core_init_disconnected(hsotg);
dc6e69e6 2887 hsotg->enabled = 0;
5b9451f8
MS
2888 spin_unlock_irqrestore(&hsotg->lock, flags);
2889
5b7d70c6 2890 dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
5b9451f8 2891
7ad8096e
MS
2892 mutex_unlock(&hsotg->init_mutex);
2893
5b7d70c6
BD
2894 return 0;
2895
2896err:
7ad8096e 2897 mutex_unlock(&hsotg->init_mutex);
5b7d70c6 2898 hsotg->driver = NULL;
5b7d70c6
BD
2899 return ret;
2900}
2901
/**
 * s3c_hsotg_udc_stop - stop the udc
 * @gadget: The usb gadget state
 *
 * Stop udc hw block and stay tunned for future transmissions
 *
 * Tears down in reverse order of udc_start: endpoints first, then
 * driver state under the lock, then OTG/PHY, supplies and clock.
 */
static int s3c_hsotg_udc_stop(struct usb_gadget *gadget)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags = 0;
	int ep;

	if (!hsotg)
		return -ENODEV;

	mutex_lock(&hsotg->init_mutex);

	/* all endpoints should be shutdown (ep0 is handled separately) */
	for (ep = 1; ep < hsotg->num_of_eps; ep++)
		s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);

	spin_lock_irqsave(&hsotg->lock, flags);

	hsotg->driver = NULL;
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
	hsotg->enabled = 0;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	/* detach from the OTG transceiver before powering the PHY down */
	if (!IS_ERR_OR_NULL(hsotg->uphy))
		otg_set_peripheral(hsotg->uphy->otg, NULL);
	s3c_hsotg_phy_disable(hsotg);

	regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);

	clk_disable(hsotg->clk);

	mutex_unlock(&hsotg->init_mutex);

	return 0;
}
5b7d70c6 2944
/**
 * s3c_hsotg_gadget_getframe - read the frame number
 * @gadget: The usb gadget state
 *
 * Read the {micro} frame number
 */
static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);

	return s3c_hsotg_read_frameno(hsotg);
}
2955
/**
 * s3c_hsotg_pullup - connect/disconnect the USB PHY
 * @gadget: The usb gadget state
 * @is_on: Current state of the USB PHY
 *
 * Connect/Disconnect the USB PHY pullup
 */
static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags = 0;

	dev_dbg(hsotg->dev, "%s: is_on: %d\n", __func__, is_on);

	mutex_lock(&hsotg->init_mutex);
	spin_lock_irqsave(&hsotg->lock, flags);
	if (is_on) {
		/*
		 * NOTE(review): clk_enable/clk_disable are called with the
		 * spinlock held - presumably safe because clk_prepare was
		 * done elsewhere, but confirm these never sleep here.
		 */
		clk_enable(hsotg->clk);
		hsotg->enabled = 1;
		s3c_hsotg_core_connect(hsotg);
	} else {
		s3c_hsotg_core_disconnect(hsotg);
		hsotg->enabled = 0;
		clk_disable(hsotg->clk);
	}

	/* speed is re-detected at the next enumeration */
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock_irqrestore(&hsotg->lock, flags);
	mutex_unlock(&hsotg->init_mutex);

	return 0;
}
2988
/* gadget-level operations handed to the UDC core */
static const struct usb_gadget_ops s3c_hsotg_gadget_ops = {
	.get_frame	= s3c_hsotg_gadget_getframe,
	.udc_start	= s3c_hsotg_udc_start,
	.udc_stop	= s3c_hsotg_udc_stop,
	.pullup		= s3c_hsotg_pullup,
};
2995
2996/**
2997 * s3c_hsotg_initep - initialise a single endpoint
2998 * @hsotg: The device state.
2999 * @hs_ep: The endpoint to be initialised.
3000 * @epnum: The endpoint number
3001 *
3002 * Initialise the given endpoint (as part of the probe and device state
3003 * creation) to give to the gadget driver. Setup the endpoint name, any
3004 * direction information and other state that may be required.
3005 */
941fcce4 3006static void s3c_hsotg_initep(struct dwc2_hsotg *hsotg,
5b7d70c6
BD
3007 struct s3c_hsotg_ep *hs_ep,
3008 int epnum)
3009{
5b7d70c6
BD
3010 char *dir;
3011
3012 if (epnum == 0)
3013 dir = "";
3014 else if ((epnum % 2) == 0) {
3015 dir = "out";
3016 } else {
3017 dir = "in";
3018 hs_ep->dir_in = 1;
3019 }
3020
3021 hs_ep->index = epnum;
3022
3023 snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
3024
3025 INIT_LIST_HEAD(&hs_ep->queue);
3026 INIT_LIST_HEAD(&hs_ep->ep.ep_list);
3027
5b7d70c6
BD
3028 /* add to the list of endpoints known by the gadget driver */
3029 if (epnum)
3030 list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
3031
3032 hs_ep->parent = hsotg;
3033 hs_ep->ep.name = hs_ep->name;
e117e742 3034 usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT);
5b7d70c6
BD
3035 hs_ep->ep.ops = &s3c_hsotg_ep_ops;
3036
8b9bc460
LM
3037 /*
3038 * if we're using dma, we need to set the next-endpoint pointer
5b7d70c6
BD
3039 * to be something valid.
3040 */
3041
3042 if (using_dma(hsotg)) {
47a1685f 3043 u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
94cb8fd6
LM
3044 writel(next, hsotg->regs + DIEPCTL(epnum));
3045 writel(next, hsotg->regs + DOEPCTL(epnum));
5b7d70c6
BD
3046 }
3047}
3048
b3f489b2
LM
3049/**
3050 * s3c_hsotg_hw_cfg - read HW configuration registers
3051 * @param: The device state
3052 *
3053 * Read the USB core HW configuration registers
3054 */
941fcce4 3055static void s3c_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
5b7d70c6 3056{
cff9eb75 3057 u32 cfg2, cfg3, cfg4;
b3f489b2 3058 /* check hardware configuration */
5b7d70c6 3059
b3f489b2
LM
3060 cfg2 = readl(hsotg->regs + 0x48);
3061 hsotg->num_of_eps = (cfg2 >> 10) & 0xF;
10aebc77 3062
cff9eb75
MS
3063 cfg3 = readl(hsotg->regs + 0x4C);
3064 hsotg->fifo_mem = (cfg3 >> 16);
10aebc77
BD
3065
3066 cfg4 = readl(hsotg->regs + 0x50);
3067 hsotg->dedicated_fifos = (cfg4 >> 25) & 1;
3068
cff9eb75
MS
3069 dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
3070 hsotg->num_of_eps,
3071 hsotg->dedicated_fifos ? "dedicated" : "shared",
3072 hsotg->fifo_mem);
5b7d70c6
BD
3073}
3074
8b9bc460
LM
/**
 * s3c_hsotg_dump - dump state of the udc
 * @hsotg: The device state
 *
 * Dump the core's global, FIFO-sizing and per-endpoint registers to the
 * kernel log. Compiles to an empty function unless DEBUG is defined.
 */
static void s3c_hsotg_dump(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	struct device *dev = hsotg->dev;
	void __iomem *regs = hsotg->regs;
	u32 val;
	int idx;

	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
		 readl(regs + DCFG), readl(regs + DCTL),
		 readl(regs + DIEPMSK));

	/* NOTE(review): 0x44 is an unnamed raw offset (GHWCFG1?) — confirm */
	dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
		 readl(regs + GAHBCFG), readl(regs + 0x44));

	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		 readl(regs + GRXFSIZ), readl(regs + GNPTXFSIZ));

	/* show periodic fifo settings */

	for (idx = 1; idx <= 15; idx++) {
		val = readl(regs + DPTXFSIZN(idx));
		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
			 val >> FIFOSIZE_DEPTH_SHIFT,
			 val & FIFOSIZE_STARTADDR_MASK);
	}

	/* per-endpoint control/transfer-size/DMA registers, all 15 slots */
	for (idx = 0; idx < 15; idx++) {
		dev_info(dev,
			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
			 readl(regs + DIEPCTL(idx)),
			 readl(regs + DIEPTSIZ(idx)),
			 readl(regs + DIEPDMA(idx)));

		val = readl(regs + DOEPCTL(idx));
		dev_info(dev,
			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
			 idx, readl(regs + DOEPCTL(idx)),
			 readl(regs + DOEPTSIZ(idx)),
			 readl(regs + DOEPDMA(idx)));

	}

	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
		 readl(regs + DVBUSDIS), readl(regs + DVBUSPULSE));
#endif
}
3126
5b7d70c6
BD
/**
 * state_show - debugfs: show overall driver and device state.
 * @seq: The seq file to write to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows the overall state of the hardware and
 * some general information about each of the endpoints available
 * to the system.
 */
static int state_show(struct seq_file *seq, void *v)
{
	struct dwc2_hsotg *hsotg = seq->private;
	void __iomem *regs = hsotg->regs;
	int idx;

	seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
		 readl(regs + DCFG),
		 readl(regs + DCTL),
		 readl(regs + DSTS));

	seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
		   readl(regs + DIEPMSK), readl(regs + DOEPMSK));

	seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
		   readl(regs + GINTMSK),
		   readl(regs + GINTSTS));

	seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
		   readl(regs + DAINTMSK),
		   readl(regs + DAINT));

	seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
		   readl(regs + GNPTXSTS),
		   readl(regs + GRXSTSR));

	seq_puts(seq, "\nEndpoint status:\n");

	/*
	 * NOTE(review): iterates all 15 hardware endpoint slots rather than
	 * hsotg->num_of_eps; slots beyond the configured count are still
	 * read — presumably harmless register reads, confirm.
	 */
	for (idx = 0; idx < 15; idx++) {
		u32 in, out;

		in = readl(regs + DIEPCTL(idx));
		out = readl(regs + DOEPCTL(idx));

		seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
			   idx, in, out);

		in = readl(regs + DIEPTSIZ(idx));
		out = readl(regs + DOEPTSIZ(idx));

		seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
			   in, out);

		seq_puts(seq, "\n");
	}

	return 0;
}
3184
/* debugfs open: bind state_show to the per-device private data */
static int state_open(struct inode *inode, struct file *file)
{
	return single_open(file, state_show, inode->i_private);
}
3189
/* file operations for the "state" debugfs entry (seq_file based) */
static const struct file_operations state_fops = {
	.owner		= THIS_MODULE,
	.open		= state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3197
3198/**
3199 * fifo_show - debugfs: show the fifo information
3200 * @seq: The seq_file to write data to.
3201 * @v: Unused parameter.
3202 *
3203 * Show the FIFO information for the overall fifo and all the
3204 * periodic transmission FIFOs.
8b9bc460 3205 */
5b7d70c6
BD
3206static int fifo_show(struct seq_file *seq, void *v)
3207{
941fcce4 3208 struct dwc2_hsotg *hsotg = seq->private;
5b7d70c6
BD
3209 void __iomem *regs = hsotg->regs;
3210 u32 val;
3211 int idx;
3212
a023da33 3213 seq_puts(seq, "Non-periodic FIFOs:\n");
94cb8fd6 3214 seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + GRXFSIZ));
5b7d70c6 3215
94cb8fd6 3216 val = readl(regs + GNPTXFSIZ);
5b7d70c6 3217 seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
47a1685f
DN
3218 val >> FIFOSIZE_DEPTH_SHIFT,
3219 val & FIFOSIZE_DEPTH_MASK);
5b7d70c6 3220
a023da33 3221 seq_puts(seq, "\nPeriodic TXFIFOs:\n");
5b7d70c6
BD
3222
3223 for (idx = 1; idx <= 15; idx++) {
47a1685f 3224 val = readl(regs + DPTXFSIZN(idx));
5b7d70c6
BD
3225
3226 seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
47a1685f
DN
3227 val >> FIFOSIZE_DEPTH_SHIFT,
3228 val & FIFOSIZE_STARTADDR_MASK);
5b7d70c6
BD
3229 }
3230
3231 return 0;
3232}
3233
/* debugfs open: bind fifo_show to the per-device private data */
static int fifo_open(struct inode *inode, struct file *file)
{
	return single_open(file, fifo_show, inode->i_private);
}
3238
/* file operations for the "fifo" debugfs entry (seq_file based) */
static const struct file_operations fifo_fops = {
	.owner		= THIS_MODULE,
	.open		= fifo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3246
3247
/* Map an endpoint direction flag to its human-readable name. */
static const char *decode_direction(int is_in)
{
	if (is_in)
		return "in";

	return "out";
}
3252
/**
 * ep_show - debugfs: show the state of an endpoint.
 * @seq: The seq_file to write data to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows the state of the given endpoint (one is
 * registered for each available).
 */
static int ep_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg_ep *ep = seq->private;
	struct dwc2_hsotg *hsotg = ep->parent;
	struct s3c_hsotg_req *req;
	void __iomem *regs = hsotg->regs;
	int index = ep->index;
	int show_limit = 15;	/* cap on queued requests printed */
	unsigned long flags;

	seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
		   ep->index, ep->ep.name, decode_direction(ep->dir_in));

	/* first show the register state */

	seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
		   readl(regs + DIEPCTL(index)),
		   readl(regs + DOEPCTL(index)));

	seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
		   readl(regs + DIEPDMA(index)),
		   readl(regs + DOEPDMA(index)));

	seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
		   readl(regs + DIEPINT(index)),
		   readl(regs + DOEPINT(index)));

	seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
		   readl(regs + DIEPTSIZ(index)),
		   readl(regs + DOEPTSIZ(index)));

	seq_puts(seq, "\n");
	seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
	seq_printf(seq, "total_data=%ld\n", ep->total_data);

	seq_printf(seq, "request list (%p,%p):\n",
		   ep->queue.next, ep->queue.prev);

	/* hold the device lock while walking the request queue */
	spin_lock_irqsave(&hsotg->lock, flags);

	list_for_each_entry(req, &ep->queue, queue) {
		if (--show_limit < 0) {
			seq_puts(seq, "not showing more requests...\n");
			break;
		}

		/* '*' marks the request currently being processed */
		seq_printf(seq, "%c req %p: %d bytes @%p, ",
			   req == ep->req ? '*' : ' ',
			   req, req->req.length, req->req.buf);
		seq_printf(seq, "%d done, res %d\n",
			   req->req.actual, req->req.status);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;
}
3318
/* debugfs open: bind ep_show to this endpoint's private data */
static int ep_open(struct inode *inode, struct file *file)
{
	return single_open(file, ep_show, inode->i_private);
}
3323
/* file operations for the per-endpoint debugfs entries (seq_file based) */
static const struct file_operations ep_fops = {
	.owner		= THIS_MODULE,
	.open		= ep_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3331
/**
 * s3c_hsotg_create_debug - create debugfs directory and files
 * @hsotg: The driver state
 *
 * Create the debugfs files to allow the user to get information
 * about the state of the system. The directory name is created
 * with the same name as the device itself, in case we end up
 * with multiple blocks in future systems.
 *
 * Failures are logged but not propagated; debugfs is best-effort.
 */
static void s3c_hsotg_create_debug(struct dwc2_hsotg *hsotg)
{
	struct dentry *root;
	unsigned epidx;

	root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
	hsotg->debug_root = root;
	/*
	 * NOTE(review): debugfs calls of this era return NULL on failure
	 * (ERR_PTR only when debugfs is disabled), so these IS_ERR()
	 * checks may miss NULL failures — confirm against the debugfs API.
	 */
	if (IS_ERR(root)) {
		dev_err(hsotg->dev, "cannot create debug root\n");
		return;
	}

	/* create general state file */

	hsotg->debug_file = debugfs_create_file("state", 0444, root,
						hsotg, &state_fops);

	if (IS_ERR(hsotg->debug_file))
		dev_err(hsotg->dev, "%s: failed to create state\n", __func__);

	hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
						hsotg, &fifo_fops);

	if (IS_ERR(hsotg->debug_fifo))
		dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);

	/* create one file for each endpoint */

	for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];

		ep->debugfs = debugfs_create_file(ep->name, 0444,
						  root, ep, &ep_fops);

		if (IS_ERR(ep->debugfs))
			dev_err(hsotg->dev, "failed to create %s debug file\n",
				ep->name);
	}
}
3380
3381/**
3382 * s3c_hsotg_delete_debug - cleanup debugfs entries
3383 * @hsotg: The driver state
3384 *
3385 * Cleanup (remove) the debugfs files for use on module exit.
8b9bc460 3386 */
941fcce4 3387static void s3c_hsotg_delete_debug(struct dwc2_hsotg *hsotg)
5b7d70c6
BD
3388{
3389 unsigned epidx;
3390
b3f489b2 3391 for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
5b7d70c6
BD
3392 struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
3393 debugfs_remove(ep->debugfs);
3394 }
3395
3396 debugfs_remove(hsotg->debug_file);
3397 debugfs_remove(hsotg->debug_fifo);
3398 debugfs_remove(hsotg->debug_root);
3399}
3400
/**
 * dwc2_gadget_init - init function for gadget
 * @hsotg: The data structure for the DWC2 driver.
 * @irq: The IRQ number for the controller.
 *
 * Probe-time setup of the gadget side: resolve the PHY (generic PHY,
 * old-style transceiver, or platform data), acquire clock and
 * regulators, reset and configure the core, allocate the endpoint
 * array and ep0 request, register the UDC and create debugfs entries.
 * Returns 0 on success or a negative errno.
 */
int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
{
	struct device *dev = hsotg->dev;
	struct s3c_hsotg_plat *plat = dev->platform_data;
	struct s3c_hsotg_ep *eps;
	int epnum;
	int ret;
	int i;

	/* Set default UTMI width */
	hsotg->phyif = GUSBCFG_PHYIF16;

	/*
	 * If platform probe couldn't find a generic PHY or an old style
	 * USB PHY, fall back to pdata
	 */
	if (IS_ERR_OR_NULL(hsotg->phy) && IS_ERR_OR_NULL(hsotg->uphy)) {
		plat = dev_get_platdata(dev);
		if (!plat) {
			dev_err(dev,
				"no platform data or transceiver defined\n");
			return -EPROBE_DEFER;
		}
		hsotg->plat = plat;
	} else if (hsotg->phy) {
		/*
		 * If using the generic PHY framework, check if the PHY bus
		 * width is 8-bit and set the phyif appropriately.
		 */
		if (phy_get_bus_width(hsotg->phy) == 8)
			hsotg->phyif = GUSBCFG_PHYIF8;
	}

	/* the otg clock is optional; NULL is tolerated by the clk API */
	hsotg->clk = devm_clk_get(dev, "otg");
	if (IS_ERR(hsotg->clk)) {
		hsotg->clk = NULL;
		dev_dbg(dev, "cannot get otg clock\n");
	}

	hsotg->gadget.max_speed = USB_SPEED_HIGH;
	hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
	hsotg->gadget.name = dev_name(dev);

	/* reset the system */

	ret = clk_prepare_enable(hsotg->clk);
	if (ret) {
		dev_err(dev, "failed to enable otg clk\n");
		goto err_clk;
	}


	/* regulators */

	for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
		hsotg->supplies[i].supply = s3c_hsotg_supply_names[i];

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsotg->supplies),
				 hsotg->supplies);
	if (ret) {
		dev_err(dev, "failed to request supplies: %d\n", ret);
		goto err_clk;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
				    hsotg->supplies);

	if (ret) {
		dev_err(dev, "failed to enable supplies: %d\n", ret);
		goto err_supplies;
	}

	/* usb phy enable */
	s3c_hsotg_phy_enable(hsotg);

	s3c_hsotg_corereset(hsotg);
	s3c_hsotg_hw_cfg(hsotg);
	s3c_hsotg_init(hsotg);

	ret = devm_request_irq(hsotg->dev, irq, s3c_hsotg_irq, IRQF_SHARED,
				dev_name(hsotg->dev), hsotg);
	if (ret < 0) {
		/*
		 * NOTE(review): this branch disables the clock here AND then
		 * jumps to err_clk, which calls clk_disable_unprepare()
		 * again — double disable. It also bypasses err_supplies even
		 * though the regulators were just disabled manually. Needs
		 * untangling.
		 */
		s3c_hsotg_phy_disable(hsotg);
		clk_disable_unprepare(hsotg->clk);
		regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
				       hsotg->supplies);
		dev_err(dev, "cannot claim IRQ for gadget\n");
		goto err_clk;
	}

	/* hsotg->num_of_eps holds number of EPs other than ep0 */

	if (hsotg->num_of_eps == 0) {
		dev_err(dev, "wrong number of EPs (zero)\n");
		ret = -EINVAL;
		/*
		 * NOTE(review): at this point the regulators are enabled,
		 * but err_supplies only disables the PHY and clock — the
		 * supplies appear to be left on. Same for the allocation
		 * failures below. Confirm and fix the unwind order.
		 */
		goto err_supplies;
	}

	eps = kcalloc(hsotg->num_of_eps + 1, sizeof(struct s3c_hsotg_ep),
		      GFP_KERNEL);
	if (!eps) {
		ret = -ENOMEM;
		goto err_supplies;
	}

	hsotg->eps = eps;

	/* setup endpoint information */

	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
	hsotg->gadget.ep0 = &hsotg->eps[0].ep;

	/* allocate EP0 request */

	hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
						     GFP_KERNEL);
	if (!hsotg->ctrl_req) {
		dev_err(dev, "failed to allocate ctrl req\n");
		ret = -ENOMEM;
		goto err_ep_mem;
	}

	/* initialise the endpoints now the core has been initialised */
	for (epnum = 0; epnum < hsotg->num_of_eps; epnum++)
		s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);

	/* disable power and clock */
	s3c_hsotg_phy_disable(hsotg);

	ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
				    hsotg->supplies);
	if (ret) {
		dev_err(dev, "failed to disable supplies: %d\n", ret);
		goto err_ep_mem;
	}

	ret = usb_add_gadget_udc(dev, &hsotg->gadget);
	if (ret)
		goto err_ep_mem;

	s3c_hsotg_create_debug(hsotg);

	s3c_hsotg_dump(hsotg);

	return 0;

	/* error unwind: free eps, then PHY off, then clock off */
err_ep_mem:
	kfree(eps);
err_supplies:
	s3c_hsotg_phy_disable(hsotg);
err_clk:
	clk_disable_unprepare(hsotg->clk);

	return ret;
}
EXPORT_SYMBOL_GPL(dwc2_gadget_init);
5b7d70c6 3562
/**
 * s3c_hsotg_remove - remove function for hsotg driver
 * @hsotg: The device state
 *
 * Unregister the gadget from the UDC core, tear down the debugfs
 * entries and release the (prepared and enabled) otg clock.
 *
 * NOTE(review): hsotg->eps and hsotg->ctrl_req allocated in
 * dwc2_gadget_init() are not freed here — confirm they are released
 * elsewhere, otherwise this leaks on driver removal.
 */
int s3c_hsotg_remove(struct dwc2_hsotg *hsotg)
{
	usb_del_gadget_udc(&hsotg->gadget);
	s3c_hsotg_delete_debug(hsotg);
	clk_disable_unprepare(hsotg->clk);

	return 0;
}
EXPORT_SYMBOL_GPL(s3c_hsotg_remove);
5b7d70c6 3576
/**
 * s3c_hsotg_suspend - PM suspend handler for the gadget side
 * @hsotg: The device state
 *
 * If a function driver is bound: disconnect the core, power down the
 * PHY, disable every endpoint, then drop regulators and clock.
 * A no-op when no gadget driver is bound. Returns the last
 * regulator_bulk_disable() result, or 0.
 */
int s3c_hsotg_suspend(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;
	int ret = 0;

	/* serialise against udc_start/udc_stop and pullup */
	mutex_lock(&hsotg->init_mutex);

	if (hsotg->driver) {
		int ep;

		dev_info(hsotg->dev, "suspending usb gadget %s\n",
			 hsotg->driver->driver.name);

		spin_lock_irqsave(&hsotg->lock, flags);
		if (hsotg->enabled)
			s3c_hsotg_core_disconnect(hsotg);
		s3c_hsotg_disconnect(hsotg);
		hsotg->gadget.speed = USB_SPEED_UNKNOWN;
		spin_unlock_irqrestore(&hsotg->lock, flags);

		s3c_hsotg_phy_disable(hsotg);

		for (ep = 0; ep < hsotg->num_of_eps; ep++)
			s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);

		ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
					     hsotg->supplies);
		/* clock stays prepared; only the enable count is dropped */
		clk_disable(hsotg->clk);
	}

	mutex_unlock(&hsotg->init_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(s3c_hsotg_suspend);
b83e333a 3612
/**
 * s3c_hsotg_resume - PM resume handler for the gadget side
 * @hsotg: The device state
 *
 * Mirror of s3c_hsotg_suspend(): re-enable clock and regulators,
 * power the PHY back up, re-initialise the core and reconnect if the
 * gadget was enabled before suspend. A no-op when no gadget driver is
 * bound. Returns the regulator_bulk_enable() result, or 0.
 */
int s3c_hsotg_resume(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;
	int ret = 0;

	/* serialise against udc_start/udc_stop and pullup */
	mutex_lock(&hsotg->init_mutex);

	if (hsotg->driver) {
		dev_info(hsotg->dev, "resuming usb gadget %s\n",
			 hsotg->driver->driver.name);

		clk_enable(hsotg->clk);
		ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
					    hsotg->supplies);

		s3c_hsotg_phy_enable(hsotg);

		spin_lock_irqsave(&hsotg->lock, flags);
		s3c_hsotg_core_init_disconnected(hsotg);
		if (hsotg->enabled)
			s3c_hsotg_core_connect(hsotg);
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}
	mutex_unlock(&hsotg->init_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(s3c_hsotg_resume);