1/*
2 * hcd.c - DesignWare HS OTG Controller host-mode routines
3 *
4 * Copyright (C) 2004-2013 Synopsys, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The names of the above-listed copyright holders may not be used
16 * to endorse or promote products derived from this software without
17 * specific prior written permission.
18 *
19 * ALTERNATIVELY, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") as published by the Free Software
21 * Foundation; either version 2 of the License, or (at your option) any
22 * later version.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36
37/*
38 * This file contains the core HCD code, and implements the Linux hc_driver
39 * API
40 */
41#include <linux/kernel.h>
42#include <linux/module.h>
43#include <linux/spinlock.h>
44#include <linux/interrupt.h>
45#include <linux/dma-mapping.h>
46#include <linux/delay.h>
47#include <linux/io.h>
48#include <linux/slab.h>
49#include <linux/usb.h>
50
51#include <linux/usb/hcd.h>
52#include <linux/usb/ch11.h>
53
54#include "core.h"
55#include "hcd.h"
56
57/**
58 * dwc2_dump_channel_info() - Prints the state of a host channel
59 *
60 * @hsotg: Programming view of DWC_otg controller
61 * @chan: Pointer to the channel to dump
62 *
63 * Must be called with interrupt disabled and spinlock held
64 *
65 * NOTE: This function will be removed once the peripheral controller code
66 * is integrated and the driver is stable
67 */
68static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
69 struct dwc2_host_chan *chan)
70{
71#ifdef VERBOSE_DEBUG
72 int num_channels = hsotg->core_params->host_channels;
73 struct dwc2_qh *qh;
74 u32 hcchar;
75 u32 hcsplt;
76 u32 hctsiz;
77 u32 hc_dma;
78 int i;
79
80 if (chan == NULL)
81 return;
82
83 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
84 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));
85 hctsiz = readl(hsotg->regs + HCTSIZ(chan->hc_num));
86 hc_dma = readl(hsotg->regs + HCDMA(chan->hc_num));
87
88 dev_dbg(hsotg->dev, " Assigned to channel %p:\n", chan);
89 dev_dbg(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n",
90 hcchar, hcsplt);
91 dev_dbg(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n",
92 hctsiz, hc_dma);
93 dev_dbg(hsotg->dev, " dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
94 chan->dev_addr, chan->ep_num, chan->ep_is_in);
95 dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
96 dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
97 dev_dbg(hsotg->dev, " data_pid_start: %d\n", chan->data_pid_start);
98 dev_dbg(hsotg->dev, " xfer_started: %d\n", chan->xfer_started);
99 dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
100 dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
101 dev_dbg(hsotg->dev, " xfer_dma: %08lx\n",
102 (unsigned long)chan->xfer_dma);
103 dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
104 dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
105 dev_dbg(hsotg->dev, " NP inactive sched:\n");
106 list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive,
107 qh_list_entry)
108 dev_dbg(hsotg->dev, " %p\n", qh);
109 dev_dbg(hsotg->dev, " NP active sched:\n");
110 list_for_each_entry(qh, &hsotg->non_periodic_sched_active,
111 qh_list_entry)
112 dev_dbg(hsotg->dev, " %p\n", qh);
113 dev_dbg(hsotg->dev, " Channels:\n");
114 for (i = 0; i < num_channels; i++) {
115 struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
116
117 dev_dbg(hsotg->dev, " %2d: %p\n", i, chan);
118 }
119#endif /* VERBOSE_DEBUG */
120}
121
122/*
123 * Processes all the URBs in a single list of QHs. Completes them with
124 * -ETIMEDOUT and frees the QTD.
125 *
126 * Must be called with interrupt disabled and spinlock held
127 */
128static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
129 struct list_head *qh_list)
130{
131 struct dwc2_qh *qh, *qh_tmp;
132 struct dwc2_qtd *qtd, *qtd_tmp;
133
134 list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
135 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
136 qtd_list_entry) {
137 if (qtd->urb != NULL) {
138 dwc2_host_complete(hsotg, qtd->urb->priv,
139 qtd->urb, -ETIMEDOUT);
140 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
141 }
142 }
143 }
144}
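/*
 * Illustrative caller sketch (not part of the driver): since the function
 * above requires the spinlock to be held with interrupts disabled, a
 * hypothetical external caller would wrap it as:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&hsotg->lock, flags);
 *	dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active);
 *	spin_unlock_irqrestore(&hsotg->lock, flags);
 *
 * The in-file callers, dwc2_kill_all_urbs() and dwc2_qh_list_free(),
 * already meet that requirement.
 */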
145
146static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
147 struct list_head *qh_list)
148{
149 struct dwc2_qtd *qtd, *qtd_tmp;
150 struct dwc2_qh *qh, *qh_tmp;
151 unsigned long flags;
152
153 if (!qh_list->next)
154 /* The list hasn't been initialized yet */
155 return;
156
157 spin_lock_irqsave(&hsotg->lock, flags);
158
159 /* Ensure there are no QTDs or URBs left */
160 dwc2_kill_urbs_in_qh_list(hsotg, qh_list);
161
162 list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
163 dwc2_hcd_qh_unlink(hsotg, qh);
164
165 /* Free each QTD in the QH's QTD list */
166 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
167 qtd_list_entry)
168 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
169
170 spin_unlock_irqrestore(&hsotg->lock, flags);
171 dwc2_hcd_qh_free(hsotg, qh);
172 spin_lock_irqsave(&hsotg->lock, flags);
173 }
174
175 spin_unlock_irqrestore(&hsotg->lock, flags);
176}
177
178/*
179 * Responds with an error status of -ETIMEDOUT to all URBs in the non-periodic
180 * and periodic schedules. The QTD associated with each URB is removed from
181 * the schedule and freed. This function may be called when a disconnect is
182 * detected or when the HCD is being stopped.
183 *
184 * Must be called with interrupt disabled and spinlock held
185 */
186static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg)
187{
188 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive);
189 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active);
190 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive);
191 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready);
192 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_assigned);
193 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_queued);
194}
195
196/**
197 * dwc2_hcd_start() - Starts the HCD when switching to Host mode
198 *
199 * @hsotg: Pointer to struct dwc2_hsotg
200 */
201void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
202{
203 u32 hprt0;
204
205 if (hsotg->op_state == OTG_STATE_B_HOST) {
206 /*
207 * Reset the port. During a HNP mode switch the reset
208 * needs to occur within 1ms and have a duration of at
209 * least 50ms.
210 */
211 hprt0 = dwc2_read_hprt0(hsotg);
212 hprt0 |= HPRT0_RST;
213 writel(hprt0, hsotg->regs + HPRT0);
214 }
215
216 queue_delayed_work(hsotg->wq_otg, &hsotg->start_work,
217 msecs_to_jiffies(50));
218}
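/*
 * Timing note (interpretation, not verified here): HPRT0_RST is written
 * immediately so the HNP-mandated reset starts within 1 ms, and the 50 ms
 * delay on start_work appears intended to guarantee the minimum 50 ms reset
 * duration before the rest of the host start-up runs; the body of
 * start_work itself lives outside this file.
 */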
219
220/* Must be called with interrupt disabled and spinlock held */
221static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
222{
223 int num_channels = hsotg->core_params->host_channels;
224 struct dwc2_host_chan *channel;
225 u32 hcchar;
226 int i;
227
228 if (hsotg->core_params->dma_enable <= 0) {
229 /* Flush out any channel requests in slave mode */
230 for (i = 0; i < num_channels; i++) {
231 channel = hsotg->hc_ptr_array[i];
232 if (!list_empty(&channel->hc_list_entry))
233 continue;
234 hcchar = readl(hsotg->regs + HCCHAR(i));
235 if (hcchar & HCCHAR_CHENA) {
236 hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR);
237 hcchar |= HCCHAR_CHDIS;
238 writel(hcchar, hsotg->regs + HCCHAR(i));
239 }
240 }
241 }
242
243 for (i = 0; i < num_channels; i++) {
244 channel = hsotg->hc_ptr_array[i];
245 if (!list_empty(&channel->hc_list_entry))
246 continue;
247 hcchar = readl(hsotg->regs + HCCHAR(i));
248 if (hcchar & HCCHAR_CHENA) {
249 /* Halt the channel */
250 hcchar |= HCCHAR_CHDIS;
251 writel(hcchar, hsotg->regs + HCCHAR(i));
252 }
253
254 dwc2_hc_cleanup(hsotg, channel);
255 list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list);
256 /*
257 * Added for Descriptor DMA to prevent channel double cleanup in
258 * release_channel_ddma(), which is called from ep_disable when
259 * device disconnects
260 */
261 channel->qh = NULL;
262 }
263}
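/*
 * Halt idiom used above, for reference: a channel whose HCCHAR_CHENA bit is
 * still set is asked to stop by writing HCCHAR with HCCHAR_CHDIS set (the
 * slave-mode flush additionally clears HCCHAR_CHENA and HCCHAR_EPDIR). The
 * normal runtime halt path goes through dwc2_hc_halt() instead; this
 * function only covers cleanup after a disconnect.
 */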
264
265/**
266 * dwc2_hcd_disconnect() - Handles disconnect of the HCD
267 *
268 * @hsotg: Pointer to struct dwc2_hsotg
269 *
270 * Must be called with interrupt disabled and spinlock held
271 */
272void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg)
273{
274 u32 intr;
275
276 /* Set status flags for the hub driver */
277 hsotg->flags.b.port_connect_status_change = 1;
278 hsotg->flags.b.port_connect_status = 0;
279
280 /*
281 * Shutdown any transfers in process by clearing the Tx FIFO Empty
282 * interrupt mask and status bits and disabling subsequent host
283 * channel interrupts.
284 */
285 intr = readl(hsotg->regs + GINTMSK);
286 intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT);
287 writel(intr, hsotg->regs + GINTMSK);
288 intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT;
289 writel(intr, hsotg->regs + GINTSTS);
290
291 /*
292 * Turn off the vbus power only if the core has transitioned to device
293 * mode. If still in host mode, need to keep power on to detect a
294 * reconnection.
295 */
296 if (dwc2_is_device_mode(hsotg)) {
297 if (hsotg->op_state != OTG_STATE_A_SUSPEND) {
298 dev_dbg(hsotg->dev, "Disconnect: PortPower off\n");
299 writel(0, hsotg->regs + HPRT0);
300 }
301
302 dwc2_disable_host_interrupts(hsotg);
303 }
304
305 /* Respond with an error status to all URBs in the schedule */
306 dwc2_kill_all_urbs(hsotg);
307
308 if (dwc2_is_host_mode(hsotg))
309 /* Clean up any host channels that were in use */
310 dwc2_hcd_cleanup_channels(hsotg);
311
312 dwc2_host_disconnect(hsotg);
313}
314
315/**
316 * dwc2_hcd_rem_wakeup() - Handles Remote Wakeup
317 *
318 * @hsotg: Pointer to struct dwc2_hsotg
319 */
320static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
321{
322 if (hsotg->lx_state == DWC2_L2)
323 hsotg->flags.b.port_suspend_change = 1;
324 else
325 hsotg->flags.b.port_l1_change = 1;
326}
327
328/**
329 * dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner
330 *
331 * @hsotg: Pointer to struct dwc2_hsotg
332 *
333 * Must be called with interrupt disabled and spinlock held
334 */
335void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
336{
337 dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n");
338
339 /*
340 * The root hub should be disconnected before this function is called.
341 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
342 * and the QH lists (via ..._hcd_endpoint_disable).
343 */
344
345 /* Turn off all host-specific interrupts */
346 dwc2_disable_host_interrupts(hsotg);
347
348 /* Turn off the vbus power */
349 dev_dbg(hsotg->dev, "PortPower off\n");
350 writel(0, hsotg->regs + HPRT0);
351}
352
353static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
354 struct dwc2_hcd_urb *urb, void **ep_handle,
355 gfp_t mem_flags)
356{
357 struct dwc2_qtd *qtd;
358 unsigned long flags;
359 u32 intr_mask;
360 int retval;
361
362 if (!hsotg->flags.b.port_connect_status) {
363 /* No longer connected */
364 dev_err(hsotg->dev, "Not connected\n");
365 return -ENODEV;
366 }
367
368 qtd = kzalloc(sizeof(*qtd), mem_flags);
369 if (!qtd)
370 return -ENOMEM;
371
372 dwc2_hcd_qtd_init(qtd, urb);
373 retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle,
374 mem_flags);
375 if (retval < 0) {
376 dev_err(hsotg->dev,
377 "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
378 retval);
379 kfree(qtd);
380 return retval;
381 }
382
383 intr_mask = readl(hsotg->regs + GINTMSK);
384 if (!(intr_mask & GINTSTS_SOF) && retval == 0) {
385 enum dwc2_transaction_type tr_type;
386
387 if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
388 !(qtd->urb->flags & URB_GIVEBACK_ASAP))
389 /*
390 * Do not schedule SG transactions until qtd has
391 * URB_GIVEBACK_ASAP set
392 */
393 return 0;
394
395 spin_lock_irqsave(&hsotg->lock, flags);
396 tr_type = dwc2_hcd_select_transactions(hsotg);
397 if (tr_type != DWC2_TRANSACTION_NONE)
398 dwc2_hcd_queue_transactions(hsotg, tr_type);
399 spin_unlock_irqrestore(&hsotg->lock, flags);
400 }
401
402 return retval;
403}
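/*
 * Illustrative enqueue sequence (hypothetical glue code, names are
 * placeholders): a caller typically allocates and describes a dwc2_hcd_urb
 * before handing it to the function above, e.g.:
 *
 *	dwc2_urb = dwc2_hcd_urb_alloc(hsotg, num_iso_packets, mem_flags);
 *	if (!dwc2_urb)
 *		return -ENOMEM;
 *	dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, dev_addr, ep_num,
 *				  ep_type, ep_dir, mps);
 *	retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv,
 *				      mem_flags);
 *
 * The real Linux hc_driver glue that does this is implemented elsewhere in
 * the driver.
 */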
404
405/* Must be called with interrupt disabled and spinlock held */
406static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
407 struct dwc2_hcd_urb *urb)
408{
409 struct dwc2_qh *qh;
410 struct dwc2_qtd *urb_qtd;
411
412 urb_qtd = urb->qtd;
413 if (!urb_qtd) {
414 dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n");
415 return -EINVAL;
416 }
417
418 qh = urb_qtd->qh;
419 if (!qh) {
420 dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n");
421 return -EINVAL;
422 }
423
424 if (urb_qtd->in_process && qh->channel) {
425 dwc2_dump_channel_info(hsotg, qh->channel);
426
427 /* The QTD is in process (it has been assigned to a channel) */
428 if (hsotg->flags.b.port_connect_status)
429 /*
430 * If still connected (i.e. in host mode), halt the
431 * channel so it can be used for other transfers. If
432 * no longer connected, the host registers can't be
433 * written to halt the channel since the core is in
434 * device mode.
435 */
436 dwc2_hc_halt(hsotg, qh->channel,
437 DWC2_HC_XFER_URB_DEQUEUE);
438 }
439
440 /*
441 * Free the QTD and clean up the associated QH. Leave the QH in the
442 * schedule if it has any remaining QTDs.
443 */
444 if (hsotg->core_params->dma_desc_enable <= 0) {
445 u8 in_process = urb_qtd->in_process;
446
447 dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
448 if (in_process) {
449 dwc2_hcd_qh_deactivate(hsotg, qh, 0);
450 qh->channel = NULL;
451 } else if (list_empty(&qh->qtd_list)) {
452 dwc2_hcd_qh_unlink(hsotg, qh);
453 }
454 } else {
455 dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
456 }
457
458 return 0;
459}
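/*
 * Dequeue summary: in the non-descriptor-DMA case above there are two
 * outcomes once the QTD is freed. If the QTD was in process, the QH is
 * deactivated and qh->channel is cleared so the host channel can be reused;
 * if it was not in process and no QTDs remain, the QH is simply unlinked
 * from its schedule. The descriptor-DMA case leaves that bookkeeping to the
 * descriptor-DMA code.
 */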
460
461/* Must NOT be called with interrupt disabled or spinlock held */
462static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg,
463 struct usb_host_endpoint *ep, int retry)
464{
465 struct dwc2_qtd *qtd, *qtd_tmp;
466 struct dwc2_qh *qh;
467 unsigned long flags;
468 int rc;
469
470 spin_lock_irqsave(&hsotg->lock, flags);
471
472 qh = ep->hcpriv;
473 if (!qh) {
474 rc = -EINVAL;
475 goto err;
476 }
477
478 while (!list_empty(&qh->qtd_list) && retry--) {
479 if (retry == 0) {
480 dev_err(hsotg->dev,
481 "## timeout in dwc2_hcd_endpoint_disable() ##\n");
482 rc = -EBUSY;
483 goto err;
484 }
485
486 spin_unlock_irqrestore(&hsotg->lock, flags);
487 usleep_range(20000, 40000);
488 spin_lock_irqsave(&hsotg->lock, flags);
489 qh = ep->hcpriv;
490 if (!qh) {
491 rc = -EINVAL;
492 goto err;
493 }
494 }
495
496 dwc2_hcd_qh_unlink(hsotg, qh);
497
498 /* Free each QTD in the QH's QTD list */
499 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry)
500 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
501
502 ep->hcpriv = NULL;
503 spin_unlock_irqrestore(&hsotg->lock, flags);
504 dwc2_hcd_qh_free(hsotg, qh);
505
506 return 0;
507
508err:
509 ep->hcpriv = NULL;
510 spin_unlock_irqrestore(&hsotg->lock, flags);
511
512 return rc;
513}
514
515/* Must be called with interrupt disabled and spinlock held */
516static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg,
517 struct usb_host_endpoint *ep)
518{
519 struct dwc2_qh *qh = ep->hcpriv;
520
521 if (!qh)
522 return -EINVAL;
523
524 qh->data_toggle = DWC2_HC_PID_DATA0;
525
526 return 0;
527}
528
529/*
530 * Initializes dynamic portions of the DWC_otg HCD state
531 *
532 * Must be called with interrupt disabled and spinlock held
533 */
534static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
535{
536 struct dwc2_host_chan *chan, *chan_tmp;
537 int num_channels;
538 int i;
539
540 hsotg->flags.d32 = 0;
541
542 hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
543 hsotg->non_periodic_channels = 0;
544 hsotg->periodic_channels = 0;
545
546 /*
547 * Put all channels in the free channel list and clean up channel
548 * states
549 */
550 list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list,
551 hc_list_entry)
552 list_del_init(&chan->hc_list_entry);
553
554 num_channels = hsotg->core_params->host_channels;
555 for (i = 0; i < num_channels; i++) {
556 chan = hsotg->hc_ptr_array[i];
557 list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
558 dwc2_hc_cleanup(hsotg, chan);
559 }
560
561 /* Initialize the DWC core for host mode operation */
562 dwc2_core_host_init(hsotg);
563}
564
565static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg,
566 struct dwc2_host_chan *chan,
567 struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
568{
569 int hub_addr, hub_port;
570
571 chan->do_split = 1;
572 chan->xact_pos = qtd->isoc_split_pos;
573 chan->complete_split = qtd->complete_split;
574 dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
575 chan->hub_addr = (u8)hub_addr;
576 chan->hub_port = (u8)hub_port;
577}
578
579static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
580 struct dwc2_host_chan *chan,
581 struct dwc2_qtd *qtd, void *bufptr)
582{
583 struct dwc2_hcd_urb *urb = qtd->urb;
584 struct dwc2_hcd_iso_packet_desc *frame_desc;
585
586 switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
587 case USB_ENDPOINT_XFER_CONTROL:
588 chan->ep_type = USB_ENDPOINT_XFER_CONTROL;
589
590 switch (qtd->control_phase) {
591 case DWC2_CONTROL_SETUP:
592 dev_vdbg(hsotg->dev, " Control setup transaction\n");
593 chan->do_ping = 0;
594 chan->ep_is_in = 0;
595 chan->data_pid_start = DWC2_HC_PID_SETUP;
596 if (hsotg->core_params->dma_enable > 0)
597 chan->xfer_dma = urb->setup_dma;
598 else
599 chan->xfer_buf = urb->setup_packet;
600 chan->xfer_len = 8;
601 bufptr = NULL;
602 break;
603
604 case DWC2_CONTROL_DATA:
605 dev_vdbg(hsotg->dev, " Control data transaction\n");
606 chan->data_pid_start = qtd->data_toggle;
607 break;
608
609 case DWC2_CONTROL_STATUS:
610 /*
611 * Direction is opposite of data direction or IN if no
612 * data
613 */
614 dev_vdbg(hsotg->dev, " Control status transaction\n");
615 if (urb->length == 0)
616 chan->ep_is_in = 1;
617 else
618 chan->ep_is_in =
619 dwc2_hcd_is_pipe_out(&urb->pipe_info);
620 if (chan->ep_is_in)
621 chan->do_ping = 0;
622 chan->data_pid_start = DWC2_HC_PID_DATA1;
623 chan->xfer_len = 0;
624 if (hsotg->core_params->dma_enable > 0)
625 chan->xfer_dma = hsotg->status_buf_dma;
626 else
627 chan->xfer_buf = hsotg->status_buf;
628 bufptr = NULL;
629 break;
630 }
631 break;
632
633 case USB_ENDPOINT_XFER_BULK:
634 chan->ep_type = USB_ENDPOINT_XFER_BULK;
635 break;
636
637 case USB_ENDPOINT_XFER_INT:
638 chan->ep_type = USB_ENDPOINT_XFER_INT;
639 break;
640
641 case USB_ENDPOINT_XFER_ISOC:
642 chan->ep_type = USB_ENDPOINT_XFER_ISOC;
643 if (hsotg->core_params->dma_desc_enable > 0)
644 break;
645
646 frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
647 frame_desc->status = 0;
648
649 if (hsotg->core_params->dma_enable > 0) {
650 chan->xfer_dma = urb->dma;
651 chan->xfer_dma += frame_desc->offset +
652 qtd->isoc_split_offset;
653 } else {
654 chan->xfer_buf = urb->buf;
655 chan->xfer_buf += frame_desc->offset +
656 qtd->isoc_split_offset;
657 }
658
659 chan->xfer_len = frame_desc->length - qtd->isoc_split_offset;
660
661 /* For non-dword aligned buffers */
662 if (hsotg->core_params->dma_enable > 0 &&
663 (chan->xfer_dma & 0x3))
664 bufptr = (u8 *)urb->buf + frame_desc->offset +
665 qtd->isoc_split_offset;
666 else
667 bufptr = NULL;
668
669 if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) {
670 if (chan->xfer_len <= 188)
671 chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL;
672 else
673 chan->xact_pos = DWC2_HCSPLT_XACTPOS_BEGIN;
674 }
675 break;
676 }
677
678 return bufptr;
679}
680
681static int dwc2_hc_setup_align_buf(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
682 struct dwc2_host_chan *chan, void *bufptr)
683{
684 u32 buf_size;
685
686 if (chan->ep_type != USB_ENDPOINT_XFER_ISOC)
687 buf_size = hsotg->core_params->max_transfer_size;
688 else
689 buf_size = 4096;
690
691 if (!qh->dw_align_buf) {
692 qh->dw_align_buf = dma_alloc_coherent(hsotg->dev, buf_size,
693 &qh->dw_align_buf_dma,
694 GFP_ATOMIC);
695 if (!qh->dw_align_buf)
696 return -ENOMEM;
697 }
698
699 if (!chan->ep_is_in && chan->xfer_len) {
700 dma_sync_single_for_cpu(hsotg->dev, chan->xfer_dma, buf_size,
701 DMA_TO_DEVICE);
702 memcpy(qh->dw_align_buf, bufptr, chan->xfer_len);
703 dma_sync_single_for_device(hsotg->dev, chan->xfer_dma, buf_size,
704 DMA_TO_DEVICE);
705 }
706
707 chan->align_buf = qh->dw_align_buf_dma;
708 return 0;
709}
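/*
 * The bounce buffer above exists because the core requires DWORD-aligned
 * DMA addresses. Callers detect the misaligned case with a simple mask
 * test, for example the isochronous path in dwc2_hc_init_xfer():
 *
 *	if (hsotg->core_params->dma_enable > 0 && (chan->xfer_dma & 0x3))
 *		bufptr = (u8 *)urb->buf + frame_desc->offset +
 *			qtd->isoc_split_offset;
 *
 * and only then fall back to qh->dw_align_buf via this helper.
 */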
710
711/**
712 * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host
713 * channel and initializes the host channel to perform the transactions. The
714 * host channel is removed from the free list.
715 *
716 * @hsotg: The HCD state structure
717 * @qh: Transactions from the first QTD for this QH are selected and assigned
718 * to a free host channel
719 */
720static void dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg,
721 struct dwc2_qh *qh)
722{
723 struct dwc2_host_chan *chan;
724 struct dwc2_hcd_urb *urb;
725 struct dwc2_qtd *qtd;
726 void *bufptr = NULL;
727
728 dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh);
729
730 if (list_empty(&qh->qtd_list)) {
731 dev_dbg(hsotg->dev, "No QTDs in QH list\n");
732 return;
733 }
734
735 if (list_empty(&hsotg->free_hc_list)) {
736 dev_dbg(hsotg->dev, "No free channel to assign\n");
737 return;
738 }
739
740 chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan,
741 hc_list_entry);
742
743 /* Remove the host channel from the free list */
744 list_del_init(&chan->hc_list_entry);
745
746 qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
747 urb = qtd->urb;
748 qh->channel = chan;
749 qtd->in_process = 1;
750
751 /*
752 * Use usb_pipedevice to determine device address. This address is
753 * 0 before the SET_ADDRESS command and the correct address afterward.
754 */
755 chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
756 chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
757 chan->speed = qh->dev_speed;
758 chan->max_packet = dwc2_max_packet(qh->maxp);
759
760 chan->xfer_started = 0;
761 chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
762 chan->error_state = (qtd->error_count > 0);
763 chan->halt_on_queue = 0;
764 chan->halt_pending = 0;
765 chan->requests = 0;
766
767 /*
768 * The following values may be modified in the transfer type section
769 * below. The xfer_len value may be reduced when the transfer is
770 * started to accommodate the max widths of the XferSize and PktCnt
771 * fields in the HCTSIZn register.
772 */
773
774 chan->ep_is_in = (dwc2_hcd_is_pipe_in(&urb->pipe_info) != 0);
775 if (chan->ep_is_in)
776 chan->do_ping = 0;
777 else
778 chan->do_ping = qh->ping_state;
779
780 chan->data_pid_start = qh->data_toggle;
781 chan->multi_count = 1;
782
783 if (hsotg->core_params->dma_enable > 0) {
784 chan->xfer_dma = urb->dma + urb->actual_length;
785
786 /* For non-dword aligned case */
787 if (hsotg->core_params->dma_desc_enable <= 0 &&
788 (chan->xfer_dma & 0x3))
789 bufptr = (u8 *)urb->buf + urb->actual_length;
790 } else {
791 chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
792 }
793
794 chan->xfer_len = urb->length - urb->actual_length;
795 chan->xfer_count = 0;
796
797 /* Set the split attributes if required */
798 if (qh->do_split)
799 dwc2_hc_init_split(hsotg, chan, qtd, urb);
800 else
801 chan->do_split = 0;
802
803 /* Set the transfer attributes */
804 bufptr = dwc2_hc_init_xfer(hsotg, chan, qtd, bufptr);
805
806 /* Non DWORD-aligned buffer case */
807 if (bufptr) {
808 dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
809 if (dwc2_hc_setup_align_buf(hsotg, qh, chan, bufptr)) {
810 dev_err(hsotg->dev,
811 "%s: Failed to allocate memory to handle non-dword aligned buffer\n",
812 __func__);
813 /* Add channel back to free list */
814 chan->align_buf = 0;
815 chan->multi_count = 0;
816 list_add_tail(&chan->hc_list_entry,
817 &hsotg->free_hc_list);
818 qtd->in_process = 0;
819 qh->channel = NULL;
820 return;
821 }
822 } else {
823 chan->align_buf = 0;
824 }
825
826 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
827 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
828 /*
829 * This value may be modified when the transfer is started
830 * to reflect the actual transfer length
831 */
832 chan->multi_count = dwc2_hb_mult(qh->maxp);
833
834 if (hsotg->core_params->dma_desc_enable > 0)
835 chan->desc_list_addr = qh->desc_list_dma;
836
837 dwc2_hc_init(hsotg, chan);
838 chan->qh = qh;
839}
840
841/**
842 * dwc2_hcd_select_transactions() - Selects transactions from the HCD transfer
843 * schedule and assigns them to available host channels. Called from the HCD
844 * interrupt handler functions.
845 *
846 * @hsotg: The HCD state structure
847 *
848 * Return: The types of new transactions that were assigned to host channels
849 */
850enum dwc2_transaction_type dwc2_hcd_select_transactions(
851 struct dwc2_hsotg *hsotg)
852{
853 enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE;
854 struct list_head *qh_ptr;
855 struct dwc2_qh *qh;
856 int num_channels;
857
858#ifdef DWC2_DEBUG_SOF
859 dev_vdbg(hsotg->dev, " Select Transactions\n");
860#endif
861
862 /* Process entries in the periodic ready list */
863 qh_ptr = hsotg->periodic_sched_ready.next;
864 while (qh_ptr != &hsotg->periodic_sched_ready) {
865 if (list_empty(&hsotg->free_hc_list))
866 break;
867 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
868 dwc2_assign_and_init_hc(hsotg, qh);
869
870 /*
871 * Move the QH from the periodic ready schedule to the
872 * periodic assigned schedule
873 */
874 qh_ptr = qh_ptr->next;
875 list_move(&qh->qh_list_entry, &hsotg->periodic_sched_assigned);
876 ret_val = DWC2_TRANSACTION_PERIODIC;
877 }
878
879 /*
880 * Process entries in the inactive portion of the non-periodic
881 * schedule. Some free host channels may not be used if they are
882 * reserved for periodic transfers.
883 */
884 num_channels = hsotg->core_params->host_channels;
885 qh_ptr = hsotg->non_periodic_sched_inactive.next;
886 while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
887 if (hsotg->non_periodic_channels >= num_channels -
888 hsotg->periodic_channels)
889 break;
890 if (list_empty(&hsotg->free_hc_list))
891 break;
892 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
893 dwc2_assign_and_init_hc(hsotg, qh);
894
895 /*
896 * Move the QH from the non-periodic inactive schedule to the
897 * non-periodic active schedule
898 */
899 qh_ptr = qh_ptr->next;
900 list_move(&qh->qh_list_entry,
901 &hsotg->non_periodic_sched_active);
902
903 if (ret_val == DWC2_TRANSACTION_NONE)
904 ret_val = DWC2_TRANSACTION_NON_PERIODIC;
905 else
906 ret_val = DWC2_TRANSACTION_ALL;
907
908 hsotg->non_periodic_channels++;
909 }
910
911 return ret_val;
912}
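/*
 * Worked example of the reservation check above: with host_channels == 8
 * and periodic_channels == 3, non-periodic QHs are only assigned while
 * non_periodic_channels < 8 - 3 = 5, so at most five channels carry
 * control/bulk traffic and the remainder stay reserved for periodic
 * transfers.
 */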
913
914/**
915 * dwc2_queue_transaction() - Attempts to queue a single transaction request for
916 * a host channel associated with either a periodic or non-periodic transfer
917 *
918 * @hsotg: The HCD state structure
919 * @chan: Host channel descriptor associated with either a periodic or
920 * non-periodic transfer
921 * @fifo_dwords_avail: Number of DWORDs available in the periodic Tx FIFO
922 * for periodic transfers or the non-periodic Tx FIFO
923 * for non-periodic transfers
924 *
925 * Return: 1 if a request is queued and more requests may be needed to
926 * complete the transfer, 0 if no more requests are required for this
927 * transfer, -1 if there is insufficient space in the Tx FIFO
928 *
929 * This function assumes that there is space available in the appropriate
930 * request queue. For an OUT transfer or SETUP transaction in Slave mode,
931 * it checks whether space is available in the appropriate Tx FIFO.
932 *
933 * Must be called with interrupt disabled and spinlock held
934 */
935static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
936 struct dwc2_host_chan *chan,
937 u16 fifo_dwords_avail)
938{
939 int retval = 0;
940
941 if (hsotg->core_params->dma_enable > 0) {
942 if (hsotg->core_params->dma_desc_enable > 0) {
943 if (!chan->xfer_started ||
944 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
945 dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
946 chan->qh->ping_state = 0;
947 }
948 } else if (!chan->xfer_started) {
949 dwc2_hc_start_transfer(hsotg, chan);
950 chan->qh->ping_state = 0;
951 }
952 } else if (chan->halt_pending) {
953 /* Don't queue a request if the channel has been halted */
954 } else if (chan->halt_on_queue) {
955 dwc2_hc_halt(hsotg, chan, chan->halt_status);
956 } else if (chan->do_ping) {
957 if (!chan->xfer_started)
958 dwc2_hc_start_transfer(hsotg, chan);
959 } else if (!chan->ep_is_in ||
960 chan->data_pid_start == DWC2_HC_PID_SETUP) {
961 if ((fifo_dwords_avail * 4) >= chan->max_packet) {
962 if (!chan->xfer_started) {
963 dwc2_hc_start_transfer(hsotg, chan);
964 retval = 1;
965 } else {
966 retval = dwc2_hc_continue_transfer(hsotg, chan);
967 }
968 } else {
969 retval = -1;
970 }
971 } else {
972 if (!chan->xfer_started) {
973 dwc2_hc_start_transfer(hsotg, chan);
974 retval = 1;
975 } else {
976 retval = dwc2_hc_continue_transfer(hsotg, chan);
977 }
978 }
979
980 return retval;
981}
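/*
 * How the callers below interpret the return value: a negative result means
 * the Tx FIFO had no room and the queuing loop stops with no_fifo_space
 * set, a positive result means the channel still needs more requests
 * (more_to_do in the non-periodic loop), and zero means this channel needs
 * nothing further for now.
 */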
982
983/*
984 * Processes periodic channels for the next frame and queues transactions for
985 * these channels to the DWC_otg controller. After queueing transactions, the
986 * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
987 * to queue as Periodic Tx FIFO or request queue space becomes available.
988 * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
989 *
990 * Must be called with interrupt disabled and spinlock held
991 */
992static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
993{
994 struct list_head *qh_ptr;
995 struct dwc2_qh *qh;
996 u32 tx_status;
997 u32 fspcavail;
998 u32 gintmsk;
999 int status;
1000 int no_queue_space = 0;
1001 int no_fifo_space = 0;
1002 u32 qspcavail;
1003
1004 dev_vdbg(hsotg->dev, "Queue periodic transactions\n");
1005
1006 tx_status = readl(hsotg->regs + HPTXSTS);
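	/*
	 * TXSTS fields are extracted as (reg >> SHIFT) & (MASK >> SHIFT);
	 * ">>" binds more tightly than "&", so the expressions below need
	 * no extra parentheses.
	 */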
1007 qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
1008 TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
1009 fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
1010 TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
1011 dev_vdbg(hsotg->dev, " P Tx Req Queue Space Avail (before queue): %d\n",
1012 qspcavail);
1013 dev_vdbg(hsotg->dev, " P Tx FIFO Space Avail (before queue): %d\n",
1014 fspcavail);
1015
1016 qh_ptr = hsotg->periodic_sched_assigned.next;
1017 while (qh_ptr != &hsotg->periodic_sched_assigned) {
1018 tx_status = readl(hsotg->regs + HPTXSTS);
1019 if ((tx_status & TXSTS_QSPCAVAIL_MASK) == 0) {
1020 no_queue_space = 1;
1021 break;
1022 }
1023
1024 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
1025 if (!qh->channel) {
1026 qh_ptr = qh_ptr->next;
1027 continue;
1028 }
1029
1030 /* Make sure EP's TT buffer is clean before queueing qtds */
1031 if (qh->tt_buffer_dirty) {
1032 qh_ptr = qh_ptr->next;
1033 continue;
1034 }
1035
1036 /*
1037 * Set a flag if we're queuing high-bandwidth in slave mode.
1038 * The flag prevents any halts to get into the request queue in
1039 * the middle of multiple high-bandwidth packets getting queued.
1040 */
1041 if (hsotg->core_params->dma_enable <= 0 &&
1042 qh->channel->multi_count > 1)
1043 hsotg->queuing_high_bandwidth = 1;
1044
1045 fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
1046 TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
1047 status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
1048 if (status < 0) {
1049 no_fifo_space = 1;
1050 break;
1051 }
1052
1053 /*
1054 * In Slave mode, stay on the current transfer until there is
1055 * nothing more to do or the high-bandwidth request count is
1056 * reached. In DMA mode, only need to queue one request. The
1057 * controller automatically handles multiple packets for
1058 * high-bandwidth transfers.
1059 */
1060 if (hsotg->core_params->dma_enable > 0 || status == 0 ||
1061 qh->channel->requests == qh->channel->multi_count) {
1062 qh_ptr = qh_ptr->next;
1063 /*
1064 * Move the QH from the periodic assigned schedule to
1065 * the periodic queued schedule
1066 */
1067 list_move(&qh->qh_list_entry,
1068 &hsotg->periodic_sched_queued);
1069
1070 /* done queuing high bandwidth */
1071 hsotg->queuing_high_bandwidth = 0;
1072 }
1073 }
1074
1075 if (hsotg->core_params->dma_enable <= 0) {
1076 tx_status = readl(hsotg->regs + HPTXSTS);
1077 qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
1078 TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
1079 fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
1080 TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
1081 dev_vdbg(hsotg->dev,
1082 " P Tx Req Queue Space Avail (after queue): %d\n",
1083 qspcavail);
1084 dev_vdbg(hsotg->dev,
1085 " P Tx FIFO Space Avail (after queue): %d\n",
1086 fspcavail);
1087
1088 if (!list_empty(&hsotg->periodic_sched_assigned) ||
1089 no_queue_space || no_fifo_space) {
1090 /*
1091 * May need to queue more transactions as the request
1092 * queue or Tx FIFO empties. Enable the periodic Tx
1093 * FIFO empty interrupt. (Always use the half-empty
1094 * level to ensure that new requests are loaded as
1095 * soon as possible.)
1096 */
1097 gintmsk = readl(hsotg->regs + GINTMSK);
1098 gintmsk |= GINTSTS_PTXFEMP;
1099 writel(gintmsk, hsotg->regs + GINTMSK);
1100 } else {
1101 /*
1102 * Disable the Tx FIFO empty interrupt since there are
1103 * no more transactions that need to be queued right
1104 * now. This function is called from interrupt
1105 * handlers to queue more transactions as transfer
1106 * states change.
1107 */
1108 gintmsk = readl(hsotg->regs + GINTMSK);
1109 gintmsk &= ~GINTSTS_PTXFEMP;
1110 writel(gintmsk, hsotg->regs + GINTMSK);
1111 }
1112 }
1113}
1114
1115/*
1116 * Processes active non-periodic channels and queues transactions for these
1117 * channels to the DWC_otg controller. After queueing transactions, the NP Tx
1118 * FIFO Empty interrupt is enabled if there are more transactions to queue as
1119 * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
1120 * FIFO Empty interrupt is disabled.
1121 *
1122 * Must be called with interrupt disabled and spinlock held
1123 */
1124static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
1125{
1126 struct list_head *orig_qh_ptr;
1127 struct dwc2_qh *qh;
1128 u32 tx_status;
1129 u32 qspcavail;
1130 u32 fspcavail;
1131 u32 gintmsk;
1132 int status;
1133 int no_queue_space = 0;
1134 int no_fifo_space = 0;
1135 int more_to_do = 0;
1136
1137 dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n");
1138
1139 tx_status = readl(hsotg->regs + GNPTXSTS);
1140 qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
1141 TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
1142 fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
1143 TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
1144 dev_vdbg(hsotg->dev, " NP Tx Req Queue Space Avail (before queue): %d\n",
1145 qspcavail);
1146 dev_vdbg(hsotg->dev, " NP Tx FIFO Space Avail (before queue): %d\n",
1147 fspcavail);
1148
1149 /*
1150 * Keep track of the starting point. Skip over the start-of-list
1151 * entry.
1152 */
1153 if (hsotg->non_periodic_qh_ptr == &hsotg->non_periodic_sched_active)
1154 hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
1155 orig_qh_ptr = hsotg->non_periodic_qh_ptr;
1156
1157 /*
1158 * Process once through the active list or until no more space is
1159 * available in the request queue or the Tx FIFO
1160 */
1161 do {
1162 tx_status = readl(hsotg->regs + GNPTXSTS);
1163 qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
1164 TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
1165 if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) {
1166 no_queue_space = 1;
1167 break;
1168 }
1169
1170 qh = list_entry(hsotg->non_periodic_qh_ptr, struct dwc2_qh,
1171 qh_list_entry);
1172 if (!qh->channel)
1173 goto next;
1174
1175 /* Make sure EP's TT buffer is clean before queueing qtds */
1176 if (qh->tt_buffer_dirty)
1177 goto next;
1178
1179 fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
1180 TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
1181 status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
1182
1183 if (status > 0) {
1184 more_to_do = 1;
1185 } else if (status < 0) {
1186 no_fifo_space = 1;
1187 break;
1188 }
1189next:
1190 /* Advance to next QH, skipping start-of-list entry */
1191 hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
1192 if (hsotg->non_periodic_qh_ptr ==
1193 &hsotg->non_periodic_sched_active)
1194 hsotg->non_periodic_qh_ptr =
1195 hsotg->non_periodic_qh_ptr->next;
1196 } while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
1197
1198 if (hsotg->core_params->dma_enable <= 0) {
1199 tx_status = readl(hsotg->regs + GNPTXSTS);
1200 qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
1201 TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
1202 fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
1203 TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
1204 dev_vdbg(hsotg->dev,
1205 " NP Tx Req Queue Space Avail (after queue): %d\n",
1206 qspcavail);
1207 dev_vdbg(hsotg->dev,
1208 " NP Tx FIFO Space Avail (after queue): %d\n",
1209 fspcavail);
1210
1211 if (more_to_do || no_queue_space || no_fifo_space) {
1212 /*
1213 * May need to queue more transactions as the request
1214 * queue or Tx FIFO empties. Enable the non-periodic
1215 * Tx FIFO empty interrupt. (Always use the half-empty
1216 * level to ensure that new requests are loaded as
1217 * soon as possible.)
1218 */
1219 gintmsk = readl(hsotg->regs + GINTMSK);
1220 gintmsk |= GINTSTS_NPTXFEMP;
1221 writel(gintmsk, hsotg->regs + GINTMSK);
1222 } else {
1223 /*
1224 * Disable the Tx FIFO empty interrupt since there are
1225 * no more transactions that need to be queued right
1226 * now. This function is called from interrupt
1227 * handlers to queue more transactions as transfer
1228 * states change.
1229 */
1230 gintmsk = readl(hsotg->regs + GINTMSK);
1231 gintmsk &= ~GINTSTS_NPTXFEMP;
1232 writel(gintmsk, hsotg->regs + GINTMSK);
1233 }
1234 }
1235}
1236
1237/**
1238 * dwc2_hcd_queue_transactions() - Processes the currently active host channels
1239 * and queues transactions for these channels to the DWC_otg controller. Called
1240 * from the HCD interrupt handler functions.
1241 *
1242 * @hsotg: The HCD state structure
1243 * @tr_type: The type(s) of transactions to queue (non-periodic, periodic,
1244 * or both)
1245 *
1246 * Must be called with interrupt disabled and spinlock held
1247 */
1248void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
1249 enum dwc2_transaction_type tr_type)
1250{
1251#ifdef DWC2_DEBUG_SOF
1252 dev_vdbg(hsotg->dev, "Queue Transactions\n");
1253#endif
1254 /* Process host channels associated with periodic transfers */
1255 if ((tr_type == DWC2_TRANSACTION_PERIODIC ||
1256 tr_type == DWC2_TRANSACTION_ALL) &&
1257 !list_empty(&hsotg->periodic_sched_assigned))
1258 dwc2_process_periodic_channels(hsotg);
1259
1260 /* Process host channels associated with non-periodic transfers */
1261 if (tr_type == DWC2_TRANSACTION_NON_PERIODIC ||
1262 tr_type == DWC2_TRANSACTION_ALL) {
1263 if (!list_empty(&hsotg->non_periodic_sched_active)) {
1264 dwc2_process_non_periodic_channels(hsotg);
1265 } else {
1266 /*
1267 * Ensure NP Tx FIFO empty interrupt is disabled when
1268 * there are no non-periodic transfers to process
1269 */
1270 u32 gintmsk = readl(hsotg->regs + GINTMSK);
1271
1272 gintmsk &= ~GINTSTS_NPTXFEMP;
1273 writel(gintmsk, hsotg->regs + GINTMSK);
1274 }
1275 }
1276}
1277
1278static void dwc2_conn_id_status_change(struct work_struct *work)
1279{
1280 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
1281 wf_otg);
1282 u32 count = 0;
1283 u32 gotgctl;
1284
1285 dev_dbg(hsotg->dev, "%s()\n", __func__);
1286
1287 gotgctl = readl(hsotg->regs + GOTGCTL);
1288 dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl);
1289 dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n",
1290 !!(gotgctl & GOTGCTL_CONID_B));
1291
1292 /* B-Device connector (Device Mode) */
1293 if (gotgctl & GOTGCTL_CONID_B) {
1294 /* Wait for switch to device mode */
1295 dev_dbg(hsotg->dev, "connId B\n");
1296 while (!dwc2_is_device_mode(hsotg)) {
1297 dev_info(hsotg->dev,
1298 "Waiting for Peripheral Mode, Mode=%s\n",
1299 dwc2_is_host_mode(hsotg) ? "Host" :
1300 "Peripheral");
1301 usleep_range(20000, 40000);
1302 if (++count > 250)
1303 break;
1304 }
1305 if (count > 250)
1306 dev_err(hsotg->dev,
1307 "Connection id status change timed out");
1308 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
1309 dwc2_core_init(hsotg, false);
1310 dwc2_enable_global_interrupts(hsotg);
1311 } else {
1312 /* A-Device connector (Host Mode) */
1313 dev_dbg(hsotg->dev, "connId A\n");
1314 while (!dwc2_is_host_mode(hsotg)) {
1315 dev_info(hsotg->dev, "Waiting for Host Mode, Mode=%s\n",
1316 dwc2_is_host_mode(hsotg) ?
1317 "Host" : "Peripheral");
1318 usleep_range(20000, 40000);
1319 if (++count > 250)
1320 break;
1321 }
1322 if (count > 250)
1323 dev_err(hsotg->dev,
1324 "Connection id status change timed out");
1325 hsotg->op_state = OTG_STATE_A_HOST;
1326
1327 /* Initialize the Core for Host mode */
1328 dwc2_core_init(hsotg, false);
1329 dwc2_enable_global_interrupts(hsotg);
1330 dwc2_hcd_start(hsotg);
1331 }
1332}
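/*
 * Timeout arithmetic for the wait loops above: 250 iterations of a 20-40 ms
 * sleep allow roughly 5-10 seconds for the core to switch modes before
 * "Connection id status change timed out" is reported.
 */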
1333
1334static void dwc2_wakeup_detected(unsigned long data)
1335{
1336 struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
1337 u32 hprt0;
1338
1339 dev_dbg(hsotg->dev, "%s()\n", __func__);
1340
1341 /*
1342 * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
1343 * so that OPT tests pass with all PHYs.)
1344 */
1345 hprt0 = dwc2_read_hprt0(hsotg);
1346 dev_dbg(hsotg->dev, "Resume: HPRT0=%0x\n", hprt0);
1347 hprt0 &= ~HPRT0_RES;
1348 writel(hprt0, hsotg->regs + HPRT0);
1349 dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
1350 readl(hsotg->regs + HPRT0));
1351
1352 dwc2_hcd_rem_wakeup(hsotg);
1353
1354 /* Change to L0 state */
1355 hsotg->lx_state = DWC2_L0;
1356}
1357
1358static int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
1359{
1360 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
1361
1362 return hcd->self.b_hnp_enable;
1363}
1364
1365/* Must NOT be called with interrupt disabled or spinlock held */
1366static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
1367{
1368 unsigned long flags;
1369 u32 hprt0;
1370 u32 pcgctl;
1371 u32 gotgctl;
1372
1373 dev_dbg(hsotg->dev, "%s()\n", __func__);
1374
1375 spin_lock_irqsave(&hsotg->lock, flags);
1376
1377 if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) {
1378 gotgctl = readl(hsotg->regs + GOTGCTL);
1379 gotgctl |= GOTGCTL_HSTSETHNPEN;
1380 writel(gotgctl, hsotg->regs + GOTGCTL);
1381 hsotg->op_state = OTG_STATE_A_SUSPEND;
1382 }
1383
1384 hprt0 = dwc2_read_hprt0(hsotg);
1385 hprt0 |= HPRT0_SUSP;
1386 writel(hprt0, hsotg->regs + HPRT0);
1387
1388 /* Update lx_state */
1389 hsotg->lx_state = DWC2_L2;
1390
1391 /* Suspend the Phy Clock */
1392 pcgctl = readl(hsotg->regs + PCGCTL);
1393 pcgctl |= PCGCTL_STOPPCLK;
1394 writel(pcgctl, hsotg->regs + PCGCTL);
1395 udelay(10);
1396
1397 /* For HNP the bus must be suspended for at least 200ms */
1398 if (dwc2_host_is_b_hnp_enabled(hsotg)) {
1399 pcgctl = readl(hsotg->regs + PCGCTL);
1400 pcgctl &= ~PCGCTL_STOPPCLK;
1401 writel(pcgctl, hsotg->regs + PCGCTL);
1402
1403 spin_unlock_irqrestore(&hsotg->lock, flags);
1404
1405 usleep_range(200000, 250000);
1406 } else {
1407 spin_unlock_irqrestore(&hsotg->lock, flags);
1408 }
1409}
1410
1411/* Handles hub class-specific requests */
1412static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
1413 u16 wvalue, u16 windex, char *buf, u16 wlength)
1414{
1415 struct usb_hub_descriptor *hub_desc;
1416 int retval = 0;
1417 u32 hprt0;
1418 u32 port_status;
1419 u32 speed;
1420 u32 pcgctl;
1421
1422 switch (typereq) {
1423 case ClearHubFeature:
1424 dev_dbg(hsotg->dev, "ClearHubFeature %1xh\n", wvalue);
1425
1426 switch (wvalue) {
1427 case C_HUB_LOCAL_POWER:
1428 case C_HUB_OVER_CURRENT:
1429 /* Nothing required here */
1430 break;
1431
1432 default:
1433 retval = -EINVAL;
1434 dev_err(hsotg->dev,
1435 "ClearHubFeature request %1xh unknown\n",
1436 wvalue);
1437 }
1438 break;
1439
1440 case ClearPortFeature:
1441 if (wvalue != USB_PORT_FEAT_L1)
1442 if (!windex || windex > 1)
1443 goto error;
1444 switch (wvalue) {
1445 case USB_PORT_FEAT_ENABLE:
1446 dev_dbg(hsotg->dev,
1447 "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
1448 hprt0 = dwc2_read_hprt0(hsotg);
1449 hprt0 |= HPRT0_ENA;
1450 writel(hprt0, hsotg->regs + HPRT0);
1451 break;
1452
1453 case USB_PORT_FEAT_SUSPEND:
1454 dev_dbg(hsotg->dev,
1455 "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
1456 writel(0, hsotg->regs + PCGCTL);
1457 usleep_range(20000, 40000);
1458
1459 hprt0 = dwc2_read_hprt0(hsotg);
1460 hprt0 |= HPRT0_RES;
1461 writel(hprt0, hsotg->regs + HPRT0);
1462 hprt0 &= ~HPRT0_SUSP;
1463 usleep_range(100000, 150000);
1464
1465 hprt0 &= ~HPRT0_RES;
1466 writel(hprt0, hsotg->regs + HPRT0);
1467 break;
1468
1469 case USB_PORT_FEAT_POWER:
1470 dev_dbg(hsotg->dev,
1471 "ClearPortFeature USB_PORT_FEAT_POWER\n");
1472 hprt0 = dwc2_read_hprt0(hsotg);
1473 hprt0 &= ~HPRT0_PWR;
1474 writel(hprt0, hsotg->regs + HPRT0);
1475 break;
1476
1477 case USB_PORT_FEAT_INDICATOR:
1478 dev_dbg(hsotg->dev,
1479 "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
1480 /* Port indicator not supported */
1481 break;
1482
1483 case USB_PORT_FEAT_C_CONNECTION:
1484 /*
1485 * Clears driver's internal Connect Status Change flag
1486 */
1487 dev_dbg(hsotg->dev,
1488 "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
1489 hsotg->flags.b.port_connect_status_change = 0;
1490 break;
1491
1492 case USB_PORT_FEAT_C_RESET:
1493 /* Clears driver's internal Port Reset Change flag */
1494 dev_dbg(hsotg->dev,
1495 "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
1496 hsotg->flags.b.port_reset_change = 0;
1497 break;
1498
1499 case USB_PORT_FEAT_C_ENABLE:
1500 /*
1501 * Clears the driver's internal Port Enable/Disable
1502 * Change flag
1503 */
1504 dev_dbg(hsotg->dev,
1505 "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
1506 hsotg->flags.b.port_enable_change = 0;
1507 break;
1508
1509 case USB_PORT_FEAT_C_SUSPEND:
1510 /*
1511 * Clears the driver's internal Port Suspend Change
1512 * flag, which is set when resume signaling on the host
1513 * port is complete
1514 */
1515 dev_dbg(hsotg->dev,
1516 "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
1517 hsotg->flags.b.port_suspend_change = 0;
1518 break;
1519
1520 case USB_PORT_FEAT_C_PORT_L1:
1521 dev_dbg(hsotg->dev,
1522 "ClearPortFeature USB_PORT_FEAT_C_PORT_L1\n");
1523 hsotg->flags.b.port_l1_change = 0;
1524 break;
1525
1526 case USB_PORT_FEAT_C_OVER_CURRENT:
1527 dev_dbg(hsotg->dev,
1528 "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
1529 hsotg->flags.b.port_over_current_change = 0;
1530 break;
1531
1532 default:
1533 retval = -EINVAL;
1534 dev_err(hsotg->dev,
1535 "ClearPortFeature request %1xh unknown or unsupported\n",
1536 wvalue);
1537 }
1538 break;
1539
1540 case GetHubDescriptor:
1541 dev_dbg(hsotg->dev, "GetHubDescriptor\n");
1542 hub_desc = (struct usb_hub_descriptor *)buf;
1543 hub_desc->bDescLength = 9;
1544 hub_desc->bDescriptorType = 0x29;
1545 hub_desc->bNbrPorts = 1;
1546 hub_desc->wHubCharacteristics = cpu_to_le16(0x08);
1547 hub_desc->bPwrOn2PwrGood = 1;
1548 hub_desc->bHubContrCurrent = 0;
1549 hub_desc->u.hs.DeviceRemovable[0] = 0;
1550 hub_desc->u.hs.DeviceRemovable[1] = 0xff;
1551 break;
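		/*
		 * The hard-coded descriptor above describes the single root
		 * port: bDescriptorType 0x29 is the USB 2.0 hub descriptor
		 * type, bNbrPorts is 1, and wHubCharacteristics 0x08 appears
		 * to select ganged power switching with per-port over-current
		 * reporting (per the USB 2.0 hub descriptor layout).
		 */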
1552
1553 case GetHubStatus:
1554 dev_dbg(hsotg->dev, "GetHubStatus\n");
1555 memset(buf, 0, 4);
1556 break;
1557
1558 case GetPortStatus:
1559 dev_dbg(hsotg->dev,
1560 "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex,
1561 hsotg->flags.d32);
1562 if (!windex || windex > 1)
1563 goto error;
1564
1565 port_status = 0;
1566 if (hsotg->flags.b.port_connect_status_change)
1567 port_status |= USB_PORT_STAT_C_CONNECTION << 16;
1568 if (hsotg->flags.b.port_enable_change)
1569 port_status |= USB_PORT_STAT_C_ENABLE << 16;
1570 if (hsotg->flags.b.port_suspend_change)
1571 port_status |= USB_PORT_STAT_C_SUSPEND << 16;
1572 if (hsotg->flags.b.port_l1_change)
1573 port_status |= USB_PORT_STAT_C_L1 << 16;
1574 if (hsotg->flags.b.port_reset_change)
1575 port_status |= USB_PORT_STAT_C_RESET << 16;
1576 if (hsotg->flags.b.port_over_current_change) {
1577 dev_warn(hsotg->dev, "Overcurrent change detected\n");
1578 port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
1579 }
1580
1581 if (!hsotg->flags.b.port_connect_status) {
1582 /*
1583 * The port is disconnected, which means the core is
1584 * either in device mode or it soon will be. Just
1585 * return 0's for the remainder of the port status
1586 * since the port register can't be read if the core
1587 * is in device mode.
1588 */
1589 *(__le32 *)buf = cpu_to_le32(port_status);
1590 break;
1591 }
1592
1593 hprt0 = readl(hsotg->regs + HPRT0);
1594 dev_dbg(hsotg->dev, " HPRT0: 0x%08x\n", hprt0);
1595
1596 if (hprt0 & HPRT0_CONNSTS)
1597 port_status |= USB_PORT_STAT_CONNECTION;
1598 if (hprt0 & HPRT0_ENA)
1599 port_status |= USB_PORT_STAT_ENABLE;
1600 if (hprt0 & HPRT0_SUSP)
1601 port_status |= USB_PORT_STAT_SUSPEND;
1602 if (hprt0 & HPRT0_OVRCURRACT)
1603 port_status |= USB_PORT_STAT_OVERCURRENT;
1604 if (hprt0 & HPRT0_RST)
1605 port_status |= USB_PORT_STAT_RESET;
1606 if (hprt0 & HPRT0_PWR)
1607 port_status |= USB_PORT_STAT_POWER;
1608
1609 speed = hprt0 & HPRT0_SPD_MASK;
1610 if (speed == HPRT0_SPD_HIGH_SPEED)
1611 port_status |= USB_PORT_STAT_HIGH_SPEED;
1612 else if (speed == HPRT0_SPD_LOW_SPEED)
1613 port_status |= USB_PORT_STAT_LOW_SPEED;
1614
1615 if (hprt0 & HPRT0_TSTCTL_MASK)
1616 port_status |= USB_PORT_STAT_TEST;
1617 /* USB_PORT_FEAT_INDICATOR unsupported always 0 */
1618
1619 dev_dbg(hsotg->dev, "port_status=%08x\n", port_status);
1620 *(__le32 *)buf = cpu_to_le32(port_status);
1621 break;
1622
1623 case SetHubFeature:
1624 dev_dbg(hsotg->dev, "SetHubFeature\n");
1625 /* No HUB features supported */
1626 break;
1627
1628 case SetPortFeature:
1629 dev_dbg(hsotg->dev, "SetPortFeature\n");
1630 if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
1631 goto error;
1632
1633 if (!hsotg->flags.b.port_connect_status) {
1634 /*
1635 * The port is disconnected, which means the core is
1636 * either in device mode or it soon will be. Just
1637 * return without doing anything since the port
1638 * register can't be written if the core is in device
1639 * mode.
1640 */
1641 break;
1642 }
1643
1644 switch (wvalue) {
1645 case USB_PORT_FEAT_SUSPEND:
1646 dev_dbg(hsotg->dev,
1647 "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
1648 if (windex != hsotg->otg_port)
1649 goto error;
1650 dwc2_port_suspend(hsotg, windex);
1651 break;
1652
1653 case USB_PORT_FEAT_POWER:
1654 dev_dbg(hsotg->dev,
1655 "SetPortFeature - USB_PORT_FEAT_POWER\n");
1656 hprt0 = dwc2_read_hprt0(hsotg);
1657 hprt0 |= HPRT0_PWR;
1658 writel(hprt0, hsotg->regs + HPRT0);
1659 break;
1660
1661 case USB_PORT_FEAT_RESET:
1662 hprt0 = dwc2_read_hprt0(hsotg);
1663 dev_dbg(hsotg->dev,
1664 "SetPortFeature - USB_PORT_FEAT_RESET\n");
1665 pcgctl = readl(hsotg->regs + PCGCTL);
1666 pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK);
1667 writel(pcgctl, hsotg->regs + PCGCTL);
1668 /* ??? Original driver does this */
1669 writel(0, hsotg->regs + PCGCTL);
1670
1671 hprt0 = dwc2_read_hprt0(hsotg);
1672 /* Clear suspend bit if resetting from suspend state */
1673 hprt0 &= ~HPRT0_SUSP;
1674
1675 /*
1676 * When B-Host the Port reset bit is set in the Start
1677 * HCD Callback function, so that the reset is started
1678 * within 1ms of the HNP success interrupt
1679 */
1680 if (!dwc2_hcd_is_b_host(hsotg)) {
1681 hprt0 |= HPRT0_PWR | HPRT0_RST;
1682 dev_dbg(hsotg->dev,
1683 "In host mode, hprt0=%08x\n", hprt0);
1684 writel(hprt0, hsotg->regs + HPRT0);
1685 }
1686
1687 /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
1688 usleep_range(50000, 70000);
1689 hprt0 &= ~HPRT0_RST;
1690 writel(hprt0, hsotg->regs + HPRT0);
1691 hsotg->lx_state = DWC2_L0; /* Now back to On state */
1692 break;
1693
1694 case USB_PORT_FEAT_INDICATOR:
1695 dev_dbg(hsotg->dev,
1696 "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
1697 /* Not supported */
1698 break;
1699
1700 default:
1701 retval = -EINVAL;
1702 dev_err(hsotg->dev,
1703 "SetPortFeature %1xh unknown or unsupported\n",
1704 wvalue);
1705 break;
1706 }
1707 break;
1708
1709 default:
1710error:
1711 retval = -EINVAL;
1712 dev_dbg(hsotg->dev,
1713 "Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n",
1714 typereq, windex, wvalue);
1715 break;
1716 }
1717
1718 return retval;
1719}
1720
1721static int dwc2_hcd_is_status_changed(struct dwc2_hsotg *hsotg, int port)
1722{
1723 int retval;
1724
1725 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1726
1727 if (port != 1)
1728 return -EINVAL;
1729
1730 retval = (hsotg->flags.b.port_connect_status_change ||
1731 hsotg->flags.b.port_reset_change ||
1732 hsotg->flags.b.port_enable_change ||
1733 hsotg->flags.b.port_suspend_change ||
1734 hsotg->flags.b.port_over_current_change);
1735
1736 if (retval) {
1737 dev_dbg(hsotg->dev,
1738 "DWC OTG HCD HUB STATUS DATA: Root port status changed\n");
1739 dev_dbg(hsotg->dev, " port_connect_status_change: %d\n",
1740 hsotg->flags.b.port_connect_status_change);
1741 dev_dbg(hsotg->dev, " port_reset_change: %d\n",
1742 hsotg->flags.b.port_reset_change);
1743 dev_dbg(hsotg->dev, " port_enable_change: %d\n",
1744 hsotg->flags.b.port_enable_change);
1745 dev_dbg(hsotg->dev, " port_suspend_change: %d\n",
1746 hsotg->flags.b.port_suspend_change);
1747 dev_dbg(hsotg->dev, " port_over_current_change: %d\n",
1748 hsotg->flags.b.port_over_current_change);
1749 }
1750
1751 return retval;
1752}
1753
1754int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
1755{
1756 u32 hfnum = readl(hsotg->regs + HFNUM);
1757
1758#ifdef DWC2_DEBUG_SOF
1759 dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
1760 hfnum >> HFNUM_FRNUM_SHIFT &
1761 HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT);
1762#endif
1763 return hfnum >> HFNUM_FRNUM_SHIFT &
1764 HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT;
1765}
1766
1767int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
1768{
1769 return (hsotg->op_state == OTG_STATE_B_HOST);
1770}
1771
1772static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
1773 int iso_desc_count,
1774 gfp_t mem_flags)
1775{
1776 struct dwc2_hcd_urb *urb;
1777 u32 size = sizeof(*urb) + iso_desc_count *
1778 sizeof(struct dwc2_hcd_iso_packet_desc);
1779
1780 urb = kzalloc(size, mem_flags);
1781 if (urb)
1782 urb->packet_count = iso_desc_count;
1783 return urb;
1784}
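/*
 * Sizing note: the allocation above uses a trailing array, so an
 * isochronous URB with n packets occupies sizeof(struct dwc2_hcd_urb) +
 * n * sizeof(struct dwc2_hcd_iso_packet_desc) bytes; non-isochronous
 * callers pass iso_desc_count == 0 and get just the base structure.
 */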
1785
1786static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
1787 struct dwc2_hcd_urb *urb, u8 dev_addr,
1788 u8 ep_num, u8 ep_type, u8 ep_dir, u16 mps)
1789{
1790 dev_vdbg(hsotg->dev,
1791 "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n",
1792 dev_addr, ep_num, ep_dir, ep_type, mps);
1793 urb->pipe_info.dev_addr = dev_addr;
1794 urb->pipe_info.ep_num = ep_num;
1795 urb->pipe_info.pipe_type = ep_type;
1796 urb->pipe_info.pipe_dir = ep_dir;
1797 urb->pipe_info.mps = mps;
1798}
1799
1800/*
1801 * NOTE: This function will be removed once the peripheral controller code
1802 * is integrated and the driver is stable
1803 */
1804void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
1805{
1806#ifdef DEBUG
1807 struct dwc2_host_chan *chan;
1808 struct dwc2_hcd_urb *urb;
1809 struct dwc2_qtd *qtd;
1810 int num_channels;
1811 u32 np_tx_status;
1812 u32 p_tx_status;
1813 int i;
1814
1815 num_channels = hsotg->core_params->host_channels;
1816 dev_dbg(hsotg->dev, "\n");
1817 dev_dbg(hsotg->dev,
1818 "************************************************************\n");
1819 dev_dbg(hsotg->dev, "HCD State:\n");
1820 dev_dbg(hsotg->dev, " Num channels: %d\n", num_channels);
1821
1822 for (i = 0; i < num_channels; i++) {
1823 chan = hsotg->hc_ptr_array[i];
1824 dev_dbg(hsotg->dev, " Channel %d:\n", i);
1825 dev_dbg(hsotg->dev,
1826 " dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
1827 chan->dev_addr, chan->ep_num, chan->ep_is_in);
1828 dev_dbg(hsotg->dev, " speed: %d\n", chan->speed);
1829 dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
1830 dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
1831 dev_dbg(hsotg->dev, " data_pid_start: %d\n",
1832 chan->data_pid_start);
1833 dev_dbg(hsotg->dev, " multi_count: %d\n", chan->multi_count);
1834 dev_dbg(hsotg->dev, " xfer_started: %d\n",
1835 chan->xfer_started);
1836 dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
1837 dev_dbg(hsotg->dev, " xfer_dma: %08lx\n",
1838 (unsigned long)chan->xfer_dma);
1839 dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
1840 dev_dbg(hsotg->dev, " xfer_count: %d\n", chan->xfer_count);
1841 dev_dbg(hsotg->dev, " halt_on_queue: %d\n",
1842 chan->halt_on_queue);
1843 dev_dbg(hsotg->dev, " halt_pending: %d\n",
1844 chan->halt_pending);
1845 dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
1846 dev_dbg(hsotg->dev, " do_split: %d\n", chan->do_split);
1847 dev_dbg(hsotg->dev, " complete_split: %d\n",
1848 chan->complete_split);
1849 dev_dbg(hsotg->dev, " hub_addr: %d\n", chan->hub_addr);
1850 dev_dbg(hsotg->dev, " hub_port: %d\n", chan->hub_port);
1851 dev_dbg(hsotg->dev, " xact_pos: %d\n", chan->xact_pos);
1852 dev_dbg(hsotg->dev, " requests: %d\n", chan->requests);
1853 dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
1854
1855 if (chan->xfer_started) {
1856 u32 hfnum, hcchar, hctsiz, hcint, hcintmsk;
1857
1858 hfnum = readl(hsotg->regs + HFNUM);
1859 hcchar = readl(hsotg->regs + HCCHAR(i));
1860 hctsiz = readl(hsotg->regs + HCTSIZ(i));
1861 hcint = readl(hsotg->regs + HCINT(i));
1862 hcintmsk = readl(hsotg->regs + HCINTMSK(i));
1863 dev_dbg(hsotg->dev, " hfnum: 0x%08x\n", hfnum);
1864 dev_dbg(hsotg->dev, " hcchar: 0x%08x\n", hcchar);
1865 dev_dbg(hsotg->dev, " hctsiz: 0x%08x\n", hctsiz);
1866 dev_dbg(hsotg->dev, " hcint: 0x%08x\n", hcint);
1867 dev_dbg(hsotg->dev, " hcintmsk: 0x%08x\n", hcintmsk);
1868 }
1869
1870 if (!(chan->xfer_started && chan->qh))
1871 continue;
1872
1873 list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) {
1874 if (!qtd->in_process)
1875 break;
1876 urb = qtd->urb;
1877 dev_dbg(hsotg->dev, " URB Info:\n");
1878 dev_dbg(hsotg->dev, " qtd: %p, urb: %p\n",
1879 qtd, urb);
1880 if (urb) {
1881 dev_dbg(hsotg->dev,
1882 " Dev: %d, EP: %d %s\n",
1883 dwc2_hcd_get_dev_addr(&urb->pipe_info),
1884 dwc2_hcd_get_ep_num(&urb->pipe_info),
1885 dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
1886 "IN" : "OUT");
1887 dev_dbg(hsotg->dev,
1888 " Max packet size: %d\n",
1889 dwc2_hcd_get_mps(&urb->pipe_info));
1890 dev_dbg(hsotg->dev,
1891 " transfer_buffer: %p\n",
1892 urb->buf);
1893 dev_dbg(hsotg->dev,
1894 " transfer_dma: %08lx\n",
1895 (unsigned long)urb->dma);
1896 dev_dbg(hsotg->dev,
1897 " transfer_buffer_length: %d\n",
1898 urb->length);
1899 dev_dbg(hsotg->dev, " actual_length: %d\n",
1900 urb->actual_length);
1901 }
1902 }
1903 }
1904
1905 dev_dbg(hsotg->dev, " non_periodic_channels: %d\n",
1906 hsotg->non_periodic_channels);
1907 dev_dbg(hsotg->dev, " periodic_channels: %d\n",
1908 hsotg->periodic_channels);
1909 dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs);
1910 np_tx_status = readl(hsotg->regs + GNPTXSTS);
1911 dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n",
1912 np_tx_status >> TXSTS_QSPCAVAIL_SHIFT &
1913 TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT);
1914 dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n",
1915 np_tx_status >> TXSTS_FSPCAVAIL_SHIFT &
1916 TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT);
1917 p_tx_status = readl(hsotg->regs + HPTXSTS);
1918 dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n",
1919 p_tx_status >> TXSTS_QSPCAVAIL_SHIFT &
1920 TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT);
1921 dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n",
1922 p_tx_status >> TXSTS_FSPCAVAIL_SHIFT &
1923 TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT);
1924 dwc2_hcd_dump_frrem(hsotg);
1925 dwc2_dump_global_registers(hsotg);
1926 dwc2_dump_host_registers(hsotg);
1927 dev_dbg(hsotg->dev,
1928 "************************************************************\n");
1929 dev_dbg(hsotg->dev, "\n");
1930#endif
1931}
1932
1933/*
1934 * NOTE: This function will be removed once the peripheral controller code
1935 * is integrated and the driver is stable
1936 */
1937void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg)
1938{
1939#ifdef DWC2_DUMP_FRREM
1940 dev_dbg(hsotg->dev, "Frame remaining at SOF:\n");
1941 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
1942 hsotg->frrem_samples, hsotg->frrem_accum,
1943 hsotg->frrem_samples > 0 ?
1944 hsotg->frrem_accum / hsotg->frrem_samples : 0);
1945 dev_dbg(hsotg->dev, "\n");
1946 dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 7):\n");
1947 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
1948 hsotg->hfnum_7_samples,
1949 hsotg->hfnum_7_frrem_accum,
1950 hsotg->hfnum_7_samples > 0 ?
1951 hsotg->hfnum_7_frrem_accum / hsotg->hfnum_7_samples : 0);
1952 dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 0):\n");
1953 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
1954 hsotg->hfnum_0_samples,
1955 hsotg->hfnum_0_frrem_accum,
1956 hsotg->hfnum_0_samples > 0 ?
1957 hsotg->hfnum_0_frrem_accum / hsotg->hfnum_0_samples : 0);
1958 dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 1-6):\n");
1959 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
1960 hsotg->hfnum_other_samples,
1961 hsotg->hfnum_other_frrem_accum,
1962 hsotg->hfnum_other_samples > 0 ?
1963 hsotg->hfnum_other_frrem_accum / hsotg->hfnum_other_samples :
1964 0);
1965 dev_dbg(hsotg->dev, "\n");
1966 dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 7):\n");
1967 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
1968 hsotg->hfnum_7_samples_a, hsotg->hfnum_7_frrem_accum_a,
1969 hsotg->hfnum_7_samples_a > 0 ?
1970 hsotg->hfnum_7_frrem_accum_a / hsotg->hfnum_7_samples_a : 0);
1971 dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 0):\n");
1972 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
1973 hsotg->hfnum_0_samples_a, hsotg->hfnum_0_frrem_accum_a,
1974 hsotg->hfnum_0_samples_a > 0 ?
1975 hsotg->hfnum_0_frrem_accum_a / hsotg->hfnum_0_samples_a : 0);
1976 dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 1-6):\n");
1977 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
1978 hsotg->hfnum_other_samples_a, hsotg->hfnum_other_frrem_accum_a,
1979 hsotg->hfnum_other_samples_a > 0 ?
1980 hsotg->hfnum_other_frrem_accum_a / hsotg->hfnum_other_samples_a
1981 : 0);
1982 dev_dbg(hsotg->dev, "\n");
1983 dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 7):\n");
1984 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
1985 hsotg->hfnum_7_samples_b, hsotg->hfnum_7_frrem_accum_b,
1986 hsotg->hfnum_7_samples_b > 0 ?
1987 hsotg->hfnum_7_frrem_accum_b / hsotg->hfnum_7_samples_b : 0);
1988 dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 0):\n");
1989 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
1990 hsotg->hfnum_0_samples_b, hsotg->hfnum_0_frrem_accum_b,
1991 (hsotg->hfnum_0_samples_b > 0) ?
1992 hsotg->hfnum_0_frrem_accum_b / hsotg->hfnum_0_samples_b : 0);
1993 dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 1-6):\n");
1994 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
1995 hsotg->hfnum_other_samples_b, hsotg->hfnum_other_frrem_accum_b,
1996 (hsotg->hfnum_other_samples_b > 0) ?
1997 hsotg->hfnum_other_frrem_accum_b / hsotg->hfnum_other_samples_b
1998 : 0);
1999#endif
2000}
2001
2002struct wrapper_priv_data {
2003 struct dwc2_hsotg *hsotg;
2004};
2005
2006/* Gets the dwc2_hsotg from a usb_hcd */
2007static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd)
2008{
2009 struct wrapper_priv_data *p;
2010
2011 p = (struct wrapper_priv_data *) &hcd->hcd_priv;
2012 return p->hsotg;
2013}
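/*
 * Illustrative only: the cast above is valid because dwc2_hc_driver (below)
 * sets .hcd_priv_size = sizeof(struct wrapper_priv_data), so the USB core
 * reserves that much storage at hcd->hcd_priv. A hypothetical setter,
 * mirroring what dwc2_hcd_init() does inline, would be:
 */
static inline void dwc2_hcd_set_hsotg_example(struct usb_hcd *hcd,
					      struct dwc2_hsotg *hsotg)
{
	((struct wrapper_priv_data *)&hcd->hcd_priv)->hsotg = hsotg;
}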
2014
2015static int _dwc2_hcd_start(struct usb_hcd *hcd);
2016
2017void dwc2_host_start(struct dwc2_hsotg *hsotg)
2018{
2019 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
2020
2021 hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
2022 _dwc2_hcd_start(hcd);
2023}
2024
2025void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
2026{
2027 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
2028
2029 hcd->self.is_b_host = 0;
2030}
2031
2032void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
2033 int *hub_port)
2034{
2035 struct urb *urb = context;
2036
2037 if (urb->dev->tt)
2038 *hub_addr = urb->dev->tt->hub->devnum;
2039 else
2040 *hub_addr = 0;
2041 *hub_port = urb->dev->ttport;
2042}
2043
2044int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
2045{
2046 struct urb *urb = context;
2047
2048 return urb->dev->speed;
2049}
2050
2051static void dwc2_allocate_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
2052 struct urb *urb)
2053{
2054 struct usb_bus *bus = hcd_to_bus(hcd);
2055
2056 if (urb->interval)
2057 bus->bandwidth_allocated += bw / urb->interval;
2058 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2059 bus->bandwidth_isoc_reqs++;
2060 else
2061 bus->bandwidth_int_reqs++;
2062}
2063
2064static void dwc2_free_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
2065 struct urb *urb)
2066{
2067 struct usb_bus *bus = hcd_to_bus(hcd);
2068
2069 if (urb->interval)
2070 bus->bandwidth_allocated -= bw / urb->interval;
2071 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2072 bus->bandwidth_isoc_reqs--;
2073 else
2074 bus->bandwidth_int_reqs--;
2075}
2076
2077/*
2078 * Sets the final status of an URB and returns it to the upper layer. Any
2079 * required cleanup of the URB is performed.
2080 *
2081 * Must be called with interrupt disabled and spinlock held
2082 */
2083void dwc2_host_complete(struct dwc2_hsotg *hsotg, void *context,
2084 struct dwc2_hcd_urb *dwc2_urb, int status)
2085{
2086 struct urb *urb = context;
2087 int i;
2088
2089 if (!urb) {
2090 dev_dbg(hsotg->dev, "## %s: context is NULL ##\n", __func__);
2091 return;
2092 }
2093
2094 if (!dwc2_urb) {
2095 dev_dbg(hsotg->dev, "## %s: dwc2_urb is NULL ##\n", __func__);
2096 return;
2097 }
2098
2099 urb->actual_length = dwc2_hcd_urb_get_actual_length(dwc2_urb);
2100
2101 dev_vdbg(hsotg->dev,
2102 "%s: urb %p device %d ep %d-%s status %d actual %d\n",
2103 __func__, urb, usb_pipedevice(urb->pipe),
2104 usb_pipeendpoint(urb->pipe),
2105 usb_pipein(urb->pipe) ? "IN" : "OUT", status,
2106 urb->actual_length);
2107
2108 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2109 for (i = 0; i < urb->number_of_packets; i++)
2110 dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n",
2111 i, urb->iso_frame_desc[i].status);
2112 }
2113
2114 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2115 urb->error_count = dwc2_hcd_urb_get_error_count(dwc2_urb);
2116 for (i = 0; i < urb->number_of_packets; ++i) {
2117 urb->iso_frame_desc[i].actual_length =
2118 dwc2_hcd_urb_get_iso_desc_actual_length(
2119 dwc2_urb, i);
2120 urb->iso_frame_desc[i].status =
2121 dwc2_hcd_urb_get_iso_desc_status(dwc2_urb, i);
2122 }
2123 }
2124
2125 urb->status = status;
2126 urb->hcpriv = NULL;
2127 if (!status) {
2128 if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
2129 urb->actual_length < urb->transfer_buffer_length)
2130 urb->status = -EREMOTEIO;
2131 }
2132
2133 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
2134 usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
2135 struct usb_host_endpoint *ep = urb->ep;
2136
2137 if (ep)
2138 dwc2_free_bus_bandwidth(dwc2_hsotg_to_hcd(hsotg),
2139 dwc2_hcd_get_ep_bandwidth(hsotg, ep),
2140 urb);
2141 }
2142
2143 kfree(dwc2_urb);
2144
2145 spin_unlock(&hsotg->lock);
2146 usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status);
2147 spin_lock(&hsotg->lock);
2148}
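/*
 * Minimal sketch of the locking contract documented above (hypothetical
 * caller, not part of this file): dwc2_host_complete() must be entered with
 * hsotg->lock held and interrupts disabled; it drops and re-acquires the
 * lock itself around usb_hcd_giveback_urb().
 */
static inline void dwc2_complete_urb_example(struct dwc2_hsotg *hsotg,
					     struct dwc2_hcd_urb *dwc2_urb,
					     int status)
{
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);
	/* dwc2_urb->priv holds the struct urb set up at enqueue time */
	dwc2_host_complete(hsotg, dwc2_urb->priv, dwc2_urb, status);
	spin_unlock_irqrestore(&hsotg->lock, flags);
}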
2149
2150/*
2151 * Work queue function for starting the HCD when A-Cable is connected
2152 */
2153static void dwc2_hcd_start_func(struct work_struct *work)
2154{
2155 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
2156 start_work.work);
2157
2158 dev_dbg(hsotg->dev, "%s() %p\n", __func__, hsotg);
2159 dwc2_host_start(hsotg);
2160}
2161
2162/*
2163 * Reset work queue function
2164 */
2165static void dwc2_hcd_reset_func(struct work_struct *work)
2166{
2167 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
2168 reset_work.work);
2169 u32 hprt0;
2170
2171 dev_dbg(hsotg->dev, "USB RESET function called\n");
2172 hprt0 = dwc2_read_hprt0(hsotg);
2173 hprt0 &= ~HPRT0_RST;
2174 writel(hprt0, hsotg->regs + HPRT0);
2175 hsotg->flags.b.port_reset_change = 1;
2176}
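/*
 * Illustrative only (hypothetical call site, delay chosen arbitrarily for
 * the sketch): delayed work items such as reset_work above are queued on
 * the driver's wq_otg workqueue, e.g.:
 */
static inline void dwc2_queue_reset_work_example(struct dwc2_hsotg *hsotg)
{
	queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
			   msecs_to_jiffies(60));
}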
2177
2178/*
2179 * =========================================================================
2180 * Linux HC Driver Functions
2181 * =========================================================================
2182 */
2183
2184/*
2185 * Initializes the DWC_otg controller and its root hub and prepares it for host
2186 * mode operation. Activates the root port. Returns 0 on success and a negative
2187 * error code on failure.
2188 */
2189static int _dwc2_hcd_start(struct usb_hcd *hcd)
2190{
2191 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
2192 struct usb_bus *bus = hcd_to_bus(hcd);
2193 unsigned long flags;
2194
2195 dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
2196
2197 spin_lock_irqsave(&hsotg->lock, flags);
2198
2199 hcd->state = HC_STATE_RUNNING;
2200
2201 if (dwc2_is_device_mode(hsotg)) {
2202 spin_unlock_irqrestore(&hsotg->lock, flags);
2203 return 0; /* why 0 ?? */
2204 }
2205
2206 dwc2_hcd_reinit(hsotg);
2207
2208 /* Initialize and connect root hub if one is not already attached */
2209 if (bus->root_hub) {
2210 dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n");
2211 /* Inform the HUB driver to resume */
2212 usb_hcd_resume_root_hub(hcd);
2213 }
2214
2215 spin_unlock_irqrestore(&hsotg->lock, flags);
2216 return 0;
2217}
2218
2219/*
2220 * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
2221 * stopped.
2222 */
2223static void _dwc2_hcd_stop(struct usb_hcd *hcd)
2224{
2225 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
2226 unsigned long flags;
2227
2228 spin_lock_irqsave(&hsotg->lock, flags);
2229 dwc2_hcd_stop(hsotg);
2230 spin_unlock_irqrestore(&hsotg->lock, flags);
2231
2232 usleep_range(1000, 3000);
2233}
2234
2235/* Returns the current frame number */
2236static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd)
2237{
2238 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
2239
2240 return dwc2_hcd_get_frame_number(hsotg);
2241}
2242
2243static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
2244 char *fn_name)
2245{
2246#ifdef VERBOSE_DEBUG
2247 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
2248 char *pipetype;
2249 char *speed;
2250
2251 dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
2252 dev_vdbg(hsotg->dev, " Device address: %d\n",
2253 usb_pipedevice(urb->pipe));
2254 dev_vdbg(hsotg->dev, " Endpoint: %d, %s\n",
2255 usb_pipeendpoint(urb->pipe),
2256 usb_pipein(urb->pipe) ? "IN" : "OUT");
2257
2258 switch (usb_pipetype(urb->pipe)) {
2259 case PIPE_CONTROL:
2260 pipetype = "CONTROL";
2261 break;
2262 case PIPE_BULK:
2263 pipetype = "BULK";
2264 break;
2265 case PIPE_INTERRUPT:
2266 pipetype = "INTERRUPT";
2267 break;
2268 case PIPE_ISOCHRONOUS:
2269 pipetype = "ISOCHRONOUS";
2270 break;
2271 default:
2272 pipetype = "UNKNOWN";
2273 break;
2274 }
2275
2276 dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype,
2277 usb_urb_dir_in(urb) ? "IN" : "OUT", usb_pipein(urb->pipe) ?
2278 "IN" : "OUT");
2279
2280 switch (urb->dev->speed) {
2281 case USB_SPEED_HIGH:
2282 speed = "HIGH";
2283 break;
2284 case USB_SPEED_FULL:
2285 speed = "FULL";
2286 break;
2287 case USB_SPEED_LOW:
2288 speed = "LOW";
2289 break;
2290 default:
2291 speed = "UNKNOWN";
2292 break;
2293 }
2294
2295 dev_vdbg(hsotg->dev, " Speed: %s\n", speed);
2296 dev_vdbg(hsotg->dev, " Max packet size: %d\n",
2297 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
2298 dev_vdbg(hsotg->dev, " Data buffer length: %d\n",
2299 urb->transfer_buffer_length);
2300 dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
2301 urb->transfer_buffer, (unsigned long)urb->transfer_dma);
2302 dev_vdbg(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
2303 urb->setup_packet, (unsigned long)urb->setup_dma);
2304 dev_vdbg(hsotg->dev, " Interval: %d\n", urb->interval);
2305
2306 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2307 int i;
2308
2309 for (i = 0; i < urb->number_of_packets; i++) {
2310 dev_vdbg(hsotg->dev, " ISO Desc %d:\n", i);
2311 dev_vdbg(hsotg->dev, " offset: %d, length %d\n",
2312 urb->iso_frame_desc[i].offset,
2313 urb->iso_frame_desc[i].length);
2314 }
2315 }
2316#endif
2317}
2318
2319/*
2320 * Starts processing a USB transfer request specified by a USB Request Block
2321 * (URB). mem_flags indicates the type of memory allocation to use while
2322 * processing this URB.
2323 */
2324static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
2325 gfp_t mem_flags)
2326{
2327 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
2328 struct usb_host_endpoint *ep = urb->ep;
2329 struct dwc2_hcd_urb *dwc2_urb;
2330 int i;
2331 int alloc_bandwidth = 0;
2332 int retval = 0;
2333 u8 ep_type = 0;
2334 u32 tflags = 0;
2335 void *buf;
2336 unsigned long flags;
2337
2338 dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n");
2339 dwc2_dump_urb_info(hcd, urb, "urb_enqueue");
2340
2341 if (ep == NULL)
2342 return -EINVAL;
2343
2344 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
2345 usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
2346 spin_lock_irqsave(&hsotg->lock, flags);
2347 if (!dwc2_hcd_is_bandwidth_allocated(hsotg, ep))
2348 alloc_bandwidth = 1;
2349 spin_unlock_irqrestore(&hsotg->lock, flags);
2350 }
2351
2352 switch (usb_pipetype(urb->pipe)) {
2353 case PIPE_CONTROL:
2354 ep_type = USB_ENDPOINT_XFER_CONTROL;
2355 break;
2356 case PIPE_ISOCHRONOUS:
2357 ep_type = USB_ENDPOINT_XFER_ISOC;
2358 break;
2359 case PIPE_BULK:
2360 ep_type = USB_ENDPOINT_XFER_BULK;
2361 break;
2362 case PIPE_INTERRUPT:
2363 ep_type = USB_ENDPOINT_XFER_INT;
2364 break;
2365 default:
2366 dev_warn(hsotg->dev, "Wrong ep type\n");
2367 }
2368
2369 dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
2370 mem_flags);
2371 if (!dwc2_urb)
2372 return -ENOMEM;
2373
2374 dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
2375 usb_pipeendpoint(urb->pipe), ep_type,
2376 usb_pipein(urb->pipe),
2377 usb_maxpacket(urb->dev, urb->pipe,
2378 !(usb_pipein(urb->pipe))));
2379
2380 buf = urb->transfer_buffer;
2381 if (hcd->self.uses_dma) {
2382 /*
2383		 * Calculate the virtual address from the physical address,
2384		 * because some class drivers may not fill in transfer_buffer.
2385		 * In Buffer DMA mode the virtual address is used when handling
2386		 * non-DWORD-aligned buffers.
2387 */
2388 buf = bus_to_virt(urb->transfer_dma);
2389 }
2390
2391 if (!(urb->transfer_flags & URB_NO_INTERRUPT))
2392 tflags |= URB_GIVEBACK_ASAP;
2393 if (urb->transfer_flags & URB_ZERO_PACKET)
2394 tflags |= URB_SEND_ZERO_PACKET;
2395
2396 dwc2_urb->priv = urb;
2397 dwc2_urb->buf = buf;
2398 dwc2_urb->dma = urb->transfer_dma;
2399 dwc2_urb->length = urb->transfer_buffer_length;
2400 dwc2_urb->setup_packet = urb->setup_packet;
2401 dwc2_urb->setup_dma = urb->setup_dma;
2402 dwc2_urb->flags = tflags;
2403 dwc2_urb->interval = urb->interval;
2404 dwc2_urb->status = -EINPROGRESS;
2405
2406 for (i = 0; i < urb->number_of_packets; ++i)
2407 dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i,
2408 urb->iso_frame_desc[i].offset,
2409 urb->iso_frame_desc[i].length);
2410
2411 urb->hcpriv = dwc2_urb;
2412 retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv,
2413 mem_flags);
2414 if (retval) {
2415 urb->hcpriv = NULL;
2416 kfree(dwc2_urb);
2417 } else {
2418 if (alloc_bandwidth) {
2419 spin_lock_irqsave(&hsotg->lock, flags);
2420 dwc2_allocate_bus_bandwidth(hcd,
2421 dwc2_hcd_get_ep_bandwidth(hsotg, ep),
2422 urb);
2423 spin_unlock_irqrestore(&hsotg->lock, flags);
2424 }
2425 }
2426
2427 return retval;
2428}
2429
2430/*
2431 * Aborts/cancels a USB transfer request. Returns 0 on success.
2432 */
2433static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
2434 int status)
2435{
2436 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
2437 int rc = 0;
2438 unsigned long flags;
2439
2440 dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n");
2441 dwc2_dump_urb_info(hcd, urb, "urb_dequeue");
2442
2443 spin_lock_irqsave(&hsotg->lock, flags);
2444
2445 if (!urb->hcpriv) {
2446 dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n");
2447 goto out;
2448 }
2449
2450 rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv);
2451
2452 kfree(urb->hcpriv);
2453 urb->hcpriv = NULL;
2454
2455 /* Higher layer software sets URB status */
2456 spin_unlock(&hsotg->lock);
2457 usb_hcd_giveback_urb(hcd, urb, status);
2458 spin_lock(&hsotg->lock);
2459
2460 dev_dbg(hsotg->dev, "Called usb_hcd_giveback_urb()\n");
2461 dev_dbg(hsotg->dev, " urb->status = %d\n", urb->status);
2462out:
2463 spin_unlock_irqrestore(&hsotg->lock, flags);
2464
2465 return rc;
2466}
2467
2468/*
2469 * Frees resources in the DWC_otg controller related to a given endpoint. Also
2470 * clears state in the HCD related to the endpoint. Any URBs for the endpoint
2471 * must already be dequeued.
2472 */
2473static void _dwc2_hcd_endpoint_disable(struct usb_hcd *hcd,
2474 struct usb_host_endpoint *ep)
2475{
2476 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
2477
2478 dev_dbg(hsotg->dev,
2479 "DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, ep->hcpriv=%p\n",
2480 ep->desc.bEndpointAddress, ep->hcpriv);
2481 dwc2_hcd_endpoint_disable(hsotg, ep, 250);
2482}
2483
2484/*
2485 * Resets endpoint-specific parameter values; in the current version this is
2486 * used to reset the data toggle (as a workaround). This function can be
2487 * called from the usb_clear_halt() routine.
2488 */
2489static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
2490 struct usb_host_endpoint *ep)
2491{
2492 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
2493 int is_control = usb_endpoint_xfer_control(&ep->desc);
2494 int is_out = usb_endpoint_dir_out(&ep->desc);
2495 int epnum = usb_endpoint_num(&ep->desc);
2496 struct usb_device *udev;
2497 unsigned long flags;
2498
2499 dev_dbg(hsotg->dev,
2500 "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
2501 ep->desc.bEndpointAddress);
2502
2503 udev = to_usb_device(hsotg->dev);
2504
2505 spin_lock_irqsave(&hsotg->lock, flags);
2506
2507 usb_settoggle(udev, epnum, is_out, 0);
2508 if (is_control)
2509 usb_settoggle(udev, epnum, !is_out, 0);
2510 dwc2_hcd_endpoint_reset(hsotg, ep);
2511
2512 spin_unlock_irqrestore(&hsotg->lock, flags);
2513}
2514
2515/*
2516 * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
2517 * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
2518 * interrupt.
2519 *
2520 * This function is called by the USB core when an interrupt occurs
2521 */
2522static irqreturn_t _dwc2_hcd_irq(struct usb_hcd *hcd)
2523{
2524 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
2525 int retval = dwc2_hcd_intr(hsotg);
2526
2527 return IRQ_RETVAL(retval);
2528}
2529
2530/*
2531 * Creates Status Change bitmap for the root hub and root port. The bitmap is
2532 * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
2533 * is the status change indicator for the single root port. Returns 1 if either
2534 * change indicator is 1, otherwise returns 0.
2535 */
2536static int _dwc2_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
2537{
2538 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
2539
2540 buf[0] = dwc2_hcd_is_status_changed(hsotg, 1) << 1;
2541 return buf[0] != 0;
2542}
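/*
 * Illustrative sketch (hypothetical, generic code, not used by this driver):
 * in the status-change bitmap handed back to usbcore, bit 0 stands for the
 * hub itself and bit P for port P. With its single root port, dwc2 only ever
 * sets bit 1, which is what the "<< 1" above does.
 */
static inline int dwc2_hub_status_bitmap_example(char *buf, int nports,
						 int (*port_changed)(int port))
{
	int port;

	buf[0] = 0;
	for (port = 1; port <= nports && port <= 7; port++)
		if (port_changed(port))
			buf[0] |= 1 << port;

	return buf[0] != 0;
}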
2543
2544/* Handles hub class-specific requests */
2545static int _dwc2_hcd_hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue,
2546 u16 windex, char *buf, u16 wlength)
2547{
2548 int retval = dwc2_hcd_hub_control(dwc2_hcd_to_hsotg(hcd), typereq,
2549 wvalue, windex, buf, wlength);
2550 return retval;
2551}
2552
2553/* Handles hub TT buffer clear completions */
2554static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd,
2555 struct usb_host_endpoint *ep)
2556{
2557 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
2558 struct dwc2_qh *qh;
2559 unsigned long flags;
2560
2561 qh = ep->hcpriv;
2562 if (!qh)
2563 return;
2564
2565 spin_lock_irqsave(&hsotg->lock, flags);
2566 qh->tt_buffer_dirty = 0;
2567
2568 if (hsotg->flags.b.port_connect_status)
2569 dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_ALL);
2570
2571 spin_unlock_irqrestore(&hsotg->lock, flags);
2572}
2573
2574static struct hc_driver dwc2_hc_driver = {
2575 .description = "dwc2_hsotg",
2576 .product_desc = "DWC OTG Controller",
2577 .hcd_priv_size = sizeof(struct wrapper_priv_data),
2578
2579 .irq = _dwc2_hcd_irq,
2580 .flags = HCD_MEMORY | HCD_USB2,
2581
2582 .start = _dwc2_hcd_start,
2583 .stop = _dwc2_hcd_stop,
2584 .urb_enqueue = _dwc2_hcd_urb_enqueue,
2585 .urb_dequeue = _dwc2_hcd_urb_dequeue,
2586 .endpoint_disable = _dwc2_hcd_endpoint_disable,
2587 .endpoint_reset = _dwc2_hcd_endpoint_reset,
2588 .get_frame_number = _dwc2_hcd_get_frame_number,
2589
2590 .hub_status_data = _dwc2_hcd_hub_status_data,
2591 .hub_control = _dwc2_hcd_hub_control,
2592 .clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete,
2593};
2594
2595/*
2596 * Frees the secondary storage associated with the dwc2_hsotg structure
2597 * referenced from the struct usb_hcd's private data.
2598 */
2599static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
2600{
2601 u32 ahbcfg;
2602 u32 dctl;
2603 int i;
2604
2605 dev_dbg(hsotg->dev, "DWC OTG HCD FREE\n");
2606
2607 /* Free memory for QH/QTD lists */
2608 dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive);
2609 dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active);
2610 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive);
2611 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready);
2612 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_assigned);
2613 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_queued);
2614
2615 /* Free memory for the host channels */
2616 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
2617 struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
2618
2619 if (chan != NULL) {
2620 dev_dbg(hsotg->dev, "HCD Free channel #%i, chan=%p\n",
2621 i, chan);
2622 hsotg->hc_ptr_array[i] = NULL;
2623 kfree(chan);
2624 }
2625 }
2626
2627 if (hsotg->core_params->dma_enable > 0) {
2628 if (hsotg->status_buf) {
2629 dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE,
2630 hsotg->status_buf,
2631 hsotg->status_buf_dma);
2632 hsotg->status_buf = NULL;
2633 }
2634 } else {
2635 kfree(hsotg->status_buf);
2636 hsotg->status_buf = NULL;
2637 }
2638
2639 ahbcfg = readl(hsotg->regs + GAHBCFG);
2640
2641 /* Disable all interrupts */
2642 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
2643 writel(ahbcfg, hsotg->regs + GAHBCFG);
2644 writel(0, hsotg->regs + GINTMSK);
2645
2646 if (hsotg->snpsid >= DWC2_CORE_REV_3_00a) {
2647 dctl = readl(hsotg->regs + DCTL);
2648 dctl |= DCTL_SFTDISCON;
2649 writel(dctl, hsotg->regs + DCTL);
2650 }
2651
2652 if (hsotg->wq_otg) {
2653 if (!cancel_work_sync(&hsotg->wf_otg))
2654 flush_workqueue(hsotg->wq_otg);
2655 destroy_workqueue(hsotg->wq_otg);
2656 }
2657
2658 kfree(hsotg->core_params);
2659 hsotg->core_params = NULL;
2660 del_timer(&hsotg->wkp_timer);
2661}
2662
2663static void dwc2_hcd_release(struct dwc2_hsotg *hsotg)
2664{
2665 /* Turn off all host-specific interrupts */
2666 dwc2_disable_host_interrupts(hsotg);
2667
2668 dwc2_hcd_free(hsotg);
2669}
2670
2671static void dwc2_set_uninitialized(int *p, int size)
2672{
2673 int i;
2674
2675 for (i = 0; i < size; i++)
2676 p[i] = -1;
2677}
2678
2679/*
2680 * Initializes the HCD. This function allocates memory for and initializes the
2681 * static parts of the usb_hcd and dwc2_hsotg structures. It also registers the
2682 * USB bus with the core and calls the hc_driver->start() function. It returns
2683 * 0 on success or a negative error code on failure.
2684 */
2685int dwc2_hcd_init(struct device *dev, struct dwc2_hsotg *hsotg, int irq,
2686 struct dwc2_core_params *params)
2687{
2688 struct usb_hcd *hcd;
2689 struct dwc2_host_chan *channel;
2690 u32 snpsid, gusbcfg, hcfg;
2691 int i, num_channels;
2692 int retval = -ENOMEM;
2693
2694 dev_dbg(dev, "DWC OTG HCD INIT\n");
2695
2696 /*
2697 * Attempt to ensure this device is really a DWC_otg Controller.
2698 * Read and verify the GSNPSID register contents. The value should be
2699	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
2700 * as in "OTG version 2.xx" or "OTG version 3.xx".
2701 */
2702 snpsid = readl(hsotg->regs + GSNPSID);
2703 if ((snpsid & 0xfffff000) != 0x4f542000 &&
2704 (snpsid & 0xfffff000) != 0x4f543000) {
2705 dev_err(dev, "Bad value for GSNPSID: 0x%08x\n", snpsid);
2706 retval = -ENODEV;
2707 goto error1;
2708 }
2709
2710 hcd = usb_create_hcd(&dwc2_hc_driver, dev, dev_name(dev));
2711 if (!hcd)
2712 goto error1;
2713
2714 hcd->has_tt = 1;
2715
2716 spin_lock_init(&hsotg->lock);
2717 ((struct wrapper_priv_data *) &hcd->hcd_priv)->hsotg = hsotg;
2718 hsotg->priv = hcd;
2719 hsotg->dev = dev;
2720
2721 /*
2722 * Store the contents of the hardware configuration registers here for
2723 * easy access later
2724 */
2725 hsotg->hwcfg1 = readl(hsotg->regs + GHWCFG1);
2726 hsotg->hwcfg2 = readl(hsotg->regs + GHWCFG2);
2727 hsotg->hwcfg3 = readl(hsotg->regs + GHWCFG3);
2728 hsotg->hwcfg4 = readl(hsotg->regs + GHWCFG4);
2729
2730 dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hsotg->hwcfg1);
2731 dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hsotg->hwcfg2);
2732 dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hsotg->hwcfg3);
2733 dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hsotg->hwcfg4);
2734
2735 /* Force host mode to get HPTXFSIZ exact power on value */
2736 gusbcfg = readl(hsotg->regs + GUSBCFG);
2737 gusbcfg |= GUSBCFG_FORCEHOSTMODE;
2738 writel(gusbcfg, hsotg->regs + GUSBCFG);
2739 usleep_range(100000, 150000);
2740
2741 hsotg->hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
2742 dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hsotg->hptxfsiz);
2743 gusbcfg = readl(hsotg->regs + GUSBCFG);
2744 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
2745 writel(gusbcfg, hsotg->regs + GUSBCFG);
2746 usleep_range(100000, 150000);
2747
2748 hcfg = readl(hsotg->regs + HCFG);
2749 dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg);
2750 dev_dbg(hsotg->dev, "op_mode=%0x\n",
2751 hsotg->hwcfg2 >> GHWCFG2_OP_MODE_SHIFT &
2752 GHWCFG2_OP_MODE_MASK >> GHWCFG2_OP_MODE_SHIFT);
2753 dev_dbg(hsotg->dev, "arch=%0x\n",
2754 hsotg->hwcfg2 >> GHWCFG2_ARCHITECTURE_SHIFT &
2755 GHWCFG2_ARCHITECTURE_MASK >> GHWCFG2_ARCHITECTURE_SHIFT);
2756 dev_dbg(hsotg->dev, "num_dev_ep=%d\n",
2757 hsotg->hwcfg2 >> GHWCFG2_NUM_DEV_EP_SHIFT &
2758 GHWCFG2_NUM_DEV_EP_MASK >> GHWCFG2_NUM_DEV_EP_SHIFT);
2759 dev_dbg(hsotg->dev, "max_host_chan=%d\n",
2760 hsotg->hwcfg2 >> GHWCFG2_NUM_HOST_CHAN_SHIFT &
2761 GHWCFG2_NUM_HOST_CHAN_MASK >> GHWCFG2_NUM_HOST_CHAN_SHIFT);
2762 dev_dbg(hsotg->dev, "nonperio_tx_q_depth=0x%0x\n",
2763 hsotg->hwcfg2 >> GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT &
2764 GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK >>
2765 GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT);
2766 dev_dbg(hsotg->dev, "host_perio_tx_q_depth=0x%0x\n",
2767 hsotg->hwcfg2 >> GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT &
2768 GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK >>
2769 GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT);
2770 dev_dbg(hsotg->dev, "dev_token_q_depth=0x%0x\n",
2771 hsotg->hwcfg2 >> GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT &
2772		GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK >>
2773		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT);
2774
2775#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
2776 hsotg->frame_num_array = kzalloc(sizeof(*hsotg->frame_num_array) *
2777 FRAME_NUM_ARRAY_SIZE, GFP_KERNEL);
2778 if (!hsotg->frame_num_array)
2779 goto error2;
2780 hsotg->last_frame_num_array = kzalloc(
2781 sizeof(*hsotg->last_frame_num_array) *
2782 FRAME_NUM_ARRAY_SIZE, GFP_KERNEL);
2783 if (!hsotg->last_frame_num_array)
2784 goto error2;
2785 hsotg->last_frame_num = HFNUM_MAX_FRNUM;
2786#endif
2787
2788 hsotg->core_params = kzalloc(sizeof(*hsotg->core_params), GFP_KERNEL);
2789 if (!hsotg->core_params)
2790 goto error2;
2791
2792 dwc2_set_uninitialized((int *)hsotg->core_params,
2793 sizeof(*hsotg->core_params) / sizeof(int));
2794
2795 /* Validate parameter values */
2796 dwc2_set_parameters(hsotg, params);
2797
2798 /* Initialize the DWC_otg core, and select the Phy type */
2799 retval = dwc2_core_init(hsotg, true);
2800 if (retval)
2801 goto error2;
2802
2803 /*
2804 * Disable the global interrupt until all the interrupt handlers are
2805 * installed
2806 */
2807 dwc2_disable_global_interrupts(hsotg);
2808
2809 /* Create new workqueue and init work */
2810 hsotg->wq_otg = create_singlethread_workqueue("dwc_otg");
2811 if (!hsotg->wq_otg) {
2812 dev_err(hsotg->dev, "Failed to create workqueue\n");
2813 goto error2;
2814 }
2815 INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
2816
2817 hsotg->snpsid = readl(hsotg->regs + GSNPSID);
2818 dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x\n",
2819 hsotg->snpsid >> 12 & 0xf, hsotg->snpsid >> 8 & 0xf,
2820 hsotg->snpsid >> 4 & 0xf, hsotg->snpsid & 0xf);
2821
2822 setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected,
2823 (unsigned long)hsotg);
2824
2825 /* Initialize the non-periodic schedule */
2826 INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
2827 INIT_LIST_HEAD(&hsotg->non_periodic_sched_active);
2828
2829 /* Initialize the periodic schedule */
2830 INIT_LIST_HEAD(&hsotg->periodic_sched_inactive);
2831 INIT_LIST_HEAD(&hsotg->periodic_sched_ready);
2832 INIT_LIST_HEAD(&hsotg->periodic_sched_assigned);
2833 INIT_LIST_HEAD(&hsotg->periodic_sched_queued);
2834
2835 /*
2836 * Create a host channel descriptor for each host channel implemented
2837 * in the controller. Initialize the channel descriptor array.
2838 */
2839 INIT_LIST_HEAD(&hsotg->free_hc_list);
2840 num_channels = hsotg->core_params->host_channels;
2841 memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array));
2842
2843 for (i = 0; i < num_channels; i++) {
2844 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2845 if (channel == NULL)
2846 goto error3;
2847 channel->hc_num = i;
2848 hsotg->hc_ptr_array[i] = channel;
2849 }
2850
2851 /* Initialize hsotg start work */
2852 INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func);
2853
2854 /* Initialize port reset work */
2855 INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func);
2856
2857 /*
2858 * Allocate space for storing data on status transactions. Normally no
2859	 * data is sent, but this space acts as a bit bucket.
2862 */
2863 if (hsotg->core_params->dma_enable > 0)
2864 hsotg->status_buf = dma_alloc_coherent(hsotg->dev,
2865 DWC2_HCD_STATUS_BUF_SIZE,
2866 &hsotg->status_buf_dma, GFP_KERNEL);
2867 else
2868 hsotg->status_buf = kzalloc(DWC2_HCD_STATUS_BUF_SIZE,
2869 GFP_KERNEL);
2870
2871 if (!hsotg->status_buf)
2872 goto error3;
2873
2874 hsotg->otg_port = 1;
2875 hsotg->frame_list = NULL;
2876 hsotg->frame_list_dma = 0;
2877 hsotg->periodic_qh_count = 0;
2878
2879	/* Initialize lx_state to L3 disconnected state */
2880 hsotg->lx_state = DWC2_L3;
2881
2882 hcd->self.otg_port = hsotg->otg_port;
2883
2884 /* Don't support SG list at this point */
2885 hcd->self.sg_tablesize = 0;
2886
2887 /*
2888 * Finish generic HCD initialization and start the HCD. This function
2889 * allocates the DMA buffer pool, registers the USB bus, requests the
2890 * IRQ line, and calls hcd_start method.
2891 */
2892 retval = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
2893 if (retval < 0)
2894 goto error3;
2895
2896 dwc2_dump_global_registers(hsotg);
2897 dwc2_dump_host_registers(hsotg);
2898 dwc2_hcd_dump_state(hsotg);
2899
2900 dwc2_enable_global_interrupts(hsotg);
2901
2902 return 0;
2903
2904error3:
2905 dwc2_hcd_release(hsotg);
2906error2:
2907 kfree(hsotg->core_params);
2908
2909#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
2910 kfree(hsotg->last_frame_num_array);
2911 kfree(hsotg->frame_num_array);
2912#endif
2913
2914 usb_put_hcd(hcd);
2915error1:
2916 dev_err(dev, "%s() FAILED, returning %d\n", __func__, retval);
2917 return retval;
2918}
2919EXPORT_SYMBOL_GPL(dwc2_hcd_init);
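/*
 * Hedged usage sketch (the real bus glue lives outside this file, e.g. in
 * PCI or platform code): the caller is expected to have ioremap()ed the
 * controller registers into hsotg->regs before handing over to
 * dwc2_hcd_init(); teardown goes through dwc2_hcd_remove(). The wrapper
 * name below is illustrative only.
 */
static inline int dwc2_glue_probe_example(struct device *dev,
					  struct dwc2_hsotg *hsotg, int irq,
					  struct dwc2_core_params *params)
{
	/* hsotg->regs must already point at the mapped register space */
	return dwc2_hcd_init(dev, hsotg, irq, params);
}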
2920
2921/*
2922 * Removes the HCD.
2923 * Frees memory and resources associated with the HCD and deregisters the bus.
2924 */
2925void dwc2_hcd_remove(struct device *dev, struct dwc2_hsotg *hsotg)
2926{
2927 struct usb_hcd *hcd;
2928
2929 dev_dbg(dev, "DWC OTG HCD REMOVE\n");
2930
2931 hcd = dwc2_hsotg_to_hcd(hsotg);
2932 dev_dbg(dev, "hsotg->hcd = %p\n", hcd);
2933
2934 if (!hcd) {
2935 dev_dbg(dev, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n",
2936 __func__);
2937 return;
2938 }
2939
2940 usb_remove_hcd(hcd);
2941 hsotg->priv = NULL;
2942 dwc2_hcd_release(hsotg);
2943
2944#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
2945 kfree(hsotg->last_frame_num_array);
2946 kfree(hsotg->frame_num_array);
2947#endif
2948
2949 usb_put_hcd(hcd);
2950}
2951EXPORT_SYMBOL_GPL(dwc2_hcd_remove);