/*
 * core.c - DesignWare HS OTG Controller common routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The Core code provides basic services for accessing and managing the
 * DWC_otg hardware. These services are used by both the Host Controller
 * Driver and the Peripheral Controller Driver.
 */
42 #include <linux/kernel.h>
43 #include <linux/module.h>
44 #include <linux/moduleparam.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/delay.h>
50 #include <linux/slab.h>
51 #include <linux/usb.h>
53 #include <linux/usb/hcd.h>
54 #include <linux/usb/ch11.h>
59 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
61 * dwc2_backup_host_registers() - Backup controller host registers.
62 * When suspending usb bus, registers needs to be backuped
63 * if controller power is disabled once suspended.
65 * @hsotg: Programming view of the DWC_otg controller
67 static int dwc2_backup_host_registers(struct dwc2_hsotg
*hsotg
)
69 struct dwc2_hregs_backup
*hr
;
72 dev_dbg(hsotg
->dev
, "%s\n", __func__
);
74 /* Backup Host regs */
75 hr
= &hsotg
->hr_backup
;
76 hr
->hcfg
= dwc2_readl(hsotg
->regs
+ HCFG
);
77 hr
->haintmsk
= dwc2_readl(hsotg
->regs
+ HAINTMSK
);
78 for (i
= 0; i
< hsotg
->core_params
->host_channels
; ++i
)
79 hr
->hcintmsk
[i
] = dwc2_readl(hsotg
->regs
+ HCINTMSK(i
));
81 hr
->hprt0
= dwc2_read_hprt0(hsotg
);
82 hr
->hfir
= dwc2_readl(hsotg
->regs
+ HFIR
);
89 * dwc2_restore_host_registers() - Restore controller host registers.
90 * When resuming usb bus, device registers needs to be restored
91 * if controller power were disabled.
93 * @hsotg: Programming view of the DWC_otg controller
95 static int dwc2_restore_host_registers(struct dwc2_hsotg
*hsotg
)
97 struct dwc2_hregs_backup
*hr
;
100 dev_dbg(hsotg
->dev
, "%s\n", __func__
);
102 /* Restore host regs */
103 hr
= &hsotg
->hr_backup
;
105 dev_err(hsotg
->dev
, "%s: no host registers to restore\n",
111 dwc2_writel(hr
->hcfg
, hsotg
->regs
+ HCFG
);
112 dwc2_writel(hr
->haintmsk
, hsotg
->regs
+ HAINTMSK
);
114 for (i
= 0; i
< hsotg
->core_params
->host_channels
; ++i
)
115 dwc2_writel(hr
->hcintmsk
[i
], hsotg
->regs
+ HCINTMSK(i
));
117 dwc2_writel(hr
->hprt0
, hsotg
->regs
+ HPRT0
);
118 dwc2_writel(hr
->hfir
, hsotg
->regs
+ HFIR
);
119 hsotg
->frame_number
= 0;
/* Host mode not compiled in: backup is a successful no-op. */
static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
/* Host mode not compiled in: restore is a successful no-op. */
static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
131 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
132 IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
134 * dwc2_backup_device_registers() - Backup controller device registers.
135 * When suspending usb bus, registers needs to be backuped
136 * if controller power is disabled once suspended.
138 * @hsotg: Programming view of the DWC_otg controller
140 static int dwc2_backup_device_registers(struct dwc2_hsotg
*hsotg
)
142 struct dwc2_dregs_backup
*dr
;
145 dev_dbg(hsotg
->dev
, "%s\n", __func__
);
147 /* Backup dev regs */
148 dr
= &hsotg
->dr_backup
;
150 dr
->dcfg
= dwc2_readl(hsotg
->regs
+ DCFG
);
151 dr
->dctl
= dwc2_readl(hsotg
->regs
+ DCTL
);
152 dr
->daintmsk
= dwc2_readl(hsotg
->regs
+ DAINTMSK
);
153 dr
->diepmsk
= dwc2_readl(hsotg
->regs
+ DIEPMSK
);
154 dr
->doepmsk
= dwc2_readl(hsotg
->regs
+ DOEPMSK
);
156 for (i
= 0; i
< hsotg
->num_of_eps
; i
++) {
158 dr
->diepctl
[i
] = dwc2_readl(hsotg
->regs
+ DIEPCTL(i
));
160 /* Ensure DATA PID is correctly configured */
161 if (dr
->diepctl
[i
] & DXEPCTL_DPID
)
162 dr
->diepctl
[i
] |= DXEPCTL_SETD1PID
;
164 dr
->diepctl
[i
] |= DXEPCTL_SETD0PID
;
166 dr
->dieptsiz
[i
] = dwc2_readl(hsotg
->regs
+ DIEPTSIZ(i
));
167 dr
->diepdma
[i
] = dwc2_readl(hsotg
->regs
+ DIEPDMA(i
));
170 dr
->doepctl
[i
] = dwc2_readl(hsotg
->regs
+ DOEPCTL(i
));
172 /* Ensure DATA PID is correctly configured */
173 if (dr
->doepctl
[i
] & DXEPCTL_DPID
)
174 dr
->doepctl
[i
] |= DXEPCTL_SETD1PID
;
176 dr
->doepctl
[i
] |= DXEPCTL_SETD0PID
;
178 dr
->doeptsiz
[i
] = dwc2_readl(hsotg
->regs
+ DOEPTSIZ(i
));
179 dr
->doepdma
[i
] = dwc2_readl(hsotg
->regs
+ DOEPDMA(i
));
186 * dwc2_restore_device_registers() - Restore controller device registers.
187 * When resuming usb bus, device registers needs to be restored
188 * if controller power were disabled.
190 * @hsotg: Programming view of the DWC_otg controller
192 static int dwc2_restore_device_registers(struct dwc2_hsotg
*hsotg
)
194 struct dwc2_dregs_backup
*dr
;
198 dev_dbg(hsotg
->dev
, "%s\n", __func__
);
200 /* Restore dev regs */
201 dr
= &hsotg
->dr_backup
;
203 dev_err(hsotg
->dev
, "%s: no device registers to restore\n",
209 dwc2_writel(dr
->dcfg
, hsotg
->regs
+ DCFG
);
210 dwc2_writel(dr
->dctl
, hsotg
->regs
+ DCTL
);
211 dwc2_writel(dr
->daintmsk
, hsotg
->regs
+ DAINTMSK
);
212 dwc2_writel(dr
->diepmsk
, hsotg
->regs
+ DIEPMSK
);
213 dwc2_writel(dr
->doepmsk
, hsotg
->regs
+ DOEPMSK
);
215 for (i
= 0; i
< hsotg
->num_of_eps
; i
++) {
217 dwc2_writel(dr
->diepctl
[i
], hsotg
->regs
+ DIEPCTL(i
));
218 dwc2_writel(dr
->dieptsiz
[i
], hsotg
->regs
+ DIEPTSIZ(i
));
219 dwc2_writel(dr
->diepdma
[i
], hsotg
->regs
+ DIEPDMA(i
));
221 /* Restore OUT EPs */
222 dwc2_writel(dr
->doepctl
[i
], hsotg
->regs
+ DOEPCTL(i
));
223 dwc2_writel(dr
->doeptsiz
[i
], hsotg
->regs
+ DOEPTSIZ(i
));
224 dwc2_writel(dr
->doepdma
[i
], hsotg
->regs
+ DOEPDMA(i
));
227 /* Set the Power-On Programming done bit */
228 dctl
= dwc2_readl(hsotg
->regs
+ DCTL
);
229 dctl
|= DCTL_PWRONPRGDONE
;
230 dwc2_writel(dctl
, hsotg
->regs
+ DCTL
);
/* Device mode not compiled in: backup is a successful no-op. */
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
/* Device mode not compiled in: restore is a successful no-op. */
static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
243 * dwc2_backup_global_registers() - Backup global controller registers.
244 * When suspending usb bus, registers needs to be backuped
245 * if controller power is disabled once suspended.
247 * @hsotg: Programming view of the DWC_otg controller
249 static int dwc2_backup_global_registers(struct dwc2_hsotg
*hsotg
)
251 struct dwc2_gregs_backup
*gr
;
254 /* Backup global regs */
255 gr
= &hsotg
->gr_backup
;
257 gr
->gotgctl
= dwc2_readl(hsotg
->regs
+ GOTGCTL
);
258 gr
->gintmsk
= dwc2_readl(hsotg
->regs
+ GINTMSK
);
259 gr
->gahbcfg
= dwc2_readl(hsotg
->regs
+ GAHBCFG
);
260 gr
->gusbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
261 gr
->grxfsiz
= dwc2_readl(hsotg
->regs
+ GRXFSIZ
);
262 gr
->gnptxfsiz
= dwc2_readl(hsotg
->regs
+ GNPTXFSIZ
);
263 gr
->hptxfsiz
= dwc2_readl(hsotg
->regs
+ HPTXFSIZ
);
264 gr
->gdfifocfg
= dwc2_readl(hsotg
->regs
+ GDFIFOCFG
);
265 for (i
= 0; i
< MAX_EPS_CHANNELS
; i
++)
266 gr
->dtxfsiz
[i
] = dwc2_readl(hsotg
->regs
+ DPTXFSIZN(i
));
273 * dwc2_restore_global_registers() - Restore controller global registers.
274 * When resuming usb bus, device registers needs to be restored
275 * if controller power were disabled.
277 * @hsotg: Programming view of the DWC_otg controller
279 static int dwc2_restore_global_registers(struct dwc2_hsotg
*hsotg
)
281 struct dwc2_gregs_backup
*gr
;
284 dev_dbg(hsotg
->dev
, "%s\n", __func__
);
286 /* Restore global regs */
287 gr
= &hsotg
->gr_backup
;
289 dev_err(hsotg
->dev
, "%s: no global registers to restore\n",
295 dwc2_writel(0xffffffff, hsotg
->regs
+ GINTSTS
);
296 dwc2_writel(gr
->gotgctl
, hsotg
->regs
+ GOTGCTL
);
297 dwc2_writel(gr
->gintmsk
, hsotg
->regs
+ GINTMSK
);
298 dwc2_writel(gr
->gusbcfg
, hsotg
->regs
+ GUSBCFG
);
299 dwc2_writel(gr
->gahbcfg
, hsotg
->regs
+ GAHBCFG
);
300 dwc2_writel(gr
->grxfsiz
, hsotg
->regs
+ GRXFSIZ
);
301 dwc2_writel(gr
->gnptxfsiz
, hsotg
->regs
+ GNPTXFSIZ
);
302 dwc2_writel(gr
->hptxfsiz
, hsotg
->regs
+ HPTXFSIZ
);
303 dwc2_writel(gr
->gdfifocfg
, hsotg
->regs
+ GDFIFOCFG
);
304 for (i
= 0; i
< MAX_EPS_CHANNELS
; i
++)
305 dwc2_writel(gr
->dtxfsiz
[i
], hsotg
->regs
+ DPTXFSIZN(i
));
311 * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
313 * @hsotg: Programming view of the DWC_otg controller
314 * @restore: Controller registers need to be restored
316 int dwc2_exit_hibernation(struct dwc2_hsotg
*hsotg
, bool restore
)
321 if (!hsotg
->core_params
->hibernation
)
324 pcgcctl
= dwc2_readl(hsotg
->regs
+ PCGCTL
);
325 pcgcctl
&= ~PCGCTL_STOPPCLK
;
326 dwc2_writel(pcgcctl
, hsotg
->regs
+ PCGCTL
);
328 pcgcctl
= dwc2_readl(hsotg
->regs
+ PCGCTL
);
329 pcgcctl
&= ~PCGCTL_PWRCLMP
;
330 dwc2_writel(pcgcctl
, hsotg
->regs
+ PCGCTL
);
332 pcgcctl
= dwc2_readl(hsotg
->regs
+ PCGCTL
);
333 pcgcctl
&= ~PCGCTL_RSTPDWNMODULE
;
334 dwc2_writel(pcgcctl
, hsotg
->regs
+ PCGCTL
);
338 ret
= dwc2_restore_global_registers(hsotg
);
340 dev_err(hsotg
->dev
, "%s: failed to restore registers\n",
344 if (dwc2_is_host_mode(hsotg
)) {
345 ret
= dwc2_restore_host_registers(hsotg
);
347 dev_err(hsotg
->dev
, "%s: failed to restore host registers\n",
352 ret
= dwc2_restore_device_registers(hsotg
);
354 dev_err(hsotg
->dev
, "%s: failed to restore device registers\n",
365 * dwc2_enter_hibernation() - Put controller in Partial Power Down.
367 * @hsotg: Programming view of the DWC_otg controller
369 int dwc2_enter_hibernation(struct dwc2_hsotg
*hsotg
)
374 if (!hsotg
->core_params
->hibernation
)
377 /* Backup all registers */
378 ret
= dwc2_backup_global_registers(hsotg
);
380 dev_err(hsotg
->dev
, "%s: failed to backup global registers\n",
385 if (dwc2_is_host_mode(hsotg
)) {
386 ret
= dwc2_backup_host_registers(hsotg
);
388 dev_err(hsotg
->dev
, "%s: failed to backup host registers\n",
393 ret
= dwc2_backup_device_registers(hsotg
);
395 dev_err(hsotg
->dev
, "%s: failed to backup device registers\n",
402 * Clear any pending interrupts since dwc2 will not be able to
403 * clear them after entering hibernation.
405 dwc2_writel(0xffffffff, hsotg
->regs
+ GINTSTS
);
407 /* Put the controller in low power state */
408 pcgcctl
= dwc2_readl(hsotg
->regs
+ PCGCTL
);
410 pcgcctl
|= PCGCTL_PWRCLMP
;
411 dwc2_writel(pcgcctl
, hsotg
->regs
+ PCGCTL
);
414 pcgcctl
|= PCGCTL_RSTPDWNMODULE
;
415 dwc2_writel(pcgcctl
, hsotg
->regs
+ PCGCTL
);
418 pcgcctl
|= PCGCTL_STOPPCLK
;
419 dwc2_writel(pcgcctl
, hsotg
->regs
+ PCGCTL
);
425 * dwc2_enable_common_interrupts() - Initializes the commmon interrupts,
426 * used in both device and host modes
428 * @hsotg: Programming view of the DWC_otg controller
430 static void dwc2_enable_common_interrupts(struct dwc2_hsotg
*hsotg
)
434 /* Clear any pending OTG Interrupts */
435 dwc2_writel(0xffffffff, hsotg
->regs
+ GOTGINT
);
437 /* Clear any pending interrupts */
438 dwc2_writel(0xffffffff, hsotg
->regs
+ GINTSTS
);
440 /* Enable the interrupts in the GINTMSK */
441 intmsk
= GINTSTS_MODEMIS
| GINTSTS_OTGINT
;
443 if (hsotg
->core_params
->dma_enable
<= 0)
444 intmsk
|= GINTSTS_RXFLVL
;
445 if (hsotg
->core_params
->external_id_pin_ctl
<= 0)
446 intmsk
|= GINTSTS_CONIDSTSCHNG
;
448 intmsk
|= GINTSTS_WKUPINT
| GINTSTS_USBSUSP
|
451 dwc2_writel(intmsk
, hsotg
->regs
+ GINTMSK
);
455 * Initializes the FSLSPClkSel field of the HCFG register depending on the
458 static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg
*hsotg
)
462 if ((hsotg
->hw_params
.hs_phy_type
== GHWCFG2_HS_PHY_TYPE_ULPI
&&
463 hsotg
->hw_params
.fs_phy_type
== GHWCFG2_FS_PHY_TYPE_DEDICATED
&&
464 hsotg
->core_params
->ulpi_fs_ls
> 0) ||
465 hsotg
->core_params
->phy_type
== DWC2_PHY_TYPE_PARAM_FS
) {
467 val
= HCFG_FSLSPCLKSEL_48_MHZ
;
469 /* High speed PHY running at full speed or high speed */
470 val
= HCFG_FSLSPCLKSEL_30_60_MHZ
;
473 dev_dbg(hsotg
->dev
, "Initializing HCFG.FSLSPClkSel to %08x\n", val
);
474 hcfg
= dwc2_readl(hsotg
->regs
+ HCFG
);
475 hcfg
&= ~HCFG_FSLSPCLKSEL_MASK
;
476 hcfg
|= val
<< HCFG_FSLSPCLKSEL_SHIFT
;
477 dwc2_writel(hcfg
, hsotg
->regs
+ HCFG
);
481 * Do core a soft reset of the core. Be careful with this because it
482 * resets all the internal state machines of the core.
484 int dwc2_core_reset(struct dwc2_hsotg
*hsotg
)
489 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
491 /* Core Soft Reset */
492 greset
= dwc2_readl(hsotg
->regs
+ GRSTCTL
);
493 greset
|= GRSTCTL_CSFTRST
;
494 dwc2_writel(greset
, hsotg
->regs
+ GRSTCTL
);
497 greset
= dwc2_readl(hsotg
->regs
+ GRSTCTL
);
500 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
504 } while (greset
& GRSTCTL_CSFTRST
);
506 /* Wait for AHB master IDLE state */
510 greset
= dwc2_readl(hsotg
->regs
+ GRSTCTL
);
513 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
517 } while (!(greset
& GRSTCTL_AHBIDLE
));
523 * Force the mode of the controller.
525 * Forcing the mode is needed for two cases:
527 * 1) If the dr_mode is set to either HOST or PERIPHERAL we force the
528 * controller to stay in a particular mode regardless of ID pin
529 * changes. We do this usually after a core reset.
531 * 2) During probe we want to read reset values of the hw
532 * configuration registers that are only available in either host or
533 * device mode. We may need to force the mode if the current mode does
534 * not allow us to access the register in the mode that we want.
536 * In either case it only makes sense to force the mode if the
537 * controller hardware is OTG capable.
539 * Checks are done in this function to determine whether doing a force
540 * would be valid or not.
542 * If a force is done, it requires a 25ms delay to take effect.
544 * Returns true if the mode was forced.
546 static bool dwc2_force_mode(struct dwc2_hsotg
*hsotg
, bool host
)
552 dev_dbg(hsotg
->dev
, "Forcing mode to %s\n", host
? "host" : "device");
555 * Force mode has no effect if the hardware is not OTG.
557 if (!dwc2_hw_is_otg(hsotg
))
561 * If dr_mode is either peripheral or host only, there is no
562 * need to ever force the mode to the opposite mode.
564 if (WARN_ON(host
&& hsotg
->dr_mode
== USB_DR_MODE_PERIPHERAL
))
567 if (WARN_ON(!host
&& hsotg
->dr_mode
== USB_DR_MODE_HOST
))
570 gusbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
572 set
= host
? GUSBCFG_FORCEHOSTMODE
: GUSBCFG_FORCEDEVMODE
;
573 clear
= host
? GUSBCFG_FORCEDEVMODE
: GUSBCFG_FORCEHOSTMODE
;
577 dwc2_writel(gusbcfg
, hsotg
->regs
+ GUSBCFG
);
584 * Clears the force mode bits.
586 static void dwc2_clear_force_mode(struct dwc2_hsotg
*hsotg
)
590 gusbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
591 gusbcfg
&= ~GUSBCFG_FORCEHOSTMODE
;
592 gusbcfg
&= ~GUSBCFG_FORCEDEVMODE
;
593 dwc2_writel(gusbcfg
, hsotg
->regs
+ GUSBCFG
);
596 * NOTE: This long sleep is _very_ important, otherwise the core will
597 * not stay in host mode after a connector ID change!
603 * Sets or clears force mode based on the dr_mode parameter.
605 void dwc2_force_dr_mode(struct dwc2_hsotg
*hsotg
)
607 switch (hsotg
->dr_mode
) {
608 case USB_DR_MODE_HOST
:
609 dwc2_force_mode(hsotg
, true);
611 case USB_DR_MODE_PERIPHERAL
:
612 dwc2_force_mode(hsotg
, false);
614 case USB_DR_MODE_OTG
:
615 dwc2_clear_force_mode(hsotg
);
618 dev_warn(hsotg
->dev
, "%s() Invalid dr_mode=%d\n",
619 __func__
, hsotg
->dr_mode
);
624 * NOTE: This is required for some rockchip soc based
/*
 * Do core a soft reset of the core. Be careful with this because it
 * resets all the internal state machines of the core.
 *
 * Additionally this will apply force mode as per the hsotg->dr_mode
 * parameter.
 */
int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg)
{
	int retval;

	retval = dwc2_core_reset(hsotg);
	if (retval)
		return retval;

	dwc2_force_dr_mode(hsotg);
	return 0;
}
649 static int dwc2_fs_phy_init(struct dwc2_hsotg
*hsotg
, bool select_phy
)
655 * core_init() is now called on every switch so only call the
656 * following for the first time through
659 dev_dbg(hsotg
->dev
, "FS PHY selected\n");
661 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
662 if (!(usbcfg
& GUSBCFG_PHYSEL
)) {
663 usbcfg
|= GUSBCFG_PHYSEL
;
664 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
666 /* Reset after a PHY select */
667 retval
= dwc2_core_reset_and_force_dr_mode(hsotg
);
671 "%s: Reset failed, aborting", __func__
);
678 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
679 * do this on HNP Dev/Host mode switches (done in dev_init and
682 if (dwc2_is_host_mode(hsotg
))
683 dwc2_init_fs_ls_pclk_sel(hsotg
);
685 if (hsotg
->core_params
->i2c_enable
> 0) {
686 dev_dbg(hsotg
->dev
, "FS PHY enabling I2C\n");
688 /* Program GUSBCFG.OtgUtmiFsSel to I2C */
689 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
690 usbcfg
|= GUSBCFG_OTG_UTMI_FS_SEL
;
691 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
693 /* Program GI2CCTL.I2CEn */
694 i2cctl
= dwc2_readl(hsotg
->regs
+ GI2CCTL
);
695 i2cctl
&= ~GI2CCTL_I2CDEVADDR_MASK
;
696 i2cctl
|= 1 << GI2CCTL_I2CDEVADDR_SHIFT
;
697 i2cctl
&= ~GI2CCTL_I2CEN
;
698 dwc2_writel(i2cctl
, hsotg
->regs
+ GI2CCTL
);
699 i2cctl
|= GI2CCTL_I2CEN
;
700 dwc2_writel(i2cctl
, hsotg
->regs
+ GI2CCTL
);
706 static int dwc2_hs_phy_init(struct dwc2_hsotg
*hsotg
, bool select_phy
)
708 u32 usbcfg
, usbcfg_old
;
714 usbcfg
= usbcfg_old
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
717 * HS PHY parameters. These parameters are preserved during soft reset
718 * so only program the first time. Do a soft reset immediately after
721 switch (hsotg
->core_params
->phy_type
) {
722 case DWC2_PHY_TYPE_PARAM_ULPI
:
724 dev_dbg(hsotg
->dev
, "HS ULPI PHY selected\n");
725 usbcfg
|= GUSBCFG_ULPI_UTMI_SEL
;
726 usbcfg
&= ~(GUSBCFG_PHYIF16
| GUSBCFG_DDRSEL
);
727 if (hsotg
->core_params
->phy_ulpi_ddr
> 0)
728 usbcfg
|= GUSBCFG_DDRSEL
;
730 case DWC2_PHY_TYPE_PARAM_UTMI
:
731 /* UTMI+ interface */
732 dev_dbg(hsotg
->dev
, "HS UTMI+ PHY selected\n");
733 usbcfg
&= ~(GUSBCFG_ULPI_UTMI_SEL
| GUSBCFG_PHYIF16
);
734 if (hsotg
->core_params
->phy_utmi_width
== 16)
735 usbcfg
|= GUSBCFG_PHYIF16
;
738 dev_err(hsotg
->dev
, "FS PHY selected at HS!\n");
742 if (usbcfg
!= usbcfg_old
) {
743 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
745 /* Reset after setting the PHY parameters */
746 retval
= dwc2_core_reset_and_force_dr_mode(hsotg
);
749 "%s: Reset failed, aborting", __func__
);
757 static int dwc2_phy_init(struct dwc2_hsotg
*hsotg
, bool select_phy
)
762 if (hsotg
->core_params
->speed
== DWC2_SPEED_PARAM_FULL
&&
763 hsotg
->core_params
->phy_type
== DWC2_PHY_TYPE_PARAM_FS
) {
764 /* If FS mode with FS PHY */
765 retval
= dwc2_fs_phy_init(hsotg
, select_phy
);
770 retval
= dwc2_hs_phy_init(hsotg
, select_phy
);
775 if (hsotg
->hw_params
.hs_phy_type
== GHWCFG2_HS_PHY_TYPE_ULPI
&&
776 hsotg
->hw_params
.fs_phy_type
== GHWCFG2_FS_PHY_TYPE_DEDICATED
&&
777 hsotg
->core_params
->ulpi_fs_ls
> 0) {
778 dev_dbg(hsotg
->dev
, "Setting ULPI FSLS\n");
779 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
780 usbcfg
|= GUSBCFG_ULPI_FS_LS
;
781 usbcfg
|= GUSBCFG_ULPI_CLK_SUSP_M
;
782 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
784 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
785 usbcfg
&= ~GUSBCFG_ULPI_FS_LS
;
786 usbcfg
&= ~GUSBCFG_ULPI_CLK_SUSP_M
;
787 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
793 static int dwc2_gahbcfg_init(struct dwc2_hsotg
*hsotg
)
795 u32 ahbcfg
= dwc2_readl(hsotg
->regs
+ GAHBCFG
);
797 switch (hsotg
->hw_params
.arch
) {
798 case GHWCFG2_EXT_DMA_ARCH
:
799 dev_err(hsotg
->dev
, "External DMA Mode not supported\n");
802 case GHWCFG2_INT_DMA_ARCH
:
803 dev_dbg(hsotg
->dev
, "Internal DMA Mode\n");
804 if (hsotg
->core_params
->ahbcfg
!= -1) {
805 ahbcfg
&= GAHBCFG_CTRL_MASK
;
806 ahbcfg
|= hsotg
->core_params
->ahbcfg
&
811 case GHWCFG2_SLAVE_ONLY_ARCH
:
813 dev_dbg(hsotg
->dev
, "Slave Only Mode\n");
817 dev_dbg(hsotg
->dev
, "dma_enable:%d dma_desc_enable:%d\n",
818 hsotg
->core_params
->dma_enable
,
819 hsotg
->core_params
->dma_desc_enable
);
821 if (hsotg
->core_params
->dma_enable
> 0) {
822 if (hsotg
->core_params
->dma_desc_enable
> 0)
823 dev_dbg(hsotg
->dev
, "Using Descriptor DMA mode\n");
825 dev_dbg(hsotg
->dev
, "Using Buffer DMA mode\n");
827 dev_dbg(hsotg
->dev
, "Using Slave mode\n");
828 hsotg
->core_params
->dma_desc_enable
= 0;
831 if (hsotg
->core_params
->dma_enable
> 0)
832 ahbcfg
|= GAHBCFG_DMA_EN
;
834 dwc2_writel(ahbcfg
, hsotg
->regs
+ GAHBCFG
);
839 static void dwc2_gusbcfg_init(struct dwc2_hsotg
*hsotg
)
843 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
844 usbcfg
&= ~(GUSBCFG_HNPCAP
| GUSBCFG_SRPCAP
);
846 switch (hsotg
->hw_params
.op_mode
) {
847 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE
:
848 if (hsotg
->core_params
->otg_cap
==
849 DWC2_CAP_PARAM_HNP_SRP_CAPABLE
)
850 usbcfg
|= GUSBCFG_HNPCAP
;
851 if (hsotg
->core_params
->otg_cap
!=
852 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE
)
853 usbcfg
|= GUSBCFG_SRPCAP
;
856 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE
:
857 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE
:
858 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST
:
859 if (hsotg
->core_params
->otg_cap
!=
860 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE
)
861 usbcfg
|= GUSBCFG_SRPCAP
;
864 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE
:
865 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE
:
866 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST
:
871 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
875 * dwc2_core_init() - Initializes the DWC_otg controller registers and
876 * prepares the core for device mode or host mode operation
878 * @hsotg: Programming view of the DWC_otg controller
879 * @initial_setup: If true then this is the first init for this instance.
881 int dwc2_core_init(struct dwc2_hsotg
*hsotg
, bool initial_setup
)
886 dev_dbg(hsotg
->dev
, "%s(%p)\n", __func__
, hsotg
);
888 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
890 /* Set ULPI External VBUS bit if needed */
891 usbcfg
&= ~GUSBCFG_ULPI_EXT_VBUS_DRV
;
892 if (hsotg
->core_params
->phy_ulpi_ext_vbus
==
893 DWC2_PHY_ULPI_EXTERNAL_VBUS
)
894 usbcfg
|= GUSBCFG_ULPI_EXT_VBUS_DRV
;
896 /* Set external TS Dline pulsing bit if needed */
897 usbcfg
&= ~GUSBCFG_TERMSELDLPULSE
;
898 if (hsotg
->core_params
->ts_dline
> 0)
899 usbcfg
|= GUSBCFG_TERMSELDLPULSE
;
901 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
904 * Reset the Controller
906 * We only need to reset the controller if this is a re-init.
907 * For the first init we know for sure that earlier code reset us (it
908 * needed to in order to properly detect various parameters).
910 if (!initial_setup
) {
911 retval
= dwc2_core_reset_and_force_dr_mode(hsotg
);
913 dev_err(hsotg
->dev
, "%s(): Reset failed, aborting\n",
920 * This needs to happen in FS mode before any other programming occurs
922 retval
= dwc2_phy_init(hsotg
, initial_setup
);
926 /* Program the GAHBCFG Register */
927 retval
= dwc2_gahbcfg_init(hsotg
);
931 /* Program the GUSBCFG register */
932 dwc2_gusbcfg_init(hsotg
);
934 /* Program the GOTGCTL register */
935 otgctl
= dwc2_readl(hsotg
->regs
+ GOTGCTL
);
936 otgctl
&= ~GOTGCTL_OTGVER
;
937 if (hsotg
->core_params
->otg_ver
> 0)
938 otgctl
|= GOTGCTL_OTGVER
;
939 dwc2_writel(otgctl
, hsotg
->regs
+ GOTGCTL
);
940 dev_dbg(hsotg
->dev
, "OTG VER PARAM: %d\n", hsotg
->core_params
->otg_ver
);
942 /* Clear the SRP success bit for FS-I2c */
943 hsotg
->srp_success
= 0;
945 /* Enable common interrupts */
946 dwc2_enable_common_interrupts(hsotg
);
949 * Do device or host initialization based on mode during PCD and
952 if (dwc2_is_host_mode(hsotg
)) {
953 dev_dbg(hsotg
->dev
, "Host Mode\n");
954 hsotg
->op_state
= OTG_STATE_A_HOST
;
956 dev_dbg(hsotg
->dev
, "Device Mode\n");
957 hsotg
->op_state
= OTG_STATE_B_PERIPHERAL
;
964 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
966 * @hsotg: Programming view of DWC_otg controller
968 void dwc2_enable_host_interrupts(struct dwc2_hsotg
*hsotg
)
972 dev_dbg(hsotg
->dev
, "%s()\n", __func__
);
974 /* Disable all interrupts */
975 dwc2_writel(0, hsotg
->regs
+ GINTMSK
);
976 dwc2_writel(0, hsotg
->regs
+ HAINTMSK
);
978 /* Enable the common interrupts */
979 dwc2_enable_common_interrupts(hsotg
);
981 /* Enable host mode interrupts without disturbing common interrupts */
982 intmsk
= dwc2_readl(hsotg
->regs
+ GINTMSK
);
983 intmsk
|= GINTSTS_DISCONNINT
| GINTSTS_PRTINT
| GINTSTS_HCHINT
;
984 dwc2_writel(intmsk
, hsotg
->regs
+ GINTMSK
);
988 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
990 * @hsotg: Programming view of DWC_otg controller
992 void dwc2_disable_host_interrupts(struct dwc2_hsotg
*hsotg
)
994 u32 intmsk
= dwc2_readl(hsotg
->regs
+ GINTMSK
);
996 /* Disable host mode interrupts without disturbing common interrupts */
997 intmsk
&= ~(GINTSTS_SOF
| GINTSTS_PRTINT
| GINTSTS_HCHINT
|
998 GINTSTS_PTXFEMP
| GINTSTS_NPTXFEMP
| GINTSTS_DISCONNINT
);
999 dwc2_writel(intmsk
, hsotg
->regs
+ GINTMSK
);
1003 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
1004 * For system that have a total fifo depth that is smaller than the default
1005 * RX + TX fifo size.
1007 * @hsotg: Programming view of DWC_otg controller
1009 static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg
*hsotg
)
1011 struct dwc2_core_params
*params
= hsotg
->core_params
;
1012 struct dwc2_hw_params
*hw
= &hsotg
->hw_params
;
1013 u32 rxfsiz
, nptxfsiz
, ptxfsiz
, total_fifo_size
;
1015 total_fifo_size
= hw
->total_fifo_size
;
1016 rxfsiz
= params
->host_rx_fifo_size
;
1017 nptxfsiz
= params
->host_nperio_tx_fifo_size
;
1018 ptxfsiz
= params
->host_perio_tx_fifo_size
;
1021 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
1022 * allocation with support for high bandwidth endpoints. Synopsys
1023 * defines MPS(Max Packet size) for a periodic EP=1024, and for
1024 * non-periodic as 512.
1026 if (total_fifo_size
< (rxfsiz
+ nptxfsiz
+ ptxfsiz
)) {
1028 * For Buffer DMA mode/Scatter Gather DMA mode
1029 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
1030 * with n = number of host channel.
1031 * 2 * ((1024/4) + 2) = 516
1033 rxfsiz
= 516 + hw
->host_channels
;
1036 * min non-periodic tx fifo depth
1037 * 2 * (largest non-periodic USB packet used / 4)
1043 * min periodic tx fifo depth
1044 * (largest packet size*MC)/4
1045 * (1024 * 3)/4 = 768
1049 params
->host_rx_fifo_size
= rxfsiz
;
1050 params
->host_nperio_tx_fifo_size
= nptxfsiz
;
1051 params
->host_perio_tx_fifo_size
= ptxfsiz
;
1055 * If the summation of RX, NPTX and PTX fifo sizes is still
1056 * bigger than the total_fifo_size, then we have a problem.
1058 * We won't be able to allocate as many endpoints. Right now,
1059 * we're just printing an error message, but ideally this FIFO
1060 * allocation algorithm would be improved in the future.
1062 * FIXME improve this FIFO allocation algorithm.
1064 if (unlikely(total_fifo_size
< (rxfsiz
+ nptxfsiz
+ ptxfsiz
)))
1065 dev_err(hsotg
->dev
, "invalid fifo sizes\n");
1068 static void dwc2_config_fifos(struct dwc2_hsotg
*hsotg
)
1070 struct dwc2_core_params
*params
= hsotg
->core_params
;
1071 u32 nptxfsiz
, hptxfsiz
, dfifocfg
, grxfsiz
;
1073 if (!params
->enable_dynamic_fifo
)
1076 dwc2_calculate_dynamic_fifo(hsotg
);
1079 grxfsiz
= dwc2_readl(hsotg
->regs
+ GRXFSIZ
);
1080 dev_dbg(hsotg
->dev
, "initial grxfsiz=%08x\n", grxfsiz
);
1081 grxfsiz
&= ~GRXFSIZ_DEPTH_MASK
;
1082 grxfsiz
|= params
->host_rx_fifo_size
<<
1083 GRXFSIZ_DEPTH_SHIFT
& GRXFSIZ_DEPTH_MASK
;
1084 dwc2_writel(grxfsiz
, hsotg
->regs
+ GRXFSIZ
);
1085 dev_dbg(hsotg
->dev
, "new grxfsiz=%08x\n",
1086 dwc2_readl(hsotg
->regs
+ GRXFSIZ
));
1088 /* Non-periodic Tx FIFO */
1089 dev_dbg(hsotg
->dev
, "initial gnptxfsiz=%08x\n",
1090 dwc2_readl(hsotg
->regs
+ GNPTXFSIZ
));
1091 nptxfsiz
= params
->host_nperio_tx_fifo_size
<<
1092 FIFOSIZE_DEPTH_SHIFT
& FIFOSIZE_DEPTH_MASK
;
1093 nptxfsiz
|= params
->host_rx_fifo_size
<<
1094 FIFOSIZE_STARTADDR_SHIFT
& FIFOSIZE_STARTADDR_MASK
;
1095 dwc2_writel(nptxfsiz
, hsotg
->regs
+ GNPTXFSIZ
);
1096 dev_dbg(hsotg
->dev
, "new gnptxfsiz=%08x\n",
1097 dwc2_readl(hsotg
->regs
+ GNPTXFSIZ
));
1099 /* Periodic Tx FIFO */
1100 dev_dbg(hsotg
->dev
, "initial hptxfsiz=%08x\n",
1101 dwc2_readl(hsotg
->regs
+ HPTXFSIZ
));
1102 hptxfsiz
= params
->host_perio_tx_fifo_size
<<
1103 FIFOSIZE_DEPTH_SHIFT
& FIFOSIZE_DEPTH_MASK
;
1104 hptxfsiz
|= (params
->host_rx_fifo_size
+
1105 params
->host_nperio_tx_fifo_size
) <<
1106 FIFOSIZE_STARTADDR_SHIFT
& FIFOSIZE_STARTADDR_MASK
;
1107 dwc2_writel(hptxfsiz
, hsotg
->regs
+ HPTXFSIZ
);
1108 dev_dbg(hsotg
->dev
, "new hptxfsiz=%08x\n",
1109 dwc2_readl(hsotg
->regs
+ HPTXFSIZ
));
1111 if (hsotg
->core_params
->en_multiple_tx_fifo
> 0 &&
1112 hsotg
->hw_params
.snpsid
<= DWC2_CORE_REV_2_94a
) {
1114 * Global DFIFOCFG calculation for Host mode -
1115 * include RxFIFO, NPTXFIFO and HPTXFIFO
1117 dfifocfg
= dwc2_readl(hsotg
->regs
+ GDFIFOCFG
);
1118 dfifocfg
&= ~GDFIFOCFG_EPINFOBASE_MASK
;
1119 dfifocfg
|= (params
->host_rx_fifo_size
+
1120 params
->host_nperio_tx_fifo_size
+
1121 params
->host_perio_tx_fifo_size
) <<
1122 GDFIFOCFG_EPINFOBASE_SHIFT
&
1123 GDFIFOCFG_EPINFOBASE_MASK
;
1124 dwc2_writel(dfifocfg
, hsotg
->regs
+ GDFIFOCFG
);
1129 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
1132 * @hsotg: Programming view of DWC_otg controller
1134 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
1135 * request queues. Host channels are reset to ensure that they are ready for
1136 * performing transfers.
1138 void dwc2_core_host_init(struct dwc2_hsotg
*hsotg
)
1140 u32 hcfg
, hfir
, otgctl
;
1142 dev_dbg(hsotg
->dev
, "%s(%p)\n", __func__
, hsotg
);
1144 /* Restart the Phy Clock */
1145 dwc2_writel(0, hsotg
->regs
+ PCGCTL
);
1147 /* Initialize Host Configuration Register */
1148 dwc2_init_fs_ls_pclk_sel(hsotg
);
1149 if (hsotg
->core_params
->speed
== DWC2_SPEED_PARAM_FULL
) {
1150 hcfg
= dwc2_readl(hsotg
->regs
+ HCFG
);
1151 hcfg
|= HCFG_FSLSSUPP
;
1152 dwc2_writel(hcfg
, hsotg
->regs
+ HCFG
);
1156 * This bit allows dynamic reloading of the HFIR register during
1157 * runtime. This bit needs to be programmed during initial configuration
1158 * and its value must not be changed during runtime.
1160 if (hsotg
->core_params
->reload_ctl
> 0) {
1161 hfir
= dwc2_readl(hsotg
->regs
+ HFIR
);
1162 hfir
|= HFIR_RLDCTRL
;
1163 dwc2_writel(hfir
, hsotg
->regs
+ HFIR
);
1166 if (hsotg
->core_params
->dma_desc_enable
> 0) {
1167 u32 op_mode
= hsotg
->hw_params
.op_mode
;
1168 if (hsotg
->hw_params
.snpsid
< DWC2_CORE_REV_2_90a
||
1169 !hsotg
->hw_params
.dma_desc_enable
||
1170 op_mode
== GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE
||
1171 op_mode
== GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE
||
1172 op_mode
== GHWCFG2_OP_MODE_UNDEFINED
) {
1174 "Hardware does not support descriptor DMA mode -\n");
1176 "falling back to buffer DMA mode.\n");
1177 hsotg
->core_params
->dma_desc_enable
= 0;
1179 hcfg
= dwc2_readl(hsotg
->regs
+ HCFG
);
1180 hcfg
|= HCFG_DESCDMA
;
1181 dwc2_writel(hcfg
, hsotg
->regs
+ HCFG
);
1185 /* Configure data FIFO sizes */
1186 dwc2_config_fifos(hsotg
);
1188 /* TODO - check this */
1189 /* Clear Host Set HNP Enable in the OTG Control Register */
1190 otgctl
= dwc2_readl(hsotg
->regs
+ GOTGCTL
);
1191 otgctl
&= ~GOTGCTL_HSTSETHNPEN
;
1192 dwc2_writel(otgctl
, hsotg
->regs
+ GOTGCTL
);
1194 /* Make sure the FIFOs are flushed */
1195 dwc2_flush_tx_fifo(hsotg
, 0x10 /* all TX FIFOs */);
1196 dwc2_flush_rx_fifo(hsotg
);
1198 /* Clear Host Set HNP Enable in the OTG Control Register */
1199 otgctl
= dwc2_readl(hsotg
->regs
+ GOTGCTL
);
1200 otgctl
&= ~GOTGCTL_HSTSETHNPEN
;
1201 dwc2_writel(otgctl
, hsotg
->regs
+ GOTGCTL
);
1203 if (hsotg
->core_params
->dma_desc_enable
<= 0) {
1204 int num_channels
, i
;
1207 /* Flush out any leftover queued requests */
1208 num_channels
= hsotg
->core_params
->host_channels
;
1209 for (i
= 0; i
< num_channels
; i
++) {
1210 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(i
));
1211 hcchar
&= ~HCCHAR_CHENA
;
1212 hcchar
|= HCCHAR_CHDIS
;
1213 hcchar
&= ~HCCHAR_EPDIR
;
1214 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(i
));
1217 /* Halt all channels to put them into a known state */
1218 for (i
= 0; i
< num_channels
; i
++) {
1221 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(i
));
1222 hcchar
|= HCCHAR_CHENA
| HCCHAR_CHDIS
;
1223 hcchar
&= ~HCCHAR_EPDIR
;
1224 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(i
));
1225 dev_dbg(hsotg
->dev
, "%s: Halt channel %d\n",
1228 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(i
));
1229 if (++count
> 1000) {
1231 "Unable to clear enable on channel %d\n",
1236 } while (hcchar
& HCCHAR_CHENA
);
1240 /* Turn on the vbus power */
1241 dev_dbg(hsotg
->dev
, "Init: Port Power? op_state=%d\n", hsotg
->op_state
);
1242 if (hsotg
->op_state
== OTG_STATE_A_HOST
) {
1243 u32 hprt0
= dwc2_read_hprt0(hsotg
);
1245 dev_dbg(hsotg
->dev
, "Init: Power Port (%d)\n",
1246 !!(hprt0
& HPRT0_PWR
));
1247 if (!(hprt0
& HPRT0_PWR
)) {
1249 dwc2_writel(hprt0
, hsotg
->regs
+ HPRT0
);
1253 dwc2_enable_host_interrupts(hsotg
);
1256 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg
*hsotg
,
1257 struct dwc2_host_chan
*chan
)
1259 u32 hcintmsk
= HCINTMSK_CHHLTD
;
1261 switch (chan
->ep_type
) {
1262 case USB_ENDPOINT_XFER_CONTROL
:
1263 case USB_ENDPOINT_XFER_BULK
:
1264 dev_vdbg(hsotg
->dev
, "control/bulk\n");
1265 hcintmsk
|= HCINTMSK_XFERCOMPL
;
1266 hcintmsk
|= HCINTMSK_STALL
;
1267 hcintmsk
|= HCINTMSK_XACTERR
;
1268 hcintmsk
|= HCINTMSK_DATATGLERR
;
1269 if (chan
->ep_is_in
) {
1270 hcintmsk
|= HCINTMSK_BBLERR
;
1272 hcintmsk
|= HCINTMSK_NAK
;
1273 hcintmsk
|= HCINTMSK_NYET
;
1275 hcintmsk
|= HCINTMSK_ACK
;
1278 if (chan
->do_split
) {
1279 hcintmsk
|= HCINTMSK_NAK
;
1280 if (chan
->complete_split
)
1281 hcintmsk
|= HCINTMSK_NYET
;
1283 hcintmsk
|= HCINTMSK_ACK
;
1286 if (chan
->error_state
)
1287 hcintmsk
|= HCINTMSK_ACK
;
1290 case USB_ENDPOINT_XFER_INT
:
1292 dev_vdbg(hsotg
->dev
, "intr\n");
1293 hcintmsk
|= HCINTMSK_XFERCOMPL
;
1294 hcintmsk
|= HCINTMSK_NAK
;
1295 hcintmsk
|= HCINTMSK_STALL
;
1296 hcintmsk
|= HCINTMSK_XACTERR
;
1297 hcintmsk
|= HCINTMSK_DATATGLERR
;
1298 hcintmsk
|= HCINTMSK_FRMOVRUN
;
1301 hcintmsk
|= HCINTMSK_BBLERR
;
1302 if (chan
->error_state
)
1303 hcintmsk
|= HCINTMSK_ACK
;
1304 if (chan
->do_split
) {
1305 if (chan
->complete_split
)
1306 hcintmsk
|= HCINTMSK_NYET
;
1308 hcintmsk
|= HCINTMSK_ACK
;
1312 case USB_ENDPOINT_XFER_ISOC
:
1314 dev_vdbg(hsotg
->dev
, "isoc\n");
1315 hcintmsk
|= HCINTMSK_XFERCOMPL
;
1316 hcintmsk
|= HCINTMSK_FRMOVRUN
;
1317 hcintmsk
|= HCINTMSK_ACK
;
1319 if (chan
->ep_is_in
) {
1320 hcintmsk
|= HCINTMSK_XACTERR
;
1321 hcintmsk
|= HCINTMSK_BBLERR
;
1325 dev_err(hsotg
->dev
, "## Unknown EP type ##\n");
1329 dwc2_writel(hcintmsk
, hsotg
->regs
+ HCINTMSK(chan
->hc_num
));
1331 dev_vdbg(hsotg
->dev
, "set HCINTMSK to %08x\n", hcintmsk
);
1334 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg
*hsotg
,
1335 struct dwc2_host_chan
*chan
)
1337 u32 hcintmsk
= HCINTMSK_CHHLTD
;
1340 * For Descriptor DMA mode core halts the channel on AHB error.
1341 * Interrupt is not required.
1343 if (hsotg
->core_params
->dma_desc_enable
<= 0) {
1345 dev_vdbg(hsotg
->dev
, "desc DMA disabled\n");
1346 hcintmsk
|= HCINTMSK_AHBERR
;
1349 dev_vdbg(hsotg
->dev
, "desc DMA enabled\n");
1350 if (chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
)
1351 hcintmsk
|= HCINTMSK_XFERCOMPL
;
1354 if (chan
->error_state
&& !chan
->do_split
&&
1355 chan
->ep_type
!= USB_ENDPOINT_XFER_ISOC
) {
1357 dev_vdbg(hsotg
->dev
, "setting ACK\n");
1358 hcintmsk
|= HCINTMSK_ACK
;
1359 if (chan
->ep_is_in
) {
1360 hcintmsk
|= HCINTMSK_DATATGLERR
;
1361 if (chan
->ep_type
!= USB_ENDPOINT_XFER_INT
)
1362 hcintmsk
|= HCINTMSK_NAK
;
1366 dwc2_writel(hcintmsk
, hsotg
->regs
+ HCINTMSK(chan
->hc_num
));
1368 dev_vdbg(hsotg
->dev
, "set HCINTMSK to %08x\n", hcintmsk
);
1371 static void dwc2_hc_enable_ints(struct dwc2_hsotg
*hsotg
,
1372 struct dwc2_host_chan
*chan
)
1376 if (hsotg
->core_params
->dma_enable
> 0) {
1378 dev_vdbg(hsotg
->dev
, "DMA enabled\n");
1379 dwc2_hc_enable_dma_ints(hsotg
, chan
);
1382 dev_vdbg(hsotg
->dev
, "DMA disabled\n");
1383 dwc2_hc_enable_slave_ints(hsotg
, chan
);
1386 /* Enable the top level host channel interrupt */
1387 intmsk
= dwc2_readl(hsotg
->regs
+ HAINTMSK
);
1388 intmsk
|= 1 << chan
->hc_num
;
1389 dwc2_writel(intmsk
, hsotg
->regs
+ HAINTMSK
);
1391 dev_vdbg(hsotg
->dev
, "set HAINTMSK to %08x\n", intmsk
);
1393 /* Make sure host channel interrupts are enabled */
1394 intmsk
= dwc2_readl(hsotg
->regs
+ GINTMSK
);
1395 intmsk
|= GINTSTS_HCHINT
;
1396 dwc2_writel(intmsk
, hsotg
->regs
+ GINTMSK
);
1398 dev_vdbg(hsotg
->dev
, "set GINTMSK to %08x\n", intmsk
);
1402 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
1403 * a specific endpoint
1405 * @hsotg: Programming view of DWC_otg controller
1406 * @chan: Information needed to initialize the host channel
1408 * The HCCHARn register is set up with the characteristics specified in chan.
1409 * Host channel interrupts that may need to be serviced while this transfer is
1410 * in progress are enabled.
1412 void dwc2_hc_init(struct dwc2_hsotg
*hsotg
, struct dwc2_host_chan
*chan
)
1414 u8 hc_num
= chan
->hc_num
;
1420 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
1422 /* Clear old interrupt conditions for this host channel */
1423 hcintmsk
= 0xffffffff;
1424 hcintmsk
&= ~HCINTMSK_RESERVED14_31
;
1425 dwc2_writel(hcintmsk
, hsotg
->regs
+ HCINT(hc_num
));
1427 /* Enable channel interrupts required for this transfer */
1428 dwc2_hc_enable_ints(hsotg
, chan
);
1431 * Program the HCCHARn register with the endpoint characteristics for
1432 * the current transfer
1434 hcchar
= chan
->dev_addr
<< HCCHAR_DEVADDR_SHIFT
& HCCHAR_DEVADDR_MASK
;
1435 hcchar
|= chan
->ep_num
<< HCCHAR_EPNUM_SHIFT
& HCCHAR_EPNUM_MASK
;
1437 hcchar
|= HCCHAR_EPDIR
;
1438 if (chan
->speed
== USB_SPEED_LOW
)
1439 hcchar
|= HCCHAR_LSPDDEV
;
1440 hcchar
|= chan
->ep_type
<< HCCHAR_EPTYPE_SHIFT
& HCCHAR_EPTYPE_MASK
;
1441 hcchar
|= chan
->max_packet
<< HCCHAR_MPS_SHIFT
& HCCHAR_MPS_MASK
;
1442 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(hc_num
));
1444 dev_vdbg(hsotg
->dev
, "set HCCHAR(%d) to %08x\n",
1447 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n",
1449 dev_vdbg(hsotg
->dev
, " Dev Addr: %d\n",
1451 dev_vdbg(hsotg
->dev
, " Ep Num: %d\n",
1453 dev_vdbg(hsotg
->dev
, " Is In: %d\n",
1455 dev_vdbg(hsotg
->dev
, " Is Low Speed: %d\n",
1456 chan
->speed
== USB_SPEED_LOW
);
1457 dev_vdbg(hsotg
->dev
, " Ep Type: %d\n",
1459 dev_vdbg(hsotg
->dev
, " Max Pkt: %d\n",
1463 /* Program the HCSPLT register for SPLITs */
1464 if (chan
->do_split
) {
1466 dev_vdbg(hsotg
->dev
,
1467 "Programming HC %d with split --> %s\n",
1469 chan
->complete_split
? "CSPLIT" : "SSPLIT");
1470 if (chan
->complete_split
)
1471 hcsplt
|= HCSPLT_COMPSPLT
;
1472 hcsplt
|= chan
->xact_pos
<< HCSPLT_XACTPOS_SHIFT
&
1473 HCSPLT_XACTPOS_MASK
;
1474 hcsplt
|= chan
->hub_addr
<< HCSPLT_HUBADDR_SHIFT
&
1475 HCSPLT_HUBADDR_MASK
;
1476 hcsplt
|= chan
->hub_port
<< HCSPLT_PRTADDR_SHIFT
&
1477 HCSPLT_PRTADDR_MASK
;
1479 dev_vdbg(hsotg
->dev
, " comp split %d\n",
1480 chan
->complete_split
);
1481 dev_vdbg(hsotg
->dev
, " xact pos %d\n",
1483 dev_vdbg(hsotg
->dev
, " hub addr %d\n",
1485 dev_vdbg(hsotg
->dev
, " hub port %d\n",
1487 dev_vdbg(hsotg
->dev
, " is_in %d\n",
1489 dev_vdbg(hsotg
->dev
, " Max Pkt %d\n",
1491 dev_vdbg(hsotg
->dev
, " xferlen %d\n",
1496 dwc2_writel(hcsplt
, hsotg
->regs
+ HCSPLT(hc_num
));
1500 * dwc2_hc_halt() - Attempts to halt a host channel
1502 * @hsotg: Controller register interface
1503 * @chan: Host channel to halt
1504 * @halt_status: Reason for halting the channel
1506 * This function should only be called in Slave mode or to abort a transfer in
1507 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
1508 * controller halts the channel when the transfer is complete or a condition
1509 * occurs that requires application intervention.
1511 * In slave mode, checks for a free request queue entry, then sets the Channel
1512 * Enable and Channel Disable bits of the Host Channel Characteristics
1513 * register of the specified channel to intiate the halt. If there is no free
1514 * request queue entry, sets only the Channel Disable bit of the HCCHARn
1515 * register to flush requests for this channel. In the latter case, sets a
1516 * flag to indicate that the host channel needs to be halted when a request
1517 * queue slot is open.
1519 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1520 * HCCHARn register. The controller ensures there is space in the request
1521 * queue before submitting the halt request.
1523 * Some time may elapse before the core flushes any posted requests for this
1524 * host channel and halts. The Channel Halted interrupt handler completes the
1525 * deactivation of the host channel.
1527 void dwc2_hc_halt(struct dwc2_hsotg
*hsotg
, struct dwc2_host_chan
*chan
,
1528 enum dwc2_halt_status halt_status
)
1530 u32 nptxsts
, hptxsts
, hcchar
;
1533 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
1534 if (halt_status
== DWC2_HC_XFER_NO_HALT_STATUS
)
1535 dev_err(hsotg
->dev
, "!!! halt_status = %d !!!\n", halt_status
);
1537 if (halt_status
== DWC2_HC_XFER_URB_DEQUEUE
||
1538 halt_status
== DWC2_HC_XFER_AHB_ERR
) {
1540 * Disable all channel interrupts except Ch Halted. The QTD
1541 * and QH state associated with this transfer has been cleared
1542 * (in the case of URB_DEQUEUE), so the channel needs to be
1543 * shut down carefully to prevent crashes.
1545 u32 hcintmsk
= HCINTMSK_CHHLTD
;
1547 dev_vdbg(hsotg
->dev
, "dequeue/error\n");
1548 dwc2_writel(hcintmsk
, hsotg
->regs
+ HCINTMSK(chan
->hc_num
));
1551 * Make sure no other interrupts besides halt are currently
1552 * pending. Handling another interrupt could cause a crash due
1553 * to the QTD and QH state.
1555 dwc2_writel(~hcintmsk
, hsotg
->regs
+ HCINT(chan
->hc_num
));
1558 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1559 * even if the channel was already halted for some other
1562 chan
->halt_status
= halt_status
;
1564 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(chan
->hc_num
));
1565 if (!(hcchar
& HCCHAR_CHENA
)) {
1567 * The channel is either already halted or it hasn't
1568 * started yet. In DMA mode, the transfer may halt if
1569 * it finishes normally or a condition occurs that
1570 * requires driver intervention. Don't want to halt
1571 * the channel again. In either Slave or DMA mode,
1572 * it's possible that the transfer has been assigned
1573 * to a channel, but not started yet when an URB is
1574 * dequeued. Don't want to halt a channel that hasn't
1580 if (chan
->halt_pending
) {
1582 * A halt has already been issued for this channel. This might
1583 * happen when a transfer is aborted by a higher level in
1586 dev_vdbg(hsotg
->dev
,
1587 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1588 __func__
, chan
->hc_num
);
1592 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(chan
->hc_num
));
1594 /* No need to set the bit in DDMA for disabling the channel */
1595 /* TODO check it everywhere channel is disabled */
1596 if (hsotg
->core_params
->dma_desc_enable
<= 0) {
1598 dev_vdbg(hsotg
->dev
, "desc DMA disabled\n");
1599 hcchar
|= HCCHAR_CHENA
;
1602 dev_dbg(hsotg
->dev
, "desc DMA enabled\n");
1604 hcchar
|= HCCHAR_CHDIS
;
1606 if (hsotg
->core_params
->dma_enable
<= 0) {
1608 dev_vdbg(hsotg
->dev
, "DMA not enabled\n");
1609 hcchar
|= HCCHAR_CHENA
;
1611 /* Check for space in the request queue to issue the halt */
1612 if (chan
->ep_type
== USB_ENDPOINT_XFER_CONTROL
||
1613 chan
->ep_type
== USB_ENDPOINT_XFER_BULK
) {
1614 dev_vdbg(hsotg
->dev
, "control/bulk\n");
1615 nptxsts
= dwc2_readl(hsotg
->regs
+ GNPTXSTS
);
1616 if ((nptxsts
& TXSTS_QSPCAVAIL_MASK
) == 0) {
1617 dev_vdbg(hsotg
->dev
, "Disabling channel\n");
1618 hcchar
&= ~HCCHAR_CHENA
;
1622 dev_vdbg(hsotg
->dev
, "isoc/intr\n");
1623 hptxsts
= dwc2_readl(hsotg
->regs
+ HPTXSTS
);
1624 if ((hptxsts
& TXSTS_QSPCAVAIL_MASK
) == 0 ||
1625 hsotg
->queuing_high_bandwidth
) {
1627 dev_vdbg(hsotg
->dev
, "Disabling channel\n");
1628 hcchar
&= ~HCCHAR_CHENA
;
1633 dev_vdbg(hsotg
->dev
, "DMA enabled\n");
1636 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(chan
->hc_num
));
1637 chan
->halt_status
= halt_status
;
1639 if (hcchar
& HCCHAR_CHENA
) {
1641 dev_vdbg(hsotg
->dev
, "Channel enabled\n");
1642 chan
->halt_pending
= 1;
1643 chan
->halt_on_queue
= 0;
1646 dev_vdbg(hsotg
->dev
, "Channel disabled\n");
1647 chan
->halt_on_queue
= 1;
1651 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n", __func__
,
1653 dev_vdbg(hsotg
->dev
, " hcchar: 0x%08x\n",
1655 dev_vdbg(hsotg
->dev
, " halt_pending: %d\n",
1656 chan
->halt_pending
);
1657 dev_vdbg(hsotg
->dev
, " halt_on_queue: %d\n",
1658 chan
->halt_on_queue
);
1659 dev_vdbg(hsotg
->dev
, " halt_status: %d\n",
1665 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1667 * @hsotg: Programming view of DWC_otg controller
1668 * @chan: Identifies the host channel to clean up
1670 * This function is normally called after a transfer is done and the host
1671 * channel is being released
1673 void dwc2_hc_cleanup(struct dwc2_hsotg
*hsotg
, struct dwc2_host_chan
*chan
)
1677 chan
->xfer_started
= 0;
1680 * Clear channel interrupt enables and any unhandled channel interrupt
1683 dwc2_writel(0, hsotg
->regs
+ HCINTMSK(chan
->hc_num
));
1684 hcintmsk
= 0xffffffff;
1685 hcintmsk
&= ~HCINTMSK_RESERVED14_31
;
1686 dwc2_writel(hcintmsk
, hsotg
->regs
+ HCINT(chan
->hc_num
));
1690 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1691 * which frame a periodic transfer should occur
1693 * @hsotg: Programming view of DWC_otg controller
1694 * @chan: Identifies the host channel to set up and its properties
1695 * @hcchar: Current value of the HCCHAR register for the specified host channel
1697 * This function has no effect on non-periodic transfers
1699 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg
*hsotg
,
1700 struct dwc2_host_chan
*chan
, u32
*hcchar
)
1702 if (chan
->ep_type
== USB_ENDPOINT_XFER_INT
||
1703 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
) {
1704 /* 1 if _next_ frame is odd, 0 if it's even */
1705 if (!(dwc2_hcd_get_frame_number(hsotg
) & 0x1))
1706 *hcchar
|= HCCHAR_ODDFRM
;
1710 static void dwc2_set_pid_isoc(struct dwc2_host_chan
*chan
)
1712 /* Set up the initial PID for the transfer */
1713 if (chan
->speed
== USB_SPEED_HIGH
) {
1714 if (chan
->ep_is_in
) {
1715 if (chan
->multi_count
== 1)
1716 chan
->data_pid_start
= DWC2_HC_PID_DATA0
;
1717 else if (chan
->multi_count
== 2)
1718 chan
->data_pid_start
= DWC2_HC_PID_DATA1
;
1720 chan
->data_pid_start
= DWC2_HC_PID_DATA2
;
1722 if (chan
->multi_count
== 1)
1723 chan
->data_pid_start
= DWC2_HC_PID_DATA0
;
1725 chan
->data_pid_start
= DWC2_HC_PID_MDATA
;
1728 chan
->data_pid_start
= DWC2_HC_PID_DATA0
;
1733 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1736 * @hsotg: Programming view of DWC_otg controller
1737 * @chan: Information needed to initialize the host channel
1739 * This function should only be called in Slave mode. For a channel associated
1740 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1741 * associated with a periodic EP, the periodic Tx FIFO is written.
1743 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1744 * the number of bytes written to the Tx FIFO.
1746 static void dwc2_hc_write_packet(struct dwc2_hsotg
*hsotg
,
1747 struct dwc2_host_chan
*chan
)
1750 u32 remaining_count
;
1753 u32 __iomem
*data_fifo
;
1754 u32
*data_buf
= (u32
*)chan
->xfer_buf
;
1757 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
1759 data_fifo
= (u32 __iomem
*)(hsotg
->regs
+ HCFIFO(chan
->hc_num
));
1761 remaining_count
= chan
->xfer_len
- chan
->xfer_count
;
1762 if (remaining_count
> chan
->max_packet
)
1763 byte_count
= chan
->max_packet
;
1765 byte_count
= remaining_count
;
1767 dword_count
= (byte_count
+ 3) / 4;
1769 if (((unsigned long)data_buf
& 0x3) == 0) {
1770 /* xfer_buf is DWORD aligned */
1771 for (i
= 0; i
< dword_count
; i
++, data_buf
++)
1772 dwc2_writel(*data_buf
, data_fifo
);
1774 /* xfer_buf is not DWORD aligned */
1775 for (i
= 0; i
< dword_count
; i
++, data_buf
++) {
1776 u32 data
= data_buf
[0] | data_buf
[1] << 8 |
1777 data_buf
[2] << 16 | data_buf
[3] << 24;
1778 dwc2_writel(data
, data_fifo
);
1782 chan
->xfer_count
+= byte_count
;
1783 chan
->xfer_buf
+= byte_count
;
1787 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1788 * channel and starts the transfer
1790 * @hsotg: Programming view of DWC_otg controller
1791 * @chan: Information needed to initialize the host channel. The xfer_len value
1792 * may be reduced to accommodate the max widths of the XferSize and
1793 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1794 * changed to reflect the final xfer_len value.
1796 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1797 * the caller must ensure that there is sufficient space in the request queue
1800 * For an OUT transfer in Slave mode, it loads a data packet into the
1801 * appropriate FIFO. If necessary, additional data packets are loaded in the
1804 * For an IN transfer in Slave mode, a data packet is requested. The data
1805 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1806 * additional data packets are requested in the Host ISR.
1808 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1809 * register along with a packet count of 1 and the channel is enabled. This
1810 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1811 * simply set to 0 since no data transfer occurs in this case.
1813 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1814 * all the information required to perform the subsequent data transfer. In
1815 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1816 * controller performs the entire PING protocol, then starts the data
1819 void dwc2_hc_start_transfer(struct dwc2_hsotg
*hsotg
,
1820 struct dwc2_host_chan
*chan
)
1822 u32 max_hc_xfer_size
= hsotg
->core_params
->max_transfer_size
;
1823 u16 max_hc_pkt_count
= hsotg
->core_params
->max_packet_count
;
1830 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
1832 if (chan
->do_ping
) {
1833 if (hsotg
->core_params
->dma_enable
<= 0) {
1835 dev_vdbg(hsotg
->dev
, "ping, no DMA\n");
1836 dwc2_hc_do_ping(hsotg
, chan
);
1837 chan
->xfer_started
= 1;
1841 dev_vdbg(hsotg
->dev
, "ping, DMA\n");
1842 hctsiz
|= TSIZ_DOPNG
;
1846 if (chan
->do_split
) {
1848 dev_vdbg(hsotg
->dev
, "split\n");
1851 if (chan
->complete_split
&& !chan
->ep_is_in
)
1853 * For CSPLIT OUT Transfer, set the size to 0 so the
1854 * core doesn't expect any data written to the FIFO
1857 else if (chan
->ep_is_in
|| chan
->xfer_len
> chan
->max_packet
)
1858 chan
->xfer_len
= chan
->max_packet
;
1859 else if (!chan
->ep_is_in
&& chan
->xfer_len
> 188)
1860 chan
->xfer_len
= 188;
1862 hctsiz
|= chan
->xfer_len
<< TSIZ_XFERSIZE_SHIFT
&
1865 /* For split set ec_mc for immediate retries */
1866 if (chan
->ep_type
== USB_ENDPOINT_XFER_INT
||
1867 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
)
1873 dev_vdbg(hsotg
->dev
, "no split\n");
1875 * Ensure that the transfer length and packet count will fit
1876 * in the widths allocated for them in the HCTSIZn register
1878 if (chan
->ep_type
== USB_ENDPOINT_XFER_INT
||
1879 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
) {
1881 * Make sure the transfer size is no larger than one
1882 * (micro)frame's worth of data. (A check was done
1883 * when the periodic transfer was accepted to ensure
1884 * that a (micro)frame's worth of data can be
1885 * programmed into a channel.)
1887 u32 max_periodic_len
=
1888 chan
->multi_count
* chan
->max_packet
;
1890 if (chan
->xfer_len
> max_periodic_len
)
1891 chan
->xfer_len
= max_periodic_len
;
1892 } else if (chan
->xfer_len
> max_hc_xfer_size
) {
1894 * Make sure that xfer_len is a multiple of max packet
1898 max_hc_xfer_size
- chan
->max_packet
+ 1;
1901 if (chan
->xfer_len
> 0) {
1902 num_packets
= (chan
->xfer_len
+ chan
->max_packet
- 1) /
1904 if (num_packets
> max_hc_pkt_count
) {
1905 num_packets
= max_hc_pkt_count
;
1906 chan
->xfer_len
= num_packets
* chan
->max_packet
;
1909 /* Need 1 packet for transfer length of 0 */
1915 * Always program an integral # of max packets for IN
1918 chan
->xfer_len
= num_packets
* chan
->max_packet
;
1920 if (chan
->ep_type
== USB_ENDPOINT_XFER_INT
||
1921 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
)
1923 * Make sure that the multi_count field matches the
1924 * actual transfer length
1926 chan
->multi_count
= num_packets
;
1928 if (chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
)
1929 dwc2_set_pid_isoc(chan
);
1931 hctsiz
|= chan
->xfer_len
<< TSIZ_XFERSIZE_SHIFT
&
1934 /* The ec_mc gets the multi_count for non-split */
1935 ec_mc
= chan
->multi_count
;
1938 chan
->start_pkt_count
= num_packets
;
1939 hctsiz
|= num_packets
<< TSIZ_PKTCNT_SHIFT
& TSIZ_PKTCNT_MASK
;
1940 hctsiz
|= chan
->data_pid_start
<< TSIZ_SC_MC_PID_SHIFT
&
1941 TSIZ_SC_MC_PID_MASK
;
1942 dwc2_writel(hctsiz
, hsotg
->regs
+ HCTSIZ(chan
->hc_num
));
1944 dev_vdbg(hsotg
->dev
, "Wrote %08x to HCTSIZ(%d)\n",
1945 hctsiz
, chan
->hc_num
);
1947 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n", __func__
,
1949 dev_vdbg(hsotg
->dev
, " Xfer Size: %d\n",
1950 (hctsiz
& TSIZ_XFERSIZE_MASK
) >>
1951 TSIZ_XFERSIZE_SHIFT
);
1952 dev_vdbg(hsotg
->dev
, " Num Pkts: %d\n",
1953 (hctsiz
& TSIZ_PKTCNT_MASK
) >>
1955 dev_vdbg(hsotg
->dev
, " Start PID: %d\n",
1956 (hctsiz
& TSIZ_SC_MC_PID_MASK
) >>
1957 TSIZ_SC_MC_PID_SHIFT
);
1960 if (hsotg
->core_params
->dma_enable
> 0) {
1961 dma_addr_t dma_addr
;
1963 if (chan
->align_buf
) {
1965 dev_vdbg(hsotg
->dev
, "align_buf\n");
1966 dma_addr
= chan
->align_buf
;
1968 dma_addr
= chan
->xfer_dma
;
1970 dwc2_writel((u32
)dma_addr
, hsotg
->regs
+ HCDMA(chan
->hc_num
));
1972 dev_vdbg(hsotg
->dev
, "Wrote %08lx to HCDMA(%d)\n",
1973 (unsigned long)dma_addr
, chan
->hc_num
);
1976 /* Start the split */
1977 if (chan
->do_split
) {
1978 u32 hcsplt
= dwc2_readl(hsotg
->regs
+ HCSPLT(chan
->hc_num
));
1980 hcsplt
|= HCSPLT_SPLTENA
;
1981 dwc2_writel(hcsplt
, hsotg
->regs
+ HCSPLT(chan
->hc_num
));
1984 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(chan
->hc_num
));
1985 hcchar
&= ~HCCHAR_MULTICNT_MASK
;
1986 hcchar
|= (ec_mc
<< HCCHAR_MULTICNT_SHIFT
) & HCCHAR_MULTICNT_MASK
;
1987 dwc2_hc_set_even_odd_frame(hsotg
, chan
, &hcchar
);
1989 if (hcchar
& HCCHAR_CHDIS
)
1990 dev_warn(hsotg
->dev
,
1991 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1992 __func__
, chan
->hc_num
, hcchar
);
1994 /* Set host channel enable after all other setup is complete */
1995 hcchar
|= HCCHAR_CHENA
;
1996 hcchar
&= ~HCCHAR_CHDIS
;
1999 dev_vdbg(hsotg
->dev
, " Multi Cnt: %d\n",
2000 (hcchar
& HCCHAR_MULTICNT_MASK
) >>
2001 HCCHAR_MULTICNT_SHIFT
);
2003 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(chan
->hc_num
));
2005 dev_vdbg(hsotg
->dev
, "Wrote %08x to HCCHAR(%d)\n", hcchar
,
2008 chan
->xfer_started
= 1;
2011 if (hsotg
->core_params
->dma_enable
<= 0 &&
2012 !chan
->ep_is_in
&& chan
->xfer_len
> 0)
2013 /* Load OUT packet into the appropriate Tx FIFO */
2014 dwc2_hc_write_packet(hsotg
, chan
);
2018 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
2019 * host channel and starts the transfer in Descriptor DMA mode
2021 * @hsotg: Programming view of DWC_otg controller
2022 * @chan: Information needed to initialize the host channel
2024 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
2025 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
2026 * with micro-frame bitmap.
2028 * Initializes HCDMA register with descriptor list address and CTD value then
2029 * starts the transfer via enabling the channel.
2031 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg
*hsotg
,
2032 struct dwc2_host_chan
*chan
)
2038 hctsiz
|= TSIZ_DOPNG
;
2040 if (chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
)
2041 dwc2_set_pid_isoc(chan
);
2043 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
2044 hctsiz
|= chan
->data_pid_start
<< TSIZ_SC_MC_PID_SHIFT
&
2045 TSIZ_SC_MC_PID_MASK
;
2047 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
2048 hctsiz
|= (chan
->ntd
- 1) << TSIZ_NTD_SHIFT
& TSIZ_NTD_MASK
;
2050 /* Non-zero only for high-speed interrupt endpoints */
2051 hctsiz
|= chan
->schinfo
<< TSIZ_SCHINFO_SHIFT
& TSIZ_SCHINFO_MASK
;
2054 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n", __func__
,
2056 dev_vdbg(hsotg
->dev
, " Start PID: %d\n",
2057 chan
->data_pid_start
);
2058 dev_vdbg(hsotg
->dev
, " NTD: %d\n", chan
->ntd
- 1);
2061 dwc2_writel(hctsiz
, hsotg
->regs
+ HCTSIZ(chan
->hc_num
));
2063 dma_sync_single_for_device(hsotg
->dev
, chan
->desc_list_addr
,
2064 chan
->desc_list_sz
, DMA_TO_DEVICE
);
2066 dwc2_writel(chan
->desc_list_addr
, hsotg
->regs
+ HCDMA(chan
->hc_num
));
2069 dev_vdbg(hsotg
->dev
, "Wrote %pad to HCDMA(%d)\n",
2070 &chan
->desc_list_addr
, chan
->hc_num
);
2072 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(chan
->hc_num
));
2073 hcchar
&= ~HCCHAR_MULTICNT_MASK
;
2074 hcchar
|= chan
->multi_count
<< HCCHAR_MULTICNT_SHIFT
&
2075 HCCHAR_MULTICNT_MASK
;
2077 if (hcchar
& HCCHAR_CHDIS
)
2078 dev_warn(hsotg
->dev
,
2079 "%s: chdis set, channel %d, hcchar 0x%08x\n",
2080 __func__
, chan
->hc_num
, hcchar
);
2082 /* Set host channel enable after all other setup is complete */
2083 hcchar
|= HCCHAR_CHENA
;
2084 hcchar
&= ~HCCHAR_CHDIS
;
2087 dev_vdbg(hsotg
->dev
, " Multi Cnt: %d\n",
2088 (hcchar
& HCCHAR_MULTICNT_MASK
) >>
2089 HCCHAR_MULTICNT_SHIFT
);
2091 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(chan
->hc_num
));
2093 dev_vdbg(hsotg
->dev
, "Wrote %08x to HCCHAR(%d)\n", hcchar
,
2096 chan
->xfer_started
= 1;
2101 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
2102 * a previous call to dwc2_hc_start_transfer()
2104 * @hsotg: Programming view of DWC_otg controller
2105 * @chan: Information needed to initialize the host channel
2107 * The caller must ensure there is sufficient space in the request queue and Tx
2108 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
2109 * the controller acts autonomously to complete transfers programmed to a host
2112 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
2113 * if there is any data remaining to be queued. For an IN transfer, another
2114 * data packet is always requested. For the SETUP phase of a control transfer,
2115 * this function does nothing.
2117 * Return: 1 if a new request is queued, 0 if no more requests are required
2120 int dwc2_hc_continue_transfer(struct dwc2_hsotg
*hsotg
,
2121 struct dwc2_host_chan
*chan
)
2124 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n", __func__
,
2128 /* SPLITs always queue just once per channel */
2131 if (chan
->data_pid_start
== DWC2_HC_PID_SETUP
)
2132 /* SETUPs are queued only once since they can't be NAK'd */
2135 if (chan
->ep_is_in
) {
2137 * Always queue another request for other IN transfers. If
2138 * back-to-back INs are issued and NAKs are received for both,
2139 * the driver may still be processing the first NAK when the
2140 * second NAK is received. When the interrupt handler clears
2141 * the NAK interrupt for the first NAK, the second NAK will
2142 * not be seen. So we can't depend on the NAK interrupt
2143 * handler to requeue a NAK'd request. Instead, IN requests
2144 * are issued each time this function is called. When the
2145 * transfer completes, the extra requests for the channel will
2148 u32 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(chan
->hc_num
));
2150 dwc2_hc_set_even_odd_frame(hsotg
, chan
, &hcchar
);
2151 hcchar
|= HCCHAR_CHENA
;
2152 hcchar
&= ~HCCHAR_CHDIS
;
2154 dev_vdbg(hsotg
->dev
, " IN xfer: hcchar = 0x%08x\n",
2156 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(chan
->hc_num
));
2163 if (chan
->xfer_count
< chan
->xfer_len
) {
2164 if (chan
->ep_type
== USB_ENDPOINT_XFER_INT
||
2165 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
) {
2166 u32 hcchar
= dwc2_readl(hsotg
->regs
+
2167 HCCHAR(chan
->hc_num
));
2169 dwc2_hc_set_even_odd_frame(hsotg
, chan
,
2173 /* Load OUT packet into the appropriate Tx FIFO */
2174 dwc2_hc_write_packet(hsotg
, chan
);
2183 * dwc2_hc_do_ping() - Starts a PING transfer
2185 * @hsotg: Programming view of DWC_otg controller
2186 * @chan: Information needed to initialize the host channel
2188 * This function should only be called in Slave mode. The Do Ping bit is set in
2189 * the HCTSIZ register, then the channel is enabled.
2191 void dwc2_hc_do_ping(struct dwc2_hsotg
*hsotg
, struct dwc2_host_chan
*chan
)
2197 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n", __func__
,
2201 hctsiz
= TSIZ_DOPNG
;
2202 hctsiz
|= 1 << TSIZ_PKTCNT_SHIFT
;
2203 dwc2_writel(hctsiz
, hsotg
->regs
+ HCTSIZ(chan
->hc_num
));
2205 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(chan
->hc_num
));
2206 hcchar
|= HCCHAR_CHENA
;
2207 hcchar
&= ~HCCHAR_CHDIS
;
2208 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(chan
->hc_num
));
2212 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
2213 * the HFIR register according to PHY type and speed
2215 * @hsotg: Programming view of DWC_otg controller
2217 * NOTE: The caller can modify the value of the HFIR register only after the
2218 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
2221 u32
dwc2_calc_frame_interval(struct dwc2_hsotg
*hsotg
)
2225 int clock
= 60; /* default value */
2227 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
2228 hprt0
= dwc2_readl(hsotg
->regs
+ HPRT0
);
2230 if (!(usbcfg
& GUSBCFG_PHYSEL
) && (usbcfg
& GUSBCFG_ULPI_UTMI_SEL
) &&
2231 !(usbcfg
& GUSBCFG_PHYIF16
))
2233 if ((usbcfg
& GUSBCFG_PHYSEL
) && hsotg
->hw_params
.fs_phy_type
==
2234 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI
)
2236 if (!(usbcfg
& GUSBCFG_PHY_LP_CLK_SEL
) && !(usbcfg
& GUSBCFG_PHYSEL
) &&
2237 !(usbcfg
& GUSBCFG_ULPI_UTMI_SEL
) && (usbcfg
& GUSBCFG_PHYIF16
))
2239 if (!(usbcfg
& GUSBCFG_PHY_LP_CLK_SEL
) && !(usbcfg
& GUSBCFG_PHYSEL
) &&
2240 !(usbcfg
& GUSBCFG_ULPI_UTMI_SEL
) && !(usbcfg
& GUSBCFG_PHYIF16
))
2242 if ((usbcfg
& GUSBCFG_PHY_LP_CLK_SEL
) && !(usbcfg
& GUSBCFG_PHYSEL
) &&
2243 !(usbcfg
& GUSBCFG_ULPI_UTMI_SEL
) && (usbcfg
& GUSBCFG_PHYIF16
))
2245 if ((usbcfg
& GUSBCFG_PHYSEL
) && !(usbcfg
& GUSBCFG_PHYIF16
) &&
2246 hsotg
->hw_params
.fs_phy_type
== GHWCFG2_FS_PHY_TYPE_SHARED_UTMI
)
2248 if ((usbcfg
& GUSBCFG_PHYSEL
) &&
2249 hsotg
->hw_params
.fs_phy_type
== GHWCFG2_FS_PHY_TYPE_DEDICATED
)
2252 if ((hprt0
& HPRT0_SPD_MASK
) >> HPRT0_SPD_SHIFT
== HPRT0_SPD_HIGH_SPEED
)
2253 /* High speed case */
2257 return 1000 * clock
;
2261 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
2264 * @core_if: Programming view of DWC_otg controller
2265 * @dest: Destination buffer for the packet
2266 * @bytes: Number of bytes to copy to the destination
2268 void dwc2_read_packet(struct dwc2_hsotg
*hsotg
, u8
*dest
, u16 bytes
)
2270 u32 __iomem
*fifo
= hsotg
->regs
+ HCFIFO(0);
2271 u32
*data_buf
= (u32
*)dest
;
2272 int word_count
= (bytes
+ 3) / 4;
2276 * Todo: Account for the case where dest is not dword aligned. This
2277 * requires reading data from the FIFO into a u32 temp buffer, then
2278 * moving it into the data buffer.
2281 dev_vdbg(hsotg
->dev
, "%s(%p,%p,%d)\n", __func__
, hsotg
, dest
, bytes
);
2283 for (i
= 0; i
< word_count
; i
++, data_buf
++)
2284 *data_buf
= dwc2_readl(fifo
);
2288 * dwc2_dump_host_registers() - Prints the host registers
2290 * @hsotg: Programming view of DWC_otg controller
2292 * NOTE: This function will be removed once the peripheral controller code
2293 * is integrated and the driver is stable
2295 void dwc2_dump_host_registers(struct dwc2_hsotg
*hsotg
)
2301 dev_dbg(hsotg
->dev
, "Host Global Registers\n");
2302 addr
= hsotg
->regs
+ HCFG
;
2303 dev_dbg(hsotg
->dev
, "HCFG @0x%08lX : 0x%08X\n",
2304 (unsigned long)addr
, dwc2_readl(addr
));
2305 addr
= hsotg
->regs
+ HFIR
;
2306 dev_dbg(hsotg
->dev
, "HFIR @0x%08lX : 0x%08X\n",
2307 (unsigned long)addr
, dwc2_readl(addr
));
2308 addr
= hsotg
->regs
+ HFNUM
;
2309 dev_dbg(hsotg
->dev
, "HFNUM @0x%08lX : 0x%08X\n",
2310 (unsigned long)addr
, dwc2_readl(addr
));
2311 addr
= hsotg
->regs
+ HPTXSTS
;
2312 dev_dbg(hsotg
->dev
, "HPTXSTS @0x%08lX : 0x%08X\n",
2313 (unsigned long)addr
, dwc2_readl(addr
));
2314 addr
= hsotg
->regs
+ HAINT
;
2315 dev_dbg(hsotg
->dev
, "HAINT @0x%08lX : 0x%08X\n",
2316 (unsigned long)addr
, dwc2_readl(addr
));
2317 addr
= hsotg
->regs
+ HAINTMSK
;
2318 dev_dbg(hsotg
->dev
, "HAINTMSK @0x%08lX : 0x%08X\n",
2319 (unsigned long)addr
, dwc2_readl(addr
));
2320 if (hsotg
->core_params
->dma_desc_enable
> 0) {
2321 addr
= hsotg
->regs
+ HFLBADDR
;
2322 dev_dbg(hsotg
->dev
, "HFLBADDR @0x%08lX : 0x%08X\n",
2323 (unsigned long)addr
, dwc2_readl(addr
));
2326 addr
= hsotg
->regs
+ HPRT0
;
2327 dev_dbg(hsotg
->dev
, "HPRT0 @0x%08lX : 0x%08X\n",
2328 (unsigned long)addr
, dwc2_readl(addr
));
2330 for (i
= 0; i
< hsotg
->core_params
->host_channels
; i
++) {
2331 dev_dbg(hsotg
->dev
, "Host Channel %d Specific Registers\n", i
);
2332 addr
= hsotg
->regs
+ HCCHAR(i
);
2333 dev_dbg(hsotg
->dev
, "HCCHAR @0x%08lX : 0x%08X\n",
2334 (unsigned long)addr
, dwc2_readl(addr
));
2335 addr
= hsotg
->regs
+ HCSPLT(i
);
2336 dev_dbg(hsotg
->dev
, "HCSPLT @0x%08lX : 0x%08X\n",
2337 (unsigned long)addr
, dwc2_readl(addr
));
2338 addr
= hsotg
->regs
+ HCINT(i
);
2339 dev_dbg(hsotg
->dev
, "HCINT @0x%08lX : 0x%08X\n",
2340 (unsigned long)addr
, dwc2_readl(addr
));
2341 addr
= hsotg
->regs
+ HCINTMSK(i
);
2342 dev_dbg(hsotg
->dev
, "HCINTMSK @0x%08lX : 0x%08X\n",
2343 (unsigned long)addr
, dwc2_readl(addr
));
2344 addr
= hsotg
->regs
+ HCTSIZ(i
);
2345 dev_dbg(hsotg
->dev
, "HCTSIZ @0x%08lX : 0x%08X\n",
2346 (unsigned long)addr
, dwc2_readl(addr
));
2347 addr
= hsotg
->regs
+ HCDMA(i
);
2348 dev_dbg(hsotg
->dev
, "HCDMA @0x%08lX : 0x%08X\n",
2349 (unsigned long)addr
, dwc2_readl(addr
));
2350 if (hsotg
->core_params
->dma_desc_enable
> 0) {
2351 addr
= hsotg
->regs
+ HCDMAB(i
);
2352 dev_dbg(hsotg
->dev
, "HCDMAB @0x%08lX : 0x%08X\n",
2353 (unsigned long)addr
, dwc2_readl(addr
));
2360 * dwc2_dump_global_registers() - Prints the core global registers
2362 * @hsotg: Programming view of DWC_otg controller
2364 * NOTE: This function will be removed once the peripheral controller code
2365 * is integrated and the driver is stable
2367 void dwc2_dump_global_registers(struct dwc2_hsotg
*hsotg
)
2372 dev_dbg(hsotg
->dev
, "Core Global Registers\n");
2373 addr
= hsotg
->regs
+ GOTGCTL
;
2374 dev_dbg(hsotg
->dev
, "GOTGCTL @0x%08lX : 0x%08X\n",
2375 (unsigned long)addr
, dwc2_readl(addr
));
2376 addr
= hsotg
->regs
+ GOTGINT
;
2377 dev_dbg(hsotg
->dev
, "GOTGINT @0x%08lX : 0x%08X\n",
2378 (unsigned long)addr
, dwc2_readl(addr
));
2379 addr
= hsotg
->regs
+ GAHBCFG
;
2380 dev_dbg(hsotg
->dev
, "GAHBCFG @0x%08lX : 0x%08X\n",
2381 (unsigned long)addr
, dwc2_readl(addr
));
2382 addr
= hsotg
->regs
+ GUSBCFG
;
2383 dev_dbg(hsotg
->dev
, "GUSBCFG @0x%08lX : 0x%08X\n",
2384 (unsigned long)addr
, dwc2_readl(addr
));
2385 addr
= hsotg
->regs
+ GRSTCTL
;
2386 dev_dbg(hsotg
->dev
, "GRSTCTL @0x%08lX : 0x%08X\n",
2387 (unsigned long)addr
, dwc2_readl(addr
));
2388 addr
= hsotg
->regs
+ GINTSTS
;
2389 dev_dbg(hsotg
->dev
, "GINTSTS @0x%08lX : 0x%08X\n",
2390 (unsigned long)addr
, dwc2_readl(addr
));
2391 addr
= hsotg
->regs
+ GINTMSK
;
2392 dev_dbg(hsotg
->dev
, "GINTMSK @0x%08lX : 0x%08X\n",
2393 (unsigned long)addr
, dwc2_readl(addr
));
2394 addr
= hsotg
->regs
+ GRXSTSR
;
2395 dev_dbg(hsotg
->dev
, "GRXSTSR @0x%08lX : 0x%08X\n",
2396 (unsigned long)addr
, dwc2_readl(addr
));
2397 addr
= hsotg
->regs
+ GRXFSIZ
;
2398 dev_dbg(hsotg
->dev
, "GRXFSIZ @0x%08lX : 0x%08X\n",
2399 (unsigned long)addr
, dwc2_readl(addr
));
2400 addr
= hsotg
->regs
+ GNPTXFSIZ
;
2401 dev_dbg(hsotg
->dev
, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
2402 (unsigned long)addr
, dwc2_readl(addr
));
2403 addr
= hsotg
->regs
+ GNPTXSTS
;
2404 dev_dbg(hsotg
->dev
, "GNPTXSTS @0x%08lX : 0x%08X\n",
2405 (unsigned long)addr
, dwc2_readl(addr
));
2406 addr
= hsotg
->regs
+ GI2CCTL
;
2407 dev_dbg(hsotg
->dev
, "GI2CCTL @0x%08lX : 0x%08X\n",
2408 (unsigned long)addr
, dwc2_readl(addr
));
2409 addr
= hsotg
->regs
+ GPVNDCTL
;
2410 dev_dbg(hsotg
->dev
, "GPVNDCTL @0x%08lX : 0x%08X\n",
2411 (unsigned long)addr
, dwc2_readl(addr
));
2412 addr
= hsotg
->regs
+ GGPIO
;
2413 dev_dbg(hsotg
->dev
, "GGPIO @0x%08lX : 0x%08X\n",
2414 (unsigned long)addr
, dwc2_readl(addr
));
2415 addr
= hsotg
->regs
+ GUID
;
2416 dev_dbg(hsotg
->dev
, "GUID @0x%08lX : 0x%08X\n",
2417 (unsigned long)addr
, dwc2_readl(addr
));
2418 addr
= hsotg
->regs
+ GSNPSID
;
2419 dev_dbg(hsotg
->dev
, "GSNPSID @0x%08lX : 0x%08X\n",
2420 (unsigned long)addr
, dwc2_readl(addr
));
2421 addr
= hsotg
->regs
+ GHWCFG1
;
2422 dev_dbg(hsotg
->dev
, "GHWCFG1 @0x%08lX : 0x%08X\n",
2423 (unsigned long)addr
, dwc2_readl(addr
));
2424 addr
= hsotg
->regs
+ GHWCFG2
;
2425 dev_dbg(hsotg
->dev
, "GHWCFG2 @0x%08lX : 0x%08X\n",
2426 (unsigned long)addr
, dwc2_readl(addr
));
2427 addr
= hsotg
->regs
+ GHWCFG3
;
2428 dev_dbg(hsotg
->dev
, "GHWCFG3 @0x%08lX : 0x%08X\n",
2429 (unsigned long)addr
, dwc2_readl(addr
));
2430 addr
= hsotg
->regs
+ GHWCFG4
;
2431 dev_dbg(hsotg
->dev
, "GHWCFG4 @0x%08lX : 0x%08X\n",
2432 (unsigned long)addr
, dwc2_readl(addr
));
2433 addr
= hsotg
->regs
+ GLPMCFG
;
2434 dev_dbg(hsotg
->dev
, "GLPMCFG @0x%08lX : 0x%08X\n",
2435 (unsigned long)addr
, dwc2_readl(addr
));
2436 addr
= hsotg
->regs
+ GPWRDN
;
2437 dev_dbg(hsotg
->dev
, "GPWRDN @0x%08lX : 0x%08X\n",
2438 (unsigned long)addr
, dwc2_readl(addr
));
2439 addr
= hsotg
->regs
+ GDFIFOCFG
;
2440 dev_dbg(hsotg
->dev
, "GDFIFOCFG @0x%08lX : 0x%08X\n",
2441 (unsigned long)addr
, dwc2_readl(addr
));
2442 addr
= hsotg
->regs
+ HPTXFSIZ
;
2443 dev_dbg(hsotg
->dev
, "HPTXFSIZ @0x%08lX : 0x%08X\n",
2444 (unsigned long)addr
, dwc2_readl(addr
));
2446 addr
= hsotg
->regs
+ PCGCTL
;
2447 dev_dbg(hsotg
->dev
, "PCGCTL @0x%08lX : 0x%08X\n",
2448 (unsigned long)addr
, dwc2_readl(addr
));
2453 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
2455 * @hsotg: Programming view of DWC_otg controller
2456 * @num: Tx FIFO to flush
2458 void dwc2_flush_tx_fifo(struct dwc2_hsotg
*hsotg
, const int num
)
2463 dev_vdbg(hsotg
->dev
, "Flush Tx FIFO %d\n", num
);
2465 greset
= GRSTCTL_TXFFLSH
;
2466 greset
|= num
<< GRSTCTL_TXFNUM_SHIFT
& GRSTCTL_TXFNUM_MASK
;
2467 dwc2_writel(greset
, hsotg
->regs
+ GRSTCTL
);
2470 greset
= dwc2_readl(hsotg
->regs
+ GRSTCTL
);
2471 if (++count
> 10000) {
2472 dev_warn(hsotg
->dev
,
2473 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
2475 dwc2_readl(hsotg
->regs
+ GNPTXSTS
));
2479 } while (greset
& GRSTCTL_TXFFLSH
);
2481 /* Wait for at least 3 PHY Clocks */
2486 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
2488 * @hsotg: Programming view of DWC_otg controller
2490 void dwc2_flush_rx_fifo(struct dwc2_hsotg
*hsotg
)
2495 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
2497 greset
= GRSTCTL_RXFFLSH
;
2498 dwc2_writel(greset
, hsotg
->regs
+ GRSTCTL
);
2501 greset
= dwc2_readl(hsotg
->regs
+ GRSTCTL
);
2502 if (++count
> 10000) {
2503 dev_warn(hsotg
->dev
, "%s() HANG! GRSTCTL=%0x\n",
2508 } while (greset
& GRSTCTL_RXFFLSH
);
2510 /* Wait for at least 3 PHY Clocks */
/* True when 'a' lies outside the inclusive range [b, c] */
#define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))
2516 /* Parameter access functions */
2517 void dwc2_set_param_otg_cap(struct dwc2_hsotg
*hsotg
, int val
)
2522 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE
:
2523 if (hsotg
->hw_params
.op_mode
!= GHWCFG2_OP_MODE_HNP_SRP_CAPABLE
)
2526 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE
:
2527 switch (hsotg
->hw_params
.op_mode
) {
2528 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE
:
2529 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE
:
2530 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE
:
2531 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST
:
2538 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE
:
2549 "%d invalid for otg_cap parameter. Check HW configuration.\n",
2551 switch (hsotg
->hw_params
.op_mode
) {
2552 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE
:
2553 val
= DWC2_CAP_PARAM_HNP_SRP_CAPABLE
;
2555 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE
:
2556 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE
:
2557 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST
:
2558 val
= DWC2_CAP_PARAM_SRP_ONLY_CAPABLE
;
2561 val
= DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE
;
2564 dev_dbg(hsotg
->dev
, "Setting otg_cap to %d\n", val
);
2567 hsotg
->core_params
->otg_cap
= val
;
2570 void dwc2_set_param_dma_enable(struct dwc2_hsotg
*hsotg
, int val
)
2574 if (val
> 0 && hsotg
->hw_params
.arch
== GHWCFG2_SLAVE_ONLY_ARCH
)
2582 "%d invalid for dma_enable parameter. Check HW configuration.\n",
2584 val
= hsotg
->hw_params
.arch
!= GHWCFG2_SLAVE_ONLY_ARCH
;
2585 dev_dbg(hsotg
->dev
, "Setting dma_enable to %d\n", val
);
2588 hsotg
->core_params
->dma_enable
= val
;
2591 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg
*hsotg
, int val
)
2595 if (val
> 0 && (hsotg
->core_params
->dma_enable
<= 0 ||
2596 !hsotg
->hw_params
.dma_desc_enable
))
2604 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2606 val
= (hsotg
->core_params
->dma_enable
> 0 &&
2607 hsotg
->hw_params
.dma_desc_enable
);
2608 dev_dbg(hsotg
->dev
, "Setting dma_desc_enable to %d\n", val
);
2611 hsotg
->core_params
->dma_desc_enable
= val
;
2614 void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg
*hsotg
, int val
)
2618 if (val
> 0 && (hsotg
->core_params
->dma_enable
<= 0 ||
2619 !hsotg
->hw_params
.dma_desc_enable
))
2627 "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
2629 val
= (hsotg
->core_params
->dma_enable
> 0 &&
2630 hsotg
->hw_params
.dma_desc_enable
);
2633 hsotg
->core_params
->dma_desc_fs_enable
= val
;
2634 dev_dbg(hsotg
->dev
, "Setting dma_desc_fs_enable to %d\n", val
);
2637 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg
*hsotg
,
2640 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2643 "Wrong value for host_support_fs_low_power\n");
2645 "host_support_fs_low_power must be 0 or 1\n");
2649 "Setting host_support_fs_low_power to %d\n", val
);
2652 hsotg
->core_params
->host_support_fs_ls_low_power
= val
;
2655 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg
*hsotg
, int val
)
2659 if (val
> 0 && !hsotg
->hw_params
.enable_dynamic_fifo
)
2667 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2669 val
= hsotg
->hw_params
.enable_dynamic_fifo
;
2670 dev_dbg(hsotg
->dev
, "Setting enable_dynamic_fifo to %d\n", val
);
2673 hsotg
->core_params
->enable_dynamic_fifo
= val
;
2676 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg
*hsotg
, int val
)
2680 if (val
< 16 || val
> hsotg
->hw_params
.host_rx_fifo_size
)
2686 "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2688 val
= hsotg
->hw_params
.host_rx_fifo_size
;
2689 dev_dbg(hsotg
->dev
, "Setting host_rx_fifo_size to %d\n", val
);
2692 hsotg
->core_params
->host_rx_fifo_size
= val
;
2695 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg
*hsotg
, int val
)
2699 if (val
< 16 || val
> hsotg
->hw_params
.host_nperio_tx_fifo_size
)
2705 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2707 val
= hsotg
->hw_params
.host_nperio_tx_fifo_size
;
2708 dev_dbg(hsotg
->dev
, "Setting host_nperio_tx_fifo_size to %d\n",
2712 hsotg
->core_params
->host_nperio_tx_fifo_size
= val
;
2715 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg
*hsotg
, int val
)
2719 if (val
< 16 || val
> hsotg
->hw_params
.host_perio_tx_fifo_size
)
2725 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2727 val
= hsotg
->hw_params
.host_perio_tx_fifo_size
;
2728 dev_dbg(hsotg
->dev
, "Setting host_perio_tx_fifo_size to %d\n",
2732 hsotg
->core_params
->host_perio_tx_fifo_size
= val
;
2735 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg
*hsotg
, int val
)
2739 if (val
< 2047 || val
> hsotg
->hw_params
.max_transfer_size
)
2745 "%d invalid for max_transfer_size. Check HW configuration.\n",
2747 val
= hsotg
->hw_params
.max_transfer_size
;
2748 dev_dbg(hsotg
->dev
, "Setting max_transfer_size to %d\n", val
);
2751 hsotg
->core_params
->max_transfer_size
= val
;
2754 void dwc2_set_param_max_packet_count(struct dwc2_hsotg
*hsotg
, int val
)
2758 if (val
< 15 || val
> hsotg
->hw_params
.max_packet_count
)
2764 "%d invalid for max_packet_count. Check HW configuration.\n",
2766 val
= hsotg
->hw_params
.max_packet_count
;
2767 dev_dbg(hsotg
->dev
, "Setting max_packet_count to %d\n", val
);
2770 hsotg
->core_params
->max_packet_count
= val
;
2773 void dwc2_set_param_host_channels(struct dwc2_hsotg
*hsotg
, int val
)
2777 if (val
< 1 || val
> hsotg
->hw_params
.host_channels
)
2783 "%d invalid for host_channels. Check HW configuration.\n",
2785 val
= hsotg
->hw_params
.host_channels
;
2786 dev_dbg(hsotg
->dev
, "Setting host_channels to %d\n", val
);
2789 hsotg
->core_params
->host_channels
= val
;
2792 void dwc2_set_param_phy_type(struct dwc2_hsotg
*hsotg
, int val
)
2795 u32 hs_phy_type
, fs_phy_type
;
2797 if (DWC2_OUT_OF_BOUNDS(val
, DWC2_PHY_TYPE_PARAM_FS
,
2798 DWC2_PHY_TYPE_PARAM_ULPI
)) {
2800 dev_err(hsotg
->dev
, "Wrong value for phy_type\n");
2801 dev_err(hsotg
->dev
, "phy_type must be 0, 1 or 2\n");
2807 hs_phy_type
= hsotg
->hw_params
.hs_phy_type
;
2808 fs_phy_type
= hsotg
->hw_params
.fs_phy_type
;
2809 if (val
== DWC2_PHY_TYPE_PARAM_UTMI
&&
2810 (hs_phy_type
== GHWCFG2_HS_PHY_TYPE_UTMI
||
2811 hs_phy_type
== GHWCFG2_HS_PHY_TYPE_UTMI_ULPI
))
2813 else if (val
== DWC2_PHY_TYPE_PARAM_ULPI
&&
2814 (hs_phy_type
== GHWCFG2_HS_PHY_TYPE_ULPI
||
2815 hs_phy_type
== GHWCFG2_HS_PHY_TYPE_UTMI_ULPI
))
2817 else if (val
== DWC2_PHY_TYPE_PARAM_FS
&&
2818 fs_phy_type
== GHWCFG2_FS_PHY_TYPE_DEDICATED
)
2824 "%d invalid for phy_type. Check HW configuration.\n",
2826 val
= DWC2_PHY_TYPE_PARAM_FS
;
2827 if (hs_phy_type
!= GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED
) {
2828 if (hs_phy_type
== GHWCFG2_HS_PHY_TYPE_UTMI
||
2829 hs_phy_type
== GHWCFG2_HS_PHY_TYPE_UTMI_ULPI
)
2830 val
= DWC2_PHY_TYPE_PARAM_UTMI
;
2832 val
= DWC2_PHY_TYPE_PARAM_ULPI
;
2834 dev_dbg(hsotg
->dev
, "Setting phy_type to %d\n", val
);
2837 hsotg
->core_params
->phy_type
= val
;
2840 static int dwc2_get_param_phy_type(struct dwc2_hsotg
*hsotg
)
2842 return hsotg
->core_params
->phy_type
;
2845 void dwc2_set_param_speed(struct dwc2_hsotg
*hsotg
, int val
)
2849 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2851 dev_err(hsotg
->dev
, "Wrong value for speed parameter\n");
2852 dev_err(hsotg
->dev
, "max_speed parameter must be 0 or 1\n");
2857 if (val
== DWC2_SPEED_PARAM_HIGH
&&
2858 dwc2_get_param_phy_type(hsotg
) == DWC2_PHY_TYPE_PARAM_FS
)
2864 "%d invalid for speed parameter. Check HW configuration.\n",
2866 val
= dwc2_get_param_phy_type(hsotg
) == DWC2_PHY_TYPE_PARAM_FS
?
2867 DWC2_SPEED_PARAM_FULL
: DWC2_SPEED_PARAM_HIGH
;
2868 dev_dbg(hsotg
->dev
, "Setting speed to %d\n", val
);
2871 hsotg
->core_params
->speed
= val
;
2874 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg
*hsotg
, int val
)
2878 if (DWC2_OUT_OF_BOUNDS(val
, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ
,
2879 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
)) {
2882 "Wrong value for host_ls_low_power_phy_clk parameter\n");
2884 "host_ls_low_power_phy_clk must be 0 or 1\n");
2889 if (val
== DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ
&&
2890 dwc2_get_param_phy_type(hsotg
) == DWC2_PHY_TYPE_PARAM_FS
)
2896 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2898 val
= dwc2_get_param_phy_type(hsotg
) == DWC2_PHY_TYPE_PARAM_FS
2899 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2900 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ
;
2901 dev_dbg(hsotg
->dev
, "Setting host_ls_low_power_phy_clk to %d\n",
2905 hsotg
->core_params
->host_ls_low_power_phy_clk
= val
;
2908 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg
*hsotg
, int val
)
2910 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2912 dev_err(hsotg
->dev
, "Wrong value for phy_ulpi_ddr\n");
2913 dev_err(hsotg
->dev
, "phy_upli_ddr must be 0 or 1\n");
2916 dev_dbg(hsotg
->dev
, "Setting phy_upli_ddr to %d\n", val
);
2919 hsotg
->core_params
->phy_ulpi_ddr
= val
;
2922 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg
*hsotg
, int val
)
2924 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2927 "Wrong value for phy_ulpi_ext_vbus\n");
2929 "phy_ulpi_ext_vbus must be 0 or 1\n");
2932 dev_dbg(hsotg
->dev
, "Setting phy_ulpi_ext_vbus to %d\n", val
);
2935 hsotg
->core_params
->phy_ulpi_ext_vbus
= val
;
2938 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg
*hsotg
, int val
)
2942 switch (hsotg
->hw_params
.utmi_phy_data_width
) {
2943 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8
:
2946 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16
:
2947 valid
= (val
== 16);
2949 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16
:
2950 valid
= (val
== 8 || val
== 16);
2957 "%d invalid for phy_utmi_width. Check HW configuration.\n",
2960 val
= (hsotg
->hw_params
.utmi_phy_data_width
==
2961 GHWCFG4_UTMI_PHY_DATA_WIDTH_8
) ? 8 : 16;
2962 dev_dbg(hsotg
->dev
, "Setting phy_utmi_width to %d\n", val
);
2965 hsotg
->core_params
->phy_utmi_width
= val
;
2968 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg
*hsotg
, int val
)
2970 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2972 dev_err(hsotg
->dev
, "Wrong value for ulpi_fs_ls\n");
2973 dev_err(hsotg
->dev
, "ulpi_fs_ls must be 0 or 1\n");
2976 dev_dbg(hsotg
->dev
, "Setting ulpi_fs_ls to %d\n", val
);
2979 hsotg
->core_params
->ulpi_fs_ls
= val
;
2982 void dwc2_set_param_ts_dline(struct dwc2_hsotg
*hsotg
, int val
)
2984 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2986 dev_err(hsotg
->dev
, "Wrong value for ts_dline\n");
2987 dev_err(hsotg
->dev
, "ts_dline must be 0 or 1\n");
2990 dev_dbg(hsotg
->dev
, "Setting ts_dline to %d\n", val
);
2993 hsotg
->core_params
->ts_dline
= val
;
2996 void dwc2_set_param_i2c_enable(struct dwc2_hsotg
*hsotg
, int val
)
3000 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
3002 dev_err(hsotg
->dev
, "Wrong value for i2c_enable\n");
3003 dev_err(hsotg
->dev
, "i2c_enable must be 0 or 1\n");
3009 if (val
== 1 && !(hsotg
->hw_params
.i2c_enable
))
3015 "%d invalid for i2c_enable. Check HW configuration.\n",
3017 val
= hsotg
->hw_params
.i2c_enable
;
3018 dev_dbg(hsotg
->dev
, "Setting i2c_enable to %d\n", val
);
3021 hsotg
->core_params
->i2c_enable
= val
;
3024 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg
*hsotg
, int val
)
3028 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
3031 "Wrong value for en_multiple_tx_fifo,\n");
3033 "en_multiple_tx_fifo must be 0 or 1\n");
3038 if (val
== 1 && !hsotg
->hw_params
.en_multiple_tx_fifo
)
3044 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
3046 val
= hsotg
->hw_params
.en_multiple_tx_fifo
;
3047 dev_dbg(hsotg
->dev
, "Setting en_multiple_tx_fifo to %d\n", val
);
3050 hsotg
->core_params
->en_multiple_tx_fifo
= val
;
3053 void dwc2_set_param_reload_ctl(struct dwc2_hsotg
*hsotg
, int val
)
3057 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
3060 "'%d' invalid for parameter reload_ctl\n", val
);
3061 dev_err(hsotg
->dev
, "reload_ctl must be 0 or 1\n");
3066 if (val
== 1 && hsotg
->hw_params
.snpsid
< DWC2_CORE_REV_2_92a
)
3072 "%d invalid for parameter reload_ctl. Check HW configuration.\n",
3074 val
= hsotg
->hw_params
.snpsid
>= DWC2_CORE_REV_2_92a
;
3075 dev_dbg(hsotg
->dev
, "Setting reload_ctl to %d\n", val
);
3078 hsotg
->core_params
->reload_ctl
= val
;
3081 void dwc2_set_param_ahbcfg(struct dwc2_hsotg
*hsotg
, int val
)
3084 hsotg
->core_params
->ahbcfg
= val
;
3086 hsotg
->core_params
->ahbcfg
= GAHBCFG_HBSTLEN_INCR4
<<
3087 GAHBCFG_HBSTLEN_SHIFT
;
3090 void dwc2_set_param_otg_ver(struct dwc2_hsotg
*hsotg
, int val
)
3092 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
3095 "'%d' invalid for parameter otg_ver\n", val
);
3097 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
3100 dev_dbg(hsotg
->dev
, "Setting otg_ver to %d\n", val
);
3103 hsotg
->core_params
->otg_ver
= val
;
3106 static void dwc2_set_param_uframe_sched(struct dwc2_hsotg
*hsotg
, int val
)
3108 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
3111 "'%d' invalid for parameter uframe_sched\n",
3113 dev_err(hsotg
->dev
, "uframe_sched must be 0 or 1\n");
3116 dev_dbg(hsotg
->dev
, "Setting uframe_sched to %d\n", val
);
3119 hsotg
->core_params
->uframe_sched
= val
;
3122 static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg
*hsotg
,
3125 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
3128 "'%d' invalid for parameter external_id_pin_ctl\n",
3130 dev_err(hsotg
->dev
, "external_id_pin_ctl must be 0 or 1\n");
3133 dev_dbg(hsotg
->dev
, "Setting external_id_pin_ctl to %d\n", val
);
3136 hsotg
->core_params
->external_id_pin_ctl
= val
;
3139 static void dwc2_set_param_hibernation(struct dwc2_hsotg
*hsotg
,
3142 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
3145 "'%d' invalid for parameter hibernation\n",
3147 dev_err(hsotg
->dev
, "hibernation must be 0 or 1\n");
3150 dev_dbg(hsotg
->dev
, "Setting hibernation to %d\n", val
);
3153 hsotg
->core_params
->hibernation
= val
;
3157 * This function is called during module intialization to pass module parameters
3158 * for the DWC_otg core.
3160 void dwc2_set_parameters(struct dwc2_hsotg
*hsotg
,
3161 const struct dwc2_core_params
*params
)
3163 dev_dbg(hsotg
->dev
, "%s()\n", __func__
);
3165 dwc2_set_param_otg_cap(hsotg
, params
->otg_cap
);
3166 dwc2_set_param_dma_enable(hsotg
, params
->dma_enable
);
3167 dwc2_set_param_dma_desc_enable(hsotg
, params
->dma_desc_enable
);
3168 dwc2_set_param_dma_desc_fs_enable(hsotg
, params
->dma_desc_fs_enable
);
3169 dwc2_set_param_host_support_fs_ls_low_power(hsotg
,
3170 params
->host_support_fs_ls_low_power
);
3171 dwc2_set_param_enable_dynamic_fifo(hsotg
,
3172 params
->enable_dynamic_fifo
);
3173 dwc2_set_param_host_rx_fifo_size(hsotg
,
3174 params
->host_rx_fifo_size
);
3175 dwc2_set_param_host_nperio_tx_fifo_size(hsotg
,
3176 params
->host_nperio_tx_fifo_size
);
3177 dwc2_set_param_host_perio_tx_fifo_size(hsotg
,
3178 params
->host_perio_tx_fifo_size
);
3179 dwc2_set_param_max_transfer_size(hsotg
,
3180 params
->max_transfer_size
);
3181 dwc2_set_param_max_packet_count(hsotg
,
3182 params
->max_packet_count
);
3183 dwc2_set_param_host_channels(hsotg
, params
->host_channels
);
3184 dwc2_set_param_phy_type(hsotg
, params
->phy_type
);
3185 dwc2_set_param_speed(hsotg
, params
->speed
);
3186 dwc2_set_param_host_ls_low_power_phy_clk(hsotg
,
3187 params
->host_ls_low_power_phy_clk
);
3188 dwc2_set_param_phy_ulpi_ddr(hsotg
, params
->phy_ulpi_ddr
);
3189 dwc2_set_param_phy_ulpi_ext_vbus(hsotg
,
3190 params
->phy_ulpi_ext_vbus
);
3191 dwc2_set_param_phy_utmi_width(hsotg
, params
->phy_utmi_width
);
3192 dwc2_set_param_ulpi_fs_ls(hsotg
, params
->ulpi_fs_ls
);
3193 dwc2_set_param_ts_dline(hsotg
, params
->ts_dline
);
3194 dwc2_set_param_i2c_enable(hsotg
, params
->i2c_enable
);
3195 dwc2_set_param_en_multiple_tx_fifo(hsotg
,
3196 params
->en_multiple_tx_fifo
);
3197 dwc2_set_param_reload_ctl(hsotg
, params
->reload_ctl
);
3198 dwc2_set_param_ahbcfg(hsotg
, params
->ahbcfg
);
3199 dwc2_set_param_otg_ver(hsotg
, params
->otg_ver
);
3200 dwc2_set_param_uframe_sched(hsotg
, params
->uframe_sched
);
3201 dwc2_set_param_external_id_pin_ctl(hsotg
, params
->external_id_pin_ctl
);
3202 dwc2_set_param_hibernation(hsotg
, params
->hibernation
);
3206 * Forces either host or device mode if the controller is not
3207 * currently in that mode.
3209 * Returns true if the mode was forced.
3211 static bool dwc2_force_mode_if_needed(struct dwc2_hsotg
*hsotg
, bool host
)
3213 if (host
&& dwc2_is_host_mode(hsotg
))
3215 else if (!host
&& dwc2_is_device_mode(hsotg
))
3218 return dwc2_force_mode(hsotg
, host
);
3222 * Gets host hardware parameters. Forces host mode if not currently in
3223 * host mode. Should be called immediately after a core soft reset in
3224 * order to get the reset values.
3226 static void dwc2_get_host_hwparams(struct dwc2_hsotg
*hsotg
)
3228 struct dwc2_hw_params
*hw
= &hsotg
->hw_params
;
3233 if (hsotg
->dr_mode
== USB_DR_MODE_PERIPHERAL
)
3236 forced
= dwc2_force_mode_if_needed(hsotg
, true);
3238 gnptxfsiz
= dwc2_readl(hsotg
->regs
+ GNPTXFSIZ
);
3239 hptxfsiz
= dwc2_readl(hsotg
->regs
+ HPTXFSIZ
);
3240 dev_dbg(hsotg
->dev
, "gnptxfsiz=%08x\n", gnptxfsiz
);
3241 dev_dbg(hsotg
->dev
, "hptxfsiz=%08x\n", hptxfsiz
);
3244 dwc2_clear_force_mode(hsotg
);
3246 hw
->host_nperio_tx_fifo_size
= (gnptxfsiz
& FIFOSIZE_DEPTH_MASK
) >>
3247 FIFOSIZE_DEPTH_SHIFT
;
3248 hw
->host_perio_tx_fifo_size
= (hptxfsiz
& FIFOSIZE_DEPTH_MASK
) >>
3249 FIFOSIZE_DEPTH_SHIFT
;
3253 * Gets device hardware parameters. Forces device mode if not
3254 * currently in device mode. Should be called immediately after a core
3255 * soft reset in order to get the reset values.
3257 static void dwc2_get_dev_hwparams(struct dwc2_hsotg
*hsotg
)
3259 struct dwc2_hw_params
*hw
= &hsotg
->hw_params
;
3263 if (hsotg
->dr_mode
== USB_DR_MODE_HOST
)
3266 forced
= dwc2_force_mode_if_needed(hsotg
, false);
3268 gnptxfsiz
= dwc2_readl(hsotg
->regs
+ GNPTXFSIZ
);
3269 dev_dbg(hsotg
->dev
, "gnptxfsiz=%08x\n", gnptxfsiz
);
3272 dwc2_clear_force_mode(hsotg
);
3274 hw
->dev_nperio_tx_fifo_size
= (gnptxfsiz
& FIFOSIZE_DEPTH_MASK
) >>
3275 FIFOSIZE_DEPTH_SHIFT
;
3279 * During device initialization, read various hardware configuration
3280 * registers and interpret the contents.
3282 int dwc2_get_hwparams(struct dwc2_hsotg
*hsotg
)
3284 struct dwc2_hw_params
*hw
= &hsotg
->hw_params
;
3286 u32 hwcfg1
, hwcfg2
, hwcfg3
, hwcfg4
;
3290 * Attempt to ensure this device is really a DWC_otg Controller.
3291 * Read and verify the GSNPSID register contents. The value should be
3292 * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3",
3293 * as in "OTG version 2.xx" or "OTG version 3.xx".
3295 hw
->snpsid
= dwc2_readl(hsotg
->regs
+ GSNPSID
);
3296 if ((hw
->snpsid
& 0xfffff000) != 0x4f542000 &&
3297 (hw
->snpsid
& 0xfffff000) != 0x4f543000) {
3298 dev_err(hsotg
->dev
, "Bad value for GSNPSID: 0x%08x\n",
3303 dev_dbg(hsotg
->dev
, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
3304 hw
->snpsid
>> 12 & 0xf, hw
->snpsid
>> 8 & 0xf,
3305 hw
->snpsid
>> 4 & 0xf, hw
->snpsid
& 0xf, hw
->snpsid
);
3307 hwcfg1
= dwc2_readl(hsotg
->regs
+ GHWCFG1
);
3308 hwcfg2
= dwc2_readl(hsotg
->regs
+ GHWCFG2
);
3309 hwcfg3
= dwc2_readl(hsotg
->regs
+ GHWCFG3
);
3310 hwcfg4
= dwc2_readl(hsotg
->regs
+ GHWCFG4
);
3311 grxfsiz
= dwc2_readl(hsotg
->regs
+ GRXFSIZ
);
3313 dev_dbg(hsotg
->dev
, "hwcfg1=%08x\n", hwcfg1
);
3314 dev_dbg(hsotg
->dev
, "hwcfg2=%08x\n", hwcfg2
);
3315 dev_dbg(hsotg
->dev
, "hwcfg3=%08x\n", hwcfg3
);
3316 dev_dbg(hsotg
->dev
, "hwcfg4=%08x\n", hwcfg4
);
3317 dev_dbg(hsotg
->dev
, "grxfsiz=%08x\n", grxfsiz
);
3320 * Host specific hardware parameters. Reading these parameters
3321 * requires the controller to be in host mode. The mode will
3322 * be forced, if necessary, to read these values.
3324 dwc2_get_host_hwparams(hsotg
);
3325 dwc2_get_dev_hwparams(hsotg
);
3328 hw
->dev_ep_dirs
= hwcfg1
;
3331 hw
->op_mode
= (hwcfg2
& GHWCFG2_OP_MODE_MASK
) >>
3332 GHWCFG2_OP_MODE_SHIFT
;
3333 hw
->arch
= (hwcfg2
& GHWCFG2_ARCHITECTURE_MASK
) >>
3334 GHWCFG2_ARCHITECTURE_SHIFT
;
3335 hw
->enable_dynamic_fifo
= !!(hwcfg2
& GHWCFG2_DYNAMIC_FIFO
);
3336 hw
->host_channels
= 1 + ((hwcfg2
& GHWCFG2_NUM_HOST_CHAN_MASK
) >>
3337 GHWCFG2_NUM_HOST_CHAN_SHIFT
);
3338 hw
->hs_phy_type
= (hwcfg2
& GHWCFG2_HS_PHY_TYPE_MASK
) >>
3339 GHWCFG2_HS_PHY_TYPE_SHIFT
;
3340 hw
->fs_phy_type
= (hwcfg2
& GHWCFG2_FS_PHY_TYPE_MASK
) >>
3341 GHWCFG2_FS_PHY_TYPE_SHIFT
;
3342 hw
->num_dev_ep
= (hwcfg2
& GHWCFG2_NUM_DEV_EP_MASK
) >>
3343 GHWCFG2_NUM_DEV_EP_SHIFT
;
3344 hw
->nperio_tx_q_depth
=
3345 (hwcfg2
& GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK
) >>
3346 GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT
<< 1;
3347 hw
->host_perio_tx_q_depth
=
3348 (hwcfg2
& GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK
) >>
3349 GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT
<< 1;
3350 hw
->dev_token_q_depth
=
3351 (hwcfg2
& GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK
) >>
3352 GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT
;
3355 width
= (hwcfg3
& GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK
) >>
3356 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT
;
3357 hw
->max_transfer_size
= (1 << (width
+ 11)) - 1;
3359 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
3360 * coherent buffers with this size, and if it's too large we can
3361 * exhaust the coherent DMA pool.
3363 if (hw
->max_transfer_size
> 65535)
3364 hw
->max_transfer_size
= 65535;
3365 width
= (hwcfg3
& GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK
) >>
3366 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT
;
3367 hw
->max_packet_count
= (1 << (width
+ 4)) - 1;
3368 hw
->i2c_enable
= !!(hwcfg3
& GHWCFG3_I2C
);
3369 hw
->total_fifo_size
= (hwcfg3
& GHWCFG3_DFIFO_DEPTH_MASK
) >>
3370 GHWCFG3_DFIFO_DEPTH_SHIFT
;
3373 hw
->en_multiple_tx_fifo
= !!(hwcfg4
& GHWCFG4_DED_FIFO_EN
);
3374 hw
->num_dev_perio_in_ep
= (hwcfg4
& GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK
) >>
3375 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT
;
3376 hw
->dma_desc_enable
= !!(hwcfg4
& GHWCFG4_DESC_DMA
);
3377 hw
->power_optimized
= !!(hwcfg4
& GHWCFG4_POWER_OPTIMIZ
);
3378 hw
->utmi_phy_data_width
= (hwcfg4
& GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK
) >>
3379 GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT
;
3382 hw
->host_rx_fifo_size
= (grxfsiz
& GRXFSIZ_DEPTH_MASK
) >>
3383 GRXFSIZ_DEPTH_SHIFT
;
3385 dev_dbg(hsotg
->dev
, "Detected values from hardware:\n");
3386 dev_dbg(hsotg
->dev
, " op_mode=%d\n",
3388 dev_dbg(hsotg
->dev
, " arch=%d\n",
3390 dev_dbg(hsotg
->dev
, " dma_desc_enable=%d\n",
3391 hw
->dma_desc_enable
);
3392 dev_dbg(hsotg
->dev
, " power_optimized=%d\n",
3393 hw
->power_optimized
);
3394 dev_dbg(hsotg
->dev
, " i2c_enable=%d\n",
3396 dev_dbg(hsotg
->dev
, " hs_phy_type=%d\n",
3398 dev_dbg(hsotg
->dev
, " fs_phy_type=%d\n",
3400 dev_dbg(hsotg
->dev
, " utmi_phy_data_width=%d\n",
3401 hw
->utmi_phy_data_width
);
3402 dev_dbg(hsotg
->dev
, " num_dev_ep=%d\n",
3404 dev_dbg(hsotg
->dev
, " num_dev_perio_in_ep=%d\n",
3405 hw
->num_dev_perio_in_ep
);
3406 dev_dbg(hsotg
->dev
, " host_channels=%d\n",
3408 dev_dbg(hsotg
->dev
, " max_transfer_size=%d\n",
3409 hw
->max_transfer_size
);
3410 dev_dbg(hsotg
->dev
, " max_packet_count=%d\n",
3411 hw
->max_packet_count
);
3412 dev_dbg(hsotg
->dev
, " nperio_tx_q_depth=0x%0x\n",
3413 hw
->nperio_tx_q_depth
);
3414 dev_dbg(hsotg
->dev
, " host_perio_tx_q_depth=0x%0x\n",
3415 hw
->host_perio_tx_q_depth
);
3416 dev_dbg(hsotg
->dev
, " dev_token_q_depth=0x%0x\n",
3417 hw
->dev_token_q_depth
);
3418 dev_dbg(hsotg
->dev
, " enable_dynamic_fifo=%d\n",
3419 hw
->enable_dynamic_fifo
);
3420 dev_dbg(hsotg
->dev
, " en_multiple_tx_fifo=%d\n",
3421 hw
->en_multiple_tx_fifo
);
3422 dev_dbg(hsotg
->dev
, " total_fifo_size=%d\n",
3423 hw
->total_fifo_size
);
3424 dev_dbg(hsotg
->dev
, " host_rx_fifo_size=%d\n",
3425 hw
->host_rx_fifo_size
);
3426 dev_dbg(hsotg
->dev
, " host_nperio_tx_fifo_size=%d\n",
3427 hw
->host_nperio_tx_fifo_size
);
3428 dev_dbg(hsotg
->dev
, " host_perio_tx_fifo_size=%d\n",
3429 hw
->host_perio_tx_fifo_size
);
3430 dev_dbg(hsotg
->dev
, "\n");
3436 * Sets all parameters to the given value.
3438 * Assumes that the dwc2_core_params struct contains only integers.
3440 void dwc2_set_all_params(struct dwc2_core_params
*params
, int value
)
3442 int *p
= (int *)params
;
3443 size_t size
= sizeof(*params
) / sizeof(*p
);
3446 for (i
= 0; i
< size
; i
++)
3451 u16
dwc2_get_otg_version(struct dwc2_hsotg
*hsotg
)
3453 return hsotg
->core_params
->otg_ver
== 1 ? 0x0200 : 0x0103;
3456 bool dwc2_is_controller_alive(struct dwc2_hsotg
*hsotg
)
3458 if (dwc2_readl(hsotg
->regs
+ GSNPSID
) == 0xffffffff)
3465 * dwc2_enable_global_interrupts() - Enables the controller's Global
3466 * Interrupt in the AHB Config register
3468 * @hsotg: Programming view of DWC_otg controller
3470 void dwc2_enable_global_interrupts(struct dwc2_hsotg
*hsotg
)
3472 u32 ahbcfg
= dwc2_readl(hsotg
->regs
+ GAHBCFG
);
3474 ahbcfg
|= GAHBCFG_GLBL_INTR_EN
;
3475 dwc2_writel(ahbcfg
, hsotg
->regs
+ GAHBCFG
);
3479 * dwc2_disable_global_interrupts() - Disables the controller's Global
3480 * Interrupt in the AHB Config register
3482 * @hsotg: Programming view of DWC_otg controller
3484 void dwc2_disable_global_interrupts(struct dwc2_hsotg
*hsotg
)
3486 u32 ahbcfg
= dwc2_readl(hsotg
->regs
+ GAHBCFG
);
3488 ahbcfg
&= ~GAHBCFG_GLBL_INTR_EN
;
3489 dwc2_writel(ahbcfg
, hsotg
->regs
+ GAHBCFG
);
3492 /* Returns the controller's GHWCFG2.OTG_MODE. */
3493 unsigned dwc2_op_mode(struct dwc2_hsotg
*hsotg
)
3495 u32 ghwcfg2
= dwc2_readl(hsotg
->regs
+ GHWCFG2
);
3497 return (ghwcfg2
& GHWCFG2_OP_MODE_MASK
) >>
3498 GHWCFG2_OP_MODE_SHIFT
;
3501 /* Returns true if the controller is capable of DRD. */
3502 bool dwc2_hw_is_otg(struct dwc2_hsotg
*hsotg
)
3504 unsigned op_mode
= dwc2_op_mode(hsotg
);
3506 return (op_mode
== GHWCFG2_OP_MODE_HNP_SRP_CAPABLE
) ||
3507 (op_mode
== GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE
) ||
3508 (op_mode
== GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE
);
3511 /* Returns true if the controller is host-only. */
3512 bool dwc2_hw_is_host(struct dwc2_hsotg
*hsotg
)
3514 unsigned op_mode
= dwc2_op_mode(hsotg
);
3516 return (op_mode
== GHWCFG2_OP_MODE_SRP_CAPABLE_HOST
) ||
3517 (op_mode
== GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST
);
3520 /* Returns true if the controller is device-only. */
3521 bool dwc2_hw_is_device(struct dwc2_hsotg
*hsotg
)
3523 unsigned op_mode
= dwc2_op_mode(hsotg
);
3525 return (op_mode
== GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE
) ||
3526 (op_mode
== GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE
);
/* Kernel module metadata; dual-licensed per the header of this file. */
MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
MODULE_AUTHOR("Synopsys, Inc.");
MODULE_LICENSE("Dual BSD/GPL");