/*
 * core.c - DesignWare HS OTG Controller common routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The Core code provides basic services for accessing and managing the
 * DWC_otg hardware. These services are used by both the Host Controller
 * Driver and the Peripheral Controller Driver.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"
59 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
61 * dwc2_backup_host_registers() - Backup controller host registers.
62 * When suspending usb bus, registers needs to be backuped
63 * if controller power is disabled once suspended.
65 * @hsotg: Programming view of the DWC_otg controller
67 static int dwc2_backup_host_registers(struct dwc2_hsotg
*hsotg
)
69 struct dwc2_hregs_backup
*hr
;
72 dev_dbg(hsotg
->dev
, "%s\n", __func__
);
74 /* Backup Host regs */
75 hr
= &hsotg
->hr_backup
;
76 hr
->hcfg
= dwc2_readl(hsotg
->regs
+ HCFG
);
77 hr
->haintmsk
= dwc2_readl(hsotg
->regs
+ HAINTMSK
);
78 for (i
= 0; i
< hsotg
->core_params
->host_channels
; ++i
)
79 hr
->hcintmsk
[i
] = dwc2_readl(hsotg
->regs
+ HCINTMSK(i
));
81 hr
->hprt0
= dwc2_read_hprt0(hsotg
);
82 hr
->hfir
= dwc2_readl(hsotg
->regs
+ HFIR
);
89 * dwc2_restore_host_registers() - Restore controller host registers.
90 * When resuming usb bus, device registers needs to be restored
91 * if controller power were disabled.
93 * @hsotg: Programming view of the DWC_otg controller
95 static int dwc2_restore_host_registers(struct dwc2_hsotg
*hsotg
)
97 struct dwc2_hregs_backup
*hr
;
100 dev_dbg(hsotg
->dev
, "%s\n", __func__
);
102 /* Restore host regs */
103 hr
= &hsotg
->hr_backup
;
105 dev_err(hsotg
->dev
, "%s: no host registers to restore\n",
111 dwc2_writel(hr
->hcfg
, hsotg
->regs
+ HCFG
);
112 dwc2_writel(hr
->haintmsk
, hsotg
->regs
+ HAINTMSK
);
114 for (i
= 0; i
< hsotg
->core_params
->host_channels
; ++i
)
115 dwc2_writel(hr
->hcintmsk
[i
], hsotg
->regs
+ HCINTMSK(i
));
117 dwc2_writel(hr
->hprt0
, hsotg
->regs
+ HPRT0
);
118 dwc2_writel(hr
->hfir
, hsotg
->regs
+ HFIR
);
119 hsotg
->frame_number
= 0;
/* No-op stub when host mode support is not built in */
static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
/* No-op stub when host mode support is not built in */
static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
131 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
132 IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
134 * dwc2_backup_device_registers() - Backup controller device registers.
135 * When suspending usb bus, registers needs to be backuped
136 * if controller power is disabled once suspended.
138 * @hsotg: Programming view of the DWC_otg controller
140 static int dwc2_backup_device_registers(struct dwc2_hsotg
*hsotg
)
142 struct dwc2_dregs_backup
*dr
;
145 dev_dbg(hsotg
->dev
, "%s\n", __func__
);
147 /* Backup dev regs */
148 dr
= &hsotg
->dr_backup
;
150 dr
->dcfg
= dwc2_readl(hsotg
->regs
+ DCFG
);
151 dr
->dctl
= dwc2_readl(hsotg
->regs
+ DCTL
);
152 dr
->daintmsk
= dwc2_readl(hsotg
->regs
+ DAINTMSK
);
153 dr
->diepmsk
= dwc2_readl(hsotg
->regs
+ DIEPMSK
);
154 dr
->doepmsk
= dwc2_readl(hsotg
->regs
+ DOEPMSK
);
156 for (i
= 0; i
< hsotg
->num_of_eps
; i
++) {
158 dr
->diepctl
[i
] = dwc2_readl(hsotg
->regs
+ DIEPCTL(i
));
160 /* Ensure DATA PID is correctly configured */
161 if (dr
->diepctl
[i
] & DXEPCTL_DPID
)
162 dr
->diepctl
[i
] |= DXEPCTL_SETD1PID
;
164 dr
->diepctl
[i
] |= DXEPCTL_SETD0PID
;
166 dr
->dieptsiz
[i
] = dwc2_readl(hsotg
->regs
+ DIEPTSIZ(i
));
167 dr
->diepdma
[i
] = dwc2_readl(hsotg
->regs
+ DIEPDMA(i
));
170 dr
->doepctl
[i
] = dwc2_readl(hsotg
->regs
+ DOEPCTL(i
));
172 /* Ensure DATA PID is correctly configured */
173 if (dr
->doepctl
[i
] & DXEPCTL_DPID
)
174 dr
->doepctl
[i
] |= DXEPCTL_SETD1PID
;
176 dr
->doepctl
[i
] |= DXEPCTL_SETD0PID
;
178 dr
->doeptsiz
[i
] = dwc2_readl(hsotg
->regs
+ DOEPTSIZ(i
));
179 dr
->doepdma
[i
] = dwc2_readl(hsotg
->regs
+ DOEPDMA(i
));
186 * dwc2_restore_device_registers() - Restore controller device registers.
187 * When resuming usb bus, device registers needs to be restored
188 * if controller power were disabled.
190 * @hsotg: Programming view of the DWC_otg controller
192 static int dwc2_restore_device_registers(struct dwc2_hsotg
*hsotg
)
194 struct dwc2_dregs_backup
*dr
;
198 dev_dbg(hsotg
->dev
, "%s\n", __func__
);
200 /* Restore dev regs */
201 dr
= &hsotg
->dr_backup
;
203 dev_err(hsotg
->dev
, "%s: no device registers to restore\n",
209 dwc2_writel(dr
->dcfg
, hsotg
->regs
+ DCFG
);
210 dwc2_writel(dr
->dctl
, hsotg
->regs
+ DCTL
);
211 dwc2_writel(dr
->daintmsk
, hsotg
->regs
+ DAINTMSK
);
212 dwc2_writel(dr
->diepmsk
, hsotg
->regs
+ DIEPMSK
);
213 dwc2_writel(dr
->doepmsk
, hsotg
->regs
+ DOEPMSK
);
215 for (i
= 0; i
< hsotg
->num_of_eps
; i
++) {
217 dwc2_writel(dr
->diepctl
[i
], hsotg
->regs
+ DIEPCTL(i
));
218 dwc2_writel(dr
->dieptsiz
[i
], hsotg
->regs
+ DIEPTSIZ(i
));
219 dwc2_writel(dr
->diepdma
[i
], hsotg
->regs
+ DIEPDMA(i
));
221 /* Restore OUT EPs */
222 dwc2_writel(dr
->doepctl
[i
], hsotg
->regs
+ DOEPCTL(i
));
223 dwc2_writel(dr
->doeptsiz
[i
], hsotg
->regs
+ DOEPTSIZ(i
));
224 dwc2_writel(dr
->doepdma
[i
], hsotg
->regs
+ DOEPDMA(i
));
227 /* Set the Power-On Programming done bit */
228 dctl
= dwc2_readl(hsotg
->regs
+ DCTL
);
229 dctl
|= DCTL_PWRONPRGDONE
;
230 dwc2_writel(dctl
, hsotg
->regs
+ DCTL
);
/* No-op stub when peripheral mode support is not built in */
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
/* No-op stub when peripheral mode support is not built in */
static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
243 * dwc2_backup_global_registers() - Backup global controller registers.
244 * When suspending usb bus, registers needs to be backuped
245 * if controller power is disabled once suspended.
247 * @hsotg: Programming view of the DWC_otg controller
249 static int dwc2_backup_global_registers(struct dwc2_hsotg
*hsotg
)
251 struct dwc2_gregs_backup
*gr
;
254 /* Backup global regs */
255 gr
= &hsotg
->gr_backup
;
257 gr
->gotgctl
= dwc2_readl(hsotg
->regs
+ GOTGCTL
);
258 gr
->gintmsk
= dwc2_readl(hsotg
->regs
+ GINTMSK
);
259 gr
->gahbcfg
= dwc2_readl(hsotg
->regs
+ GAHBCFG
);
260 gr
->gusbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
261 gr
->grxfsiz
= dwc2_readl(hsotg
->regs
+ GRXFSIZ
);
262 gr
->gnptxfsiz
= dwc2_readl(hsotg
->regs
+ GNPTXFSIZ
);
263 gr
->hptxfsiz
= dwc2_readl(hsotg
->regs
+ HPTXFSIZ
);
264 gr
->gdfifocfg
= dwc2_readl(hsotg
->regs
+ GDFIFOCFG
);
265 for (i
= 0; i
< MAX_EPS_CHANNELS
; i
++)
266 gr
->dtxfsiz
[i
] = dwc2_readl(hsotg
->regs
+ DPTXFSIZN(i
));
273 * dwc2_restore_global_registers() - Restore controller global registers.
274 * When resuming usb bus, device registers needs to be restored
275 * if controller power were disabled.
277 * @hsotg: Programming view of the DWC_otg controller
279 static int dwc2_restore_global_registers(struct dwc2_hsotg
*hsotg
)
281 struct dwc2_gregs_backup
*gr
;
284 dev_dbg(hsotg
->dev
, "%s\n", __func__
);
286 /* Restore global regs */
287 gr
= &hsotg
->gr_backup
;
289 dev_err(hsotg
->dev
, "%s: no global registers to restore\n",
295 dwc2_writel(0xffffffff, hsotg
->regs
+ GINTSTS
);
296 dwc2_writel(gr
->gotgctl
, hsotg
->regs
+ GOTGCTL
);
297 dwc2_writel(gr
->gintmsk
, hsotg
->regs
+ GINTMSK
);
298 dwc2_writel(gr
->gusbcfg
, hsotg
->regs
+ GUSBCFG
);
299 dwc2_writel(gr
->gahbcfg
, hsotg
->regs
+ GAHBCFG
);
300 dwc2_writel(gr
->grxfsiz
, hsotg
->regs
+ GRXFSIZ
);
301 dwc2_writel(gr
->gnptxfsiz
, hsotg
->regs
+ GNPTXFSIZ
);
302 dwc2_writel(gr
->hptxfsiz
, hsotg
->regs
+ HPTXFSIZ
);
303 dwc2_writel(gr
->gdfifocfg
, hsotg
->regs
+ GDFIFOCFG
);
304 for (i
= 0; i
< MAX_EPS_CHANNELS
; i
++)
305 dwc2_writel(gr
->dtxfsiz
[i
], hsotg
->regs
+ DPTXFSIZN(i
));
311 * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
313 * @hsotg: Programming view of the DWC_otg controller
314 * @restore: Controller registers need to be restored
316 int dwc2_exit_hibernation(struct dwc2_hsotg
*hsotg
, bool restore
)
321 if (!hsotg
->core_params
->hibernation
)
324 pcgcctl
= dwc2_readl(hsotg
->regs
+ PCGCTL
);
325 pcgcctl
&= ~PCGCTL_STOPPCLK
;
326 dwc2_writel(pcgcctl
, hsotg
->regs
+ PCGCTL
);
328 pcgcctl
= dwc2_readl(hsotg
->regs
+ PCGCTL
);
329 pcgcctl
&= ~PCGCTL_PWRCLMP
;
330 dwc2_writel(pcgcctl
, hsotg
->regs
+ PCGCTL
);
332 pcgcctl
= dwc2_readl(hsotg
->regs
+ PCGCTL
);
333 pcgcctl
&= ~PCGCTL_RSTPDWNMODULE
;
334 dwc2_writel(pcgcctl
, hsotg
->regs
+ PCGCTL
);
338 ret
= dwc2_restore_global_registers(hsotg
);
340 dev_err(hsotg
->dev
, "%s: failed to restore registers\n",
344 if (dwc2_is_host_mode(hsotg
)) {
345 ret
= dwc2_restore_host_registers(hsotg
);
347 dev_err(hsotg
->dev
, "%s: failed to restore host registers\n",
352 ret
= dwc2_restore_device_registers(hsotg
);
354 dev_err(hsotg
->dev
, "%s: failed to restore device registers\n",
365 * dwc2_enter_hibernation() - Put controller in Partial Power Down.
367 * @hsotg: Programming view of the DWC_otg controller
369 int dwc2_enter_hibernation(struct dwc2_hsotg
*hsotg
)
374 if (!hsotg
->core_params
->hibernation
)
377 /* Backup all registers */
378 ret
= dwc2_backup_global_registers(hsotg
);
380 dev_err(hsotg
->dev
, "%s: failed to backup global registers\n",
385 if (dwc2_is_host_mode(hsotg
)) {
386 ret
= dwc2_backup_host_registers(hsotg
);
388 dev_err(hsotg
->dev
, "%s: failed to backup host registers\n",
393 ret
= dwc2_backup_device_registers(hsotg
);
395 dev_err(hsotg
->dev
, "%s: failed to backup device registers\n",
402 * Clear any pending interrupts since dwc2 will not be able to
403 * clear them after entering hibernation.
405 dwc2_writel(0xffffffff, hsotg
->regs
+ GINTSTS
);
407 /* Put the controller in low power state */
408 pcgcctl
= dwc2_readl(hsotg
->regs
+ PCGCTL
);
410 pcgcctl
|= PCGCTL_PWRCLMP
;
411 dwc2_writel(pcgcctl
, hsotg
->regs
+ PCGCTL
);
414 pcgcctl
|= PCGCTL_RSTPDWNMODULE
;
415 dwc2_writel(pcgcctl
, hsotg
->regs
+ PCGCTL
);
418 pcgcctl
|= PCGCTL_STOPPCLK
;
419 dwc2_writel(pcgcctl
, hsotg
->regs
+ PCGCTL
);
425 * dwc2_enable_common_interrupts() - Initializes the commmon interrupts,
426 * used in both device and host modes
428 * @hsotg: Programming view of the DWC_otg controller
430 static void dwc2_enable_common_interrupts(struct dwc2_hsotg
*hsotg
)
434 /* Clear any pending OTG Interrupts */
435 dwc2_writel(0xffffffff, hsotg
->regs
+ GOTGINT
);
437 /* Clear any pending interrupts */
438 dwc2_writel(0xffffffff, hsotg
->regs
+ GINTSTS
);
440 /* Enable the interrupts in the GINTMSK */
441 intmsk
= GINTSTS_MODEMIS
| GINTSTS_OTGINT
;
443 if (hsotg
->core_params
->dma_enable
<= 0)
444 intmsk
|= GINTSTS_RXFLVL
;
445 if (hsotg
->core_params
->external_id_pin_ctl
<= 0)
446 intmsk
|= GINTSTS_CONIDSTSCHNG
;
448 intmsk
|= GINTSTS_WKUPINT
| GINTSTS_USBSUSP
|
451 dwc2_writel(intmsk
, hsotg
->regs
+ GINTMSK
);
455 * Initializes the FSLSPClkSel field of the HCFG register depending on the
458 static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg
*hsotg
)
462 if ((hsotg
->hw_params
.hs_phy_type
== GHWCFG2_HS_PHY_TYPE_ULPI
&&
463 hsotg
->hw_params
.fs_phy_type
== GHWCFG2_FS_PHY_TYPE_DEDICATED
&&
464 hsotg
->core_params
->ulpi_fs_ls
> 0) ||
465 hsotg
->core_params
->phy_type
== DWC2_PHY_TYPE_PARAM_FS
) {
467 val
= HCFG_FSLSPCLKSEL_48_MHZ
;
469 /* High speed PHY running at full speed or high speed */
470 val
= HCFG_FSLSPCLKSEL_30_60_MHZ
;
473 dev_dbg(hsotg
->dev
, "Initializing HCFG.FSLSPClkSel to %08x\n", val
);
474 hcfg
= dwc2_readl(hsotg
->regs
+ HCFG
);
475 hcfg
&= ~HCFG_FSLSPCLKSEL_MASK
;
476 hcfg
|= val
<< HCFG_FSLSPCLKSEL_SHIFT
;
477 dwc2_writel(hcfg
, hsotg
->regs
+ HCFG
);
481 * Do core a soft reset of the core. Be careful with this because it
482 * resets all the internal state machines of the core.
484 int dwc2_core_reset(struct dwc2_hsotg
*hsotg
)
490 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
492 /* Wait for AHB master IDLE state */
495 greset
= dwc2_readl(hsotg
->regs
+ GRSTCTL
);
498 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
502 } while (!(greset
& GRSTCTL_AHBIDLE
));
504 /* Core Soft Reset */
506 greset
|= GRSTCTL_CSFTRST
;
507 dwc2_writel(greset
, hsotg
->regs
+ GRSTCTL
);
510 greset
= dwc2_readl(hsotg
->regs
+ GRSTCTL
);
513 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
517 } while (greset
& GRSTCTL_CSFTRST
);
519 if (hsotg
->dr_mode
== USB_DR_MODE_HOST
) {
520 gusbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
521 gusbcfg
&= ~GUSBCFG_FORCEDEVMODE
;
522 gusbcfg
|= GUSBCFG_FORCEHOSTMODE
;
523 dwc2_writel(gusbcfg
, hsotg
->regs
+ GUSBCFG
);
524 } else if (hsotg
->dr_mode
== USB_DR_MODE_PERIPHERAL
) {
525 gusbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
526 gusbcfg
&= ~GUSBCFG_FORCEHOSTMODE
;
527 gusbcfg
|= GUSBCFG_FORCEDEVMODE
;
528 dwc2_writel(gusbcfg
, hsotg
->regs
+ GUSBCFG
);
529 } else if (hsotg
->dr_mode
== USB_DR_MODE_OTG
) {
530 gusbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
531 gusbcfg
&= ~GUSBCFG_FORCEHOSTMODE
;
532 gusbcfg
&= ~GUSBCFG_FORCEDEVMODE
;
533 dwc2_writel(gusbcfg
, hsotg
->regs
+ GUSBCFG
);
537 * NOTE: This long sleep is _very_ important, otherwise the core will
538 * not stay in host mode after a connector ID change!
540 usleep_range(150000, 160000);
545 static int dwc2_fs_phy_init(struct dwc2_hsotg
*hsotg
, bool select_phy
)
551 * core_init() is now called on every switch so only call the
552 * following for the first time through
555 dev_dbg(hsotg
->dev
, "FS PHY selected\n");
557 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
558 if (!(usbcfg
& GUSBCFG_PHYSEL
)) {
559 usbcfg
|= GUSBCFG_PHYSEL
;
560 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
562 /* Reset after a PHY select */
563 retval
= dwc2_core_reset(hsotg
);
567 "%s: Reset failed, aborting", __func__
);
574 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
575 * do this on HNP Dev/Host mode switches (done in dev_init and
578 if (dwc2_is_host_mode(hsotg
))
579 dwc2_init_fs_ls_pclk_sel(hsotg
);
581 if (hsotg
->core_params
->i2c_enable
> 0) {
582 dev_dbg(hsotg
->dev
, "FS PHY enabling I2C\n");
584 /* Program GUSBCFG.OtgUtmiFsSel to I2C */
585 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
586 usbcfg
|= GUSBCFG_OTG_UTMI_FS_SEL
;
587 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
589 /* Program GI2CCTL.I2CEn */
590 i2cctl
= dwc2_readl(hsotg
->regs
+ GI2CCTL
);
591 i2cctl
&= ~GI2CCTL_I2CDEVADDR_MASK
;
592 i2cctl
|= 1 << GI2CCTL_I2CDEVADDR_SHIFT
;
593 i2cctl
&= ~GI2CCTL_I2CEN
;
594 dwc2_writel(i2cctl
, hsotg
->regs
+ GI2CCTL
);
595 i2cctl
|= GI2CCTL_I2CEN
;
596 dwc2_writel(i2cctl
, hsotg
->regs
+ GI2CCTL
);
602 static int dwc2_hs_phy_init(struct dwc2_hsotg
*hsotg
, bool select_phy
)
604 u32 usbcfg
, usbcfg_old
;
610 usbcfg
= usbcfg_old
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
613 * HS PHY parameters. These parameters are preserved during soft reset
614 * so only program the first time. Do a soft reset immediately after
617 switch (hsotg
->core_params
->phy_type
) {
618 case DWC2_PHY_TYPE_PARAM_ULPI
:
620 dev_dbg(hsotg
->dev
, "HS ULPI PHY selected\n");
621 usbcfg
|= GUSBCFG_ULPI_UTMI_SEL
;
622 usbcfg
&= ~(GUSBCFG_PHYIF16
| GUSBCFG_DDRSEL
);
623 if (hsotg
->core_params
->phy_ulpi_ddr
> 0)
624 usbcfg
|= GUSBCFG_DDRSEL
;
626 case DWC2_PHY_TYPE_PARAM_UTMI
:
627 /* UTMI+ interface */
628 dev_dbg(hsotg
->dev
, "HS UTMI+ PHY selected\n");
629 usbcfg
&= ~(GUSBCFG_ULPI_UTMI_SEL
| GUSBCFG_PHYIF16
);
630 if (hsotg
->core_params
->phy_utmi_width
== 16)
631 usbcfg
|= GUSBCFG_PHYIF16
;
634 dev_err(hsotg
->dev
, "FS PHY selected at HS!\n");
638 if (usbcfg
!= usbcfg_old
) {
639 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
641 /* Reset after setting the PHY parameters */
642 retval
= dwc2_core_reset(hsotg
);
645 "%s: Reset failed, aborting", __func__
);
653 static int dwc2_phy_init(struct dwc2_hsotg
*hsotg
, bool select_phy
)
658 if (hsotg
->core_params
->speed
== DWC2_SPEED_PARAM_FULL
&&
659 hsotg
->core_params
->phy_type
== DWC2_PHY_TYPE_PARAM_FS
) {
660 /* If FS mode with FS PHY */
661 retval
= dwc2_fs_phy_init(hsotg
, select_phy
);
666 retval
= dwc2_hs_phy_init(hsotg
, select_phy
);
671 if (hsotg
->hw_params
.hs_phy_type
== GHWCFG2_HS_PHY_TYPE_ULPI
&&
672 hsotg
->hw_params
.fs_phy_type
== GHWCFG2_FS_PHY_TYPE_DEDICATED
&&
673 hsotg
->core_params
->ulpi_fs_ls
> 0) {
674 dev_dbg(hsotg
->dev
, "Setting ULPI FSLS\n");
675 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
676 usbcfg
|= GUSBCFG_ULPI_FS_LS
;
677 usbcfg
|= GUSBCFG_ULPI_CLK_SUSP_M
;
678 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
680 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
681 usbcfg
&= ~GUSBCFG_ULPI_FS_LS
;
682 usbcfg
&= ~GUSBCFG_ULPI_CLK_SUSP_M
;
683 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
689 static int dwc2_gahbcfg_init(struct dwc2_hsotg
*hsotg
)
691 u32 ahbcfg
= dwc2_readl(hsotg
->regs
+ GAHBCFG
);
693 switch (hsotg
->hw_params
.arch
) {
694 case GHWCFG2_EXT_DMA_ARCH
:
695 dev_err(hsotg
->dev
, "External DMA Mode not supported\n");
698 case GHWCFG2_INT_DMA_ARCH
:
699 dev_dbg(hsotg
->dev
, "Internal DMA Mode\n");
700 if (hsotg
->core_params
->ahbcfg
!= -1) {
701 ahbcfg
&= GAHBCFG_CTRL_MASK
;
702 ahbcfg
|= hsotg
->core_params
->ahbcfg
&
707 case GHWCFG2_SLAVE_ONLY_ARCH
:
709 dev_dbg(hsotg
->dev
, "Slave Only Mode\n");
713 dev_dbg(hsotg
->dev
, "dma_enable:%d dma_desc_enable:%d\n",
714 hsotg
->core_params
->dma_enable
,
715 hsotg
->core_params
->dma_desc_enable
);
717 if (hsotg
->core_params
->dma_enable
> 0) {
718 if (hsotg
->core_params
->dma_desc_enable
> 0)
719 dev_dbg(hsotg
->dev
, "Using Descriptor DMA mode\n");
721 dev_dbg(hsotg
->dev
, "Using Buffer DMA mode\n");
723 dev_dbg(hsotg
->dev
, "Using Slave mode\n");
724 hsotg
->core_params
->dma_desc_enable
= 0;
727 if (hsotg
->core_params
->dma_enable
> 0)
728 ahbcfg
|= GAHBCFG_DMA_EN
;
730 dwc2_writel(ahbcfg
, hsotg
->regs
+ GAHBCFG
);
735 static void dwc2_gusbcfg_init(struct dwc2_hsotg
*hsotg
)
739 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
740 usbcfg
&= ~(GUSBCFG_HNPCAP
| GUSBCFG_SRPCAP
);
742 switch (hsotg
->hw_params
.op_mode
) {
743 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE
:
744 if (hsotg
->core_params
->otg_cap
==
745 DWC2_CAP_PARAM_HNP_SRP_CAPABLE
)
746 usbcfg
|= GUSBCFG_HNPCAP
;
747 if (hsotg
->core_params
->otg_cap
!=
748 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE
)
749 usbcfg
|= GUSBCFG_SRPCAP
;
752 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE
:
753 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE
:
754 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST
:
755 if (hsotg
->core_params
->otg_cap
!=
756 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE
)
757 usbcfg
|= GUSBCFG_SRPCAP
;
760 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE
:
761 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE
:
762 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST
:
767 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
771 * dwc2_core_init() - Initializes the DWC_otg controller registers and
772 * prepares the core for device mode or host mode operation
774 * @hsotg: Programming view of the DWC_otg controller
775 * @initial_setup: If true then this is the first init for this instance.
777 int dwc2_core_init(struct dwc2_hsotg
*hsotg
, bool initial_setup
)
782 dev_dbg(hsotg
->dev
, "%s(%p)\n", __func__
, hsotg
);
784 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
786 /* Set ULPI External VBUS bit if needed */
787 usbcfg
&= ~GUSBCFG_ULPI_EXT_VBUS_DRV
;
788 if (hsotg
->core_params
->phy_ulpi_ext_vbus
==
789 DWC2_PHY_ULPI_EXTERNAL_VBUS
)
790 usbcfg
|= GUSBCFG_ULPI_EXT_VBUS_DRV
;
792 /* Set external TS Dline pulsing bit if needed */
793 usbcfg
&= ~GUSBCFG_TERMSELDLPULSE
;
794 if (hsotg
->core_params
->ts_dline
> 0)
795 usbcfg
|= GUSBCFG_TERMSELDLPULSE
;
797 dwc2_writel(usbcfg
, hsotg
->regs
+ GUSBCFG
);
800 * Reset the Controller
802 * We only need to reset the controller if this is a re-init.
803 * For the first init we know for sure that earlier code reset us (it
804 * needed to in order to properly detect various parameters).
806 if (!initial_setup
) {
807 retval
= dwc2_core_reset(hsotg
);
809 dev_err(hsotg
->dev
, "%s(): Reset failed, aborting\n",
816 * This needs to happen in FS mode before any other programming occurs
818 retval
= dwc2_phy_init(hsotg
, initial_setup
);
822 /* Program the GAHBCFG Register */
823 retval
= dwc2_gahbcfg_init(hsotg
);
827 /* Program the GUSBCFG register */
828 dwc2_gusbcfg_init(hsotg
);
830 /* Program the GOTGCTL register */
831 otgctl
= dwc2_readl(hsotg
->regs
+ GOTGCTL
);
832 otgctl
&= ~GOTGCTL_OTGVER
;
833 if (hsotg
->core_params
->otg_ver
> 0)
834 otgctl
|= GOTGCTL_OTGVER
;
835 dwc2_writel(otgctl
, hsotg
->regs
+ GOTGCTL
);
836 dev_dbg(hsotg
->dev
, "OTG VER PARAM: %d\n", hsotg
->core_params
->otg_ver
);
838 /* Clear the SRP success bit for FS-I2c */
839 hsotg
->srp_success
= 0;
841 /* Enable common interrupts */
842 dwc2_enable_common_interrupts(hsotg
);
845 * Do device or host initialization based on mode during PCD and
848 if (dwc2_is_host_mode(hsotg
)) {
849 dev_dbg(hsotg
->dev
, "Host Mode\n");
850 hsotg
->op_state
= OTG_STATE_A_HOST
;
852 dev_dbg(hsotg
->dev
, "Device Mode\n");
853 hsotg
->op_state
= OTG_STATE_B_PERIPHERAL
;
860 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
862 * @hsotg: Programming view of DWC_otg controller
864 void dwc2_enable_host_interrupts(struct dwc2_hsotg
*hsotg
)
868 dev_dbg(hsotg
->dev
, "%s()\n", __func__
);
870 /* Disable all interrupts */
871 dwc2_writel(0, hsotg
->regs
+ GINTMSK
);
872 dwc2_writel(0, hsotg
->regs
+ HAINTMSK
);
874 /* Enable the common interrupts */
875 dwc2_enable_common_interrupts(hsotg
);
877 /* Enable host mode interrupts without disturbing common interrupts */
878 intmsk
= dwc2_readl(hsotg
->regs
+ GINTMSK
);
879 intmsk
|= GINTSTS_DISCONNINT
| GINTSTS_PRTINT
| GINTSTS_HCHINT
;
880 dwc2_writel(intmsk
, hsotg
->regs
+ GINTMSK
);
884 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
886 * @hsotg: Programming view of DWC_otg controller
888 void dwc2_disable_host_interrupts(struct dwc2_hsotg
*hsotg
)
890 u32 intmsk
= dwc2_readl(hsotg
->regs
+ GINTMSK
);
892 /* Disable host mode interrupts without disturbing common interrupts */
893 intmsk
&= ~(GINTSTS_SOF
| GINTSTS_PRTINT
| GINTSTS_HCHINT
|
894 GINTSTS_PTXFEMP
| GINTSTS_NPTXFEMP
| GINTSTS_DISCONNINT
);
895 dwc2_writel(intmsk
, hsotg
->regs
+ GINTMSK
);
899 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
900 * For system that have a total fifo depth that is smaller than the default
903 * @hsotg: Programming view of DWC_otg controller
905 static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg
*hsotg
)
907 struct dwc2_core_params
*params
= hsotg
->core_params
;
908 struct dwc2_hw_params
*hw
= &hsotg
->hw_params
;
909 u32 rxfsiz
, nptxfsiz
, ptxfsiz
, total_fifo_size
;
911 total_fifo_size
= hw
->total_fifo_size
;
912 rxfsiz
= params
->host_rx_fifo_size
;
913 nptxfsiz
= params
->host_nperio_tx_fifo_size
;
914 ptxfsiz
= params
->host_perio_tx_fifo_size
;
917 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
918 * allocation with support for high bandwidth endpoints. Synopsys
919 * defines MPS(Max Packet size) for a periodic EP=1024, and for
920 * non-periodic as 512.
922 if (total_fifo_size
< (rxfsiz
+ nptxfsiz
+ ptxfsiz
)) {
924 * For Buffer DMA mode/Scatter Gather DMA mode
925 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
926 * with n = number of host channel.
927 * 2 * ((1024/4) + 2) = 516
929 rxfsiz
= 516 + hw
->host_channels
;
932 * min non-periodic tx fifo depth
933 * 2 * (largest non-periodic USB packet used / 4)
939 * min periodic tx fifo depth
940 * (largest packet size*MC)/4
945 params
->host_rx_fifo_size
= rxfsiz
;
946 params
->host_nperio_tx_fifo_size
= nptxfsiz
;
947 params
->host_perio_tx_fifo_size
= ptxfsiz
;
951 * If the summation of RX, NPTX and PTX fifo sizes is still
952 * bigger than the total_fifo_size, then we have a problem.
954 * We won't be able to allocate as many endpoints. Right now,
955 * we're just printing an error message, but ideally this FIFO
956 * allocation algorithm would be improved in the future.
958 * FIXME improve this FIFO allocation algorithm.
960 if (unlikely(total_fifo_size
< (rxfsiz
+ nptxfsiz
+ ptxfsiz
)))
961 dev_err(hsotg
->dev
, "invalid fifo sizes\n");
964 static void dwc2_config_fifos(struct dwc2_hsotg
*hsotg
)
966 struct dwc2_core_params
*params
= hsotg
->core_params
;
967 u32 nptxfsiz
, hptxfsiz
, dfifocfg
, grxfsiz
;
969 if (!params
->enable_dynamic_fifo
)
972 dwc2_calculate_dynamic_fifo(hsotg
);
975 grxfsiz
= dwc2_readl(hsotg
->regs
+ GRXFSIZ
);
976 dev_dbg(hsotg
->dev
, "initial grxfsiz=%08x\n", grxfsiz
);
977 grxfsiz
&= ~GRXFSIZ_DEPTH_MASK
;
978 grxfsiz
|= params
->host_rx_fifo_size
<<
979 GRXFSIZ_DEPTH_SHIFT
& GRXFSIZ_DEPTH_MASK
;
980 dwc2_writel(grxfsiz
, hsotg
->regs
+ GRXFSIZ
);
981 dev_dbg(hsotg
->dev
, "new grxfsiz=%08x\n",
982 dwc2_readl(hsotg
->regs
+ GRXFSIZ
));
984 /* Non-periodic Tx FIFO */
985 dev_dbg(hsotg
->dev
, "initial gnptxfsiz=%08x\n",
986 dwc2_readl(hsotg
->regs
+ GNPTXFSIZ
));
987 nptxfsiz
= params
->host_nperio_tx_fifo_size
<<
988 FIFOSIZE_DEPTH_SHIFT
& FIFOSIZE_DEPTH_MASK
;
989 nptxfsiz
|= params
->host_rx_fifo_size
<<
990 FIFOSIZE_STARTADDR_SHIFT
& FIFOSIZE_STARTADDR_MASK
;
991 dwc2_writel(nptxfsiz
, hsotg
->regs
+ GNPTXFSIZ
);
992 dev_dbg(hsotg
->dev
, "new gnptxfsiz=%08x\n",
993 dwc2_readl(hsotg
->regs
+ GNPTXFSIZ
));
995 /* Periodic Tx FIFO */
996 dev_dbg(hsotg
->dev
, "initial hptxfsiz=%08x\n",
997 dwc2_readl(hsotg
->regs
+ HPTXFSIZ
));
998 hptxfsiz
= params
->host_perio_tx_fifo_size
<<
999 FIFOSIZE_DEPTH_SHIFT
& FIFOSIZE_DEPTH_MASK
;
1000 hptxfsiz
|= (params
->host_rx_fifo_size
+
1001 params
->host_nperio_tx_fifo_size
) <<
1002 FIFOSIZE_STARTADDR_SHIFT
& FIFOSIZE_STARTADDR_MASK
;
1003 dwc2_writel(hptxfsiz
, hsotg
->regs
+ HPTXFSIZ
);
1004 dev_dbg(hsotg
->dev
, "new hptxfsiz=%08x\n",
1005 dwc2_readl(hsotg
->regs
+ HPTXFSIZ
));
1007 if (hsotg
->core_params
->en_multiple_tx_fifo
> 0 &&
1008 hsotg
->hw_params
.snpsid
<= DWC2_CORE_REV_2_94a
) {
1010 * Global DFIFOCFG calculation for Host mode -
1011 * include RxFIFO, NPTXFIFO and HPTXFIFO
1013 dfifocfg
= dwc2_readl(hsotg
->regs
+ GDFIFOCFG
);
1014 dfifocfg
&= ~GDFIFOCFG_EPINFOBASE_MASK
;
1015 dfifocfg
|= (params
->host_rx_fifo_size
+
1016 params
->host_nperio_tx_fifo_size
+
1017 params
->host_perio_tx_fifo_size
) <<
1018 GDFIFOCFG_EPINFOBASE_SHIFT
&
1019 GDFIFOCFG_EPINFOBASE_MASK
;
1020 dwc2_writel(dfifocfg
, hsotg
->regs
+ GDFIFOCFG
);
1025 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
1028 * @hsotg: Programming view of DWC_otg controller
1030 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
1031 * request queues. Host channels are reset to ensure that they are ready for
1032 * performing transfers.
1034 void dwc2_core_host_init(struct dwc2_hsotg
*hsotg
)
1036 u32 hcfg
, hfir
, otgctl
;
1038 dev_dbg(hsotg
->dev
, "%s(%p)\n", __func__
, hsotg
);
1040 /* Restart the Phy Clock */
1041 dwc2_writel(0, hsotg
->regs
+ PCGCTL
);
1043 /* Initialize Host Configuration Register */
1044 dwc2_init_fs_ls_pclk_sel(hsotg
);
1045 if (hsotg
->core_params
->speed
== DWC2_SPEED_PARAM_FULL
) {
1046 hcfg
= dwc2_readl(hsotg
->regs
+ HCFG
);
1047 hcfg
|= HCFG_FSLSSUPP
;
1048 dwc2_writel(hcfg
, hsotg
->regs
+ HCFG
);
1052 * This bit allows dynamic reloading of the HFIR register during
1053 * runtime. This bit needs to be programmed during initial configuration
1054 * and its value must not be changed during runtime.
1056 if (hsotg
->core_params
->reload_ctl
> 0) {
1057 hfir
= dwc2_readl(hsotg
->regs
+ HFIR
);
1058 hfir
|= HFIR_RLDCTRL
;
1059 dwc2_writel(hfir
, hsotg
->regs
+ HFIR
);
1062 if (hsotg
->core_params
->dma_desc_enable
> 0) {
1063 u32 op_mode
= hsotg
->hw_params
.op_mode
;
1064 if (hsotg
->hw_params
.snpsid
< DWC2_CORE_REV_2_90a
||
1065 !hsotg
->hw_params
.dma_desc_enable
||
1066 op_mode
== GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE
||
1067 op_mode
== GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE
||
1068 op_mode
== GHWCFG2_OP_MODE_UNDEFINED
) {
1070 "Hardware does not support descriptor DMA mode -\n");
1072 "falling back to buffer DMA mode.\n");
1073 hsotg
->core_params
->dma_desc_enable
= 0;
1075 hcfg
= dwc2_readl(hsotg
->regs
+ HCFG
);
1076 hcfg
|= HCFG_DESCDMA
;
1077 dwc2_writel(hcfg
, hsotg
->regs
+ HCFG
);
1081 /* Configure data FIFO sizes */
1082 dwc2_config_fifos(hsotg
);
1084 /* TODO - check this */
1085 /* Clear Host Set HNP Enable in the OTG Control Register */
1086 otgctl
= dwc2_readl(hsotg
->regs
+ GOTGCTL
);
1087 otgctl
&= ~GOTGCTL_HSTSETHNPEN
;
1088 dwc2_writel(otgctl
, hsotg
->regs
+ GOTGCTL
);
1090 /* Make sure the FIFOs are flushed */
1091 dwc2_flush_tx_fifo(hsotg
, 0x10 /* all TX FIFOs */);
1092 dwc2_flush_rx_fifo(hsotg
);
1094 /* Clear Host Set HNP Enable in the OTG Control Register */
1095 otgctl
= dwc2_readl(hsotg
->regs
+ GOTGCTL
);
1096 otgctl
&= ~GOTGCTL_HSTSETHNPEN
;
1097 dwc2_writel(otgctl
, hsotg
->regs
+ GOTGCTL
);
1099 if (hsotg
->core_params
->dma_desc_enable
<= 0) {
1100 int num_channels
, i
;
1103 /* Flush out any leftover queued requests */
1104 num_channels
= hsotg
->core_params
->host_channels
;
1105 for (i
= 0; i
< num_channels
; i
++) {
1106 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(i
));
1107 hcchar
&= ~HCCHAR_CHENA
;
1108 hcchar
|= HCCHAR_CHDIS
;
1109 hcchar
&= ~HCCHAR_EPDIR
;
1110 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(i
));
1113 /* Halt all channels to put them into a known state */
1114 for (i
= 0; i
< num_channels
; i
++) {
1117 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(i
));
1118 hcchar
|= HCCHAR_CHENA
| HCCHAR_CHDIS
;
1119 hcchar
&= ~HCCHAR_EPDIR
;
1120 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(i
));
1121 dev_dbg(hsotg
->dev
, "%s: Halt channel %d\n",
1124 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(i
));
1125 if (++count
> 1000) {
1127 "Unable to clear enable on channel %d\n",
1132 } while (hcchar
& HCCHAR_CHENA
);
1136 /* Turn on the vbus power */
1137 dev_dbg(hsotg
->dev
, "Init: Port Power? op_state=%d\n", hsotg
->op_state
);
1138 if (hsotg
->op_state
== OTG_STATE_A_HOST
) {
1139 u32 hprt0
= dwc2_read_hprt0(hsotg
);
1141 dev_dbg(hsotg
->dev
, "Init: Power Port (%d)\n",
1142 !!(hprt0
& HPRT0_PWR
));
1143 if (!(hprt0
& HPRT0_PWR
)) {
1145 dwc2_writel(hprt0
, hsotg
->regs
+ HPRT0
);
1149 dwc2_enable_host_interrupts(hsotg
);
1152 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg
*hsotg
,
1153 struct dwc2_host_chan
*chan
)
1155 u32 hcintmsk
= HCINTMSK_CHHLTD
;
1157 switch (chan
->ep_type
) {
1158 case USB_ENDPOINT_XFER_CONTROL
:
1159 case USB_ENDPOINT_XFER_BULK
:
1160 dev_vdbg(hsotg
->dev
, "control/bulk\n");
1161 hcintmsk
|= HCINTMSK_XFERCOMPL
;
1162 hcintmsk
|= HCINTMSK_STALL
;
1163 hcintmsk
|= HCINTMSK_XACTERR
;
1164 hcintmsk
|= HCINTMSK_DATATGLERR
;
1165 if (chan
->ep_is_in
) {
1166 hcintmsk
|= HCINTMSK_BBLERR
;
1168 hcintmsk
|= HCINTMSK_NAK
;
1169 hcintmsk
|= HCINTMSK_NYET
;
1171 hcintmsk
|= HCINTMSK_ACK
;
1174 if (chan
->do_split
) {
1175 hcintmsk
|= HCINTMSK_NAK
;
1176 if (chan
->complete_split
)
1177 hcintmsk
|= HCINTMSK_NYET
;
1179 hcintmsk
|= HCINTMSK_ACK
;
1182 if (chan
->error_state
)
1183 hcintmsk
|= HCINTMSK_ACK
;
1186 case USB_ENDPOINT_XFER_INT
:
1188 dev_vdbg(hsotg
->dev
, "intr\n");
1189 hcintmsk
|= HCINTMSK_XFERCOMPL
;
1190 hcintmsk
|= HCINTMSK_NAK
;
1191 hcintmsk
|= HCINTMSK_STALL
;
1192 hcintmsk
|= HCINTMSK_XACTERR
;
1193 hcintmsk
|= HCINTMSK_DATATGLERR
;
1194 hcintmsk
|= HCINTMSK_FRMOVRUN
;
1197 hcintmsk
|= HCINTMSK_BBLERR
;
1198 if (chan
->error_state
)
1199 hcintmsk
|= HCINTMSK_ACK
;
1200 if (chan
->do_split
) {
1201 if (chan
->complete_split
)
1202 hcintmsk
|= HCINTMSK_NYET
;
1204 hcintmsk
|= HCINTMSK_ACK
;
1208 case USB_ENDPOINT_XFER_ISOC
:
1210 dev_vdbg(hsotg
->dev
, "isoc\n");
1211 hcintmsk
|= HCINTMSK_XFERCOMPL
;
1212 hcintmsk
|= HCINTMSK_FRMOVRUN
;
1213 hcintmsk
|= HCINTMSK_ACK
;
1215 if (chan
->ep_is_in
) {
1216 hcintmsk
|= HCINTMSK_XACTERR
;
1217 hcintmsk
|= HCINTMSK_BBLERR
;
1221 dev_err(hsotg
->dev
, "## Unknown EP type ##\n");
1225 dwc2_writel(hcintmsk
, hsotg
->regs
+ HCINTMSK(chan
->hc_num
));
1227 dev_vdbg(hsotg
->dev
, "set HCINTMSK to %08x\n", hcintmsk
);
1230 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg
*hsotg
,
1231 struct dwc2_host_chan
*chan
)
1233 u32 hcintmsk
= HCINTMSK_CHHLTD
;
1236 * For Descriptor DMA mode core halts the channel on AHB error.
1237 * Interrupt is not required.
1239 if (hsotg
->core_params
->dma_desc_enable
<= 0) {
1241 dev_vdbg(hsotg
->dev
, "desc DMA disabled\n");
1242 hcintmsk
|= HCINTMSK_AHBERR
;
1245 dev_vdbg(hsotg
->dev
, "desc DMA enabled\n");
1246 if (chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
)
1247 hcintmsk
|= HCINTMSK_XFERCOMPL
;
1250 if (chan
->error_state
&& !chan
->do_split
&&
1251 chan
->ep_type
!= USB_ENDPOINT_XFER_ISOC
) {
1253 dev_vdbg(hsotg
->dev
, "setting ACK\n");
1254 hcintmsk
|= HCINTMSK_ACK
;
1255 if (chan
->ep_is_in
) {
1256 hcintmsk
|= HCINTMSK_DATATGLERR
;
1257 if (chan
->ep_type
!= USB_ENDPOINT_XFER_INT
)
1258 hcintmsk
|= HCINTMSK_NAK
;
1262 dwc2_writel(hcintmsk
, hsotg
->regs
+ HCINTMSK(chan
->hc_num
));
1264 dev_vdbg(hsotg
->dev
, "set HCINTMSK to %08x\n", hcintmsk
);
1267 static void dwc2_hc_enable_ints(struct dwc2_hsotg
*hsotg
,
1268 struct dwc2_host_chan
*chan
)
1272 if (hsotg
->core_params
->dma_enable
> 0) {
1274 dev_vdbg(hsotg
->dev
, "DMA enabled\n");
1275 dwc2_hc_enable_dma_ints(hsotg
, chan
);
1278 dev_vdbg(hsotg
->dev
, "DMA disabled\n");
1279 dwc2_hc_enable_slave_ints(hsotg
, chan
);
1282 /* Enable the top level host channel interrupt */
1283 intmsk
= dwc2_readl(hsotg
->regs
+ HAINTMSK
);
1284 intmsk
|= 1 << chan
->hc_num
;
1285 dwc2_writel(intmsk
, hsotg
->regs
+ HAINTMSK
);
1287 dev_vdbg(hsotg
->dev
, "set HAINTMSK to %08x\n", intmsk
);
1289 /* Make sure host channel interrupts are enabled */
1290 intmsk
= dwc2_readl(hsotg
->regs
+ GINTMSK
);
1291 intmsk
|= GINTSTS_HCHINT
;
1292 dwc2_writel(intmsk
, hsotg
->regs
+ GINTMSK
);
1294 dev_vdbg(hsotg
->dev
, "set GINTMSK to %08x\n", intmsk
);
1298 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
1299 * a specific endpoint
1301 * @hsotg: Programming view of DWC_otg controller
1302 * @chan: Information needed to initialize the host channel
1304 * The HCCHARn register is set up with the characteristics specified in chan.
1305 * Host channel interrupts that may need to be serviced while this transfer is
1306 * in progress are enabled.
1308 void dwc2_hc_init(struct dwc2_hsotg
*hsotg
, struct dwc2_host_chan
*chan
)
1310 u8 hc_num
= chan
->hc_num
;
1316 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
1318 /* Clear old interrupt conditions for this host channel */
1319 hcintmsk
= 0xffffffff;
1320 hcintmsk
&= ~HCINTMSK_RESERVED14_31
;
1321 dwc2_writel(hcintmsk
, hsotg
->regs
+ HCINT(hc_num
));
1323 /* Enable channel interrupts required for this transfer */
1324 dwc2_hc_enable_ints(hsotg
, chan
);
1327 * Program the HCCHARn register with the endpoint characteristics for
1328 * the current transfer
1330 hcchar
= chan
->dev_addr
<< HCCHAR_DEVADDR_SHIFT
& HCCHAR_DEVADDR_MASK
;
1331 hcchar
|= chan
->ep_num
<< HCCHAR_EPNUM_SHIFT
& HCCHAR_EPNUM_MASK
;
1333 hcchar
|= HCCHAR_EPDIR
;
1334 if (chan
->speed
== USB_SPEED_LOW
)
1335 hcchar
|= HCCHAR_LSPDDEV
;
1336 hcchar
|= chan
->ep_type
<< HCCHAR_EPTYPE_SHIFT
& HCCHAR_EPTYPE_MASK
;
1337 hcchar
|= chan
->max_packet
<< HCCHAR_MPS_SHIFT
& HCCHAR_MPS_MASK
;
1338 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(hc_num
));
1340 dev_vdbg(hsotg
->dev
, "set HCCHAR(%d) to %08x\n",
1343 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n",
1345 dev_vdbg(hsotg
->dev
, " Dev Addr: %d\n",
1347 dev_vdbg(hsotg
->dev
, " Ep Num: %d\n",
1349 dev_vdbg(hsotg
->dev
, " Is In: %d\n",
1351 dev_vdbg(hsotg
->dev
, " Is Low Speed: %d\n",
1352 chan
->speed
== USB_SPEED_LOW
);
1353 dev_vdbg(hsotg
->dev
, " Ep Type: %d\n",
1355 dev_vdbg(hsotg
->dev
, " Max Pkt: %d\n",
1359 /* Program the HCSPLT register for SPLITs */
1360 if (chan
->do_split
) {
1362 dev_vdbg(hsotg
->dev
,
1363 "Programming HC %d with split --> %s\n",
1365 chan
->complete_split
? "CSPLIT" : "SSPLIT");
1366 if (chan
->complete_split
)
1367 hcsplt
|= HCSPLT_COMPSPLT
;
1368 hcsplt
|= chan
->xact_pos
<< HCSPLT_XACTPOS_SHIFT
&
1369 HCSPLT_XACTPOS_MASK
;
1370 hcsplt
|= chan
->hub_addr
<< HCSPLT_HUBADDR_SHIFT
&
1371 HCSPLT_HUBADDR_MASK
;
1372 hcsplt
|= chan
->hub_port
<< HCSPLT_PRTADDR_SHIFT
&
1373 HCSPLT_PRTADDR_MASK
;
1375 dev_vdbg(hsotg
->dev
, " comp split %d\n",
1376 chan
->complete_split
);
1377 dev_vdbg(hsotg
->dev
, " xact pos %d\n",
1379 dev_vdbg(hsotg
->dev
, " hub addr %d\n",
1381 dev_vdbg(hsotg
->dev
, " hub port %d\n",
1383 dev_vdbg(hsotg
->dev
, " is_in %d\n",
1385 dev_vdbg(hsotg
->dev
, " Max Pkt %d\n",
1387 dev_vdbg(hsotg
->dev
, " xferlen %d\n",
1392 dwc2_writel(hcsplt
, hsotg
->regs
+ HCSPLT(hc_num
));
1396 * dwc2_hc_halt() - Attempts to halt a host channel
1398 * @hsotg: Controller register interface
1399 * @chan: Host channel to halt
1400 * @halt_status: Reason for halting the channel
1402 * This function should only be called in Slave mode or to abort a transfer in
1403 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
1404 * controller halts the channel when the transfer is complete or a condition
1405 * occurs that requires application intervention.
1407 * In slave mode, checks for a free request queue entry, then sets the Channel
1408 * Enable and Channel Disable bits of the Host Channel Characteristics
 * register of the specified channel to initiate the halt. If there is no free
1410 * request queue entry, sets only the Channel Disable bit of the HCCHARn
1411 * register to flush requests for this channel. In the latter case, sets a
1412 * flag to indicate that the host channel needs to be halted when a request
1413 * queue slot is open.
1415 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1416 * HCCHARn register. The controller ensures there is space in the request
1417 * queue before submitting the halt request.
1419 * Some time may elapse before the core flushes any posted requests for this
1420 * host channel and halts. The Channel Halted interrupt handler completes the
1421 * deactivation of the host channel.
1423 void dwc2_hc_halt(struct dwc2_hsotg
*hsotg
, struct dwc2_host_chan
*chan
,
1424 enum dwc2_halt_status halt_status
)
1426 u32 nptxsts
, hptxsts
, hcchar
;
1429 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
1430 if (halt_status
== DWC2_HC_XFER_NO_HALT_STATUS
)
1431 dev_err(hsotg
->dev
, "!!! halt_status = %d !!!\n", halt_status
);
1433 if (halt_status
== DWC2_HC_XFER_URB_DEQUEUE
||
1434 halt_status
== DWC2_HC_XFER_AHB_ERR
) {
1436 * Disable all channel interrupts except Ch Halted. The QTD
1437 * and QH state associated with this transfer has been cleared
1438 * (in the case of URB_DEQUEUE), so the channel needs to be
1439 * shut down carefully to prevent crashes.
1441 u32 hcintmsk
= HCINTMSK_CHHLTD
;
1443 dev_vdbg(hsotg
->dev
, "dequeue/error\n");
1444 dwc2_writel(hcintmsk
, hsotg
->regs
+ HCINTMSK(chan
->hc_num
));
1447 * Make sure no other interrupts besides halt are currently
1448 * pending. Handling another interrupt could cause a crash due
1449 * to the QTD and QH state.
1451 dwc2_writel(~hcintmsk
, hsotg
->regs
+ HCINT(chan
->hc_num
));
1454 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1455 * even if the channel was already halted for some other
1458 chan
->halt_status
= halt_status
;
1460 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(chan
->hc_num
));
1461 if (!(hcchar
& HCCHAR_CHENA
)) {
1463 * The channel is either already halted or it hasn't
1464 * started yet. In DMA mode, the transfer may halt if
1465 * it finishes normally or a condition occurs that
1466 * requires driver intervention. Don't want to halt
1467 * the channel again. In either Slave or DMA mode,
1468 * it's possible that the transfer has been assigned
1469 * to a channel, but not started yet when an URB is
1470 * dequeued. Don't want to halt a channel that hasn't
1476 if (chan
->halt_pending
) {
1478 * A halt has already been issued for this channel. This might
1479 * happen when a transfer is aborted by a higher level in
1482 dev_vdbg(hsotg
->dev
,
1483 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1484 __func__
, chan
->hc_num
);
1488 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(chan
->hc_num
));
1490 /* No need to set the bit in DDMA for disabling the channel */
1491 /* TODO check it everywhere channel is disabled */
1492 if (hsotg
->core_params
->dma_desc_enable
<= 0) {
1494 dev_vdbg(hsotg
->dev
, "desc DMA disabled\n");
1495 hcchar
|= HCCHAR_CHENA
;
1498 dev_dbg(hsotg
->dev
, "desc DMA enabled\n");
1500 hcchar
|= HCCHAR_CHDIS
;
1502 if (hsotg
->core_params
->dma_enable
<= 0) {
1504 dev_vdbg(hsotg
->dev
, "DMA not enabled\n");
1505 hcchar
|= HCCHAR_CHENA
;
1507 /* Check for space in the request queue to issue the halt */
1508 if (chan
->ep_type
== USB_ENDPOINT_XFER_CONTROL
||
1509 chan
->ep_type
== USB_ENDPOINT_XFER_BULK
) {
1510 dev_vdbg(hsotg
->dev
, "control/bulk\n");
1511 nptxsts
= dwc2_readl(hsotg
->regs
+ GNPTXSTS
);
1512 if ((nptxsts
& TXSTS_QSPCAVAIL_MASK
) == 0) {
1513 dev_vdbg(hsotg
->dev
, "Disabling channel\n");
1514 hcchar
&= ~HCCHAR_CHENA
;
1518 dev_vdbg(hsotg
->dev
, "isoc/intr\n");
1519 hptxsts
= dwc2_readl(hsotg
->regs
+ HPTXSTS
);
1520 if ((hptxsts
& TXSTS_QSPCAVAIL_MASK
) == 0 ||
1521 hsotg
->queuing_high_bandwidth
) {
1523 dev_vdbg(hsotg
->dev
, "Disabling channel\n");
1524 hcchar
&= ~HCCHAR_CHENA
;
1529 dev_vdbg(hsotg
->dev
, "DMA enabled\n");
1532 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(chan
->hc_num
));
1533 chan
->halt_status
= halt_status
;
1535 if (hcchar
& HCCHAR_CHENA
) {
1537 dev_vdbg(hsotg
->dev
, "Channel enabled\n");
1538 chan
->halt_pending
= 1;
1539 chan
->halt_on_queue
= 0;
1542 dev_vdbg(hsotg
->dev
, "Channel disabled\n");
1543 chan
->halt_on_queue
= 1;
1547 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n", __func__
,
1549 dev_vdbg(hsotg
->dev
, " hcchar: 0x%08x\n",
1551 dev_vdbg(hsotg
->dev
, " halt_pending: %d\n",
1552 chan
->halt_pending
);
1553 dev_vdbg(hsotg
->dev
, " halt_on_queue: %d\n",
1554 chan
->halt_on_queue
);
1555 dev_vdbg(hsotg
->dev
, " halt_status: %d\n",
1561 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1563 * @hsotg: Programming view of DWC_otg controller
1564 * @chan: Identifies the host channel to clean up
1566 * This function is normally called after a transfer is done and the host
1567 * channel is being released
1569 void dwc2_hc_cleanup(struct dwc2_hsotg
*hsotg
, struct dwc2_host_chan
*chan
)
1573 chan
->xfer_started
= 0;
1576 * Clear channel interrupt enables and any unhandled channel interrupt
1579 dwc2_writel(0, hsotg
->regs
+ HCINTMSK(chan
->hc_num
));
1580 hcintmsk
= 0xffffffff;
1581 hcintmsk
&= ~HCINTMSK_RESERVED14_31
;
1582 dwc2_writel(hcintmsk
, hsotg
->regs
+ HCINT(chan
->hc_num
));
1586 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1587 * which frame a periodic transfer should occur
1589 * @hsotg: Programming view of DWC_otg controller
1590 * @chan: Identifies the host channel to set up and its properties
1591 * @hcchar: Current value of the HCCHAR register for the specified host channel
1593 * This function has no effect on non-periodic transfers
1595 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg
*hsotg
,
1596 struct dwc2_host_chan
*chan
, u32
*hcchar
)
1598 if (chan
->ep_type
== USB_ENDPOINT_XFER_INT
||
1599 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
) {
1600 /* 1 if _next_ frame is odd, 0 if it's even */
1601 if (!(dwc2_hcd_get_frame_number(hsotg
) & 0x1))
1602 *hcchar
|= HCCHAR_ODDFRM
;
1606 static void dwc2_set_pid_isoc(struct dwc2_host_chan
*chan
)
1608 /* Set up the initial PID for the transfer */
1609 if (chan
->speed
== USB_SPEED_HIGH
) {
1610 if (chan
->ep_is_in
) {
1611 if (chan
->multi_count
== 1)
1612 chan
->data_pid_start
= DWC2_HC_PID_DATA0
;
1613 else if (chan
->multi_count
== 2)
1614 chan
->data_pid_start
= DWC2_HC_PID_DATA1
;
1616 chan
->data_pid_start
= DWC2_HC_PID_DATA2
;
1618 if (chan
->multi_count
== 1)
1619 chan
->data_pid_start
= DWC2_HC_PID_DATA0
;
1621 chan
->data_pid_start
= DWC2_HC_PID_MDATA
;
1624 chan
->data_pid_start
= DWC2_HC_PID_DATA0
;
1629 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1632 * @hsotg: Programming view of DWC_otg controller
1633 * @chan: Information needed to initialize the host channel
1635 * This function should only be called in Slave mode. For a channel associated
1636 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1637 * associated with a periodic EP, the periodic Tx FIFO is written.
1639 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1640 * the number of bytes written to the Tx FIFO.
1642 static void dwc2_hc_write_packet(struct dwc2_hsotg
*hsotg
,
1643 struct dwc2_host_chan
*chan
)
1646 u32 remaining_count
;
1649 u32 __iomem
*data_fifo
;
1650 u32
*data_buf
= (u32
*)chan
->xfer_buf
;
1653 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
1655 data_fifo
= (u32 __iomem
*)(hsotg
->regs
+ HCFIFO(chan
->hc_num
));
1657 remaining_count
= chan
->xfer_len
- chan
->xfer_count
;
1658 if (remaining_count
> chan
->max_packet
)
1659 byte_count
= chan
->max_packet
;
1661 byte_count
= remaining_count
;
1663 dword_count
= (byte_count
+ 3) / 4;
1665 if (((unsigned long)data_buf
& 0x3) == 0) {
1666 /* xfer_buf is DWORD aligned */
1667 for (i
= 0; i
< dword_count
; i
++, data_buf
++)
1668 dwc2_writel(*data_buf
, data_fifo
);
1670 /* xfer_buf is not DWORD aligned */
1671 for (i
= 0; i
< dword_count
; i
++, data_buf
++) {
1672 u32 data
= data_buf
[0] | data_buf
[1] << 8 |
1673 data_buf
[2] << 16 | data_buf
[3] << 24;
1674 dwc2_writel(data
, data_fifo
);
1678 chan
->xfer_count
+= byte_count
;
1679 chan
->xfer_buf
+= byte_count
;
1683 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1684 * channel and starts the transfer
1686 * @hsotg: Programming view of DWC_otg controller
1687 * @chan: Information needed to initialize the host channel. The xfer_len value
1688 * may be reduced to accommodate the max widths of the XferSize and
1689 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1690 * changed to reflect the final xfer_len value.
1692 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1693 * the caller must ensure that there is sufficient space in the request queue
1696 * For an OUT transfer in Slave mode, it loads a data packet into the
1697 * appropriate FIFO. If necessary, additional data packets are loaded in the
1700 * For an IN transfer in Slave mode, a data packet is requested. The data
1701 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1702 * additional data packets are requested in the Host ISR.
1704 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1705 * register along with a packet count of 1 and the channel is enabled. This
1706 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1707 * simply set to 0 since no data transfer occurs in this case.
1709 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1710 * all the information required to perform the subsequent data transfer. In
1711 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1712 * controller performs the entire PING protocol, then starts the data
1715 void dwc2_hc_start_transfer(struct dwc2_hsotg
*hsotg
,
1716 struct dwc2_host_chan
*chan
)
1718 u32 max_hc_xfer_size
= hsotg
->core_params
->max_transfer_size
;
1719 u16 max_hc_pkt_count
= hsotg
->core_params
->max_packet_count
;
1726 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
1728 if (chan
->do_ping
) {
1729 if (hsotg
->core_params
->dma_enable
<= 0) {
1731 dev_vdbg(hsotg
->dev
, "ping, no DMA\n");
1732 dwc2_hc_do_ping(hsotg
, chan
);
1733 chan
->xfer_started
= 1;
1737 dev_vdbg(hsotg
->dev
, "ping, DMA\n");
1738 hctsiz
|= TSIZ_DOPNG
;
1742 if (chan
->do_split
) {
1744 dev_vdbg(hsotg
->dev
, "split\n");
1747 if (chan
->complete_split
&& !chan
->ep_is_in
)
1749 * For CSPLIT OUT Transfer, set the size to 0 so the
1750 * core doesn't expect any data written to the FIFO
1753 else if (chan
->ep_is_in
|| chan
->xfer_len
> chan
->max_packet
)
1754 chan
->xfer_len
= chan
->max_packet
;
1755 else if (!chan
->ep_is_in
&& chan
->xfer_len
> 188)
1756 chan
->xfer_len
= 188;
1758 hctsiz
|= chan
->xfer_len
<< TSIZ_XFERSIZE_SHIFT
&
1761 /* For split set ec_mc for immediate retries */
1762 if (chan
->ep_type
== USB_ENDPOINT_XFER_INT
||
1763 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
)
1769 dev_vdbg(hsotg
->dev
, "no split\n");
1771 * Ensure that the transfer length and packet count will fit
1772 * in the widths allocated for them in the HCTSIZn register
1774 if (chan
->ep_type
== USB_ENDPOINT_XFER_INT
||
1775 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
) {
1777 * Make sure the transfer size is no larger than one
1778 * (micro)frame's worth of data. (A check was done
1779 * when the periodic transfer was accepted to ensure
1780 * that a (micro)frame's worth of data can be
1781 * programmed into a channel.)
1783 u32 max_periodic_len
=
1784 chan
->multi_count
* chan
->max_packet
;
1786 if (chan
->xfer_len
> max_periodic_len
)
1787 chan
->xfer_len
= max_periodic_len
;
1788 } else if (chan
->xfer_len
> max_hc_xfer_size
) {
1790 * Make sure that xfer_len is a multiple of max packet
1794 max_hc_xfer_size
- chan
->max_packet
+ 1;
1797 if (chan
->xfer_len
> 0) {
1798 num_packets
= (chan
->xfer_len
+ chan
->max_packet
- 1) /
1800 if (num_packets
> max_hc_pkt_count
) {
1801 num_packets
= max_hc_pkt_count
;
1802 chan
->xfer_len
= num_packets
* chan
->max_packet
;
1805 /* Need 1 packet for transfer length of 0 */
1811 * Always program an integral # of max packets for IN
1814 chan
->xfer_len
= num_packets
* chan
->max_packet
;
1816 if (chan
->ep_type
== USB_ENDPOINT_XFER_INT
||
1817 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
)
1819 * Make sure that the multi_count field matches the
1820 * actual transfer length
1822 chan
->multi_count
= num_packets
;
1824 if (chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
)
1825 dwc2_set_pid_isoc(chan
);
1827 hctsiz
|= chan
->xfer_len
<< TSIZ_XFERSIZE_SHIFT
&
1830 /* The ec_mc gets the multi_count for non-split */
1831 ec_mc
= chan
->multi_count
;
1834 chan
->start_pkt_count
= num_packets
;
1835 hctsiz
|= num_packets
<< TSIZ_PKTCNT_SHIFT
& TSIZ_PKTCNT_MASK
;
1836 hctsiz
|= chan
->data_pid_start
<< TSIZ_SC_MC_PID_SHIFT
&
1837 TSIZ_SC_MC_PID_MASK
;
1838 dwc2_writel(hctsiz
, hsotg
->regs
+ HCTSIZ(chan
->hc_num
));
1840 dev_vdbg(hsotg
->dev
, "Wrote %08x to HCTSIZ(%d)\n",
1841 hctsiz
, chan
->hc_num
);
1843 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n", __func__
,
1845 dev_vdbg(hsotg
->dev
, " Xfer Size: %d\n",
1846 (hctsiz
& TSIZ_XFERSIZE_MASK
) >>
1847 TSIZ_XFERSIZE_SHIFT
);
1848 dev_vdbg(hsotg
->dev
, " Num Pkts: %d\n",
1849 (hctsiz
& TSIZ_PKTCNT_MASK
) >>
1851 dev_vdbg(hsotg
->dev
, " Start PID: %d\n",
1852 (hctsiz
& TSIZ_SC_MC_PID_MASK
) >>
1853 TSIZ_SC_MC_PID_SHIFT
);
1856 if (hsotg
->core_params
->dma_enable
> 0) {
1857 dma_addr_t dma_addr
;
1859 if (chan
->align_buf
) {
1861 dev_vdbg(hsotg
->dev
, "align_buf\n");
1862 dma_addr
= chan
->align_buf
;
1864 dma_addr
= chan
->xfer_dma
;
1866 dwc2_writel((u32
)dma_addr
, hsotg
->regs
+ HCDMA(chan
->hc_num
));
1868 dev_vdbg(hsotg
->dev
, "Wrote %08lx to HCDMA(%d)\n",
1869 (unsigned long)dma_addr
, chan
->hc_num
);
1872 /* Start the split */
1873 if (chan
->do_split
) {
1874 u32 hcsplt
= dwc2_readl(hsotg
->regs
+ HCSPLT(chan
->hc_num
));
1876 hcsplt
|= HCSPLT_SPLTENA
;
1877 dwc2_writel(hcsplt
, hsotg
->regs
+ HCSPLT(chan
->hc_num
));
1880 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(chan
->hc_num
));
1881 hcchar
&= ~HCCHAR_MULTICNT_MASK
;
1882 hcchar
|= (ec_mc
<< HCCHAR_MULTICNT_SHIFT
) & HCCHAR_MULTICNT_MASK
;
1883 dwc2_hc_set_even_odd_frame(hsotg
, chan
, &hcchar
);
1885 if (hcchar
& HCCHAR_CHDIS
)
1886 dev_warn(hsotg
->dev
,
1887 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1888 __func__
, chan
->hc_num
, hcchar
);
1890 /* Set host channel enable after all other setup is complete */
1891 hcchar
|= HCCHAR_CHENA
;
1892 hcchar
&= ~HCCHAR_CHDIS
;
1895 dev_vdbg(hsotg
->dev
, " Multi Cnt: %d\n",
1896 (hcchar
& HCCHAR_MULTICNT_MASK
) >>
1897 HCCHAR_MULTICNT_SHIFT
);
1899 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(chan
->hc_num
));
1901 dev_vdbg(hsotg
->dev
, "Wrote %08x to HCCHAR(%d)\n", hcchar
,
1904 chan
->xfer_started
= 1;
1907 if (hsotg
->core_params
->dma_enable
<= 0 &&
1908 !chan
->ep_is_in
&& chan
->xfer_len
> 0)
1909 /* Load OUT packet into the appropriate Tx FIFO */
1910 dwc2_hc_write_packet(hsotg
, chan
);
1914 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1915 * host channel and starts the transfer in Descriptor DMA mode
1917 * @hsotg: Programming view of DWC_otg controller
1918 * @chan: Information needed to initialize the host channel
1920 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1921 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1922 * with micro-frame bitmap.
1924 * Initializes HCDMA register with descriptor list address and CTD value then
1925 * starts the transfer via enabling the channel.
1927 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg
*hsotg
,
1928 struct dwc2_host_chan
*chan
)
1934 hctsiz
|= TSIZ_DOPNG
;
1936 if (chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
)
1937 dwc2_set_pid_isoc(chan
);
1939 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1940 hctsiz
|= chan
->data_pid_start
<< TSIZ_SC_MC_PID_SHIFT
&
1941 TSIZ_SC_MC_PID_MASK
;
1943 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1944 hctsiz
|= (chan
->ntd
- 1) << TSIZ_NTD_SHIFT
& TSIZ_NTD_MASK
;
1946 /* Non-zero only for high-speed interrupt endpoints */
1947 hctsiz
|= chan
->schinfo
<< TSIZ_SCHINFO_SHIFT
& TSIZ_SCHINFO_MASK
;
1950 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n", __func__
,
1952 dev_vdbg(hsotg
->dev
, " Start PID: %d\n",
1953 chan
->data_pid_start
);
1954 dev_vdbg(hsotg
->dev
, " NTD: %d\n", chan
->ntd
- 1);
1957 dwc2_writel(hctsiz
, hsotg
->regs
+ HCTSIZ(chan
->hc_num
));
1959 dma_sync_single_for_device(hsotg
->dev
, chan
->desc_list_addr
,
1960 chan
->desc_list_sz
, DMA_TO_DEVICE
);
1962 dwc2_writel(chan
->desc_list_addr
, hsotg
->regs
+ HCDMA(chan
->hc_num
));
1965 dev_vdbg(hsotg
->dev
, "Wrote %pad to HCDMA(%d)\n",
1966 &chan
->desc_list_addr
, chan
->hc_num
);
1968 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(chan
->hc_num
));
1969 hcchar
&= ~HCCHAR_MULTICNT_MASK
;
1970 hcchar
|= chan
->multi_count
<< HCCHAR_MULTICNT_SHIFT
&
1971 HCCHAR_MULTICNT_MASK
;
1973 if (hcchar
& HCCHAR_CHDIS
)
1974 dev_warn(hsotg
->dev
,
1975 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1976 __func__
, chan
->hc_num
, hcchar
);
1978 /* Set host channel enable after all other setup is complete */
1979 hcchar
|= HCCHAR_CHENA
;
1980 hcchar
&= ~HCCHAR_CHDIS
;
1983 dev_vdbg(hsotg
->dev
, " Multi Cnt: %d\n",
1984 (hcchar
& HCCHAR_MULTICNT_MASK
) >>
1985 HCCHAR_MULTICNT_SHIFT
);
1987 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(chan
->hc_num
));
1989 dev_vdbg(hsotg
->dev
, "Wrote %08x to HCCHAR(%d)\n", hcchar
,
1992 chan
->xfer_started
= 1;
1997 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1998 * a previous call to dwc2_hc_start_transfer()
2000 * @hsotg: Programming view of DWC_otg controller
2001 * @chan: Information needed to initialize the host channel
2003 * The caller must ensure there is sufficient space in the request queue and Tx
2004 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
2005 * the controller acts autonomously to complete transfers programmed to a host
2008 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
2009 * if there is any data remaining to be queued. For an IN transfer, another
2010 * data packet is always requested. For the SETUP phase of a control transfer,
2011 * this function does nothing.
2013 * Return: 1 if a new request is queued, 0 if no more requests are required
2016 int dwc2_hc_continue_transfer(struct dwc2_hsotg
*hsotg
,
2017 struct dwc2_host_chan
*chan
)
2020 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n", __func__
,
2024 /* SPLITs always queue just once per channel */
2027 if (chan
->data_pid_start
== DWC2_HC_PID_SETUP
)
2028 /* SETUPs are queued only once since they can't be NAK'd */
2031 if (chan
->ep_is_in
) {
2033 * Always queue another request for other IN transfers. If
2034 * back-to-back INs are issued and NAKs are received for both,
2035 * the driver may still be processing the first NAK when the
2036 * second NAK is received. When the interrupt handler clears
2037 * the NAK interrupt for the first NAK, the second NAK will
2038 * not be seen. So we can't depend on the NAK interrupt
2039 * handler to requeue a NAK'd request. Instead, IN requests
2040 * are issued each time this function is called. When the
2041 * transfer completes, the extra requests for the channel will
2044 u32 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(chan
->hc_num
));
2046 dwc2_hc_set_even_odd_frame(hsotg
, chan
, &hcchar
);
2047 hcchar
|= HCCHAR_CHENA
;
2048 hcchar
&= ~HCCHAR_CHDIS
;
2050 dev_vdbg(hsotg
->dev
, " IN xfer: hcchar = 0x%08x\n",
2052 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(chan
->hc_num
));
2059 if (chan
->xfer_count
< chan
->xfer_len
) {
2060 if (chan
->ep_type
== USB_ENDPOINT_XFER_INT
||
2061 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
) {
2062 u32 hcchar
= dwc2_readl(hsotg
->regs
+
2063 HCCHAR(chan
->hc_num
));
2065 dwc2_hc_set_even_odd_frame(hsotg
, chan
,
2069 /* Load OUT packet into the appropriate Tx FIFO */
2070 dwc2_hc_write_packet(hsotg
, chan
);
2079 * dwc2_hc_do_ping() - Starts a PING transfer
2081 * @hsotg: Programming view of DWC_otg controller
2082 * @chan: Information needed to initialize the host channel
2084 * This function should only be called in Slave mode. The Do Ping bit is set in
2085 * the HCTSIZ register, then the channel is enabled.
2087 void dwc2_hc_do_ping(struct dwc2_hsotg
*hsotg
, struct dwc2_host_chan
*chan
)
2093 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n", __func__
,
2097 hctsiz
= TSIZ_DOPNG
;
2098 hctsiz
|= 1 << TSIZ_PKTCNT_SHIFT
;
2099 dwc2_writel(hctsiz
, hsotg
->regs
+ HCTSIZ(chan
->hc_num
));
2101 hcchar
= dwc2_readl(hsotg
->regs
+ HCCHAR(chan
->hc_num
));
2102 hcchar
|= HCCHAR_CHENA
;
2103 hcchar
&= ~HCCHAR_CHDIS
;
2104 dwc2_writel(hcchar
, hsotg
->regs
+ HCCHAR(chan
->hc_num
));
2108 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
2109 * the HFIR register according to PHY type and speed
2111 * @hsotg: Programming view of DWC_otg controller
2113 * NOTE: The caller can modify the value of the HFIR register only after the
2114 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
2117 u32
dwc2_calc_frame_interval(struct dwc2_hsotg
*hsotg
)
2121 int clock
= 60; /* default value */
2123 usbcfg
= dwc2_readl(hsotg
->regs
+ GUSBCFG
);
2124 hprt0
= dwc2_readl(hsotg
->regs
+ HPRT0
);
2126 if (!(usbcfg
& GUSBCFG_PHYSEL
) && (usbcfg
& GUSBCFG_ULPI_UTMI_SEL
) &&
2127 !(usbcfg
& GUSBCFG_PHYIF16
))
2129 if ((usbcfg
& GUSBCFG_PHYSEL
) && hsotg
->hw_params
.fs_phy_type
==
2130 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI
)
2132 if (!(usbcfg
& GUSBCFG_PHY_LP_CLK_SEL
) && !(usbcfg
& GUSBCFG_PHYSEL
) &&
2133 !(usbcfg
& GUSBCFG_ULPI_UTMI_SEL
) && (usbcfg
& GUSBCFG_PHYIF16
))
2135 if (!(usbcfg
& GUSBCFG_PHY_LP_CLK_SEL
) && !(usbcfg
& GUSBCFG_PHYSEL
) &&
2136 !(usbcfg
& GUSBCFG_ULPI_UTMI_SEL
) && !(usbcfg
& GUSBCFG_PHYIF16
))
2138 if ((usbcfg
& GUSBCFG_PHY_LP_CLK_SEL
) && !(usbcfg
& GUSBCFG_PHYSEL
) &&
2139 !(usbcfg
& GUSBCFG_ULPI_UTMI_SEL
) && (usbcfg
& GUSBCFG_PHYIF16
))
2141 if ((usbcfg
& GUSBCFG_PHYSEL
) && !(usbcfg
& GUSBCFG_PHYIF16
) &&
2142 hsotg
->hw_params
.fs_phy_type
== GHWCFG2_FS_PHY_TYPE_SHARED_UTMI
)
2144 if ((usbcfg
& GUSBCFG_PHYSEL
) &&
2145 hsotg
->hw_params
.fs_phy_type
== GHWCFG2_FS_PHY_TYPE_DEDICATED
)
2148 if ((hprt0
& HPRT0_SPD_MASK
) >> HPRT0_SPD_SHIFT
== HPRT0_SPD_HIGH_SPEED
)
2149 /* High speed case */
2153 return 1000 * clock
;
2157 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
 * @hsotg: Programming view of DWC_otg controller
2161 * @dest: Destination buffer for the packet
2162 * @bytes: Number of bytes to copy to the destination
2164 void dwc2_read_packet(struct dwc2_hsotg
*hsotg
, u8
*dest
, u16 bytes
)
2166 u32 __iomem
*fifo
= hsotg
->regs
+ HCFIFO(0);
2167 u32
*data_buf
= (u32
*)dest
;
2168 int word_count
= (bytes
+ 3) / 4;
2172 * Todo: Account for the case where dest is not dword aligned. This
2173 * requires reading data from the FIFO into a u32 temp buffer, then
2174 * moving it into the data buffer.
2177 dev_vdbg(hsotg
->dev
, "%s(%p,%p,%d)\n", __func__
, hsotg
, dest
, bytes
);
2179 for (i
= 0; i
< word_count
; i
++, data_buf
++)
2180 *data_buf
= dwc2_readl(fifo
);
/**
 * dwc2_dump_host_registers() - Prints the host registers
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	u32 __iomem *addr;
	int i;

	dev_dbg(hsotg->dev, "Host Global Registers\n");
	addr = hsotg->regs + HCFG;
	dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + HFIR;
	dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + HFNUM;
	dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + HPTXSTS;
	dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + HAINT;
	dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + HAINTMSK;
	dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	/* HFLBADDR exists only when descriptor DMA is enabled */
	if (hsotg->core_params->dma_desc_enable > 0) {
		addr = hsotg->regs + HFLBADDR;
		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
	}

	addr = hsotg->regs + HPRT0;
	dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));

	/* Per-channel register blocks, one per hardware host channel */
	for (i = 0; i < hsotg->core_params->host_channels; i++) {
		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
		addr = hsotg->regs + HCCHAR(i);
		dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
		addr = hsotg->regs + HCSPLT(i);
		dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
		addr = hsotg->regs + HCINT(i);
		dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
		addr = hsotg->regs + HCINTMSK(i);
		dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
		addr = hsotg->regs + HCTSIZ(i);
		dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
		addr = hsotg->regs + HCDMA(i);
		dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
			(unsigned long)addr, dwc2_readl(addr));
		/* HCDMAB is a descriptor-DMA-only register */
		if (hsotg->core_params->dma_desc_enable > 0) {
			addr = hsotg->regs + HCDMAB(i);
			dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
				(unsigned long)addr, dwc2_readl(addr));
		}
	}
#endif
}
/**
 * dwc2_dump_global_registers() - Prints the core global registers
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	u32 __iomem *addr;

	dev_dbg(hsotg->dev, "Core Global Registers\n");
	addr = hsotg->regs + GOTGCTL;
	dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GOTGINT;
	dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GAHBCFG;
	dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GUSBCFG;
	dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GRSTCTL;
	dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GINTSTS;
	dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GINTMSK;
	dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GRXSTSR;
	dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GRXFSIZ;
	dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GNPTXFSIZ;
	dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GNPTXSTS;
	dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GI2CCTL;
	dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GPVNDCTL;
	dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GGPIO;
	dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GUID;
	dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GSNPSID;
	dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GHWCFG1;
	dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GHWCFG2;
	dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GHWCFG3;
	dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GHWCFG4;
	dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GLPMCFG;
	dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GPWRDN;
	dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + GDFIFOCFG;
	dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
	addr = hsotg->regs + HPTXFSIZ;
	dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));

	addr = hsotg->regs + PCGCTL;
	dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, dwc2_readl(addr));
#endif
}
/**
 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
 *
 * @hsotg: Programming view of DWC_otg controller
 * @num:   Tx FIFO to flush
 */
void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);

	/* Select the FIFO number and kick off the flush */
	greset = GRSTCTL_TXFFLSH;
	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
	dwc2_writel(greset, hsotg->regs + GRSTCTL);

	/* Poll until the core clears the flush bit; bail out after ~10 ms */
	do {
		greset = dwc2_readl(hsotg->regs + GRSTCTL);
		if (++count > 10000) {
			dev_warn(hsotg->dev,
				 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
				 __func__, greset,
				 dwc2_readl(hsotg->regs + GNPTXSTS));
			break;
		}
		udelay(1);
	} while (greset & GRSTCTL_TXFFLSH);

	/* Wait for at least 3 PHY Clocks */
	udelay(1);
}
/**
 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Kick off the flush */
	greset = GRSTCTL_RXFFLSH;
	dwc2_writel(greset, hsotg->regs + GRSTCTL);

	/* Poll until the core clears the flush bit; bail out after ~10 ms */
	do {
		greset = dwc2_readl(hsotg->regs + GRSTCTL);
		if (++count > 10000) {
			dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
				 __func__, greset);
			break;
		}
		udelay(1);
	} while (greset & GRSTCTL_RXFFLSH);

	/* Wait for at least 3 PHY Clocks */
	udelay(1);
}
/* Evaluates true when @a lies outside the inclusive range [@b, @c] */
#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))

/* Parameter access functions */
2413 void dwc2_set_param_otg_cap(struct dwc2_hsotg
*hsotg
, int val
)
2418 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE
:
2419 if (hsotg
->hw_params
.op_mode
!= GHWCFG2_OP_MODE_HNP_SRP_CAPABLE
)
2422 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE
:
2423 switch (hsotg
->hw_params
.op_mode
) {
2424 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE
:
2425 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE
:
2426 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE
:
2427 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST
:
2434 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE
:
2445 "%d invalid for otg_cap parameter. Check HW configuration.\n",
2447 switch (hsotg
->hw_params
.op_mode
) {
2448 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE
:
2449 val
= DWC2_CAP_PARAM_HNP_SRP_CAPABLE
;
2451 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE
:
2452 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE
:
2453 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST
:
2454 val
= DWC2_CAP_PARAM_SRP_ONLY_CAPABLE
;
2457 val
= DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE
;
2460 dev_dbg(hsotg
->dev
, "Setting otg_cap to %d\n", val
);
2463 hsotg
->core_params
->otg_cap
= val
;
2466 void dwc2_set_param_dma_enable(struct dwc2_hsotg
*hsotg
, int val
)
2470 if (val
> 0 && hsotg
->hw_params
.arch
== GHWCFG2_SLAVE_ONLY_ARCH
)
2478 "%d invalid for dma_enable parameter. Check HW configuration.\n",
2480 val
= hsotg
->hw_params
.arch
!= GHWCFG2_SLAVE_ONLY_ARCH
;
2481 dev_dbg(hsotg
->dev
, "Setting dma_enable to %d\n", val
);
2484 hsotg
->core_params
->dma_enable
= val
;
2487 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg
*hsotg
, int val
)
2491 if (val
> 0 && (hsotg
->core_params
->dma_enable
<= 0 ||
2492 !hsotg
->hw_params
.dma_desc_enable
))
2500 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2502 val
= (hsotg
->core_params
->dma_enable
> 0 &&
2503 hsotg
->hw_params
.dma_desc_enable
);
2504 dev_dbg(hsotg
->dev
, "Setting dma_desc_enable to %d\n", val
);
2507 hsotg
->core_params
->dma_desc_enable
= val
;
2510 void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg
*hsotg
, int val
)
2514 if (val
> 0 && (hsotg
->core_params
->dma_enable
<= 0 ||
2515 !hsotg
->hw_params
.dma_desc_enable
))
2523 "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
2525 val
= (hsotg
->core_params
->dma_enable
> 0 &&
2526 hsotg
->hw_params
.dma_desc_enable
);
2529 hsotg
->core_params
->dma_desc_fs_enable
= val
;
2530 dev_dbg(hsotg
->dev
, "Setting dma_desc_fs_enable to %d\n", val
);
2533 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg
*hsotg
,
2536 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2539 "Wrong value for host_support_fs_low_power\n");
2541 "host_support_fs_low_power must be 0 or 1\n");
2545 "Setting host_support_fs_low_power to %d\n", val
);
2548 hsotg
->core_params
->host_support_fs_ls_low_power
= val
;
2551 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg
*hsotg
, int val
)
2555 if (val
> 0 && !hsotg
->hw_params
.enable_dynamic_fifo
)
2563 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2565 val
= hsotg
->hw_params
.enable_dynamic_fifo
;
2566 dev_dbg(hsotg
->dev
, "Setting enable_dynamic_fifo to %d\n", val
);
2569 hsotg
->core_params
->enable_dynamic_fifo
= val
;
2572 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg
*hsotg
, int val
)
2576 if (val
< 16 || val
> hsotg
->hw_params
.host_rx_fifo_size
)
2582 "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2584 val
= hsotg
->hw_params
.host_rx_fifo_size
;
2585 dev_dbg(hsotg
->dev
, "Setting host_rx_fifo_size to %d\n", val
);
2588 hsotg
->core_params
->host_rx_fifo_size
= val
;
2591 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg
*hsotg
, int val
)
2595 if (val
< 16 || val
> hsotg
->hw_params
.host_nperio_tx_fifo_size
)
2601 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2603 val
= hsotg
->hw_params
.host_nperio_tx_fifo_size
;
2604 dev_dbg(hsotg
->dev
, "Setting host_nperio_tx_fifo_size to %d\n",
2608 hsotg
->core_params
->host_nperio_tx_fifo_size
= val
;
2611 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg
*hsotg
, int val
)
2615 if (val
< 16 || val
> hsotg
->hw_params
.host_perio_tx_fifo_size
)
2621 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2623 val
= hsotg
->hw_params
.host_perio_tx_fifo_size
;
2624 dev_dbg(hsotg
->dev
, "Setting host_perio_tx_fifo_size to %d\n",
2628 hsotg
->core_params
->host_perio_tx_fifo_size
= val
;
2631 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg
*hsotg
, int val
)
2635 if (val
< 2047 || val
> hsotg
->hw_params
.max_transfer_size
)
2641 "%d invalid for max_transfer_size. Check HW configuration.\n",
2643 val
= hsotg
->hw_params
.max_transfer_size
;
2644 dev_dbg(hsotg
->dev
, "Setting max_transfer_size to %d\n", val
);
2647 hsotg
->core_params
->max_transfer_size
= val
;
2650 void dwc2_set_param_max_packet_count(struct dwc2_hsotg
*hsotg
, int val
)
2654 if (val
< 15 || val
> hsotg
->hw_params
.max_packet_count
)
2660 "%d invalid for max_packet_count. Check HW configuration.\n",
2662 val
= hsotg
->hw_params
.max_packet_count
;
2663 dev_dbg(hsotg
->dev
, "Setting max_packet_count to %d\n", val
);
2666 hsotg
->core_params
->max_packet_count
= val
;
2669 void dwc2_set_param_host_channels(struct dwc2_hsotg
*hsotg
, int val
)
2673 if (val
< 1 || val
> hsotg
->hw_params
.host_channels
)
2679 "%d invalid for host_channels. Check HW configuration.\n",
2681 val
= hsotg
->hw_params
.host_channels
;
2682 dev_dbg(hsotg
->dev
, "Setting host_channels to %d\n", val
);
2685 hsotg
->core_params
->host_channels
= val
;
2688 void dwc2_set_param_phy_type(struct dwc2_hsotg
*hsotg
, int val
)
2691 u32 hs_phy_type
, fs_phy_type
;
2693 if (DWC2_OUT_OF_BOUNDS(val
, DWC2_PHY_TYPE_PARAM_FS
,
2694 DWC2_PHY_TYPE_PARAM_ULPI
)) {
2696 dev_err(hsotg
->dev
, "Wrong value for phy_type\n");
2697 dev_err(hsotg
->dev
, "phy_type must be 0, 1 or 2\n");
2703 hs_phy_type
= hsotg
->hw_params
.hs_phy_type
;
2704 fs_phy_type
= hsotg
->hw_params
.fs_phy_type
;
2705 if (val
== DWC2_PHY_TYPE_PARAM_UTMI
&&
2706 (hs_phy_type
== GHWCFG2_HS_PHY_TYPE_UTMI
||
2707 hs_phy_type
== GHWCFG2_HS_PHY_TYPE_UTMI_ULPI
))
2709 else if (val
== DWC2_PHY_TYPE_PARAM_ULPI
&&
2710 (hs_phy_type
== GHWCFG2_HS_PHY_TYPE_ULPI
||
2711 hs_phy_type
== GHWCFG2_HS_PHY_TYPE_UTMI_ULPI
))
2713 else if (val
== DWC2_PHY_TYPE_PARAM_FS
&&
2714 fs_phy_type
== GHWCFG2_FS_PHY_TYPE_DEDICATED
)
2720 "%d invalid for phy_type. Check HW configuration.\n",
2722 val
= DWC2_PHY_TYPE_PARAM_FS
;
2723 if (hs_phy_type
!= GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED
) {
2724 if (hs_phy_type
== GHWCFG2_HS_PHY_TYPE_UTMI
||
2725 hs_phy_type
== GHWCFG2_HS_PHY_TYPE_UTMI_ULPI
)
2726 val
= DWC2_PHY_TYPE_PARAM_UTMI
;
2728 val
= DWC2_PHY_TYPE_PARAM_ULPI
;
2730 dev_dbg(hsotg
->dev
, "Setting phy_type to %d\n", val
);
2733 hsotg
->core_params
->phy_type
= val
;
2736 static int dwc2_get_param_phy_type(struct dwc2_hsotg
*hsotg
)
2738 return hsotg
->core_params
->phy_type
;
2741 void dwc2_set_param_speed(struct dwc2_hsotg
*hsotg
, int val
)
2745 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2747 dev_err(hsotg
->dev
, "Wrong value for speed parameter\n");
2748 dev_err(hsotg
->dev
, "max_speed parameter must be 0 or 1\n");
2753 if (val
== DWC2_SPEED_PARAM_HIGH
&&
2754 dwc2_get_param_phy_type(hsotg
) == DWC2_PHY_TYPE_PARAM_FS
)
2760 "%d invalid for speed parameter. Check HW configuration.\n",
2762 val
= dwc2_get_param_phy_type(hsotg
) == DWC2_PHY_TYPE_PARAM_FS
?
2763 DWC2_SPEED_PARAM_FULL
: DWC2_SPEED_PARAM_HIGH
;
2764 dev_dbg(hsotg
->dev
, "Setting speed to %d\n", val
);
2767 hsotg
->core_params
->speed
= val
;
2770 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg
*hsotg
, int val
)
2774 if (DWC2_OUT_OF_BOUNDS(val
, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ
,
2775 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
)) {
2778 "Wrong value for host_ls_low_power_phy_clk parameter\n");
2780 "host_ls_low_power_phy_clk must be 0 or 1\n");
2785 if (val
== DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ
&&
2786 dwc2_get_param_phy_type(hsotg
) == DWC2_PHY_TYPE_PARAM_FS
)
2792 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2794 val
= dwc2_get_param_phy_type(hsotg
) == DWC2_PHY_TYPE_PARAM_FS
2795 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2796 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ
;
2797 dev_dbg(hsotg
->dev
, "Setting host_ls_low_power_phy_clk to %d\n",
2801 hsotg
->core_params
->host_ls_low_power_phy_clk
= val
;
2804 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg
*hsotg
, int val
)
2806 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2808 dev_err(hsotg
->dev
, "Wrong value for phy_ulpi_ddr\n");
2809 dev_err(hsotg
->dev
, "phy_upli_ddr must be 0 or 1\n");
2812 dev_dbg(hsotg
->dev
, "Setting phy_upli_ddr to %d\n", val
);
2815 hsotg
->core_params
->phy_ulpi_ddr
= val
;
2818 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg
*hsotg
, int val
)
2820 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2823 "Wrong value for phy_ulpi_ext_vbus\n");
2825 "phy_ulpi_ext_vbus must be 0 or 1\n");
2828 dev_dbg(hsotg
->dev
, "Setting phy_ulpi_ext_vbus to %d\n", val
);
2831 hsotg
->core_params
->phy_ulpi_ext_vbus
= val
;
2834 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg
*hsotg
, int val
)
2838 switch (hsotg
->hw_params
.utmi_phy_data_width
) {
2839 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8
:
2842 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16
:
2843 valid
= (val
== 16);
2845 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16
:
2846 valid
= (val
== 8 || val
== 16);
2853 "%d invalid for phy_utmi_width. Check HW configuration.\n",
2856 val
= (hsotg
->hw_params
.utmi_phy_data_width
==
2857 GHWCFG4_UTMI_PHY_DATA_WIDTH_8
) ? 8 : 16;
2858 dev_dbg(hsotg
->dev
, "Setting phy_utmi_width to %d\n", val
);
2861 hsotg
->core_params
->phy_utmi_width
= val
;
2864 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg
*hsotg
, int val
)
2866 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2868 dev_err(hsotg
->dev
, "Wrong value for ulpi_fs_ls\n");
2869 dev_err(hsotg
->dev
, "ulpi_fs_ls must be 0 or 1\n");
2872 dev_dbg(hsotg
->dev
, "Setting ulpi_fs_ls to %d\n", val
);
2875 hsotg
->core_params
->ulpi_fs_ls
= val
;
2878 void dwc2_set_param_ts_dline(struct dwc2_hsotg
*hsotg
, int val
)
2880 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2882 dev_err(hsotg
->dev
, "Wrong value for ts_dline\n");
2883 dev_err(hsotg
->dev
, "ts_dline must be 0 or 1\n");
2886 dev_dbg(hsotg
->dev
, "Setting ts_dline to %d\n", val
);
2889 hsotg
->core_params
->ts_dline
= val
;
2892 void dwc2_set_param_i2c_enable(struct dwc2_hsotg
*hsotg
, int val
)
2896 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2898 dev_err(hsotg
->dev
, "Wrong value for i2c_enable\n");
2899 dev_err(hsotg
->dev
, "i2c_enable must be 0 or 1\n");
2905 if (val
== 1 && !(hsotg
->hw_params
.i2c_enable
))
2911 "%d invalid for i2c_enable. Check HW configuration.\n",
2913 val
= hsotg
->hw_params
.i2c_enable
;
2914 dev_dbg(hsotg
->dev
, "Setting i2c_enable to %d\n", val
);
2917 hsotg
->core_params
->i2c_enable
= val
;
2920 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg
*hsotg
, int val
)
2924 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2927 "Wrong value for en_multiple_tx_fifo,\n");
2929 "en_multiple_tx_fifo must be 0 or 1\n");
2934 if (val
== 1 && !hsotg
->hw_params
.en_multiple_tx_fifo
)
2940 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2942 val
= hsotg
->hw_params
.en_multiple_tx_fifo
;
2943 dev_dbg(hsotg
->dev
, "Setting en_multiple_tx_fifo to %d\n", val
);
2946 hsotg
->core_params
->en_multiple_tx_fifo
= val
;
2949 void dwc2_set_param_reload_ctl(struct dwc2_hsotg
*hsotg
, int val
)
2953 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2956 "'%d' invalid for parameter reload_ctl\n", val
);
2957 dev_err(hsotg
->dev
, "reload_ctl must be 0 or 1\n");
2962 if (val
== 1 && hsotg
->hw_params
.snpsid
< DWC2_CORE_REV_2_92a
)
2968 "%d invalid for parameter reload_ctl. Check HW configuration.\n",
2970 val
= hsotg
->hw_params
.snpsid
>= DWC2_CORE_REV_2_92a
;
2971 dev_dbg(hsotg
->dev
, "Setting reload_ctl to %d\n", val
);
2974 hsotg
->core_params
->reload_ctl
= val
;
2977 void dwc2_set_param_ahbcfg(struct dwc2_hsotg
*hsotg
, int val
)
2980 hsotg
->core_params
->ahbcfg
= val
;
2982 hsotg
->core_params
->ahbcfg
= GAHBCFG_HBSTLEN_INCR4
<<
2983 GAHBCFG_HBSTLEN_SHIFT
;
2986 void dwc2_set_param_otg_ver(struct dwc2_hsotg
*hsotg
, int val
)
2988 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
2991 "'%d' invalid for parameter otg_ver\n", val
);
2993 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2996 dev_dbg(hsotg
->dev
, "Setting otg_ver to %d\n", val
);
2999 hsotg
->core_params
->otg_ver
= val
;
3002 static void dwc2_set_param_uframe_sched(struct dwc2_hsotg
*hsotg
, int val
)
3004 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
3007 "'%d' invalid for parameter uframe_sched\n",
3009 dev_err(hsotg
->dev
, "uframe_sched must be 0 or 1\n");
3012 dev_dbg(hsotg
->dev
, "Setting uframe_sched to %d\n", val
);
3015 hsotg
->core_params
->uframe_sched
= val
;
3018 static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg
*hsotg
,
3021 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
3024 "'%d' invalid for parameter external_id_pin_ctl\n",
3026 dev_err(hsotg
->dev
, "external_id_pin_ctl must be 0 or 1\n");
3029 dev_dbg(hsotg
->dev
, "Setting external_id_pin_ctl to %d\n", val
);
3032 hsotg
->core_params
->external_id_pin_ctl
= val
;
3035 static void dwc2_set_param_hibernation(struct dwc2_hsotg
*hsotg
,
3038 if (DWC2_OUT_OF_BOUNDS(val
, 0, 1)) {
3041 "'%d' invalid for parameter hibernation\n",
3043 dev_err(hsotg
->dev
, "hibernation must be 0 or 1\n");
3046 dev_dbg(hsotg
->dev
, "Setting hibernation to %d\n", val
);
3049 hsotg
->core_params
->hibernation
= val
;
/*
 * This function is called during module intialization to pass module parameters
 * for the DWC_otg core.
 */
void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
			 const struct dwc2_core_params *params)
{
	dev_dbg(hsotg->dev, "%s()\n", __func__);

	/*
	 * Each setter validates its value against the hardware capabilities
	 * detected by dwc2_get_hwparams() and falls back to a safe default
	 * when the requested value is invalid or unsupported.
	 */
	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
	dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
			params->host_support_fs_ls_low_power);
	dwc2_set_param_enable_dynamic_fifo(hsotg,
			params->enable_dynamic_fifo);
	dwc2_set_param_host_rx_fifo_size(hsotg,
			params->host_rx_fifo_size);
	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
			params->host_nperio_tx_fifo_size);
	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
			params->host_perio_tx_fifo_size);
	dwc2_set_param_max_transfer_size(hsotg,
			params->max_transfer_size);
	dwc2_set_param_max_packet_count(hsotg,
			params->max_packet_count);
	dwc2_set_param_host_channels(hsotg, params->host_channels);
	dwc2_set_param_phy_type(hsotg, params->phy_type);
	dwc2_set_param_speed(hsotg, params->speed);
	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
			params->host_ls_low_power_phy_clk);
	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
			params->phy_ulpi_ext_vbus);
	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
	dwc2_set_param_en_multiple_tx_fifo(hsotg,
			params->en_multiple_tx_fifo);
	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
	dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
	dwc2_set_param_hibernation(hsotg, params->hibernation);
}
/*
 * During device initialization, read various hardware configuration
 * registers and interpret the contents.
 *
 * Returns 0 on success, -ENODEV when the GSNPSID register does not
 * identify a DWC_otg controller.
 */
int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	unsigned width;
	u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
	u32 hptxfsiz, grxfsiz, gnptxfsiz;
	u32 gusbcfg;

	/*
	 * Attempt to ensure this device is really a DWC_otg Controller.
	 * Read and verify the GSNPSID register contents. The value should be
	 * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3",
	 * as in "OTG version 2.xx" or "OTG version 3.xx".
	 */
	hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
			hw->snpsid);
		return -ENODEV;
	}

	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);

	hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
	hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
	hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
	hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
	grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);

	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);

	/* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */
	if (hsotg->dr_mode != USB_DR_MODE_HOST) {
		gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		dwc2_writel(gusbcfg | GUSBCFG_FORCEHOSTMODE,
			    hsotg->regs + GUSBCFG);
		usleep_range(25000, 50000);
	}

	gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
	hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
	/* Restore the original mode after sampling the FIFO sizes */
	if (hsotg->dr_mode != USB_DR_MODE_HOST) {
		dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
		usleep_range(25000, 50000);
	}

	/* hwcfg2 */
	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
		      GHWCFG2_OP_MODE_SHIFT;
	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
		   GHWCFG2_ARCHITECTURE_SHIFT;
	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
				 GHWCFG2_NUM_HOST_CHAN_SHIFT);
	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
			  GHWCFG2_HS_PHY_TYPE_SHIFT;
	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
			  GHWCFG2_FS_PHY_TYPE_SHIFT;
	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
			 GHWCFG2_NUM_DEV_EP_SHIFT;
	hw->nperio_tx_q_depth =
		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
	hw->host_perio_tx_q_depth =
		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
	hw->dev_token_q_depth =
		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;

	/* hwcfg3 */
	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
	hw->max_transfer_size = (1 << (width + 11)) - 1;
	/*
	 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
	 * coherent buffers with this size, and if it's too large we can
	 * exhaust the coherent DMA pool.
	 */
	if (hw->max_transfer_size > 65535)
		hw->max_transfer_size = 65535;
	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
	hw->max_packet_count = (1 << (width + 4)) - 1;
	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
			      GHWCFG3_DFIFO_DEPTH_SHIFT;

	/* hwcfg4 */
	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;

	/* fifo sizes */
	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
				GRXFSIZ_DEPTH_SHIFT;
	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
				       FIFOSIZE_DEPTH_SHIFT;
	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
				      FIFOSIZE_DEPTH_SHIFT;

	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
	dev_dbg(hsotg->dev, "  op_mode=%d\n",
		hw->op_mode);
	dev_dbg(hsotg->dev, "  arch=%d\n",
		hw->arch);
	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
		hw->dma_desc_enable);
	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
		hw->power_optimized);
	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
		hw->i2c_enable);
	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
		hw->hs_phy_type);
	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
		hw->fs_phy_type);
	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
		hw->utmi_phy_data_width);
	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
		hw->num_dev_ep);
	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
		hw->num_dev_perio_in_ep);
	dev_dbg(hsotg->dev, "  host_channels=%d\n",
		hw->host_channels);
	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
		hw->max_transfer_size);
	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
		hw->max_packet_count);
	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
		hw->nperio_tx_q_depth);
	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
		hw->host_perio_tx_q_depth);
	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
		hw->dev_token_q_depth);
	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
		hw->enable_dynamic_fifo);
	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
		hw->en_multiple_tx_fifo);
	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
		hw->total_fifo_size);
	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
		hw->host_rx_fifo_size);
	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
		hw->host_nperio_tx_fifo_size);
	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
		hw->host_perio_tx_fifo_size);
	dev_dbg(hsotg->dev, "\n");

	return 0;
}
3270 * Sets all parameters to the given value.
3272 * Assumes that the dwc2_core_params struct contains only integers.
3274 void dwc2_set_all_params(struct dwc2_core_params
*params
, int value
)
3276 int *p
= (int *)params
;
3277 size_t size
= sizeof(*params
) / sizeof(*p
);
3280 for (i
= 0; i
< size
; i
++)
3285 u16
dwc2_get_otg_version(struct dwc2_hsotg
*hsotg
)
3287 return hsotg
->core_params
->otg_ver
== 1 ? 0x0200 : 0x0103;
3290 bool dwc2_is_controller_alive(struct dwc2_hsotg
*hsotg
)
3292 if (dwc2_readl(hsotg
->regs
+ GSNPSID
) == 0xffffffff)
3299 * dwc2_enable_global_interrupts() - Enables the controller's Global
3300 * Interrupt in the AHB Config register
3302 * @hsotg: Programming view of DWC_otg controller
3304 void dwc2_enable_global_interrupts(struct dwc2_hsotg
*hsotg
)
3306 u32 ahbcfg
= dwc2_readl(hsotg
->regs
+ GAHBCFG
);
3308 ahbcfg
|= GAHBCFG_GLBL_INTR_EN
;
3309 dwc2_writel(ahbcfg
, hsotg
->regs
+ GAHBCFG
);
3313 * dwc2_disable_global_interrupts() - Disables the controller's Global
3314 * Interrupt in the AHB Config register
3316 * @hsotg: Programming view of DWC_otg controller
3318 void dwc2_disable_global_interrupts(struct dwc2_hsotg
*hsotg
)
3320 u32 ahbcfg
= dwc2_readl(hsotg
->regs
+ GAHBCFG
);
3322 ahbcfg
&= ~GAHBCFG_GLBL_INTR_EN
;
3323 dwc2_writel(ahbcfg
, hsotg
->regs
+ GAHBCFG
);
/* Module metadata */
MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
MODULE_AUTHOR("Synopsys, Inc.");
MODULE_LICENSE("Dual BSD/GPL");