1 /*
2 * core.c - DesignWare HS OTG Controller common routines
3 *
4 * Copyright (C) 2004-2013 Synopsys, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The names of the above-listed copyright holders may not be used
16 * to endorse or promote products derived from this software without
17 * specific prior written permission.
18 *
19 * ALTERNATIVELY, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") as published by the Free Software
21 * Foundation; either version 2 of the License, or (at your option) any
22 * later version.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 /*
38 * The Core code provides basic services for accessing and managing the
39 * DWC_otg hardware. These services are used by both the Host Controller
40 * Driver and the Peripheral Controller Driver.
41 */
42 #include <linux/kernel.h>
43 #include <linux/module.h>
44 #include <linux/moduleparam.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/delay.h>
49 #include <linux/io.h>
50 #include <linux/slab.h>
51 #include <linux/usb.h>
52
53 #include <linux/usb/hcd.h>
54 #include <linux/usb/ch11.h>
55
56 #include "core.h"
57 #include "hcd.h"
58
59 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
60 /**
61 * dwc2_backup_host_registers() - Backup controller host registers.
62  * When suspending the USB bus, the host registers need to be backed up
63  * if controller power is disabled once suspended.
64 *
65 * @hsotg: Programming view of the DWC_otg controller
66 */
67 static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
68 {
69 struct dwc2_hregs_backup *hr;
70 int i;
71
72 dev_dbg(hsotg->dev, "%s\n", __func__);
73
74 /* Backup Host regs */
75 hr = &hsotg->hr_backup;
76 hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
77 hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
78 for (i = 0; i < hsotg->core_params->host_channels; ++i)
79 hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
80
81 hr->hprt0 = dwc2_read_hprt0(hsotg);
82 hr->hfir = dwc2_readl(hsotg->regs + HFIR);
83 hr->valid = true;
84
85 return 0;
86 }
87
88 /**
89 * dwc2_restore_host_registers() - Restore controller host registers.
90  * When resuming the USB bus, the host registers need to be restored
91  * if controller power was disabled.
92 *
93 * @hsotg: Programming view of the DWC_otg controller
94 */
95 static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
96 {
97 struct dwc2_hregs_backup *hr;
98 int i;
99
100 dev_dbg(hsotg->dev, "%s\n", __func__);
101
102 /* Restore host regs */
103 hr = &hsotg->hr_backup;
104 if (!hr->valid) {
105 dev_err(hsotg->dev, "%s: no host registers to restore\n",
106 __func__);
107 return -EINVAL;
108 }
109 hr->valid = false;
110
111 dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
112 dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);
113
114 for (i = 0; i < hsotg->core_params->host_channels; ++i)
115 dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
116
117 dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
118 dwc2_writel(hr->hfir, hsotg->regs + HFIR);
119 hsotg->frame_number = 0;
120
121 return 0;
122 }
123 #else
124 static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
125 { return 0; }
126
127 static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
128 { return 0; }
129 #endif
130
131 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
132 IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
133 /**
134 * dwc2_backup_device_registers() - Backup controller device registers.
135  * When suspending the USB bus, the device registers need to be backed up
136  * if controller power is disabled once suspended.
137 *
138 * @hsotg: Programming view of the DWC_otg controller
139 */
140 static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
141 {
142 struct dwc2_dregs_backup *dr;
143 int i;
144
145 dev_dbg(hsotg->dev, "%s\n", __func__);
146
147 /* Backup dev regs */
148 dr = &hsotg->dr_backup;
149
150 dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
151 dr->dctl = dwc2_readl(hsotg->regs + DCTL);
152 dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
153 dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
154 dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
155
156 for (i = 0; i < hsotg->num_of_eps; i++) {
157 /* Backup IN EPs */
158 dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));
159
160 /* Ensure DATA PID is correctly configured */
161 if (dr->diepctl[i] & DXEPCTL_DPID)
162 dr->diepctl[i] |= DXEPCTL_SETD1PID;
163 else
164 dr->diepctl[i] |= DXEPCTL_SETD0PID;
165
166 dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
167 dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));
168
169 /* Backup OUT EPs */
170 dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));
171
172 /* Ensure DATA PID is correctly configured */
173 if (dr->doepctl[i] & DXEPCTL_DPID)
174 dr->doepctl[i] |= DXEPCTL_SETD1PID;
175 else
176 dr->doepctl[i] |= DXEPCTL_SETD0PID;
177
178 dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
179 dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
180 }
181 dr->valid = true;
182 return 0;
183 }
184
185 /**
186 * dwc2_restore_device_registers() - Restore controller device registers.
187  * When resuming the USB bus, the device registers need to be restored
188  * if controller power was disabled.
189 *
190 * @hsotg: Programming view of the DWC_otg controller
191 */
192 static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
193 {
194 struct dwc2_dregs_backup *dr;
195 u32 dctl;
196 int i;
197
198 dev_dbg(hsotg->dev, "%s\n", __func__);
199
200 /* Restore dev regs */
201 dr = &hsotg->dr_backup;
202 if (!dr->valid) {
203 dev_err(hsotg->dev, "%s: no device registers to restore\n",
204 __func__);
205 return -EINVAL;
206 }
207 dr->valid = false;
208
209 dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
210 dwc2_writel(dr->dctl, hsotg->regs + DCTL);
211 dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
212 dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
213 dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);
214
215 for (i = 0; i < hsotg->num_of_eps; i++) {
216 /* Restore IN EPs */
217 dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
218 dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
219 dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));
220
221 /* Restore OUT EPs */
222 dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
223 dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
224 dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
225 }
226
227 /* Set the Power-On Programming done bit */
228 dctl = dwc2_readl(hsotg->regs + DCTL);
229 dctl |= DCTL_PWRONPRGDONE;
230 dwc2_writel(dctl, hsotg->regs + DCTL);
231
232 return 0;
233 }
234 #else
235 static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
236 { return 0; }
237
238 static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
239 { return 0; }
240 #endif
241
242 /**
243 * dwc2_backup_global_registers() - Backup global controller registers.
244  * When suspending the USB bus, the global registers need to be backed up
245  * if controller power is disabled once suspended.
246 *
247 * @hsotg: Programming view of the DWC_otg controller
248 */
249 static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
250 {
251 struct dwc2_gregs_backup *gr;
252 int i;
253
254 /* Backup global regs */
255 gr = &hsotg->gr_backup;
256
257 gr->gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
258 gr->gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
259 gr->gahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
260 gr->gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
261 gr->grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
262 gr->gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
263 gr->hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
264 gr->gdfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
265 for (i = 0; i < MAX_EPS_CHANNELS; i++)
266 gr->dtxfsiz[i] = dwc2_readl(hsotg->regs + DPTXFSIZN(i));
267
268 gr->valid = true;
269 return 0;
270 }
271
272 /**
273 * dwc2_restore_global_registers() - Restore controller global registers.
274  * When resuming the USB bus, the global registers need to be restored
275  * if controller power was disabled.
276 *
277 * @hsotg: Programming view of the DWC_otg controller
278 */
279 static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
280 {
281 struct dwc2_gregs_backup *gr;
282 int i;
283
284 dev_dbg(hsotg->dev, "%s\n", __func__);
285
286 /* Restore global regs */
287 gr = &hsotg->gr_backup;
288 if (!gr->valid) {
289 dev_err(hsotg->dev, "%s: no global registers to restore\n",
290 __func__);
291 return -EINVAL;
292 }
293 gr->valid = false;
294
295 dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
296 dwc2_writel(gr->gotgctl, hsotg->regs + GOTGCTL);
297 dwc2_writel(gr->gintmsk, hsotg->regs + GINTMSK);
298 dwc2_writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
299 dwc2_writel(gr->gahbcfg, hsotg->regs + GAHBCFG);
300 dwc2_writel(gr->grxfsiz, hsotg->regs + GRXFSIZ);
301 dwc2_writel(gr->gnptxfsiz, hsotg->regs + GNPTXFSIZ);
302 dwc2_writel(gr->hptxfsiz, hsotg->regs + HPTXFSIZ);
303 dwc2_writel(gr->gdfifocfg, hsotg->regs + GDFIFOCFG);
304 for (i = 0; i < MAX_EPS_CHANNELS; i++)
305 dwc2_writel(gr->dtxfsiz[i], hsotg->regs + DPTXFSIZN(i));
306
307 return 0;
308 }
309
310 /**
311 * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
312 *
313 * @hsotg: Programming view of the DWC_otg controller
314 * @restore: Controller registers need to be restored
315 */
316 int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
317 {
318 u32 pcgcctl;
319 int ret = 0;
320
321 if (!hsotg->core_params->hibernation)
322 return -ENOTSUPP;
323
324 pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
325 pcgcctl &= ~PCGCTL_STOPPCLK;
326 dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
327
328 pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
329 pcgcctl &= ~PCGCTL_PWRCLMP;
330 dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
331
332 pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
333 pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
334 dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
335
336 udelay(100);
337 if (restore) {
338 ret = dwc2_restore_global_registers(hsotg);
339 if (ret) {
340 dev_err(hsotg->dev, "%s: failed to restore registers\n",
341 __func__);
342 return ret;
343 }
344 if (dwc2_is_host_mode(hsotg)) {
345 ret = dwc2_restore_host_registers(hsotg);
346 if (ret) {
347 dev_err(hsotg->dev, "%s: failed to restore host registers\n",
348 __func__);
349 return ret;
350 }
351 } else {
352 ret = dwc2_restore_device_registers(hsotg);
353 if (ret) {
354 dev_err(hsotg->dev, "%s: failed to restore device registers\n",
355 __func__);
356 return ret;
357 }
358 }
359 }
360
361 return ret;
362 }
363
364 /**
365 * dwc2_enter_hibernation() - Put controller in Partial Power Down.
366 *
367 * @hsotg: Programming view of the DWC_otg controller
368 */
369 int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
370 {
371 u32 pcgcctl;
372 int ret = 0;
373
374 if (!hsotg->core_params->hibernation)
375 return -ENOTSUPP;
376
377 /* Backup all registers */
378 ret = dwc2_backup_global_registers(hsotg);
379 if (ret) {
380 dev_err(hsotg->dev, "%s: failed to backup global registers\n",
381 __func__);
382 return ret;
383 }
384
385 if (dwc2_is_host_mode(hsotg)) {
386 ret = dwc2_backup_host_registers(hsotg);
387 if (ret) {
388 dev_err(hsotg->dev, "%s: failed to backup host registers\n",
389 __func__);
390 return ret;
391 }
392 } else {
393 ret = dwc2_backup_device_registers(hsotg);
394 if (ret) {
395 dev_err(hsotg->dev, "%s: failed to backup device registers\n",
396 __func__);
397 return ret;
398 }
399 }
400
401 /*
402 * Clear any pending interrupts since dwc2 will not be able to
403 * clear them after entering hibernation.
404 */
405 dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
406
407 /* Put the controller in low power state */
408 pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
409
410 pcgcctl |= PCGCTL_PWRCLMP;
411 dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
412 ndelay(20);
413
414 pcgcctl |= PCGCTL_RSTPDWNMODULE;
415 dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
416 ndelay(20);
417
418 pcgcctl |= PCGCTL_STOPPCLK;
419 dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
420
421 return ret;
422 }
423
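/*
 * Illustrative sketch (not part of the driver): how the hibernation
 * helpers above are typically paired around a bus suspend/resume cycle.
 * The dwc2_suspend_example()/dwc2_resume_example() names are hypothetical;
 * a real caller (e.g. the HCD bus-suspend path) also manages lx_state and
 * the port state, which is omitted here.
 *
 *	static int dwc2_suspend_example(struct dwc2_hsotg *hsotg)
 *	{
 *		int ret;
 *
 *		// Backs up global + host/device regs, then gates the PHY clock
 *		ret = dwc2_enter_hibernation(hsotg);
 *		if (ret && ret != -ENOTSUPP)
 *			dev_err(hsotg->dev, "enter hibernation failed: %d\n", ret);
 *		return ret;
 *	}
 *
 *	static int dwc2_resume_example(struct dwc2_hsotg *hsotg)
 *	{
 *		// restore == true re-applies the registers backed up above
 *		return dwc2_exit_hibernation(hsotg, true);
 *	}
 */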
424 /**
425  * dwc2_enable_common_interrupts() - Initializes the common interrupts,
426 * used in both device and host modes
427 *
428 * @hsotg: Programming view of the DWC_otg controller
429 */
430 static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
431 {
432 u32 intmsk;
433
434 /* Clear any pending OTG Interrupts */
435 dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
436
437 /* Clear any pending interrupts */
438 dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
439
440 /* Enable the interrupts in the GINTMSK */
441 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
442
443 if (hsotg->core_params->dma_enable <= 0)
444 intmsk |= GINTSTS_RXFLVL;
445 if (hsotg->core_params->external_id_pin_ctl <= 0)
446 intmsk |= GINTSTS_CONIDSTSCHNG;
447
448 intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
449 GINTSTS_SESSREQINT;
450
451 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
452 }
453
454 /*
455 * Initializes the FSLSPClkSel field of the HCFG register depending on the
456 * PHY type
457 */
458 static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
459 {
460 u32 hcfg, val;
461
462 if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
463 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
464 hsotg->core_params->ulpi_fs_ls > 0) ||
465 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
466 /* Full speed PHY */
467 val = HCFG_FSLSPCLKSEL_48_MHZ;
468 } else {
469 /* High speed PHY running at full speed or high speed */
470 val = HCFG_FSLSPCLKSEL_30_60_MHZ;
471 }
472
473 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
474 hcfg = dwc2_readl(hsotg->regs + HCFG);
475 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
476 hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
477 dwc2_writel(hcfg, hsotg->regs + HCFG);
478 }
479
480 /*
481  * Do a soft reset of the core. Be careful with this because it
482 * resets all the internal state machines of the core.
483 */
484 int dwc2_core_reset(struct dwc2_hsotg *hsotg)
485 {
486 u32 greset;
487 int count = 0;
488 u32 gusbcfg;
489
490 dev_vdbg(hsotg->dev, "%s()\n", __func__);
491
492 /* Wait for AHB master IDLE state */
493 do {
494 udelay(1);
495 greset = dwc2_readl(hsotg->regs + GRSTCTL);
496 if (++count > 50) {
497 dev_warn(hsotg->dev,
498 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
499 __func__, greset);
500 return -EBUSY;
501 }
502 } while (!(greset & GRSTCTL_AHBIDLE));
503
504 /* Core Soft Reset */
505 count = 0;
506 greset |= GRSTCTL_CSFTRST;
507 dwc2_writel(greset, hsotg->regs + GRSTCTL);
508 do {
509 udelay(1);
510 greset = dwc2_readl(hsotg->regs + GRSTCTL);
511 if (++count > 50) {
512 dev_warn(hsotg->dev,
513 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
514 __func__, greset);
515 return -EBUSY;
516 }
517 } while (greset & GRSTCTL_CSFTRST);
518
519 if (hsotg->dr_mode == USB_DR_MODE_HOST) {
520 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
521 gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
522 gusbcfg |= GUSBCFG_FORCEHOSTMODE;
523 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
524 } else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
525 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
526 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
527 gusbcfg |= GUSBCFG_FORCEDEVMODE;
528 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
529 } else if (hsotg->dr_mode == USB_DR_MODE_OTG) {
530 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
531 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
532 gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
533 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
534 }
535
536 /*
537 * NOTE: This long sleep is _very_ important, otherwise the core will
538 * not stay in host mode after a connector ID change!
539 */
540 usleep_range(150000, 160000);
541
542 return 0;
543 }
544
545 static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
546 {
547 u32 usbcfg, i2cctl;
548 int retval = 0;
549
550 /*
551 * core_init() is now called on every switch so only call the
552 * following for the first time through
553 */
554 if (select_phy) {
555 dev_dbg(hsotg->dev, "FS PHY selected\n");
556
557 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
558 if (!(usbcfg & GUSBCFG_PHYSEL)) {
559 usbcfg |= GUSBCFG_PHYSEL;
560 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
561
562 /* Reset after a PHY select */
563 retval = dwc2_core_reset(hsotg);
564
565 if (retval) {
566 dev_err(hsotg->dev,
567 "%s: Reset failed, aborting", __func__);
568 return retval;
569 }
570 }
571 }
572
573 /*
574 	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
575 * do this on HNP Dev/Host mode switches (done in dev_init and
576 * host_init).
577 */
578 if (dwc2_is_host_mode(hsotg))
579 dwc2_init_fs_ls_pclk_sel(hsotg);
580
581 if (hsotg->core_params->i2c_enable > 0) {
582 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
583
584 /* Program GUSBCFG.OtgUtmiFsSel to I2C */
585 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
586 usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
587 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
588
589 /* Program GI2CCTL.I2CEn */
590 i2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
591 i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
592 i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
593 i2cctl &= ~GI2CCTL_I2CEN;
594 dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
595 i2cctl |= GI2CCTL_I2CEN;
596 dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
597 }
598
599 return retval;
600 }
601
602 static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
603 {
604 u32 usbcfg, usbcfg_old;
605 int retval = 0;
606
607 if (!select_phy)
608 return 0;
609
610 usbcfg = usbcfg_old = dwc2_readl(hsotg->regs + GUSBCFG);
611
612 /*
613 * HS PHY parameters. These parameters are preserved during soft reset
614 * so only program the first time. Do a soft reset immediately after
615 * setting phyif.
616 */
617 switch (hsotg->core_params->phy_type) {
618 case DWC2_PHY_TYPE_PARAM_ULPI:
619 /* ULPI interface */
620 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
621 usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
622 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
623 if (hsotg->core_params->phy_ulpi_ddr > 0)
624 usbcfg |= GUSBCFG_DDRSEL;
625 break;
626 case DWC2_PHY_TYPE_PARAM_UTMI:
627 /* UTMI+ interface */
628 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
629 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
630 if (hsotg->core_params->phy_utmi_width == 16)
631 usbcfg |= GUSBCFG_PHYIF16;
632 break;
633 default:
634 dev_err(hsotg->dev, "FS PHY selected at HS!\n");
635 break;
636 }
637
638 if (usbcfg != usbcfg_old) {
639 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
640
641 /* Reset after setting the PHY parameters */
642 retval = dwc2_core_reset(hsotg);
643 if (retval) {
644 dev_err(hsotg->dev,
645 "%s: Reset failed, aborting", __func__);
646 return retval;
647 }
648 }
649
650 return retval;
651 }
652
653 static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
654 {
655 u32 usbcfg;
656 int retval = 0;
657
658 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
659 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
660 /* If FS mode with FS PHY */
661 retval = dwc2_fs_phy_init(hsotg, select_phy);
662 if (retval)
663 return retval;
664 } else {
665 /* High speed PHY */
666 retval = dwc2_hs_phy_init(hsotg, select_phy);
667 if (retval)
668 return retval;
669 }
670
671 if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
672 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
673 hsotg->core_params->ulpi_fs_ls > 0) {
674 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
675 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
676 usbcfg |= GUSBCFG_ULPI_FS_LS;
677 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
678 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
679 } else {
680 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
681 usbcfg &= ~GUSBCFG_ULPI_FS_LS;
682 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
683 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
684 }
685
686 return retval;
687 }
688
689 static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
690 {
691 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
692
693 switch (hsotg->hw_params.arch) {
694 case GHWCFG2_EXT_DMA_ARCH:
695 dev_err(hsotg->dev, "External DMA Mode not supported\n");
696 return -EINVAL;
697
698 case GHWCFG2_INT_DMA_ARCH:
699 dev_dbg(hsotg->dev, "Internal DMA Mode\n");
700 if (hsotg->core_params->ahbcfg != -1) {
701 ahbcfg &= GAHBCFG_CTRL_MASK;
702 ahbcfg |= hsotg->core_params->ahbcfg &
703 ~GAHBCFG_CTRL_MASK;
704 }
705 break;
706
707 case GHWCFG2_SLAVE_ONLY_ARCH:
708 default:
709 dev_dbg(hsotg->dev, "Slave Only Mode\n");
710 break;
711 }
712
713 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
714 hsotg->core_params->dma_enable,
715 hsotg->core_params->dma_desc_enable);
716
717 if (hsotg->core_params->dma_enable > 0) {
718 if (hsotg->core_params->dma_desc_enable > 0)
719 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
720 else
721 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
722 } else {
723 dev_dbg(hsotg->dev, "Using Slave mode\n");
724 hsotg->core_params->dma_desc_enable = 0;
725 }
726
727 if (hsotg->core_params->dma_enable > 0)
728 ahbcfg |= GAHBCFG_DMA_EN;
729
730 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
731
732 return 0;
733 }
734
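/*
 * Note on the ahbcfg core parameter consumed above (illustrative, an
 * assumption about typical platform glue rather than a requirement):
 * platform code may pass a pre-composed GAHBCFG value covering the bits
 * outside GAHBCFG_CTRL_MASK, e.g. to pick the AHB burst length, as some
 * of the parameter tables in platform.c do:
 *
 *	params.ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
 */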
735 static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
736 {
737 u32 usbcfg;
738
739 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
740 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
741
742 switch (hsotg->hw_params.op_mode) {
743 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
744 if (hsotg->core_params->otg_cap ==
745 DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
746 usbcfg |= GUSBCFG_HNPCAP;
747 if (hsotg->core_params->otg_cap !=
748 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
749 usbcfg |= GUSBCFG_SRPCAP;
750 break;
751
752 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
753 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
754 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
755 if (hsotg->core_params->otg_cap !=
756 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
757 usbcfg |= GUSBCFG_SRPCAP;
758 break;
759
760 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
761 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
762 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
763 default:
764 break;
765 }
766
767 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
768 }
769
770 /**
771 * dwc2_core_init() - Initializes the DWC_otg controller registers and
772 * prepares the core for device mode or host mode operation
773 *
774 * @hsotg: Programming view of the DWC_otg controller
775 * @initial_setup: If true then this is the first init for this instance.
776 */
777 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
778 {
779 u32 usbcfg, otgctl;
780 int retval;
781
782 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
783
784 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
785
786 /* Set ULPI External VBUS bit if needed */
787 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
788 if (hsotg->core_params->phy_ulpi_ext_vbus ==
789 DWC2_PHY_ULPI_EXTERNAL_VBUS)
790 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
791
792 /* Set external TS Dline pulsing bit if needed */
793 usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
794 if (hsotg->core_params->ts_dline > 0)
795 usbcfg |= GUSBCFG_TERMSELDLPULSE;
796
797 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
798
799 /*
800 * Reset the Controller
801 *
802 * We only need to reset the controller if this is a re-init.
803 * For the first init we know for sure that earlier code reset us (it
804 * needed to in order to properly detect various parameters).
805 */
806 if (!initial_setup) {
807 retval = dwc2_core_reset(hsotg);
808 if (retval) {
809 dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
810 __func__);
811 return retval;
812 }
813 }
814
815 /*
816 * This needs to happen in FS mode before any other programming occurs
817 */
818 retval = dwc2_phy_init(hsotg, initial_setup);
819 if (retval)
820 return retval;
821
822 /* Program the GAHBCFG Register */
823 retval = dwc2_gahbcfg_init(hsotg);
824 if (retval)
825 return retval;
826
827 /* Program the GUSBCFG register */
828 dwc2_gusbcfg_init(hsotg);
829
830 /* Program the GOTGCTL register */
831 otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
832 otgctl &= ~GOTGCTL_OTGVER;
833 if (hsotg->core_params->otg_ver > 0)
834 otgctl |= GOTGCTL_OTGVER;
835 dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
836 dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
837
838 /* Clear the SRP success bit for FS-I2c */
839 hsotg->srp_success = 0;
840
841 /* Enable common interrupts */
842 dwc2_enable_common_interrupts(hsotg);
843
844 /*
845 * Do device or host initialization based on mode during PCD and
846 * HCD initialization
847 */
848 if (dwc2_is_host_mode(hsotg)) {
849 dev_dbg(hsotg->dev, "Host Mode\n");
850 hsotg->op_state = OTG_STATE_A_HOST;
851 } else {
852 dev_dbg(hsotg->dev, "Device Mode\n");
853 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
854 }
855
856 return 0;
857 }
858
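/*
 * Illustrative call order (sketch, not a fixed API contract): probe code
 * resets the core and detects its parameters first, so the first call
 * passes initial_setup = true and skips the reset; later mode changes
 * re-initialize with initial_setup = false, which performs the reset:
 *
 *	dwc2_core_reset(hsotg);			// done by earlier probe code
 *	// ...detect hw params, fill in core_params...
 *	dwc2_core_init(hsotg, true);		// first init, reset already done
 *	// ...
 *	dwc2_core_init(hsotg, false);		// re-init, resets the core itself
 */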
859 /**
860 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
861 *
862 * @hsotg: Programming view of DWC_otg controller
863 */
864 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
865 {
866 u32 intmsk;
867
868 dev_dbg(hsotg->dev, "%s()\n", __func__);
869
870 /* Disable all interrupts */
871 dwc2_writel(0, hsotg->regs + GINTMSK);
872 dwc2_writel(0, hsotg->regs + HAINTMSK);
873
874 /* Enable the common interrupts */
875 dwc2_enable_common_interrupts(hsotg);
876
877 /* Enable host mode interrupts without disturbing common interrupts */
878 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
879 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
880 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
881 }
882
883 /**
884 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
885 *
886 * @hsotg: Programming view of DWC_otg controller
887 */
888 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
889 {
890 u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
891
892 /* Disable host mode interrupts without disturbing common interrupts */
893 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
894 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
895 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
896 }
897
898 /*
899  * dwc2_calculate_dynamic_fifo() - Calculates the default FIFO sizes
900  * for systems that have a total FIFO depth smaller than the default
901  * RX + TX FIFO size.
902 *
903 * @hsotg: Programming view of DWC_otg controller
904 */
905 static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
906 {
907 struct dwc2_core_params *params = hsotg->core_params;
908 struct dwc2_hw_params *hw = &hsotg->hw_params;
909 u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
910
911 total_fifo_size = hw->total_fifo_size;
912 rxfsiz = params->host_rx_fifo_size;
913 nptxfsiz = params->host_nperio_tx_fifo_size;
914 ptxfsiz = params->host_perio_tx_fifo_size;
915
916 /*
917 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
918 * allocation with support for high bandwidth endpoints. Synopsys
919 	 * defines MPS (max packet size) for a periodic EP as 1024, and for
920 	 * a non-periodic EP as 512.
921 */
922 if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
923 /*
924 * For Buffer DMA mode/Scatter Gather DMA mode
925 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
926 * with n = number of host channel.
927 * 2 * ((1024/4) + 2) = 516
928 */
929 rxfsiz = 516 + hw->host_channels;
930
931 /*
932 * min non-periodic tx fifo depth
933 * 2 * (largest non-periodic USB packet used / 4)
934 * 2 * (512/4) = 256
935 */
936 nptxfsiz = 256;
937
938 /*
939 * min periodic tx fifo depth
940 * (largest packet size*MC)/4
941 * (1024 * 3)/4 = 768
942 */
943 ptxfsiz = 768;
944
945 params->host_rx_fifo_size = rxfsiz;
946 params->host_nperio_tx_fifo_size = nptxfsiz;
947 params->host_perio_tx_fifo_size = ptxfsiz;
948 }
949
950 /*
951 * If the summation of RX, NPTX and PTX fifo sizes is still
952 * bigger than the total_fifo_size, then we have a problem.
953 *
954 * We won't be able to allocate as many endpoints. Right now,
955 * we're just printing an error message, but ideally this FIFO
956 * allocation algorithm would be improved in the future.
957 *
958 * FIXME improve this FIFO allocation algorithm.
959 */
960 if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
961 dev_err(hsotg->dev, "invalid fifo sizes\n");
962 }
963
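/*
 * Worked example of the "Method 2" sizing above (numbers are illustrative
 * and assume a hypothetical core with 16 host channels and a total FIFO
 * depth of 2048 words reported by the hardware):
 *
 *	rxfsiz   = 2 * ((1024 / 4) + 1 + 1) + 16 = 516 + 16 = 532
 *	nptxfsiz = 2 * (512 / 4)                 = 256
 *	ptxfsiz  = (1024 * 3) / 4                = 768
 *	sum      = 532 + 256 + 768               = 1556 <= 2048, so it fits
 *
 * If the sum still exceeded total_fifo_size, the dev_err() above would
 * fire and the FIFO programming would remain over-committed.
 */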
964 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
965 {
966 struct dwc2_core_params *params = hsotg->core_params;
967 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
968
969 if (!params->enable_dynamic_fifo)
970 return;
971
972 dwc2_calculate_dynamic_fifo(hsotg);
973
974 /* Rx FIFO */
975 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
976 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
977 grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
978 grxfsiz |= params->host_rx_fifo_size <<
979 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
980 dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ);
981 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
982 dwc2_readl(hsotg->regs + GRXFSIZ));
983
984 /* Non-periodic Tx FIFO */
985 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
986 dwc2_readl(hsotg->regs + GNPTXFSIZ));
987 nptxfsiz = params->host_nperio_tx_fifo_size <<
988 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
989 nptxfsiz |= params->host_rx_fifo_size <<
990 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
991 dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
992 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
993 dwc2_readl(hsotg->regs + GNPTXFSIZ));
994
995 /* Periodic Tx FIFO */
996 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
997 dwc2_readl(hsotg->regs + HPTXFSIZ));
998 hptxfsiz = params->host_perio_tx_fifo_size <<
999 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
1000 hptxfsiz |= (params->host_rx_fifo_size +
1001 params->host_nperio_tx_fifo_size) <<
1002 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
1003 dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
1004 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
1005 dwc2_readl(hsotg->regs + HPTXFSIZ));
1006
1007 if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
1008 hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
1009 /*
1010 * Global DFIFOCFG calculation for Host mode -
1011 * include RxFIFO, NPTXFIFO and HPTXFIFO
1012 */
1013 dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
1014 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
1015 dfifocfg |= (params->host_rx_fifo_size +
1016 params->host_nperio_tx_fifo_size +
1017 params->host_perio_tx_fifo_size) <<
1018 GDFIFOCFG_EPINFOBASE_SHIFT &
1019 GDFIFOCFG_EPINFOBASE_MASK;
1020 dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG);
1021 }
1022 }
1023
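/*
 * Resulting FIFO layout programmed by dwc2_config_fifos() above, shown
 * for the illustrative 532/256/768 sizes from the worked example:
 *
 *	words 0    .. 531	RxFIFO   (GRXFSIZ depth = 532)
 *	words 532  .. 787	NPTxFIFO (GNPTXFSIZ start 532, depth 256)
 *	words 788  .. 1555	PTxFIFO  (HPTXFSIZ start 788, depth 768)
 *	EPInfoBase = 1556	(GDFIFOCFG, cores <= 2.94a with multiple TX FIFOs)
 */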
1024 /**
1025 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
1026 * Host mode
1027 *
1028 * @hsotg: Programming view of DWC_otg controller
1029 *
1030 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
1031 * request queues. Host channels are reset to ensure that they are ready for
1032 * performing transfers.
1033 */
1034 void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
1035 {
1036 u32 hcfg, hfir, otgctl;
1037
1038 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
1039
1040 /* Restart the Phy Clock */
1041 dwc2_writel(0, hsotg->regs + PCGCTL);
1042
1043 /* Initialize Host Configuration Register */
1044 dwc2_init_fs_ls_pclk_sel(hsotg);
1045 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
1046 hcfg = dwc2_readl(hsotg->regs + HCFG);
1047 hcfg |= HCFG_FSLSSUPP;
1048 dwc2_writel(hcfg, hsotg->regs + HCFG);
1049 }
1050
1051 /*
1052 	 * The HFIR_RLDCTRL bit allows dynamic reloading of the HFIR register
1053 	 * at runtime. The bit itself must be programmed during initial
1054 	 * configuration and its value must not be changed afterwards.
1055 */
1056 if (hsotg->core_params->reload_ctl > 0) {
1057 hfir = dwc2_readl(hsotg->regs + HFIR);
1058 hfir |= HFIR_RLDCTRL;
1059 dwc2_writel(hfir, hsotg->regs + HFIR);
1060 }
1061
1062 if (hsotg->core_params->dma_desc_enable > 0) {
1063 u32 op_mode = hsotg->hw_params.op_mode;
1064 if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
1065 !hsotg->hw_params.dma_desc_enable ||
1066 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
1067 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
1068 op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
1069 dev_err(hsotg->dev,
1070 "Hardware does not support descriptor DMA mode -\n");
1071 dev_err(hsotg->dev,
1072 "falling back to buffer DMA mode.\n");
1073 hsotg->core_params->dma_desc_enable = 0;
1074 } else {
1075 hcfg = dwc2_readl(hsotg->regs + HCFG);
1076 hcfg |= HCFG_DESCDMA;
1077 dwc2_writel(hcfg, hsotg->regs + HCFG);
1078 }
1079 }
1080
1081 /* Configure data FIFO sizes */
1082 dwc2_config_fifos(hsotg);
1083
1084 /* TODO - check this */
1085 /* Clear Host Set HNP Enable in the OTG Control Register */
1086 otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
1087 otgctl &= ~GOTGCTL_HSTSETHNPEN;
1088 dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
1089
1090 /* Make sure the FIFOs are flushed */
1091 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
1092 dwc2_flush_rx_fifo(hsotg);
1093
1094 /* Clear Host Set HNP Enable in the OTG Control Register */
1095 otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
1096 otgctl &= ~GOTGCTL_HSTSETHNPEN;
1097 dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
1098
1099 if (hsotg->core_params->dma_desc_enable <= 0) {
1100 int num_channels, i;
1101 u32 hcchar;
1102
1103 /* Flush out any leftover queued requests */
1104 num_channels = hsotg->core_params->host_channels;
1105 for (i = 0; i < num_channels; i++) {
1106 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
1107 hcchar &= ~HCCHAR_CHENA;
1108 hcchar |= HCCHAR_CHDIS;
1109 hcchar &= ~HCCHAR_EPDIR;
1110 dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
1111 }
1112
1113 /* Halt all channels to put them into a known state */
1114 for (i = 0; i < num_channels; i++) {
1115 int count = 0;
1116
1117 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
1118 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
1119 hcchar &= ~HCCHAR_EPDIR;
1120 dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
1121 dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
1122 __func__, i);
1123 do {
1124 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
1125 if (++count > 1000) {
1126 dev_err(hsotg->dev,
1127 "Unable to clear enable on channel %d\n",
1128 i);
1129 break;
1130 }
1131 udelay(1);
1132 } while (hcchar & HCCHAR_CHENA);
1133 }
1134 }
1135
1136 /* Turn on the vbus power */
1137 dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
1138 if (hsotg->op_state == OTG_STATE_A_HOST) {
1139 u32 hprt0 = dwc2_read_hprt0(hsotg);
1140
1141 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
1142 !!(hprt0 & HPRT0_PWR));
1143 if (!(hprt0 & HPRT0_PWR)) {
1144 hprt0 |= HPRT0_PWR;
1145 dwc2_writel(hprt0, hsotg->regs + HPRT0);
1146 }
1147 }
1148
1149 dwc2_enable_host_interrupts(hsotg);
1150 }
1151
1152 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
1153 struct dwc2_host_chan *chan)
1154 {
1155 u32 hcintmsk = HCINTMSK_CHHLTD;
1156
1157 switch (chan->ep_type) {
1158 case USB_ENDPOINT_XFER_CONTROL:
1159 case USB_ENDPOINT_XFER_BULK:
1160 dev_vdbg(hsotg->dev, "control/bulk\n");
1161 hcintmsk |= HCINTMSK_XFERCOMPL;
1162 hcintmsk |= HCINTMSK_STALL;
1163 hcintmsk |= HCINTMSK_XACTERR;
1164 hcintmsk |= HCINTMSK_DATATGLERR;
1165 if (chan->ep_is_in) {
1166 hcintmsk |= HCINTMSK_BBLERR;
1167 } else {
1168 hcintmsk |= HCINTMSK_NAK;
1169 hcintmsk |= HCINTMSK_NYET;
1170 if (chan->do_ping)
1171 hcintmsk |= HCINTMSK_ACK;
1172 }
1173
1174 if (chan->do_split) {
1175 hcintmsk |= HCINTMSK_NAK;
1176 if (chan->complete_split)
1177 hcintmsk |= HCINTMSK_NYET;
1178 else
1179 hcintmsk |= HCINTMSK_ACK;
1180 }
1181
1182 if (chan->error_state)
1183 hcintmsk |= HCINTMSK_ACK;
1184 break;
1185
1186 case USB_ENDPOINT_XFER_INT:
1187 if (dbg_perio())
1188 dev_vdbg(hsotg->dev, "intr\n");
1189 hcintmsk |= HCINTMSK_XFERCOMPL;
1190 hcintmsk |= HCINTMSK_NAK;
1191 hcintmsk |= HCINTMSK_STALL;
1192 hcintmsk |= HCINTMSK_XACTERR;
1193 hcintmsk |= HCINTMSK_DATATGLERR;
1194 hcintmsk |= HCINTMSK_FRMOVRUN;
1195
1196 if (chan->ep_is_in)
1197 hcintmsk |= HCINTMSK_BBLERR;
1198 if (chan->error_state)
1199 hcintmsk |= HCINTMSK_ACK;
1200 if (chan->do_split) {
1201 if (chan->complete_split)
1202 hcintmsk |= HCINTMSK_NYET;
1203 else
1204 hcintmsk |= HCINTMSK_ACK;
1205 }
1206 break;
1207
1208 case USB_ENDPOINT_XFER_ISOC:
1209 if (dbg_perio())
1210 dev_vdbg(hsotg->dev, "isoc\n");
1211 hcintmsk |= HCINTMSK_XFERCOMPL;
1212 hcintmsk |= HCINTMSK_FRMOVRUN;
1213 hcintmsk |= HCINTMSK_ACK;
1214
1215 if (chan->ep_is_in) {
1216 hcintmsk |= HCINTMSK_XACTERR;
1217 hcintmsk |= HCINTMSK_BBLERR;
1218 }
1219 break;
1220 default:
1221 dev_err(hsotg->dev, "## Unknown EP type ##\n");
1222 break;
1223 }
1224
1225 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
1226 if (dbg_hc(chan))
1227 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
1228 }
1229
1230 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
1231 struct dwc2_host_chan *chan)
1232 {
1233 u32 hcintmsk = HCINTMSK_CHHLTD;
1234
1235 /*
1236 * For Descriptor DMA mode core halts the channel on AHB error.
1237 * Interrupt is not required.
1238 */
1239 if (hsotg->core_params->dma_desc_enable <= 0) {
1240 if (dbg_hc(chan))
1241 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1242 hcintmsk |= HCINTMSK_AHBERR;
1243 } else {
1244 if (dbg_hc(chan))
1245 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
1246 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1247 hcintmsk |= HCINTMSK_XFERCOMPL;
1248 }
1249
1250 if (chan->error_state && !chan->do_split &&
1251 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1252 if (dbg_hc(chan))
1253 dev_vdbg(hsotg->dev, "setting ACK\n");
1254 hcintmsk |= HCINTMSK_ACK;
1255 if (chan->ep_is_in) {
1256 hcintmsk |= HCINTMSK_DATATGLERR;
1257 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
1258 hcintmsk |= HCINTMSK_NAK;
1259 }
1260 }
1261
1262 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
1263 if (dbg_hc(chan))
1264 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
1265 }
1266
1267 static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
1268 struct dwc2_host_chan *chan)
1269 {
1270 u32 intmsk;
1271
1272 if (hsotg->core_params->dma_enable > 0) {
1273 if (dbg_hc(chan))
1274 dev_vdbg(hsotg->dev, "DMA enabled\n");
1275 dwc2_hc_enable_dma_ints(hsotg, chan);
1276 } else {
1277 if (dbg_hc(chan))
1278 dev_vdbg(hsotg->dev, "DMA disabled\n");
1279 dwc2_hc_enable_slave_ints(hsotg, chan);
1280 }
1281
1282 /* Enable the top level host channel interrupt */
1283 intmsk = dwc2_readl(hsotg->regs + HAINTMSK);
1284 intmsk |= 1 << chan->hc_num;
1285 dwc2_writel(intmsk, hsotg->regs + HAINTMSK);
1286 if (dbg_hc(chan))
1287 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
1288
1289 /* Make sure host channel interrupts are enabled */
1290 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
1291 intmsk |= GINTSTS_HCHINT;
1292 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
1293 if (dbg_hc(chan))
1294 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
1295 }
1296
1297 /**
1298 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
1299 * a specific endpoint
1300 *
1301 * @hsotg: Programming view of DWC_otg controller
1302 * @chan: Information needed to initialize the host channel
1303 *
1304 * The HCCHARn register is set up with the characteristics specified in chan.
1305 * Host channel interrupts that may need to be serviced while this transfer is
1306 * in progress are enabled.
1307 */
1308 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1309 {
1310 u8 hc_num = chan->hc_num;
1311 u32 hcintmsk;
1312 u32 hcchar;
1313 u32 hcsplt = 0;
1314
1315 if (dbg_hc(chan))
1316 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1317
1318 /* Clear old interrupt conditions for this host channel */
1319 hcintmsk = 0xffffffff;
1320 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1321 dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num));
1322
1323 /* Enable channel interrupts required for this transfer */
1324 dwc2_hc_enable_ints(hsotg, chan);
1325
1326 /*
1327 * Program the HCCHARn register with the endpoint characteristics for
1328 * the current transfer
1329 */
1330 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
1331 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
1332 if (chan->ep_is_in)
1333 hcchar |= HCCHAR_EPDIR;
1334 if (chan->speed == USB_SPEED_LOW)
1335 hcchar |= HCCHAR_LSPDDEV;
1336 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
1337 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
1338 dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num));
1339 if (dbg_hc(chan)) {
1340 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
1341 hc_num, hcchar);
1342
1343 dev_vdbg(hsotg->dev, "%s: Channel %d\n",
1344 __func__, hc_num);
1345 dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
1346 chan->dev_addr);
1347 dev_vdbg(hsotg->dev, " Ep Num: %d\n",
1348 chan->ep_num);
1349 dev_vdbg(hsotg->dev, " Is In: %d\n",
1350 chan->ep_is_in);
1351 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
1352 chan->speed == USB_SPEED_LOW);
1353 dev_vdbg(hsotg->dev, " Ep Type: %d\n",
1354 chan->ep_type);
1355 dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
1356 chan->max_packet);
1357 }
1358
1359 /* Program the HCSPLT register for SPLITs */
1360 if (chan->do_split) {
1361 if (dbg_hc(chan))
1362 dev_vdbg(hsotg->dev,
1363 "Programming HC %d with split --> %s\n",
1364 hc_num,
1365 chan->complete_split ? "CSPLIT" : "SSPLIT");
1366 if (chan->complete_split)
1367 hcsplt |= HCSPLT_COMPSPLT;
1368 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
1369 HCSPLT_XACTPOS_MASK;
1370 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
1371 HCSPLT_HUBADDR_MASK;
1372 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
1373 HCSPLT_PRTADDR_MASK;
1374 if (dbg_hc(chan)) {
1375 dev_vdbg(hsotg->dev, " comp split %d\n",
1376 chan->complete_split);
1377 dev_vdbg(hsotg->dev, " xact pos %d\n",
1378 chan->xact_pos);
1379 dev_vdbg(hsotg->dev, " hub addr %d\n",
1380 chan->hub_addr);
1381 dev_vdbg(hsotg->dev, " hub port %d\n",
1382 chan->hub_port);
1383 dev_vdbg(hsotg->dev, " is_in %d\n",
1384 chan->ep_is_in);
1385 dev_vdbg(hsotg->dev, " Max Pkt %d\n",
1386 chan->max_packet);
1387 dev_vdbg(hsotg->dev, " xferlen %d\n",
1388 chan->xfer_len);
1389 }
1390 }
1391
1392 dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
1393 }
1394
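/*
 * Illustrative sketch (hypothetical values, not part of the driver): the
 * minimum channel state the HCD scheduler in hcd.c fills in before handing
 * a channel to dwc2_hc_init() and then to dwc2_hc_start_transfer() below:
 *
 *	chan->hc_num     = 0;
 *	chan->dev_addr   = udev_address;	// target device address (hypothetical name)
 *	chan->ep_num     = 1;
 *	chan->ep_is_in   = 1;			// IN direction
 *	chan->ep_type    = USB_ENDPOINT_XFER_BULK;
 *	chan->max_packet = 512;
 *	chan->speed      = USB_SPEED_HIGH;
 *	chan->data_pid_start = DWC2_HC_PID_DATA0;
 *	chan->xfer_dma   = urb_dma;		// when dma_enable > 0 (hypothetical name)
 *	chan->xfer_len   = urb_length;
 *	dwc2_hc_init(hsotg, chan);
 *	dwc2_hc_start_transfer(hsotg, chan);
 */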
1395 /**
1396 * dwc2_hc_halt() - Attempts to halt a host channel
1397 *
1398 * @hsotg: Controller register interface
1399 * @chan: Host channel to halt
1400 * @halt_status: Reason for halting the channel
1401 *
1402 * This function should only be called in Slave mode or to abort a transfer in
1403 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
1404 * controller halts the channel when the transfer is complete or a condition
1405 * occurs that requires application intervention.
1406 *
1407 * In slave mode, checks for a free request queue entry, then sets the Channel
1408 * Enable and Channel Disable bits of the Host Channel Characteristics
1409  * register of the specified channel to initiate the halt. If there is no free
1410 * request queue entry, sets only the Channel Disable bit of the HCCHARn
1411 * register to flush requests for this channel. In the latter case, sets a
1412 * flag to indicate that the host channel needs to be halted when a request
1413 * queue slot is open.
1414 *
1415 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1416 * HCCHARn register. The controller ensures there is space in the request
1417 * queue before submitting the halt request.
1418 *
1419 * Some time may elapse before the core flushes any posted requests for this
1420 * host channel and halts. The Channel Halted interrupt handler completes the
1421 * deactivation of the host channel.
1422 */
1423 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
1424 enum dwc2_halt_status halt_status)
1425 {
1426 u32 nptxsts, hptxsts, hcchar;
1427
1428 if (dbg_hc(chan))
1429 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1430 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
1431 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
1432
1433 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1434 halt_status == DWC2_HC_XFER_AHB_ERR) {
1435 /*
1436 * Disable all channel interrupts except Ch Halted. The QTD
1437 * and QH state associated with this transfer has been cleared
1438 * (in the case of URB_DEQUEUE), so the channel needs to be
1439 * shut down carefully to prevent crashes.
1440 */
1441 u32 hcintmsk = HCINTMSK_CHHLTD;
1442
1443 dev_vdbg(hsotg->dev, "dequeue/error\n");
1444 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
1445
1446 /*
1447 * Make sure no other interrupts besides halt are currently
1448 * pending. Handling another interrupt could cause a crash due
1449 * to the QTD and QH state.
1450 */
1451 dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1452
1453 /*
1454 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1455 * even if the channel was already halted for some other
1456 * reason
1457 */
1458 chan->halt_status = halt_status;
1459
1460 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
1461 if (!(hcchar & HCCHAR_CHENA)) {
1462 /*
1463 * The channel is either already halted or it hasn't
1464 * started yet. In DMA mode, the transfer may halt if
1465 * it finishes normally or a condition occurs that
1466 * requires driver intervention. Don't want to halt
1467 * the channel again. In either Slave or DMA mode,
1468 * it's possible that the transfer has been assigned
1469 * to a channel, but not started yet when an URB is
1470 * dequeued. Don't want to halt a channel that hasn't
1471 * started yet.
1472 */
1473 return;
1474 }
1475 }
1476 if (chan->halt_pending) {
1477 /*
1478 * A halt has already been issued for this channel. This might
1479 * happen when a transfer is aborted by a higher level in
1480 * the stack.
1481 */
1482 dev_vdbg(hsotg->dev,
1483 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1484 __func__, chan->hc_num);
1485 return;
1486 }
1487
1488 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
1489
1490 /* No need to set the bit in DDMA for disabling the channel */
1491 /* TODO check it everywhere channel is disabled */
1492 if (hsotg->core_params->dma_desc_enable <= 0) {
1493 if (dbg_hc(chan))
1494 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1495 hcchar |= HCCHAR_CHENA;
1496 } else {
1497 if (dbg_hc(chan))
1498 dev_dbg(hsotg->dev, "desc DMA enabled\n");
1499 }
1500 hcchar |= HCCHAR_CHDIS;
1501
1502 if (hsotg->core_params->dma_enable <= 0) {
1503 if (dbg_hc(chan))
1504 dev_vdbg(hsotg->dev, "DMA not enabled\n");
1505 hcchar |= HCCHAR_CHENA;
1506
1507 /* Check for space in the request queue to issue the halt */
1508 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1509 chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1510 dev_vdbg(hsotg->dev, "control/bulk\n");
1511 nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
1512 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1513 dev_vdbg(hsotg->dev, "Disabling channel\n");
1514 hcchar &= ~HCCHAR_CHENA;
1515 }
1516 } else {
1517 if (dbg_perio())
1518 dev_vdbg(hsotg->dev, "isoc/intr\n");
1519 hptxsts = dwc2_readl(hsotg->regs + HPTXSTS);
1520 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1521 hsotg->queuing_high_bandwidth) {
1522 if (dbg_perio())
1523 dev_vdbg(hsotg->dev, "Disabling channel\n");
1524 hcchar &= ~HCCHAR_CHENA;
1525 }
1526 }
1527 } else {
1528 if (dbg_hc(chan))
1529 dev_vdbg(hsotg->dev, "DMA enabled\n");
1530 }
1531
1532 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1533 chan->halt_status = halt_status;
1534
1535 if (hcchar & HCCHAR_CHENA) {
1536 if (dbg_hc(chan))
1537 dev_vdbg(hsotg->dev, "Channel enabled\n");
1538 chan->halt_pending = 1;
1539 chan->halt_on_queue = 0;
1540 } else {
1541 if (dbg_hc(chan))
1542 dev_vdbg(hsotg->dev, "Channel disabled\n");
1543 chan->halt_on_queue = 1;
1544 }
1545
1546 if (dbg_hc(chan)) {
1547 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1548 chan->hc_num);
1549 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
1550 hcchar);
1551 dev_vdbg(hsotg->dev, " halt_pending: %d\n",
1552 chan->halt_pending);
1553 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
1554 chan->halt_on_queue);
1555 dev_vdbg(hsotg->dev, " halt_status: %d\n",
1556 chan->halt_status);
1557 }
1558 }
1559
1560 /**
1561 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1562 *
1563 * @hsotg: Programming view of DWC_otg controller
1564 * @chan: Identifies the host channel to clean up
1565 *
1566 * This function is normally called after a transfer is done and the host
1567 * channel is being released
1568 */
1569 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1570 {
1571 u32 hcintmsk;
1572
1573 chan->xfer_started = 0;
1574
1575 /*
1576 * Clear channel interrupt enables and any unhandled channel interrupt
1577 * conditions
1578 */
1579 dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1580 hcintmsk = 0xffffffff;
1581 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1582 dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1583 }
1584
1585 /**
1586 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1587 * which frame a periodic transfer should occur
1588 *
1589 * @hsotg: Programming view of DWC_otg controller
1590 * @chan: Identifies the host channel to set up and its properties
1591 * @hcchar: Current value of the HCCHAR register for the specified host channel
1592 *
1593 * This function has no effect on non-periodic transfers
1594 */
1595 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1596 struct dwc2_host_chan *chan, u32 *hcchar)
1597 {
1598 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1599 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1600 /* 1 if _next_ frame is odd, 0 if it's even */
1601 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
1602 *hcchar |= HCCHAR_ODDFRM;
1603 }
1604 }
1605
1606 static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1607 {
1608 /* Set up the initial PID for the transfer */
1609 if (chan->speed == USB_SPEED_HIGH) {
1610 if (chan->ep_is_in) {
1611 if (chan->multi_count == 1)
1612 chan->data_pid_start = DWC2_HC_PID_DATA0;
1613 else if (chan->multi_count == 2)
1614 chan->data_pid_start = DWC2_HC_PID_DATA1;
1615 else
1616 chan->data_pid_start = DWC2_HC_PID_DATA2;
1617 } else {
1618 if (chan->multi_count == 1)
1619 chan->data_pid_start = DWC2_HC_PID_DATA0;
1620 else
1621 chan->data_pid_start = DWC2_HC_PID_MDATA;
1622 }
1623 } else {
1624 chan->data_pid_start = DWC2_HC_PID_DATA0;
1625 }
1626 }
1627
1628 /**
1629 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1630 * the Host Channel
1631 *
1632 * @hsotg: Programming view of DWC_otg controller
1633 * @chan: Information needed to initialize the host channel
1634 *
1635 * This function should only be called in Slave mode. For a channel associated
1636 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1637 * associated with a periodic EP, the periodic Tx FIFO is written.
1638 *
1639 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1640 * the number of bytes written to the Tx FIFO.
1641 */
1642 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1643 struct dwc2_host_chan *chan)
1644 {
1645 u32 i;
1646 u32 remaining_count;
1647 u32 byte_count;
1648 u32 dword_count;
1649 u32 __iomem *data_fifo;
1650 u32 *data_buf = (u32 *)chan->xfer_buf;
1651
1652 if (dbg_hc(chan))
1653 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1654
1655 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1656
1657 remaining_count = chan->xfer_len - chan->xfer_count;
1658 if (remaining_count > chan->max_packet)
1659 byte_count = chan->max_packet;
1660 else
1661 byte_count = remaining_count;
1662
1663 dword_count = (byte_count + 3) / 4;
1664
1665 if (((unsigned long)data_buf & 0x3) == 0) {
1666 /* xfer_buf is DWORD aligned */
1667 for (i = 0; i < dword_count; i++, data_buf++)
1668 dwc2_writel(*data_buf, data_fifo);
1669 } else {
1670 /* xfer_buf is not DWORD aligned */
1671 for (i = 0; i < dword_count; i++, data_buf++) {
1672 u32 data = data_buf[0] | data_buf[1] << 8 |
1673 data_buf[2] << 16 | data_buf[3] << 24;
1674 dwc2_writel(data, data_fifo);
1675 }
1676 }
1677
1678 chan->xfer_count += byte_count;
1679 chan->xfer_buf += byte_count;
1680 }
1681
1682 /**
1683 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1684 * channel and starts the transfer
1685 *
1686 * @hsotg: Programming view of DWC_otg controller
1687 * @chan: Information needed to initialize the host channel. The xfer_len value
1688 * may be reduced to accommodate the max widths of the XferSize and
1689 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1690 * changed to reflect the final xfer_len value.
1691 *
1692 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1693 * the caller must ensure that there is sufficient space in the request queue
1694 * and Tx Data FIFO.
1695 *
1696 * For an OUT transfer in Slave mode, it loads a data packet into the
1697 * appropriate FIFO. If necessary, additional data packets are loaded in the
1698 * Host ISR.
1699 *
1700 * For an IN transfer in Slave mode, a data packet is requested. The data
1701 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1702 * additional data packets are requested in the Host ISR.
1703 *
1704 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1705 * register along with a packet count of 1 and the channel is enabled. This
1706 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1707 * simply set to 0 since no data transfer occurs in this case.
1708 *
1709 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1710 * all the information required to perform the subsequent data transfer. In
1711 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1712 * controller performs the entire PING protocol, then starts the data
1713 * transfer.
1714 */
1715 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1716 struct dwc2_host_chan *chan)
1717 {
1718 u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1719 u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1720 u32 hcchar;
1721 u32 hctsiz = 0;
1722 u16 num_packets;
1723 u32 ec_mc;
1724
1725 if (dbg_hc(chan))
1726 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1727
1728 if (chan->do_ping) {
1729 if (hsotg->core_params->dma_enable <= 0) {
1730 if (dbg_hc(chan))
1731 dev_vdbg(hsotg->dev, "ping, no DMA\n");
1732 dwc2_hc_do_ping(hsotg, chan);
1733 chan->xfer_started = 1;
1734 return;
1735 } else {
1736 if (dbg_hc(chan))
1737 dev_vdbg(hsotg->dev, "ping, DMA\n");
1738 hctsiz |= TSIZ_DOPNG;
1739 }
1740 }
1741
1742 if (chan->do_split) {
1743 if (dbg_hc(chan))
1744 dev_vdbg(hsotg->dev, "split\n");
1745 num_packets = 1;
1746
1747 if (chan->complete_split && !chan->ep_is_in)
1748 /*
1749 * For CSPLIT OUT Transfer, set the size to 0 so the
1750 * core doesn't expect any data written to the FIFO
1751 */
1752 chan->xfer_len = 0;
1753 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1754 chan->xfer_len = chan->max_packet;
1755 else if (!chan->ep_is_in && chan->xfer_len > 188)
1756 chan->xfer_len = 188;
1757
1758 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1759 TSIZ_XFERSIZE_MASK;
1760
1761 /* For split set ec_mc for immediate retries */
1762 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1763 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1764 ec_mc = 3;
1765 else
1766 ec_mc = 1;
1767 } else {
1768 if (dbg_hc(chan))
1769 dev_vdbg(hsotg->dev, "no split\n");
1770 /*
1771 * Ensure that the transfer length and packet count will fit
1772 * in the widths allocated for them in the HCTSIZn register
1773 */
1774 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1775 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1776 /*
1777 * Make sure the transfer size is no larger than one
1778 * (micro)frame's worth of data. (A check was done
1779 * when the periodic transfer was accepted to ensure
1780 * that a (micro)frame's worth of data can be
1781 * programmed into a channel.)
1782 */
1783 u32 max_periodic_len =
1784 chan->multi_count * chan->max_packet;
1785
1786 if (chan->xfer_len > max_periodic_len)
1787 chan->xfer_len = max_periodic_len;
1788 } else if (chan->xfer_len > max_hc_xfer_size) {
1789 /*
1790 * Make sure that xfer_len is a multiple of max packet
1791 * size
1792 */
1793 chan->xfer_len =
1794 max_hc_xfer_size - chan->max_packet + 1;
1795 }
1796
1797 if (chan->xfer_len > 0) {
1798 num_packets = (chan->xfer_len + chan->max_packet - 1) /
1799 chan->max_packet;
1800 if (num_packets > max_hc_pkt_count) {
1801 num_packets = max_hc_pkt_count;
1802 chan->xfer_len = num_packets * chan->max_packet;
1803 }
1804 } else {
1805 /* Need 1 packet for transfer length of 0 */
1806 num_packets = 1;
1807 }
1808
1809 if (chan->ep_is_in)
1810 /*
1811 * Always program an integral # of max packets for IN
1812 * transfers
1813 */
1814 chan->xfer_len = num_packets * chan->max_packet;
1815
1816 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1817 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1818 /*
1819 * Make sure that the multi_count field matches the
1820 * actual transfer length
1821 */
1822 chan->multi_count = num_packets;
1823
1824 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1825 dwc2_set_pid_isoc(chan);
1826
1827 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1828 TSIZ_XFERSIZE_MASK;
1829
1830 /* The ec_mc gets the multi_count for non-split */
1831 ec_mc = chan->multi_count;
1832 }
1833
1834 chan->start_pkt_count = num_packets;
1835 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1836 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1837 TSIZ_SC_MC_PID_MASK;
1838 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1839 if (dbg_hc(chan)) {
1840 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1841 hctsiz, chan->hc_num);
1842
1843 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1844 chan->hc_num);
1845 dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
1846 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1847 TSIZ_XFERSIZE_SHIFT);
1848 dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
1849 (hctsiz & TSIZ_PKTCNT_MASK) >>
1850 TSIZ_PKTCNT_SHIFT);
1851 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1852 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1853 TSIZ_SC_MC_PID_SHIFT);
1854 }
1855
1856 if (hsotg->core_params->dma_enable > 0) {
1857 dma_addr_t dma_addr;
1858
1859 if (chan->align_buf) {
1860 if (dbg_hc(chan))
1861 dev_vdbg(hsotg->dev, "align_buf\n");
1862 dma_addr = chan->align_buf;
1863 } else {
1864 dma_addr = chan->xfer_dma;
1865 }
1866 dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
1867 if (dbg_hc(chan))
1868 dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1869 (unsigned long)dma_addr, chan->hc_num);
1870 }
1871
1872 /* Start the split */
1873 if (chan->do_split) {
1874 u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));
1875
1876 hcsplt |= HCSPLT_SPLTENA;
1877 dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
1878 }
1879
1880 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
1881 hcchar &= ~HCCHAR_MULTICNT_MASK;
1882 hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
1883 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1884
1885 if (hcchar & HCCHAR_CHDIS)
1886 dev_warn(hsotg->dev,
1887 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1888 __func__, chan->hc_num, hcchar);
1889
1890 /* Set host channel enable after all other setup is complete */
1891 hcchar |= HCCHAR_CHENA;
1892 hcchar &= ~HCCHAR_CHDIS;
1893
1894 if (dbg_hc(chan))
1895 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1896 (hcchar & HCCHAR_MULTICNT_MASK) >>
1897 HCCHAR_MULTICNT_SHIFT);
1898
1899 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1900 if (dbg_hc(chan))
1901 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1902 chan->hc_num);
1903
1904 chan->xfer_started = 1;
1905 chan->requests++;
1906
1907 if (hsotg->core_params->dma_enable <= 0 &&
1908 !chan->ep_is_in && chan->xfer_len > 0)
1909 /* Load OUT packet into the appropriate Tx FIFO */
1910 dwc2_hc_write_packet(hsotg, chan);
1911 }
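
/*
 * Worked example (editorial note, not driver code): for a non-split bulk
 * OUT transfer in Slave mode with max_packet = 64 and xfer_len = 1000, and
 * assuming a DATA0 start PID, the code above computes
 *
 *	num_packets = (1000 + 64 - 1) / 64 = 16;
 *	hctsiz = 1000 << TSIZ_XFERSIZE_SHIFT
 *		| 16 << TSIZ_PKTCNT_SHIFT
 *		| DWC2_HC_PID_DATA0 << TSIZ_SC_MC_PID_SHIFT;
 *
 * For the IN direction the same transfer would be rounded up to an integral
 * number of max packets, i.e. xfer_len = 16 * 64 = 1024.
 */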
1912
1913 /**
1914 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1915 * host channel and starts the transfer in Descriptor DMA mode
1916 *
1917 * @hsotg: Programming view of DWC_otg controller
1918 * @chan: Information needed to initialize the host channel
1919 *
1920 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1921 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1922 * with micro-frame bitmap.
1923 *
1924 * Initializes HCDMA register with descriptor list address and CTD value then
1925 * starts the transfer via enabling the channel.
1926 */
1927 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1928 struct dwc2_host_chan *chan)
1929 {
1930 u32 hcchar;
1931 u32 hctsiz = 0;
1932
1933 if (chan->do_ping)
1934 hctsiz |= TSIZ_DOPNG;
1935
1936 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1937 dwc2_set_pid_isoc(chan);
1938
1939 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1940 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1941 TSIZ_SC_MC_PID_MASK;
1942
1943 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1944 hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1945
1946 /* Non-zero only for high-speed interrupt endpoints */
1947 hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1948
1949 if (dbg_hc(chan)) {
1950 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1951 chan->hc_num);
1952 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1953 chan->data_pid_start);
1954 dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
1955 }
1956
1957 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1958
1959 dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
1960 chan->desc_list_sz, DMA_TO_DEVICE);
1961
1962 dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num));
1963
1964 if (dbg_hc(chan))
1965 dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
1966 &chan->desc_list_addr, chan->hc_num);
1967
1968 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
1969 hcchar &= ~HCCHAR_MULTICNT_MASK;
1970 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1971 HCCHAR_MULTICNT_MASK;
1972
1973 if (hcchar & HCCHAR_CHDIS)
1974 dev_warn(hsotg->dev,
1975 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1976 __func__, chan->hc_num, hcchar);
1977
1978 /* Set host channel enable after all other setup is complete */
1979 hcchar |= HCCHAR_CHENA;
1980 hcchar &= ~HCCHAR_CHDIS;
1981
1982 if (dbg_hc(chan))
1983 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1984 (hcchar & HCCHAR_MULTICNT_MASK) >>
1985 HCCHAR_MULTICNT_SHIFT);
1986
1987 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1988 if (dbg_hc(chan))
1989 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1990 chan->hc_num);
1991
1992 chan->xfer_started = 1;
1993 chan->requests++;
1994 }
1995
1996 /**
1997 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1998 * a previous call to dwc2_hc_start_transfer()
1999 *
2000 * @hsotg: Programming view of DWC_otg controller
2001 * @chan: Information needed to initialize the host channel
2002 *
2003 * The caller must ensure there is sufficient space in the request queue and Tx
2004 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
2005 * the controller acts autonomously to complete transfers programmed to a host
2006 * channel.
2007 *
2008 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
2009 * if there is any data remaining to be queued. For an IN transfer, another
2010 * data packet is always requested. For the SETUP phase of a control transfer,
2011 * this function does nothing.
2012 *
2013 * Return: 1 if a new request is queued, 0 if no more requests are required
2014 * for this transfer
2015 */
2016 int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
2017 struct dwc2_host_chan *chan)
2018 {
2019 if (dbg_hc(chan))
2020 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
2021 chan->hc_num);
2022
2023 if (chan->do_split)
2024 /* SPLITs always queue just once per channel */
2025 return 0;
2026
2027 if (chan->data_pid_start == DWC2_HC_PID_SETUP)
2028 /* SETUPs are queued only once since they can't be NAK'd */
2029 return 0;
2030
2031 if (chan->ep_is_in) {
2032 /*
2033 * Always queue another request for other IN transfers. If
2034 * back-to-back INs are issued and NAKs are received for both,
2035 * the driver may still be processing the first NAK when the
2036 * second NAK is received. When the interrupt handler clears
2037 * the NAK interrupt for the first NAK, the second NAK will
2038 * not be seen. So we can't depend on the NAK interrupt
2039 * handler to requeue a NAK'd request. Instead, IN requests
2040 * are issued each time this function is called. When the
2041 * transfer completes, the extra requests for the channel will
2042 * be flushed.
2043 */
2044 u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
2045
2046 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
2047 hcchar |= HCCHAR_CHENA;
2048 hcchar &= ~HCCHAR_CHDIS;
2049 if (dbg_hc(chan))
2050 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
2051 hcchar);
2052 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
2053 chan->requests++;
2054 return 1;
2055 }
2056
2057 /* OUT transfers */
2058
2059 if (chan->xfer_count < chan->xfer_len) {
2060 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
2061 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
2062 u32 hcchar = dwc2_readl(hsotg->regs +
2063 HCCHAR(chan->hc_num));
2064
2065 dwc2_hc_set_even_odd_frame(hsotg, chan,
2066 &hcchar);
2067 }
2068
2069 /* Load OUT packet into the appropriate Tx FIFO */
2070 dwc2_hc_write_packet(hsotg, chan);
2071 chan->requests++;
2072 return 1;
2073 }
2074
2075 return 0;
2076 }
2077
2078 /**
2079 * dwc2_hc_do_ping() - Starts a PING transfer
2080 *
2081 * @hsotg: Programming view of DWC_otg controller
2082 * @chan: Information needed to initialize the host channel
2083 *
2084 * This function should only be called in Slave mode. The Do Ping bit is set in
2085 * the HCTSIZ register, then the channel is enabled.
2086 */
2087 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
2088 {
2089 u32 hcchar;
2090 u32 hctsiz;
2091
2092 if (dbg_hc(chan))
2093 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
2094 chan->hc_num);
2095
2096
2097 hctsiz = TSIZ_DOPNG;
2098 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
2099 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
2100
2101 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
2102 hcchar |= HCCHAR_CHENA;
2103 hcchar &= ~HCCHAR_CHDIS;
2104 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
2105 }
2106
2107 /**
2108 * dwc2_calc_frame_interval() - Calculates the correct Frame Interval value for
2109 * the HFIR register according to PHY type and speed
2110 *
2111 * @hsotg: Programming view of DWC_otg controller
2112 *
2113 * NOTE: The caller can modify the value of the HFIR register only after the
2114 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
2115 * has been set
2116 */
2117 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
2118 {
2119 u32 usbcfg;
2120 u32 hprt0;
2121 int clock = 60; /* default value */
2122
2123 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
2124 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
2125
2126 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
2127 !(usbcfg & GUSBCFG_PHYIF16))
2128 clock = 60;
2129 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
2130 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
2131 clock = 48;
2132 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2133 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2134 clock = 30;
2135 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2136 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
2137 clock = 60;
2138 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2139 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2140 clock = 48;
2141 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
2142 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
2143 clock = 48;
2144 if ((usbcfg & GUSBCFG_PHYSEL) &&
2145 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2146 clock = 48;
2147
2148 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
2149 /* High speed case */
2150 return 125 * clock;
2151 else
2152 /* FS/LS case */
2153 return 1000 * clock;
2154 }
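
/*
 * Worked example (editorial note): with a 16-bit UTMI+ high-speed PHY
 * (PHYSEL and ULPI_UTMI_SEL clear, PHYIF16 set, low-power PHY clock not
 * selected) the selection above yields clock = 30, i.e. 30 MHz.  At high
 * speed the function then returns 125 * 30 = 3750 PHY clocks per 125 us
 * microframe.  With a dedicated full-speed PHY, clock = 48 and the FS/LS
 * return value is 1000 * 48 = 48000 PHY clocks per 1 ms frame.
 */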
2155
2156 /**
2157 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
2158 * buffer
2159 *
2160 * @hsotg: Programming view of DWC_otg controller
2161 * @dest: Destination buffer for the packet
2162 * @bytes: Number of bytes to copy to the destination
2163 */
2164 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
2165 {
2166 u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
2167 u32 *data_buf = (u32 *)dest;
2168 int word_count = (bytes + 3) / 4;
2169 int i;
2170
2171 /*
2172 * Todo: Account for the case where dest is not dword aligned. This
2173 * requires reading data from the FIFO into a u32 temp buffer, then
2174 * moving it into the data buffer.
2175 */
2176
2177 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
2178
2179 for (i = 0; i < word_count; i++, data_buf++)
2180 *data_buf = dwc2_readl(fifo);
2181 }
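
/*
 * Editorial sketch for the Todo above (illustrative only, not the driver's
 * implementation): a destination that is not dword aligned could be handled
 * by bouncing each FIFO word through a local u32, e.g.
 *
 *	u32 tmp;
 *	int remaining = bytes;
 *
 *	while (remaining > 0) {
 *		int chunk = remaining < 4 ? remaining : 4;
 *
 *		tmp = dwc2_readl(fifo);
 *		memcpy(dest, &tmp, chunk);
 *		dest += chunk;
 *		remaining -= chunk;
 *	}
 *
 * This keeps the same CPU byte-ordering assumption as the aligned path.
 */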
2182
2183 /**
2184 * dwc2_dump_host_registers() - Prints the host registers
2185 *
2186 * @hsotg: Programming view of DWC_otg controller
2187 *
2188 * NOTE: This function will be removed once the peripheral controller code
2189 * is integrated and the driver is stable
2190 */
2191 void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
2192 {
2193 #ifdef DEBUG
2194 u32 __iomem *addr;
2195 int i;
2196
2197 dev_dbg(hsotg->dev, "Host Global Registers\n");
2198 addr = hsotg->regs + HCFG;
2199 dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
2200 (unsigned long)addr, dwc2_readl(addr));
2201 addr = hsotg->regs + HFIR;
2202 dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
2203 (unsigned long)addr, dwc2_readl(addr));
2204 addr = hsotg->regs + HFNUM;
2205 dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
2206 (unsigned long)addr, dwc2_readl(addr));
2207 addr = hsotg->regs + HPTXSTS;
2208 dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
2209 (unsigned long)addr, dwc2_readl(addr));
2210 addr = hsotg->regs + HAINT;
2211 dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
2212 (unsigned long)addr, dwc2_readl(addr));
2213 addr = hsotg->regs + HAINTMSK;
2214 dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
2215 (unsigned long)addr, dwc2_readl(addr));
2216 if (hsotg->core_params->dma_desc_enable > 0) {
2217 addr = hsotg->regs + HFLBADDR;
2218 dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
2219 (unsigned long)addr, dwc2_readl(addr));
2220 }
2221
2222 addr = hsotg->regs + HPRT0;
2223 dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
2224 (unsigned long)addr, dwc2_readl(addr));
2225
2226 for (i = 0; i < hsotg->core_params->host_channels; i++) {
2227 dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
2228 addr = hsotg->regs + HCCHAR(i);
2229 dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
2230 (unsigned long)addr, dwc2_readl(addr));
2231 addr = hsotg->regs + HCSPLT(i);
2232 dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
2233 (unsigned long)addr, dwc2_readl(addr));
2234 addr = hsotg->regs + HCINT(i);
2235 dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
2236 (unsigned long)addr, dwc2_readl(addr));
2237 addr = hsotg->regs + HCINTMSK(i);
2238 dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
2239 (unsigned long)addr, dwc2_readl(addr));
2240 addr = hsotg->regs + HCTSIZ(i);
2241 dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
2242 (unsigned long)addr, dwc2_readl(addr));
2243 addr = hsotg->regs + HCDMA(i);
2244 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
2245 (unsigned long)addr, dwc2_readl(addr));
2246 if (hsotg->core_params->dma_desc_enable > 0) {
2247 addr = hsotg->regs + HCDMAB(i);
2248 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
2249 (unsigned long)addr, dwc2_readl(addr));
2250 }
2251 }
2252 #endif
2253 }
2254
2255 /**
2256 * dwc2_dump_global_registers() - Prints the core global registers
2257 *
2258 * @hsotg: Programming view of DWC_otg controller
2259 *
2260 * NOTE: This function will be removed once the peripheral controller code
2261 * is integrated and the driver is stable
2262 */
2263 void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
2264 {
2265 #ifdef DEBUG
2266 u32 __iomem *addr;
2267
2268 dev_dbg(hsotg->dev, "Core Global Registers\n");
2269 addr = hsotg->regs + GOTGCTL;
2270 dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
2271 (unsigned long)addr, dwc2_readl(addr));
2272 addr = hsotg->regs + GOTGINT;
2273 dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
2274 (unsigned long)addr, dwc2_readl(addr));
2275 addr = hsotg->regs + GAHBCFG;
2276 dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
2277 (unsigned long)addr, dwc2_readl(addr));
2278 addr = hsotg->regs + GUSBCFG;
2279 dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
2280 (unsigned long)addr, dwc2_readl(addr));
2281 addr = hsotg->regs + GRSTCTL;
2282 dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
2283 (unsigned long)addr, dwc2_readl(addr));
2284 addr = hsotg->regs + GINTSTS;
2285 dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
2286 (unsigned long)addr, dwc2_readl(addr));
2287 addr = hsotg->regs + GINTMSK;
2288 dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
2289 (unsigned long)addr, dwc2_readl(addr));
2290 addr = hsotg->regs + GRXSTSR;
2291 dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
2292 (unsigned long)addr, dwc2_readl(addr));
2293 addr = hsotg->regs + GRXFSIZ;
2294 dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
2295 (unsigned long)addr, dwc2_readl(addr));
2296 addr = hsotg->regs + GNPTXFSIZ;
2297 dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
2298 (unsigned long)addr, dwc2_readl(addr));
2299 addr = hsotg->regs + GNPTXSTS;
2300 dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
2301 (unsigned long)addr, dwc2_readl(addr));
2302 addr = hsotg->regs + GI2CCTL;
2303 dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
2304 (unsigned long)addr, dwc2_readl(addr));
2305 addr = hsotg->regs + GPVNDCTL;
2306 dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
2307 (unsigned long)addr, dwc2_readl(addr));
2308 addr = hsotg->regs + GGPIO;
2309 dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
2310 (unsigned long)addr, dwc2_readl(addr));
2311 addr = hsotg->regs + GUID;
2312 dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
2313 (unsigned long)addr, dwc2_readl(addr));
2314 addr = hsotg->regs + GSNPSID;
2315 dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
2316 (unsigned long)addr, dwc2_readl(addr));
2317 addr = hsotg->regs + GHWCFG1;
2318 dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
2319 (unsigned long)addr, dwc2_readl(addr));
2320 addr = hsotg->regs + GHWCFG2;
2321 dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
2322 (unsigned long)addr, dwc2_readl(addr));
2323 addr = hsotg->regs + GHWCFG3;
2324 dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
2325 (unsigned long)addr, dwc2_readl(addr));
2326 addr = hsotg->regs + GHWCFG4;
2327 dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
2328 (unsigned long)addr, dwc2_readl(addr));
2329 addr = hsotg->regs + GLPMCFG;
2330 dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
2331 (unsigned long)addr, dwc2_readl(addr));
2332 addr = hsotg->regs + GPWRDN;
2333 dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
2334 (unsigned long)addr, dwc2_readl(addr));
2335 addr = hsotg->regs + GDFIFOCFG;
2336 dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
2337 (unsigned long)addr, dwc2_readl(addr));
2338 addr = hsotg->regs + HPTXFSIZ;
2339 dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
2340 (unsigned long)addr, dwc2_readl(addr));
2341
2342 addr = hsotg->regs + PCGCTL;
2343 dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
2344 (unsigned long)addr, dwc2_readl(addr));
2345 #endif
2346 }
2347
2348 /**
2349 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
2350 *
2351 * @hsotg: Programming view of DWC_otg controller
2352 * @num: Tx FIFO to flush
2353 */
2354 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
2355 {
2356 u32 greset;
2357 int count = 0;
2358
2359 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
2360
2361 greset = GRSTCTL_TXFFLSH;
2362 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
2363 dwc2_writel(greset, hsotg->regs + GRSTCTL);
2364
2365 do {
2366 greset = dwc2_readl(hsotg->regs + GRSTCTL);
2367 if (++count > 10000) {
2368 dev_warn(hsotg->dev,
2369 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
2370 __func__, greset,
2371 dwc2_readl(hsotg->regs + GNPTXSTS));
2372 break;
2373 }
2374 udelay(1);
2375 } while (greset & GRSTCTL_TXFFLSH);
2376
2377 /* Wait for at least 3 PHY Clocks */
2378 udelay(1);
2379 }
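
/*
 * Usage note (editorial): callers pass the number of the Tx FIFO to flush,
 * e.g. dwc2_flush_tx_fifo(hsotg, 0) for the non-periodic Tx FIFO.  Per the
 * usual GRSTCTL.TxFNum encoding, a value of 0x10 requests a flush of all
 * transmit FIFOs at once (assumption from the Synopsys register
 * description; not verified here against every core revision).
 */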
2380
2381 /**
2382 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
2383 *
2384 * @hsotg: Programming view of DWC_otg controller
2385 */
2386 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
2387 {
2388 u32 greset;
2389 int count = 0;
2390
2391 dev_vdbg(hsotg->dev, "%s()\n", __func__);
2392
2393 greset = GRSTCTL_RXFFLSH;
2394 dwc2_writel(greset, hsotg->regs + GRSTCTL);
2395
2396 do {
2397 greset = dwc2_readl(hsotg->regs + GRSTCTL);
2398 if (++count > 10000) {
2399 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
2400 __func__, greset);
2401 break;
2402 }
2403 udelay(1);
2404 } while (greset & GRSTCTL_RXFFLSH);
2405
2406 /* Wait for at least 3 PHY Clocks */
2407 udelay(1);
2408 }
2409
2410 #define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
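
/*
 * Example (editorial): DWC2_OUT_OF_BOUNDS(2, 0, 1) evaluates to true, so a
 * parameter value of 2 is rejected by the 0/1 range checks below and the
 * corresponding setter substitutes its default.
 */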
2411
2412 /* Parameter access functions */
2413 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
2414 {
2415 int valid = 1;
2416
2417 switch (val) {
2418 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
2419 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
2420 valid = 0;
2421 break;
2422 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
2423 switch (hsotg->hw_params.op_mode) {
2424 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2425 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2426 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2427 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2428 break;
2429 default:
2430 valid = 0;
2431 break;
2432 }
2433 break;
2434 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
2435 /* always valid */
2436 break;
2437 default:
2438 valid = 0;
2439 break;
2440 }
2441
2442 if (!valid) {
2443 if (val >= 0)
2444 dev_err(hsotg->dev,
2445 "%d invalid for otg_cap parameter. Check HW configuration.\n",
2446 val);
2447 switch (hsotg->hw_params.op_mode) {
2448 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2449 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
2450 break;
2451 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2452 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2453 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2454 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2455 break;
2456 default:
2457 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2458 break;
2459 }
2460 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
2461 }
2462
2463 hsotg->core_params->otg_cap = val;
2464 }
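
/*
 * Example (editorial): on a core whose GHWCFG2 reports
 * GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE, requesting DWC2_CAP_PARAM_HNP_SRP_CAPABLE
 * is rejected by the checks above and otg_cap falls back to
 * DWC2_CAP_PARAM_SRP_ONLY_CAPABLE.  A negative value (typically -1) skips
 * the error message and simply selects the hardware-derived default, a
 * pattern the other dwc2_set_param_*() helpers below broadly follow.
 */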
2465
2466 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2467 {
2468 int valid = 1;
2469
2470 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
2471 valid = 0;
2472 if (val < 0)
2473 valid = 0;
2474
2475 if (!valid) {
2476 if (val >= 0)
2477 dev_err(hsotg->dev,
2478 "%d invalid for dma_enable parameter. Check HW configuration.\n",
2479 val);
2480 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
2481 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2482 }
2483
2484 hsotg->core_params->dma_enable = val;
2485 }
2486
2487 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2488 {
2489 int valid = 1;
2490
2491 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2492 !hsotg->hw_params.dma_desc_enable))
2493 valid = 0;
2494 if (val < 0)
2495 valid = 0;
2496
2497 if (!valid) {
2498 if (val >= 0)
2499 dev_err(hsotg->dev,
2500 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2501 val);
2502 val = (hsotg->core_params->dma_enable > 0 &&
2503 hsotg->hw_params.dma_desc_enable);
2504 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2505 }
2506
2507 hsotg->core_params->dma_desc_enable = val;
2508 }
2509
2510 void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
2511 {
2512 int valid = 1;
2513
2514 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2515 !hsotg->hw_params.dma_desc_enable))
2516 valid = 0;
2517 if (val < 0)
2518 valid = 0;
2519
2520 if (!valid) {
2521 if (val >= 0)
2522 dev_err(hsotg->dev,
2523 "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
2524 val);
2525 val = (hsotg->core_params->dma_enable > 0 &&
2526 hsotg->hw_params.dma_desc_enable);
2527 }
2528
2529 hsotg->core_params->dma_desc_fs_enable = val;
2530 dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
2531 }
2532
2533 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2534 int val)
2535 {
2536 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2537 if (val >= 0) {
2538 dev_err(hsotg->dev,
2539 "Wrong value for host_support_fs_low_power\n");
2540 dev_err(hsotg->dev,
2541 "host_support_fs_low_power must be 0 or 1\n");
2542 }
2543 val = 0;
2544 dev_dbg(hsotg->dev,
2545 "Setting host_support_fs_low_power to %d\n", val);
2546 }
2547
2548 hsotg->core_params->host_support_fs_ls_low_power = val;
2549 }
2550
2551 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2552 {
2553 int valid = 1;
2554
2555 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
2556 valid = 0;
2557 if (val < 0)
2558 valid = 0;
2559
2560 if (!valid) {
2561 if (val >= 0)
2562 dev_err(hsotg->dev,
2563 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2564 val);
2565 val = hsotg->hw_params.enable_dynamic_fifo;
2566 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2567 }
2568
2569 hsotg->core_params->enable_dynamic_fifo = val;
2570 }
2571
2572 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2573 {
2574 int valid = 1;
2575
2576 if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
2577 valid = 0;
2578
2579 if (!valid) {
2580 if (val >= 0)
2581 dev_err(hsotg->dev,
2582 "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2583 val);
2584 val = hsotg->hw_params.host_rx_fifo_size;
2585 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2586 }
2587
2588 hsotg->core_params->host_rx_fifo_size = val;
2589 }
2590
2591 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2592 {
2593 int valid = 1;
2594
2595 if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
2596 valid = 0;
2597
2598 if (!valid) {
2599 if (val >= 0)
2600 dev_err(hsotg->dev,
2601 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2602 val);
2603 val = hsotg->hw_params.host_nperio_tx_fifo_size;
2604 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2605 val);
2606 }
2607
2608 hsotg->core_params->host_nperio_tx_fifo_size = val;
2609 }
2610
2611 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2612 {
2613 int valid = 1;
2614
2615 if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
2616 valid = 0;
2617
2618 if (!valid) {
2619 if (val >= 0)
2620 dev_err(hsotg->dev,
2621 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2622 val);
2623 val = hsotg->hw_params.host_perio_tx_fifo_size;
2624 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2625 val);
2626 }
2627
2628 hsotg->core_params->host_perio_tx_fifo_size = val;
2629 }
2630
2631 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2632 {
2633 int valid = 1;
2634
2635 if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
2636 valid = 0;
2637
2638 if (!valid) {
2639 if (val >= 0)
2640 dev_err(hsotg->dev,
2641 "%d invalid for max_transfer_size. Check HW configuration.\n",
2642 val);
2643 val = hsotg->hw_params.max_transfer_size;
2644 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2645 }
2646
2647 hsotg->core_params->max_transfer_size = val;
2648 }
2649
2650 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2651 {
2652 int valid = 1;
2653
2654 if (val < 15 || val > hsotg->hw_params.max_packet_count)
2655 valid = 0;
2656
2657 if (!valid) {
2658 if (val >= 0)
2659 dev_err(hsotg->dev,
2660 "%d invalid for max_packet_count. Check HW configuration.\n",
2661 val);
2662 val = hsotg->hw_params.max_packet_count;
2663 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2664 }
2665
2666 hsotg->core_params->max_packet_count = val;
2667 }
2668
2669 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2670 {
2671 int valid = 1;
2672
2673 if (val < 1 || val > hsotg->hw_params.host_channels)
2674 valid = 0;
2675
2676 if (!valid) {
2677 if (val >= 0)
2678 dev_err(hsotg->dev,
2679 "%d invalid for host_channels. Check HW configuration.\n",
2680 val);
2681 val = hsotg->hw_params.host_channels;
2682 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2683 }
2684
2685 hsotg->core_params->host_channels = val;
2686 }
2687
2688 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
2689 {
2690 int valid = 0;
2691 u32 hs_phy_type, fs_phy_type;
2692
2693 if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
2694 DWC2_PHY_TYPE_PARAM_ULPI)) {
2695 if (val >= 0) {
2696 dev_err(hsotg->dev, "Wrong value for phy_type\n");
2697 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2698 }
2699
2700 valid = 0;
2701 }
2702
2703 hs_phy_type = hsotg->hw_params.hs_phy_type;
2704 fs_phy_type = hsotg->hw_params.fs_phy_type;
2705 if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2706 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2707 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2708 valid = 1;
2709 else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2710 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2711 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2712 valid = 1;
2713 else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2714 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2715 valid = 1;
2716
2717 if (!valid) {
2718 if (val >= 0)
2719 dev_err(hsotg->dev,
2720 "%d invalid for phy_type. Check HW configuration.\n",
2721 val);
2722 val = DWC2_PHY_TYPE_PARAM_FS;
2723 if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2724 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2725 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2726 val = DWC2_PHY_TYPE_PARAM_UTMI;
2727 else
2728 val = DWC2_PHY_TYPE_PARAM_ULPI;
2729 }
2730 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2731 }
2732
2733 hsotg->core_params->phy_type = val;
2734 }
2735
2736 static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2737 {
2738 return hsotg->core_params->phy_type;
2739 }
2740
2741 void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2742 {
2743 int valid = 1;
2744
2745 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2746 if (val >= 0) {
2747 dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2748 dev_err(hsotg->dev, "speed parameter must be 0 or 1\n");
2749 }
2750 valid = 0;
2751 }
2752
2753 if (val == DWC2_SPEED_PARAM_HIGH &&
2754 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2755 valid = 0;
2756
2757 if (!valid) {
2758 if (val >= 0)
2759 dev_err(hsotg->dev,
2760 "%d invalid for speed parameter. Check HW configuration.\n",
2761 val);
2762 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
2763 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
2764 dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2765 }
2766
2767 hsotg->core_params->speed = val;
2768 }
2769
2770 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2771 {
2772 int valid = 1;
2773
2774 if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2775 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2776 if (val >= 0) {
2777 dev_err(hsotg->dev,
2778 "Wrong value for host_ls_low_power_phy_clk parameter\n");
2779 dev_err(hsotg->dev,
2780 "host_ls_low_power_phy_clk must be 0 or 1\n");
2781 }
2782 valid = 0;
2783 }
2784
2785 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2786 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2787 valid = 0;
2788
2789 if (!valid) {
2790 if (val >= 0)
2791 dev_err(hsotg->dev,
2792 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2793 val);
2794 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2795 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2796 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2797 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2798 val);
2799 }
2800
2801 hsotg->core_params->host_ls_low_power_phy_clk = val;
2802 }
2803
2804 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2805 {
2806 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2807 if (val >= 0) {
2808 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2809 dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
2810 }
2811 val = 0;
2812 dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
2813 }
2814
2815 hsotg->core_params->phy_ulpi_ddr = val;
2816 }
2817
2818 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2819 {
2820 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2821 if (val >= 0) {
2822 dev_err(hsotg->dev,
2823 "Wrong value for phy_ulpi_ext_vbus\n");
2824 dev_err(hsotg->dev,
2825 "phy_ulpi_ext_vbus must be 0 or 1\n");
2826 }
2827 val = 0;
2828 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2829 }
2830
2831 hsotg->core_params->phy_ulpi_ext_vbus = val;
2832 }
2833
2834 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2835 {
2836 int valid = 0;
2837
2838 switch (hsotg->hw_params.utmi_phy_data_width) {
2839 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2840 valid = (val == 8);
2841 break;
2842 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2843 valid = (val == 16);
2844 break;
2845 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2846 valid = (val == 8 || val == 16);
2847 break;
2848 }
2849
2850 if (!valid) {
2851 if (val >= 0) {
2852 dev_err(hsotg->dev,
2853 "%d invalid for phy_utmi_width. Check HW configuration.\n",
2854 val);
2855 }
2856 val = (hsotg->hw_params.utmi_phy_data_width ==
2857 GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
2858 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2859 }
2860
2861 hsotg->core_params->phy_utmi_width = val;
2862 }
2863
2864 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2865 {
2866 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2867 if (val >= 0) {
2868 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2869 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2870 }
2871 val = 0;
2872 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2873 }
2874
2875 hsotg->core_params->ulpi_fs_ls = val;
2876 }
2877
2878 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2879 {
2880 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2881 if (val >= 0) {
2882 dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2883 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2884 }
2885 val = 0;
2886 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2887 }
2888
2889 hsotg->core_params->ts_dline = val;
2890 }
2891
2892 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
2893 {
2894 int valid = 1;
2895
2896 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2897 if (val >= 0) {
2898 dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2899 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2900 }
2901
2902 valid = 0;
2903 }
2904
2905 if (val == 1 && !(hsotg->hw_params.i2c_enable))
2906 valid = 0;
2907
2908 if (!valid) {
2909 if (val >= 0)
2910 dev_err(hsotg->dev,
2911 "%d invalid for i2c_enable. Check HW configuration.\n",
2912 val);
2913 val = hsotg->hw_params.i2c_enable;
2914 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2915 }
2916
2917 hsotg->core_params->i2c_enable = val;
2918 }
2919
2920 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2921 {
2922 int valid = 1;
2923
2924 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2925 if (val >= 0) {
2926 dev_err(hsotg->dev,
2927 "Wrong value for en_multiple_tx_fifo,\n");
2928 dev_err(hsotg->dev,
2929 "en_multiple_tx_fifo must be 0 or 1\n");
2930 }
2931 valid = 0;
2932 }
2933
2934 if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
2935 valid = 0;
2936
2937 if (!valid) {
2938 if (val >= 0)
2939 dev_err(hsotg->dev,
2940 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2941 val);
2942 val = hsotg->hw_params.en_multiple_tx_fifo;
2943 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2944 }
2945
2946 hsotg->core_params->en_multiple_tx_fifo = val;
2947 }
2948
2949 void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2950 {
2951 int valid = 1;
2952
2953 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2954 if (val >= 0) {
2955 dev_err(hsotg->dev,
2956 "'%d' invalid for parameter reload_ctl\n", val);
2957 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2958 }
2959 valid = 0;
2960 }
2961
2962 if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
2963 valid = 0;
2964
2965 if (!valid) {
2966 if (val >= 0)
2967 dev_err(hsotg->dev,
2968 "%d invalid for parameter reload_ctl. Check HW configuration.\n",
2969 val);
2970 val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
2971 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2972 }
2973
2974 hsotg->core_params->reload_ctl = val;
2975 }
2976
2977 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
2978 {
2979 if (val != -1)
2980 hsotg->core_params->ahbcfg = val;
2981 else
2982 hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
2983 GAHBCFG_HBSTLEN_SHIFT;
2984 }
2985
2986 void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2987 {
2988 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2989 if (val >= 0) {
2990 dev_err(hsotg->dev,
2991 "'%d' invalid for parameter otg_ver\n", val);
2992 dev_err(hsotg->dev,
2993 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2994 }
2995 val = 0;
2996 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2997 }
2998
2999 hsotg->core_params->otg_ver = val;
3000 }
3001
3002 static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
3003 {
3004 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3005 if (val >= 0) {
3006 dev_err(hsotg->dev,
3007 "'%d' invalid for parameter uframe_sched\n",
3008 val);
3009 dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
3010 }
3011 val = 1;
3012 dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
3013 }
3014
3015 hsotg->core_params->uframe_sched = val;
3016 }
3017
3018 static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
3019 int val)
3020 {
3021 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3022 if (val >= 0) {
3023 dev_err(hsotg->dev,
3024 "'%d' invalid for parameter external_id_pin_ctl\n",
3025 val);
3026 dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
3027 }
3028 val = 0;
3029 dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
3030 }
3031
3032 hsotg->core_params->external_id_pin_ctl = val;
3033 }
3034
3035 static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
3036 int val)
3037 {
3038 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3039 if (val >= 0) {
3040 dev_err(hsotg->dev,
3041 "'%d' invalid for parameter hibernation\n",
3042 val);
3043 dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
3044 }
3045 val = 0;
3046 dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
3047 }
3048
3049 hsotg->core_params->hibernation = val;
3050 }
3051
3052 /*
3053 * This function is called during module initialization to pass module parameters
3054 * for the DWC_otg core.
3055 */
3056 void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
3057 const struct dwc2_core_params *params)
3058 {
3059 dev_dbg(hsotg->dev, "%s()\n", __func__);
3060
3061 dwc2_set_param_otg_cap(hsotg, params->otg_cap);
3062 dwc2_set_param_dma_enable(hsotg, params->dma_enable);
3063 dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
3064 dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
3065 dwc2_set_param_host_support_fs_ls_low_power(hsotg,
3066 params->host_support_fs_ls_low_power);
3067 dwc2_set_param_enable_dynamic_fifo(hsotg,
3068 params->enable_dynamic_fifo);
3069 dwc2_set_param_host_rx_fifo_size(hsotg,
3070 params->host_rx_fifo_size);
3071 dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
3072 params->host_nperio_tx_fifo_size);
3073 dwc2_set_param_host_perio_tx_fifo_size(hsotg,
3074 params->host_perio_tx_fifo_size);
3075 dwc2_set_param_max_transfer_size(hsotg,
3076 params->max_transfer_size);
3077 dwc2_set_param_max_packet_count(hsotg,
3078 params->max_packet_count);
3079 dwc2_set_param_host_channels(hsotg, params->host_channels);
3080 dwc2_set_param_phy_type(hsotg, params->phy_type);
3081 dwc2_set_param_speed(hsotg, params->speed);
3082 dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
3083 params->host_ls_low_power_phy_clk);
3084 dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
3085 dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
3086 params->phy_ulpi_ext_vbus);
3087 dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
3088 dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
3089 dwc2_set_param_ts_dline(hsotg, params->ts_dline);
3090 dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
3091 dwc2_set_param_en_multiple_tx_fifo(hsotg,
3092 params->en_multiple_tx_fifo);
3093 dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
3094 dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
3095 dwc2_set_param_otg_ver(hsotg, params->otg_ver);
3096 dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
3097 dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
3098 dwc2_set_param_hibernation(hsotg, params->hibernation);
3099 }
3100
3101 /**
3102 * dwc2_get_hwparams() - During device initialization, read various hardware
3103 * configuration registers and interpret the contents.
3104 */
3105 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
3106 {
3107 struct dwc2_hw_params *hw = &hsotg->hw_params;
3108 unsigned width;
3109 u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
3110 u32 hptxfsiz, grxfsiz, gnptxfsiz;
3111 u32 gusbcfg = 0;
3112
3113 /*
3114 * Attempt to ensure this device is really a DWC_otg Controller.
3115 * Read and verify the GSNPSID register contents. The value should be
3116 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
3117 * as in "OTG version 2.xx" or "OTG version 3.xx".
3118 */
3119 hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
3120 if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
3121 (hw->snpsid & 0xfffff000) != 0x4f543000) {
3122 dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
3123 hw->snpsid);
3124 return -ENODEV;
3125 }
3126
3127 dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
3128 hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
3129 hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
3130
3131 hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
3132 hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
3133 hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
3134 hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
3135 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
3136
3137 dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
3138 dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
3139 dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
3140 dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
3141 dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
3142
3143 /* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */
3144 if (hsotg->dr_mode != USB_DR_MODE_HOST) {
3145 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
3146 dwc2_writel(gusbcfg | GUSBCFG_FORCEHOSTMODE,
3147 hsotg->regs + GUSBCFG);
3148 usleep_range(25000, 50000);
3149 }
3150
3151 gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
3152 hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
3153 dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
3154 dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
3155 if (hsotg->dr_mode != USB_DR_MODE_HOST) {
3156 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
3157 usleep_range(25000, 50000);
3158 }
3159
3160 /* hwcfg2 */
3161 hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
3162 GHWCFG2_OP_MODE_SHIFT;
3163 hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
3164 GHWCFG2_ARCHITECTURE_SHIFT;
3165 hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
3166 hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
3167 GHWCFG2_NUM_HOST_CHAN_SHIFT);
3168 hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
3169 GHWCFG2_HS_PHY_TYPE_SHIFT;
3170 hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
3171 GHWCFG2_FS_PHY_TYPE_SHIFT;
3172 hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
3173 GHWCFG2_NUM_DEV_EP_SHIFT;
3174 hw->nperio_tx_q_depth =
3175 (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
3176 GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
3177 hw->host_perio_tx_q_depth =
3178 (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
3179 GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
3180 hw->dev_token_q_depth =
3181 (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
3182 GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
3183
3184 /* hwcfg3 */
3185 width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
3186 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
3187 hw->max_transfer_size = (1 << (width + 11)) - 1;
3188 /*
3189 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
3190 * coherent buffers with this size, and if it's too large we can
3191 * exhaust the coherent DMA pool.
3192 */
3193 if (hw->max_transfer_size > 65535)
3194 hw->max_transfer_size = 65535;
3195 width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
3196 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
3197 hw->max_packet_count = (1 << (width + 4)) - 1;
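	/*
	 * Worked example (editorial): a transfer size counter width field of
	 * 8 gives (1 << 19) - 1 = 524287, which the clip above reduces to
	 * 65535; a packet size counter width field of 6 gives
	 * (1 << 10) - 1 = 1023 packets.
	 */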
3198 hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
3199 hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
3200 GHWCFG3_DFIFO_DEPTH_SHIFT;
3201
3202 /* hwcfg4 */
3203 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
3204 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
3205 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
3206 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
3207 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
3208 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
3209 GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
3210
3211 /* fifo sizes */
3212 hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
3213 GRXFSIZ_DEPTH_SHIFT;
3214 hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
3215 FIFOSIZE_DEPTH_SHIFT;
3216 hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
3217 FIFOSIZE_DEPTH_SHIFT;
3218
3219 dev_dbg(hsotg->dev, "Detected values from hardware:\n");
3220 dev_dbg(hsotg->dev, " op_mode=%d\n",
3221 hw->op_mode);
3222 dev_dbg(hsotg->dev, " arch=%d\n",
3223 hw->arch);
3224 dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
3225 hw->dma_desc_enable);
3226 dev_dbg(hsotg->dev, " power_optimized=%d\n",
3227 hw->power_optimized);
3228 dev_dbg(hsotg->dev, " i2c_enable=%d\n",
3229 hw->i2c_enable);
3230 dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
3231 hw->hs_phy_type);
3232 dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
3233 hw->fs_phy_type);
3234 dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
3235 hw->utmi_phy_data_width);
3236 dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
3237 hw->num_dev_ep);
3238 dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
3239 hw->num_dev_perio_in_ep);
3240 dev_dbg(hsotg->dev, " host_channels=%d\n",
3241 hw->host_channels);
3242 dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
3243 hw->max_transfer_size);
3244 dev_dbg(hsotg->dev, " max_packet_count=%d\n",
3245 hw->max_packet_count);
3246 dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
3247 hw->nperio_tx_q_depth);
3248 dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
3249 hw->host_perio_tx_q_depth);
3250 dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
3251 hw->dev_token_q_depth);
3252 dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
3253 hw->enable_dynamic_fifo);
3254 dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
3255 hw->en_multiple_tx_fifo);
3256 dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
3257 hw->total_fifo_size);
3258 dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n",
3259 hw->host_rx_fifo_size);
3260 dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
3261 hw->host_nperio_tx_fifo_size);
3262 dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
3263 hw->host_perio_tx_fifo_size);
3264 dev_dbg(hsotg->dev, "\n");
3265
3266 return 0;
3267 }
3268
3269 /*
3270 * Sets all parameters to the given value.
3271 *
3272 * Assumes that the dwc2_core_params struct contains only integers.
3273 */
3274 void dwc2_set_all_params(struct dwc2_core_params *params, int value)
3275 {
3276 int *p = (int *)params;
3277 size_t size = sizeof(*params) / sizeof(*p);
3278 int i;
3279
3280 for (i = 0; i < size; i++)
3281 p[i] = value;
3282 }
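
/*
 * Usage sketch (editorial, illustrative only): platform code typically
 * primes a params struct with -1 ("not set") and lets the setters pick
 * hardware-derived defaults, e.g.
 *
 *	struct dwc2_core_params defaults;
 *
 *	dwc2_set_all_params(&defaults, -1);
 *	dwc2_set_parameters(hsotg, &defaults);
 *
 * Each field left at -1 is treated as unset by the corresponding
 * dwc2_set_param_*() helper, which then substitutes a value derived from
 * the hardware configuration registers.
 */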
3283
3284
3285 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
3286 {
3287 return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
3288 }
3289
3290 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
3291 {
3292 if (dwc2_readl(hsotg->regs + GSNPSID) == 0xffffffff)
3293 return false;
3294 else
3295 return true;
3296 }
3297
3298 /**
3299 * dwc2_enable_global_interrupts() - Enables the controller's Global
3300 * Interrupt in the AHB Config register
3301 *
3302 * @hsotg: Programming view of DWC_otg controller
3303 */
3304 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
3305 {
3306 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
3307
3308 ahbcfg |= GAHBCFG_GLBL_INTR_EN;
3309 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
3310 }
3311
3312 /**
3313 * dwc2_disable_global_interrupts() - Disables the controller's Global
3314 * Interrupt in the AHB Config register
3315 *
3316 * @hsotg: Programming view of DWC_otg controller
3317 */
3318 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
3319 {
3320 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
3321
3322 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
3323 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
3324 }
3325
3326 MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
3327 MODULE_AUTHOR("Synopsys, Inc.");
3328 MODULE_LICENSE("Dual BSD/GPL");