1 /* $NetBSD: dwc2_core.c,v 1.13 2016/02/24 22:17:54 skrll Exp $ */
2
3 /*
4 * core.c - DesignWare HS OTG Controller common routines
5 *
6 * Copyright (C) 2004-2013 Synopsys, Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. The names of the above-listed copyright holders may not be used
18 * to endorse or promote products derived from this software without
19 * specific prior written permission.
20 *
21 * ALTERNATIVELY, this software may be distributed under the terms of the
22 * GNU General Public License ("GPL") as published by the Free Software
23 * Foundation; either version 2 of the License, or (at your option) any
24 * later version.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * The Core code provides basic services for accessing and managing the
41 * DWC_otg hardware. These services are used by both the Host Controller
42 * Driver and the Peripheral Controller Driver.
43 */
44
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: dwc2_core.c,v 1.13 2016/02/24 22:17:54 skrll Exp $");
47
48 #include <sys/types.h>
49 #include <sys/bus.h>
50 #include <sys/proc.h>
51 #include <sys/callout.h>
52 #include <sys/mutex.h>
53 #include <sys/pool.h>
54
55 #include <dev/usb/usb.h>
56 #include <dev/usb/usbdi.h>
57 #include <dev/usb/usbdivar.h>
58 #include <dev/usb/usb_mem.h>
59
60 #include <linux/kernel.h>
61 #include <linux/list.h>
62
63 #include <dwc2/dwc2.h>
64 #include <dwc2/dwc2var.h>
65
66 #include "dwc2_core.h"
67 #include "dwc2_hcd.h"
68
69 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
70 /**
71 * dwc2_backup_host_registers() - Backup controller host registers.
 * When suspending the usb bus, registers need to be backed up
 * if controller power is disabled once suspended.
74 *
75 * @hsotg: Programming view of the DWC_otg controller
76 */
static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup Host regs */
	hr = &hsotg->hr_backup;
	hr->hcfg = DWC2_READ_4(hsotg, HCFG);
	hr->haintmsk = DWC2_READ_4(hsotg, HAINTMSK);
	/* One interrupt-mask register per host channel */
	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		hr->hcintmsk[i] = DWC2_READ_4(hsotg, HCINTMSK(i));

	hr->hprt0 = DWC2_READ_4(hsotg, HPRT0);
	hr->hfir = DWC2_READ_4(hsotg, HFIR);
	/* Mark the snapshot usable by dwc2_restore_host_registers() */
	hr->valid = true;

	return 0;
}
97
98 /**
99 * dwc2_restore_host_registers() - Restore controller host registers.
 * When resuming the usb bus, host registers need to be restored
 * if controller power was disabled.
102 *
103 * @hsotg: Programming view of the DWC_otg controller
104 */
static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore host regs */
	hr = &hsotg->hr_backup;
	/* Refuse to restore if no backup was ever taken */
	if (!hr->valid) {
		dev_err(hsotg->dev, "%s: no host registers to restore\n",
			__func__);
		return -EINVAL;
	}
	/* A backup is consumed by a restore; it cannot be replayed */
	hr->valid = false;

	DWC2_WRITE_4(hsotg, HCFG, hr->hcfg);
	DWC2_WRITE_4(hsotg, HAINTMSK, hr->haintmsk);

	/* Per-channel interrupt masks */
	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		DWC2_WRITE_4(hsotg, HCINTMSK(i), hr->hcintmsk[i]);

	DWC2_WRITE_4(hsotg, HPRT0, hr->hprt0);
	DWC2_WRITE_4(hsotg, HFIR, hr->hfir);
	/* Frame counting restarts from zero after the power cycle */
	hsotg->frame_number = 0;

	return 0;
}
133 #else
/* Host mode compiled out: nothing to back up. */
static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
136
/* Host mode compiled out: nothing to restore. */
static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
139 #endif
140
141 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
142 IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
143 /**
144 * dwc2_backup_device_registers() - Backup controller device registers.
 * When suspending the usb bus, registers need to be backed up
 * if controller power is disabled once suspended.
147 *
148 * @hsotg: Programming view of the DWC_otg controller
149 */
static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup dev regs */
	dr = &hsotg->dr_backup;

	dr->dcfg = DWC2_READ_4(hsotg, DCFG);
	dr->dctl = DWC2_READ_4(hsotg, DCTL);
	dr->daintmsk = DWC2_READ_4(hsotg, DAINTMSK);
	dr->diepmsk = DWC2_READ_4(hsotg, DIEPMSK);
	dr->doepmsk = DWC2_READ_4(hsotg, DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Backup IN EPs */
		dr->diepctl[i] = DWC2_READ_4(hsotg, DIEPCTL(i));

		/*
		 * Ensure DATA PID is correctly configured: translate the
		 * sampled data toggle into the matching "set PID" bit so
		 * that writing the saved value back restores the toggle.
		 */
		if (dr->diepctl[i] & DXEPCTL_DPID)
			dr->diepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->diepctl[i] |= DXEPCTL_SETD0PID;

		dr->dieptsiz[i] = DWC2_READ_4(hsotg, DIEPTSIZ(i));
		dr->diepdma[i] = DWC2_READ_4(hsotg, DIEPDMA(i));

		/* Backup OUT EPs */
		dr->doepctl[i] = DWC2_READ_4(hsotg, DOEPCTL(i));

		/* Ensure DATA PID is correctly configured (as above) */
		if (dr->doepctl[i] & DXEPCTL_DPID)
			dr->doepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->doepctl[i] |= DXEPCTL_SETD0PID;

		dr->doeptsiz[i] = DWC2_READ_4(hsotg, DOEPTSIZ(i));
		dr->doepdma[i] = DWC2_READ_4(hsotg, DOEPDMA(i));
	}
	/* Mark the snapshot usable by dwc2_restore_device_registers() */
	dr->valid = true;
	return 0;
}
194
195 /**
196 * dwc2_restore_device_registers() - Restore controller device registers.
 * When resuming the usb bus, device registers need to be restored
 * if controller power was disabled.
199 *
200 * @hsotg: Programming view of the DWC_otg controller
201 */
static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	u32 dctl;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore dev regs */
	dr = &hsotg->dr_backup;
	/* Refuse to restore if no backup was ever taken */
	if (!dr->valid) {
		dev_err(hsotg->dev, "%s: no device registers to restore\n",
			__func__);
		return -EINVAL;
	}
	/* A backup is consumed by a restore; it cannot be replayed */
	dr->valid = false;

	DWC2_WRITE_4(hsotg, DCFG, dr->dcfg);
	DWC2_WRITE_4(hsotg, DCTL, dr->dctl);
	DWC2_WRITE_4(hsotg, DAINTMSK, dr->daintmsk);
	DWC2_WRITE_4(hsotg, DIEPMSK, dr->diepmsk);
	DWC2_WRITE_4(hsotg, DOEPMSK, dr->doepmsk);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Restore IN EPs */
		DWC2_WRITE_4(hsotg, DIEPCTL(i), dr->diepctl[i]);
		DWC2_WRITE_4(hsotg, DIEPTSIZ(i), dr->dieptsiz[i]);
		DWC2_WRITE_4(hsotg, DIEPDMA(i), dr->diepdma[i]);

		/* Restore OUT EPs */
		DWC2_WRITE_4(hsotg, DOEPCTL(i), dr->doepctl[i]);
		DWC2_WRITE_4(hsotg, DOEPTSIZ(i), dr->doeptsiz[i]);
		DWC2_WRITE_4(hsotg, DOEPDMA(i), dr->doepdma[i]);
	}

	/* Set the Power-On Programming done bit */
	dctl = DWC2_READ_4(hsotg, DCTL);
	dctl |= DCTL_PWRONPRGDONE;
	DWC2_WRITE_4(hsotg, DCTL, dctl);

	return 0;
}
244 #else
/* Device mode compiled out: nothing to back up. */
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
247
/* Device mode compiled out: nothing to restore. */
static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
250 #endif
251
252 /**
253 * dwc2_backup_global_registers() - Backup global controller registers.
 * When suspending the usb bus, registers need to be backed up
 * if controller power is disabled once suspended.
256 *
257 * @hsotg: Programming view of the DWC_otg controller
258 */
static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	/* Backup global regs */
	gr = &hsotg->gr_backup;

	gr->gotgctl = DWC2_READ_4(hsotg, GOTGCTL);
	gr->gintmsk = DWC2_READ_4(hsotg, GINTMSK);
	gr->gahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
	gr->gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
	gr->grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
	gr->gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
	gr->hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ);
	gr->gdfifocfg = DWC2_READ_4(hsotg, GDFIFOCFG);
	/* Per-endpoint periodic TX FIFO sizes */
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		gr->dtxfsiz[i] = DWC2_READ_4(hsotg, DPTXFSIZN(i));

	/* Mark the snapshot usable by dwc2_restore_global_registers() */
	gr->valid = true;
	return 0;
}
281
282 /**
283 * dwc2_restore_global_registers() - Restore controller global registers.
 * When resuming the usb bus, global registers need to be restored
 * if controller power was disabled.
286 *
287 * @hsotg: Programming view of the DWC_otg controller
288 */
static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore global regs */
	gr = &hsotg->gr_backup;
	/* Refuse to restore if no backup was ever taken */
	if (!gr->valid) {
		dev_err(hsotg->dev, "%s: no global registers to restore\n",
			__func__);
		return -EINVAL;
	}
	/* A backup is consumed by a restore; it cannot be replayed */
	gr->valid = false;

	/* Clear all pending interrupts before unmasking anything */
	DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff);
	DWC2_WRITE_4(hsotg, GOTGCTL, gr->gotgctl);
	DWC2_WRITE_4(hsotg, GINTMSK, gr->gintmsk);
	DWC2_WRITE_4(hsotg, GUSBCFG, gr->gusbcfg);
	DWC2_WRITE_4(hsotg, GAHBCFG, gr->gahbcfg);
	DWC2_WRITE_4(hsotg, GRXFSIZ, gr->grxfsiz);
	DWC2_WRITE_4(hsotg, GNPTXFSIZ, gr->gnptxfsiz);
	DWC2_WRITE_4(hsotg, HPTXFSIZ, gr->hptxfsiz);
	DWC2_WRITE_4(hsotg, GDFIFOCFG, gr->gdfifocfg);
	/* Per-endpoint periodic TX FIFO sizes */
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		DWC2_WRITE_4(hsotg, DPTXFSIZN(i), gr->dtxfsiz[i]);

	return 0;
}
319
320 /**
321 * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
322 *
323 * @hsotg: Programming view of the DWC_otg controller
324 * @restore: Controller registers need to be restored
325 */
int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
{
	u32 pcgcctl;
	int ret = 0;

	/* Only meaningful when the hibernation feature is enabled */
	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	/*
	 * Bring the core out of Partial Power Down. Each PCGCTL bit is
	 * cleared with its own read-modify-write; the order (clock first,
	 * then power clamp, then reset) mirrors the reverse of the entry
	 * sequence in dwc2_enter_hibernation().
	 */
	pcgcctl = DWC2_READ_4(hsotg, PCGCTL);
	pcgcctl &= ~PCGCTL_STOPPCLK;
	DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);

	pcgcctl = DWC2_READ_4(hsotg, PCGCTL);
	pcgcctl &= ~PCGCTL_PWRCLMP;
	DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);

	pcgcctl = DWC2_READ_4(hsotg, PCGCTL);
	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
	DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);

	/* Settling time before touching other registers */
	udelay(100);
	if (restore) {
		/* Global registers first, then mode-specific ones */
		ret = dwc2_restore_global_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to restore registers\n",
				__func__);
			return ret;
		}
		if (dwc2_is_host_mode(hsotg)) {
			ret = dwc2_restore_host_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore host registers\n",
					__func__);
				return ret;
			}
		} else {
			ret = dwc2_restore_device_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore device registers\n",
					__func__);
				return ret;
			}
		}
	}

	return ret;
}
373
374 /**
375 * dwc2_enter_hibernation() - Put controller in Partial Power Down.
376 *
377 * @hsotg: Programming view of the DWC_otg controller
378 */
int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
{
	u32 pcgcctl;
	int ret = 0;

	/* Only meaningful when the hibernation feature is enabled */
	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	/* Backup all registers: global first, then mode-specific */
	ret = dwc2_backup_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
			__func__);
		return ret;
	}

	if (dwc2_is_host_mode(hsotg)) {
		ret = dwc2_backup_host_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup host registers\n",
				__func__);
			return ret;
		}
	} else {
		ret = dwc2_backup_device_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup device registers\n",
				__func__);
			return ret;
		}
	}

	/*
	 * Clear any pending interrupts since dwc2 will not be able to
	 * clear them after entering hibernation.
	 */
	DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff);

	/*
	 * Put the controller in low power state: clamp power, reset the
	 * power-down module, then stop the PHY clock. The short delays
	 * between writes give each step time to take effect.
	 */
	pcgcctl = DWC2_READ_4(hsotg, PCGCTL);

	pcgcctl |= PCGCTL_PWRCLMP;
	DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
	ndelay(20);

	pcgcctl |= PCGCTL_RSTPDWNMODULE;
	DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
	ndelay(20);

	pcgcctl |= PCGCTL_STOPPCLK;
	DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);

	return ret;
}
433
434 /**
 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
436 * used in both device and host modes
437 *
438 * @hsotg: Programming view of the DWC_otg controller
439 */
static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	/* Clear any pending OTG Interrupts */
	DWC2_WRITE_4(hsotg, GOTGINT, 0xffffffff);

	/* Clear any pending interrupts */
	DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff);

	/* Enable the interrupts in the GINTMSK */
	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;

	/* In slave mode the RX FIFO level interrupt drives data transfer */
	if (hsotg->core_params->dma_enable <= 0)
		intmsk |= GINTSTS_RXFLVL;
	/* Only watch the ID pin ourselves if no external control exists */
	if (hsotg->core_params->external_id_pin_ctl <= 0)
		intmsk |= GINTSTS_CONIDSTSCHNG;

	intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
		  GINTSTS_SESSREQINT;

	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
}
463
464 /*
465 * Initializes the FSLSPClkSel field of the HCFG register depending on the
466 * PHY type
467 */
static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, val;

	/*
	 * A dedicated FS PHY (or ULPI with dedicated FS-LS support)
	 * runs the PHY clock at 48 MHz; a HS PHY uses 30/60 MHz.
	 */
	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	     hsotg->core_params->ulpi_fs_ls > 0) ||
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* Full speed PHY */
		val = HCFG_FSLSPCLKSEL_48_MHZ;
	} else {
		/* High speed PHY running at full speed or high speed */
		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
	}

	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
	hcfg = DWC2_READ_4(hsotg, HCFG);
	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
	DWC2_WRITE_4(hsotg, HCFG, hcfg);
}
489
490 /*
 * Do a soft reset of the core. Be careful with this because it
492 * resets all the internal state machines of the core.
493 */
int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Core Soft Reset: set the bit, then poll until hardware clears it */
	greset = DWC2_READ_4(hsotg, GRSTCTL);
	greset |= GRSTCTL_CSFTRST;
	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
	do {
		udelay(1);
		greset = DWC2_READ_4(hsotg, GRSTCTL);
		/* Give up after ~50us; the core is likely hung */
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (greset & GRSTCTL_CSFTRST);

	/* Wait for AHB master IDLE state, same ~50us budget */
	count = 0;
	do {
		udelay(1);
		greset = DWC2_READ_4(hsotg, GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (!(greset & GRSTCTL_AHBIDLE));

	return 0;
}
531
532 /*
533 * Force the mode of the controller.
534 *
535 * Forcing the mode is needed for two cases:
536 *
537 * 1) If the dr_mode is set to either HOST or PERIPHERAL we force the
538 * controller to stay in a particular mode regardless of ID pin
539 * changes. We do this usually after a core reset.
540 *
541 * 2) During probe we want to read reset values of the hw
542 * configuration registers that are only available in either host or
543 * device mode. We may need to force the mode if the current mode does
544 * not allow us to access the register in the mode that we want.
545 *
546 * In either case it only makes sense to force the mode if the
547 * controller hardware is OTG capable.
548 *
549 * Checks are done in this function to determine whether doing a force
550 * would be valid or not.
551 *
552 * If a force is done, it requires a 25ms delay to take effect.
553 *
554 * Returns true if the mode was forced.
555 */
static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
{
	u32 gusbcfg;
	u32 set;
	u32 clear;

	dev_dbg(hsotg->dev, "Forcing mode to %s\n", host ? "host" : "device");

	/*
	 * Force mode has no effect if the hardware is not OTG.
	 */
	if (!dwc2_hw_is_otg(hsotg))
		return false;

	/*
	 * If dr_mode is either peripheral or host only, there is no
	 * need to ever force the mode to the opposite mode.
	 * Such a request indicates a caller bug, hence the WARN_ON.
	 */
	if (host && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
		WARN_ON(1);
		return false;
	}

	if (!host && hsotg->dr_mode == USB_DR_MODE_HOST) {
		WARN_ON(1);
		return false;
	}

	/* Set the requested force bit and clear the opposite one */
	gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);

	set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
	clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;

	gusbcfg &= ~clear;
	gusbcfg |= set;
	DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);

	/* A mode force needs 25ms to take effect (see function comment) */
	msleep(25);
	return true;
}
596
597 /*
598 * Clears the force mode bits.
599 */
dwc2_clear_force_mode(struct dwc2_hsotg * hsotg)600 static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
601 {
602 u32 gusbcfg;
603
604 gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
605 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
606 gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
607 DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
608
609 /*
610 * NOTE: This long sleep is _very_ important, otherwise the core will
611 * not stay in host mode after a connector ID change!
612 */
613 msleep(25);
614 }
615
616 /*
617 * Sets or clears force mode based on the dr_mode parameter.
618 */
dwc2_force_dr_mode(struct dwc2_hsotg * hsotg)619 void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
620 {
621 switch (hsotg->dr_mode) {
622 case USB_DR_MODE_HOST:
623 dwc2_force_mode(hsotg, true);
624 break;
625 case USB_DR_MODE_PERIPHERAL:
626 dwc2_force_mode(hsotg, false);
627 break;
628 case USB_DR_MODE_OTG:
629 dwc2_clear_force_mode(hsotg);
630 break;
631 default:
632 dev_warn(hsotg->dev, "%s() Invalid dr_mode=%d\n",
633 __func__, hsotg->dr_mode);
634 break;
635 }
636 }
637
638 /*
 * Do a soft reset of the core. Be careful with this because it
640 * resets all the internal state machines of the core.
641 *
642 * Additionally this will apply force mode as per the hsotg->dr_mode
643 * parameter.
644 */
int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg)
{
	/* Soft-reset first; only re-apply force mode on success */
	int rc = dwc2_core_reset(hsotg);

	if (rc == 0)
		dwc2_force_dr_mode(hsotg);

	return rc;
}
656
/*
 * Configure the core for a dedicated full-speed PHY, optionally selecting
 * the PHY (first init only) and enabling the FS I2C interface.
 */
static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, i2cctl;
	int retval = 0;

	/*
	 * core_init() is now called on every switch so only call the
	 * following for the first time through
	 */
	if (select_phy) {
		dev_dbg(hsotg->dev, "FS PHY selected\n");

		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
		/* Skip select + reset if the FS PHY is already selected */
		if (!(usbcfg & GUSBCFG_PHYSEL)) {
			usbcfg |= GUSBCFG_PHYSEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);

			/* Reset after a PHY select */
			retval = dwc2_core_reset_and_force_dr_mode(hsotg);

			if (retval) {
				dev_err(hsotg->dev,
					"%s: Reset failed, aborting", __func__);
				return retval;
			}
		}
	}

	/*
	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
	 * do this on HNP Dev/Host mode switches (done in dev_init and
	 * host_init).
	 */
	if (dwc2_is_host_mode(hsotg))
		dwc2_init_fs_ls_pclk_sel(hsotg);

	if (hsotg->core_params->i2c_enable > 0) {
		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");

		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);

		/*
		 * Program GI2CCTL.I2CEn: configure the device address,
		 * write the register with I2CEn off, then write again
		 * with I2CEn set to enable the interface.
		 */
		i2cctl = DWC2_READ_4(hsotg, GI2CCTL);
		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
		i2cctl &= ~GI2CCTL_I2CEN;
		DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl);
		i2cctl |= GI2CCTL_I2CEN;
		DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl);
	}

	return retval;
}
713
/*
 * Configure GUSBCFG for the high-speed PHY interface (ULPI or UTMI+).
 * Only does work when select_phy is set, i.e. on the first init.
 */
static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, usbcfg_old;
	int retval = 0;

	if (!select_phy)
		return 0;

	/* Keep the original value so we only reset when something changed */
	usbcfg = usbcfg_old = DWC2_READ_4(hsotg, GUSBCFG);

	/*
	 * HS PHY parameters. These parameters are preserved during soft reset
	 * so only program the first time. Do a soft reset immediately after
	 * setting phyif.
	 */
	switch (hsotg->core_params->phy_type) {
	case DWC2_PHY_TYPE_PARAM_ULPI:
		/* ULPI interface */
		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
		/* Optional double-data-rate ULPI signaling */
		if (hsotg->core_params->phy_ulpi_ddr > 0)
			usbcfg |= GUSBCFG_DDRSEL;
		break;
	case DWC2_PHY_TYPE_PARAM_UTMI:
		/* UTMI+ interface, 8- or 16-bit data width */
		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
		if (hsotg->core_params->phy_utmi_width == 16)
			usbcfg |= GUSBCFG_PHYIF16;
		break;
	default:
		/* FS PHY type should not reach this HS path */
		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
		break;
	}

	if (usbcfg != usbcfg_old) {
		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);

		/* Reset after setting the PHY parameters */
		retval = dwc2_core_reset_and_force_dr_mode(hsotg);
		if (retval) {
			dev_err(hsotg->dev,
				"%s: Reset failed, aborting", __func__);
			return retval;
		}
	}

	return retval;
}
764
/*
 * Dispatch PHY initialization to the FS or HS path based on the
 * configured speed/PHY type, then program the ULPI FS-LS options.
 */
static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg;
	int retval = 0;

	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* If FS mode with FS PHY */
		retval = dwc2_fs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	} else {
		/* High speed PHY */
		retval = dwc2_hs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	}

	/* ULPI FS-LS mode: set or clear the two bits together */
	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	    hsotg->core_params->ulpi_fs_ls > 0) {
		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
		usbcfg |= GUSBCFG_ULPI_FS_LS;
		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
	} else {
		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
	}

	return retval;
}
800
/*
 * Program the GAHBCFG register according to the detected DMA
 * architecture and the configured core parameters.
 */
static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
	struct dwc2_softc *sc = hsotg->hsotg_sc;
	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);

	switch (hsotg->hw_params.arch) {
	case GHWCFG2_EXT_DMA_ARCH:
		dev_dbg(hsotg->dev, "External DMA Mode\n");
		/* NetBSD bus glue must provide a DMA address hook */
		if (!sc->sc_set_dma_addr) {
			dev_err(hsotg->dev, "External DMA Mode not supported\n");
			return -EINVAL;
		}
		/* ahbcfg == -1 means "use hardware default" */
		if (hsotg->core_params->ahbcfg != -1) {
			/* Keep control bits, take the rest from the param */
			ahbcfg &= GAHBCFG_CTRL_MASK;
			ahbcfg |= hsotg->core_params->ahbcfg &
				  ~GAHBCFG_CTRL_MASK;
		}
		break;

	case GHWCFG2_INT_DMA_ARCH:
		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
		if (hsotg->core_params->ahbcfg != -1) {
			ahbcfg &= GAHBCFG_CTRL_MASK;
			ahbcfg |= hsotg->core_params->ahbcfg &
				  ~GAHBCFG_CTRL_MASK;
		}
		break;

	case GHWCFG2_SLAVE_ONLY_ARCH:
	default:
		dev_dbg(hsotg->dev, "Slave Only Mode\n");
		break;
	}

	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
		hsotg->core_params->dma_enable,
		hsotg->core_params->dma_desc_enable);

	if (hsotg->core_params->dma_enable > 0) {
		if (hsotg->core_params->dma_desc_enable > 0)
			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
		else
			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
	} else {
		dev_dbg(hsotg->dev, "Using Slave mode\n");
		/* Descriptor DMA makes no sense without DMA */
		hsotg->core_params->dma_desc_enable = 0;
	}

	if (hsotg->core_params->dma_enable > 0)
		ahbcfg |= GAHBCFG_DMA_EN;

	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);

	return 0;
}
856
/*
 * Program the HNP/SRP capability bits of GUSBCFG based on what the
 * hardware supports (hw_params.op_mode) and what the otg_cap core
 * parameter requests.
 */
static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;

	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
	/* Start from "no capabilities", then add what is allowed */
	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);

	switch (hsotg->hw_params.op_mode) {
	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
		if (hsotg->core_params->otg_cap ==
				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_HNPCAP;
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
		/* Hardware supports SRP only; HNP stays off */
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
	default:
		/* Hardware supports neither capability */
		break;
	}

	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
}
891
892 /**
893 * dwc2_core_init() - Initializes the DWC_otg controller registers and
894 * prepares the core for device mode or host mode operation
895 *
896 * @hsotg: Programming view of the DWC_otg controller
897 * @initial_setup: If true then this is the first init for this instance.
898 */
int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
{
	u32 usbcfg, otgctl;
	int retval;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);

	/* Set ULPI External VBUS bit if needed */
	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
	if (hsotg->core_params->phy_ulpi_ext_vbus ==
				DWC2_PHY_ULPI_EXTERNAL_VBUS)
		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;

	/* Set external TS Dline pulsing bit if needed */
	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
	if (hsotg->core_params->ts_dline > 0)
		usbcfg |= GUSBCFG_TERMSELDLPULSE;

	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);

	/*
	 * Reset the Controller
	 *
	 * We only need to reset the controller if this is a re-init.
	 * For the first init we know for sure that earlier code reset us (it
	 * needed to in order to properly detect various parameters).
	 */
	if (!initial_setup) {
		retval = dwc2_core_reset_and_force_dr_mode(hsotg);
		if (retval) {
			dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
				__func__);
			return retval;
		}
	}

	/*
	 * This needs to happen in FS mode before any other programming occurs
	 */
	retval = dwc2_phy_init(hsotg, initial_setup);
	if (retval)
		return retval;

	/* Program the GAHBCFG Register */
	retval = dwc2_gahbcfg_init(hsotg);
	if (retval)
		return retval;

	/* Program the GUSBCFG register */
	dwc2_gusbcfg_init(hsotg);

	/* Program the GOTGCTL register: select OTG version per parameter */
	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
	otgctl &= ~GOTGCTL_OTGVER;
	if (hsotg->core_params->otg_ver > 0)
		otgctl |= GOTGCTL_OTGVER;
	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);

	/* Clear the SRP success bit for FS-I2c */
	hsotg->srp_success = 0;

	/* Enable common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/*
	 * Do device or host initialization based on mode during PCD and
	 * HCD initialization
	 */
	if (dwc2_is_host_mode(hsotg)) {
		dev_dbg(hsotg->dev, "Host Mode\n");
		hsotg->op_state = OTG_STATE_A_HOST;
	} else {
		dev_dbg(hsotg->dev, "Device Mode\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
	}

	return 0;
}
980
981 /**
982 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
983 *
984 * @hsotg: Programming view of DWC_otg controller
985 */
void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	/* Disable all interrupts before reprogramming the masks */
	DWC2_WRITE_4(hsotg, GINTMSK, 0);
	DWC2_WRITE_4(hsotg, HAINTMSK, 0);

	/* Enable the common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/* Enable host mode interrupts without disturbing common interrupts */
	intmsk = DWC2_READ_4(hsotg, GINTMSK);
	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
}
1004
1005 /**
1006 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
1007 *
1008 * @hsotg: Programming view of DWC_otg controller
1009 */
dwc2_disable_host_interrupts(struct dwc2_hsotg * hsotg)1010 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
1011 {
1012 u32 intmsk = DWC2_READ_4(hsotg, GINTMSK);
1013
1014 /* Disable host mode interrupts without disturbing common interrupts */
1015 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
1016 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
1017 DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
1018 }
1019
1020 /*
1021 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
1022 * For system that have a total fifo depth that is smaller than the default
1023 * RX + TX fifo size.
1024 *
1025 * @hsotg: Programming view of DWC_otg controller
1026 */
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;

	total_fifo_size = hw->total_fifo_size;
	rxfsiz = params->host_rx_fifo_size;
	nptxfsiz = params->host_nperio_tx_fifo_size;
	ptxfsiz = params->host_perio_tx_fifo_size;

	/*
	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
	 * allocation with support for high bandwidth endpoints. Synopsys
	 * defines MPS(Max Packet size) for a periodic EP=1024, and for
	 * non-periodic as 512.
	 */
	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
		/*
		 * Requested sizes don't fit; fall back to minimum depths.
		 *
		 * For Buffer DMA mode/Scatter Gather DMA mode
		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
		 * with n = number of host channel.
		 * 2 * ((1024/4) + 2) = 516
		 */
		rxfsiz = 516 + hw->host_channels;

		/*
		 * min non-periodic tx fifo depth
		 * 2 * (largest non-periodic USB packet used / 4)
		 * 2 * (512/4) = 256
		 */
		nptxfsiz = 256;

		/*
		 * min periodic tx fifo depth
		 * (largest packet size*MC)/4
		 * (1024 * 3)/4 = 768
		 */
		ptxfsiz = 768;

		/* Write the minimum sizes back into the core parameters */
		params->host_rx_fifo_size = rxfsiz;
		params->host_nperio_tx_fifo_size = nptxfsiz;
		params->host_perio_tx_fifo_size = ptxfsiz;
	}

	/*
	 * If the summation of RX, NPTX and PTX fifo sizes is still
	 * bigger than the total_fifo_size, then we have a problem.
	 *
	 * We won't be able to allocate as many endpoints. Right now,
	 * we're just printing an error message, but ideally this FIFO
	 * allocation algorithm would be improved in the future.
	 *
	 * FIXME improve this FIFO allocation algorithm.
	 */
	if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
		dev_err(hsotg->dev, "invalid fifo sizes\n");
}
1085
dwc2_config_fifos(struct dwc2_hsotg * hsotg)1086 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
1087 {
1088 struct dwc2_core_params *params = hsotg->core_params;
1089 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
1090
1091 if (!params->enable_dynamic_fifo)
1092 return;
1093
1094 dwc2_calculate_dynamic_fifo(hsotg);
1095
1096 /* Rx FIFO */
1097 grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
1098 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
1099 grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
1100 grxfsiz |= params->host_rx_fifo_size <<
1101 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
1102 DWC2_WRITE_4(hsotg, GRXFSIZ, grxfsiz);
1103 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
1104 DWC2_READ_4(hsotg, GRXFSIZ));
1105
1106 /* Non-periodic Tx FIFO */
1107 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
1108 DWC2_READ_4(hsotg, GNPTXFSIZ));
1109 nptxfsiz = params->host_nperio_tx_fifo_size <<
1110 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
1111 nptxfsiz |= params->host_rx_fifo_size <<
1112 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
1113 DWC2_WRITE_4(hsotg, GNPTXFSIZ, nptxfsiz);
1114 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
1115 DWC2_READ_4(hsotg, GNPTXFSIZ));
1116
1117 /* Periodic Tx FIFO */
1118 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
1119 DWC2_READ_4(hsotg, HPTXFSIZ));
1120 hptxfsiz = params->host_perio_tx_fifo_size <<
1121 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
1122 hptxfsiz |= (params->host_rx_fifo_size +
1123 params->host_nperio_tx_fifo_size) <<
1124 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
1125 DWC2_WRITE_4(hsotg, HPTXFSIZ, hptxfsiz);
1126 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
1127 DWC2_READ_4(hsotg, HPTXFSIZ));
1128
1129 if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
1130 hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
1131 /*
1132 * Global DFIFOCFG calculation for Host mode -
1133 * include RxFIFO, NPTXFIFO and HPTXFIFO
1134 */
1135 dfifocfg = DWC2_READ_4(hsotg, GDFIFOCFG);
1136 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
1137 dfifocfg |= (params->host_rx_fifo_size +
1138 params->host_nperio_tx_fifo_size +
1139 params->host_perio_tx_fifo_size) <<
1140 GDFIFOCFG_EPINFOBASE_SHIFT &
1141 GDFIFOCFG_EPINFOBASE_MASK;
1142 DWC2_WRITE_4(hsotg, GDFIFOCFG, dfifocfg);
1143 }
1144 }
1145
1146 /**
1147 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
1148 * Host mode
1149 *
1150 * @hsotg: Programming view of DWC_otg controller
1151 *
1152 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
1153 * request queues. Host channels are reset to ensure that they are ready for
1154 * performing transfers.
1155 */
dwc2_core_host_init(struct dwc2_hsotg * hsotg)1156 void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
1157 {
1158 u32 hcfg, hfir, otgctl;
1159
1160 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
1161
1162 /* Restart the Phy Clock */
1163 DWC2_WRITE_4(hsotg, PCGCTL, 0);
1164
1165 /* Initialize Host Configuration Register */
1166 dwc2_init_fs_ls_pclk_sel(hsotg);
1167 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
1168 hcfg = DWC2_READ_4(hsotg, HCFG);
1169 hcfg |= HCFG_FSLSSUPP;
1170 DWC2_WRITE_4(hsotg, HCFG, hcfg);
1171 }
1172
1173 /*
1174 * This bit allows dynamic reloading of the HFIR register during
1175 * runtime. This bit needs to be programmed during initial configuration
1176 * and its value must not be changed during runtime.
1177 */
1178 if (hsotg->core_params->reload_ctl > 0) {
1179 hfir = DWC2_READ_4(hsotg, HFIR);
1180 hfir |= HFIR_RLDCTRL;
1181 DWC2_WRITE_4(hsotg, HFIR, hfir);
1182 }
1183
1184 if (hsotg->core_params->dma_desc_enable > 0) {
1185 u32 op_mode = hsotg->hw_params.op_mode;
1186 if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
1187 !hsotg->hw_params.dma_desc_enable ||
1188 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
1189 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
1190 op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
1191 dev_err(hsotg->dev,
1192 "Hardware does not support descriptor DMA mode -\n");
1193 dev_err(hsotg->dev,
1194 "falling back to buffer DMA mode.\n");
1195 hsotg->core_params->dma_desc_enable = 0;
1196 } else {
1197 hcfg = DWC2_READ_4(hsotg, HCFG);
1198 hcfg |= HCFG_DESCDMA;
1199 DWC2_WRITE_4(hsotg, HCFG, hcfg);
1200 }
1201 }
1202
1203 /* Configure data FIFO sizes */
1204 dwc2_config_fifos(hsotg);
1205
1206 /* TODO - check this */
1207 /* Clear Host Set HNP Enable in the OTG Control Register */
1208 otgctl = DWC2_READ_4(hsotg, GOTGCTL);
1209 otgctl &= ~GOTGCTL_HSTSETHNPEN;
1210 DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
1211
1212 /* Make sure the FIFOs are flushed */
1213 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
1214 dwc2_flush_rx_fifo(hsotg);
1215
1216 /* Clear Host Set HNP Enable in the OTG Control Register */
1217 otgctl = DWC2_READ_4(hsotg, GOTGCTL);
1218 otgctl &= ~GOTGCTL_HSTSETHNPEN;
1219 DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
1220
1221 if (hsotg->core_params->dma_desc_enable <= 0) {
1222 int num_channels, i;
1223 u32 hcchar;
1224
1225 /* Flush out any leftover queued requests */
1226 num_channels = hsotg->core_params->host_channels;
1227 for (i = 0; i < num_channels; i++) {
1228 hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
1229 hcchar &= ~HCCHAR_CHENA;
1230 hcchar |= HCCHAR_CHDIS;
1231 hcchar &= ~HCCHAR_EPDIR;
1232 DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar);
1233 }
1234
1235 /* Halt all channels to put them into a known state */
1236 for (i = 0; i < num_channels; i++) {
1237 int count = 0;
1238
1239 hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
1240 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
1241 hcchar &= ~HCCHAR_EPDIR;
1242 DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar);
1243 dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
1244 __func__, i);
1245 do {
1246 hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
1247 if (++count > 1000) {
1248 dev_err(hsotg->dev,
1249 "Unable to clear enable on channel %d\n",
1250 i);
1251 break;
1252 }
1253 udelay(1);
1254 } while (hcchar & HCCHAR_CHENA);
1255 }
1256 }
1257
1258 /* Turn on the vbus power */
1259 dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
1260 if (hsotg->op_state == OTG_STATE_A_HOST) {
1261 u32 hprt0 = dwc2_read_hprt0(hsotg);
1262
1263 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
1264 !!(hprt0 & HPRT0_PWR));
1265 if (!(hprt0 & HPRT0_PWR)) {
1266 hprt0 |= HPRT0_PWR;
1267 DWC2_WRITE_4(hsotg, HPRT0, hprt0);
1268 }
1269 }
1270
1271 dwc2_enable_host_interrupts(hsotg);
1272 }
1273
dwc2_hc_enable_slave_ints(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)1274 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
1275 struct dwc2_host_chan *chan)
1276 {
1277 u32 hcintmsk = HCINTMSK_CHHLTD;
1278
1279 switch (chan->ep_type) {
1280 case USB_ENDPOINT_XFER_CONTROL:
1281 case USB_ENDPOINT_XFER_BULK:
1282 dev_vdbg(hsotg->dev, "control/bulk\n");
1283 hcintmsk |= HCINTMSK_XFERCOMPL;
1284 hcintmsk |= HCINTMSK_STALL;
1285 hcintmsk |= HCINTMSK_XACTERR;
1286 hcintmsk |= HCINTMSK_DATATGLERR;
1287 if (chan->ep_is_in) {
1288 hcintmsk |= HCINTMSK_BBLERR;
1289 } else {
1290 hcintmsk |= HCINTMSK_NAK;
1291 hcintmsk |= HCINTMSK_NYET;
1292 if (chan->do_ping)
1293 hcintmsk |= HCINTMSK_ACK;
1294 }
1295
1296 if (chan->do_split) {
1297 hcintmsk |= HCINTMSK_NAK;
1298 if (chan->complete_split)
1299 hcintmsk |= HCINTMSK_NYET;
1300 else
1301 hcintmsk |= HCINTMSK_ACK;
1302 }
1303
1304 if (chan->error_state)
1305 hcintmsk |= HCINTMSK_ACK;
1306 break;
1307
1308 case USB_ENDPOINT_XFER_INT:
1309 if (dbg_perio())
1310 dev_vdbg(hsotg->dev, "intr\n");
1311 hcintmsk |= HCINTMSK_XFERCOMPL;
1312 hcintmsk |= HCINTMSK_NAK;
1313 hcintmsk |= HCINTMSK_STALL;
1314 hcintmsk |= HCINTMSK_XACTERR;
1315 hcintmsk |= HCINTMSK_DATATGLERR;
1316 hcintmsk |= HCINTMSK_FRMOVRUN;
1317
1318 if (chan->ep_is_in)
1319 hcintmsk |= HCINTMSK_BBLERR;
1320 if (chan->error_state)
1321 hcintmsk |= HCINTMSK_ACK;
1322 if (chan->do_split) {
1323 if (chan->complete_split)
1324 hcintmsk |= HCINTMSK_NYET;
1325 else
1326 hcintmsk |= HCINTMSK_ACK;
1327 }
1328 break;
1329
1330 case USB_ENDPOINT_XFER_ISOC:
1331 if (dbg_perio())
1332 dev_vdbg(hsotg->dev, "isoc\n");
1333 hcintmsk |= HCINTMSK_XFERCOMPL;
1334 hcintmsk |= HCINTMSK_FRMOVRUN;
1335 hcintmsk |= HCINTMSK_ACK;
1336
1337 if (chan->ep_is_in) {
1338 hcintmsk |= HCINTMSK_XACTERR;
1339 hcintmsk |= HCINTMSK_BBLERR;
1340 }
1341 break;
1342 default:
1343 dev_err(hsotg->dev, "## Unknown EP type ##\n");
1344 break;
1345 }
1346
1347 DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
1348 if (dbg_hc(chan))
1349 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
1350 }
1351
dwc2_hc_enable_dma_ints(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)1352 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
1353 struct dwc2_host_chan *chan)
1354 {
1355 u32 hcintmsk = HCINTMSK_CHHLTD;
1356
1357 /*
1358 * For Descriptor DMA mode core halts the channel on AHB error.
1359 * Interrupt is not required.
1360 */
1361 if (hsotg->core_params->dma_desc_enable <= 0) {
1362 if (dbg_hc(chan))
1363 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1364 hcintmsk |= HCINTMSK_AHBERR;
1365 } else {
1366 if (dbg_hc(chan))
1367 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
1368 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1369 hcintmsk |= HCINTMSK_XFERCOMPL;
1370 }
1371
1372 if (chan->error_state && !chan->do_split &&
1373 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1374 if (dbg_hc(chan))
1375 dev_vdbg(hsotg->dev, "setting ACK\n");
1376 hcintmsk |= HCINTMSK_ACK;
1377 if (chan->ep_is_in) {
1378 hcintmsk |= HCINTMSK_DATATGLERR;
1379 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
1380 hcintmsk |= HCINTMSK_NAK;
1381 }
1382 }
1383
1384 DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
1385 if (dbg_hc(chan))
1386 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
1387 }
1388
dwc2_hc_enable_ints(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)1389 static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
1390 struct dwc2_host_chan *chan)
1391 {
1392 u32 intmsk;
1393
1394 if (hsotg->core_params->dma_enable > 0) {
1395 if (dbg_hc(chan))
1396 dev_vdbg(hsotg->dev, "DMA enabled\n");
1397 dwc2_hc_enable_dma_ints(hsotg, chan);
1398 } else {
1399 if (dbg_hc(chan))
1400 dev_vdbg(hsotg->dev, "DMA disabled\n");
1401 dwc2_hc_enable_slave_ints(hsotg, chan);
1402 }
1403
1404 /* Enable the top level host channel interrupt */
1405 intmsk = DWC2_READ_4(hsotg, HAINTMSK);
1406 intmsk |= 1 << chan->hc_num;
1407 DWC2_WRITE_4(hsotg, HAINTMSK, intmsk);
1408 if (dbg_hc(chan))
1409 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
1410
1411 /* Make sure host channel interrupts are enabled */
1412 intmsk = DWC2_READ_4(hsotg, GINTMSK);
1413 intmsk |= GINTSTS_HCHINT;
1414 DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
1415 if (dbg_hc(chan))
1416 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
1417 }
1418
1419 /**
1420 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
1421 * a specific endpoint
1422 *
1423 * @hsotg: Programming view of DWC_otg controller
1424 * @chan: Information needed to initialize the host channel
1425 *
1426 * The HCCHARn register is set up with the characteristics specified in chan.
1427 * Host channel interrupts that may need to be serviced while this transfer is
1428 * in progress are enabled.
1429 */
dwc2_hc_init(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)1430 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1431 {
1432 u8 hc_num = chan->hc_num;
1433 u32 hcintmsk;
1434 u32 hcchar;
1435 u32 hcsplt = 0;
1436
1437 if (dbg_hc(chan))
1438 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1439
1440 /* Clear old interrupt conditions for this host channel */
1441 hcintmsk = 0xffffffff;
1442 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1443 DWC2_WRITE_4(hsotg, HCINT(hc_num), hcintmsk);
1444
1445 /* Enable channel interrupts required for this transfer */
1446 dwc2_hc_enable_ints(hsotg, chan);
1447
1448 /*
1449 * Program the HCCHARn register with the endpoint characteristics for
1450 * the current transfer
1451 */
1452 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
1453 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
1454 if (chan->ep_is_in)
1455 hcchar |= HCCHAR_EPDIR;
1456 if (chan->speed == USB_SPEED_LOW)
1457 hcchar |= HCCHAR_LSPDDEV;
1458 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
1459 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
1460 DWC2_WRITE_4(hsotg, HCCHAR(hc_num), hcchar);
1461 if (dbg_hc(chan)) {
1462 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
1463 hc_num, hcchar);
1464
1465 dev_vdbg(hsotg->dev, "%s: Channel %d\n",
1466 __func__, hc_num);
1467 dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
1468 chan->dev_addr);
1469 dev_vdbg(hsotg->dev, " Ep Num: %d\n",
1470 chan->ep_num);
1471 dev_vdbg(hsotg->dev, " Is In: %d\n",
1472 chan->ep_is_in);
1473 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
1474 chan->speed == USB_SPEED_LOW);
1475 dev_vdbg(hsotg->dev, " Ep Type: %d\n",
1476 chan->ep_type);
1477 dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
1478 chan->max_packet);
1479 }
1480
1481 /* Program the HCSPLT register for SPLITs */
1482 if (chan->do_split) {
1483 if (dbg_hc(chan))
1484 dev_vdbg(hsotg->dev,
1485 "Programming HC %d with split --> %s\n",
1486 hc_num,
1487 chan->complete_split ? "CSPLIT" : "SSPLIT");
1488 if (chan->complete_split)
1489 hcsplt |= HCSPLT_COMPSPLT;
1490 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
1491 HCSPLT_XACTPOS_MASK;
1492 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
1493 HCSPLT_HUBADDR_MASK;
1494 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
1495 HCSPLT_PRTADDR_MASK;
1496 if (dbg_hc(chan)) {
1497 dev_vdbg(hsotg->dev, " comp split %d\n",
1498 chan->complete_split);
1499 dev_vdbg(hsotg->dev, " xact pos %d\n",
1500 chan->xact_pos);
1501 dev_vdbg(hsotg->dev, " hub addr %d\n",
1502 chan->hub_addr);
1503 dev_vdbg(hsotg->dev, " hub port %d\n",
1504 chan->hub_port);
1505 dev_vdbg(hsotg->dev, " is_in %d\n",
1506 chan->ep_is_in);
1507 dev_vdbg(hsotg->dev, " Max Pkt %d\n",
1508 chan->max_packet);
1509 dev_vdbg(hsotg->dev, " xferlen %d\n",
1510 chan->xfer_len);
1511 }
1512 }
1513
1514 DWC2_WRITE_4(hsotg, HCSPLT(hc_num), hcsplt);
1515 }
1516
1517 /**
1518 * dwc2_hc_halt() - Attempts to halt a host channel
1519 *
1520 * @hsotg: Controller register interface
1521 * @chan: Host channel to halt
1522 * @halt_status: Reason for halting the channel
1523 *
1524 * This function should only be called in Slave mode or to abort a transfer in
1525 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
1526 * controller halts the channel when the transfer is complete or a condition
1527 * occurs that requires application intervention.
1528 *
1529 * In slave mode, checks for a free request queue entry, then sets the Channel
1530 * Enable and Channel Disable bits of the Host Channel Characteristics
1531 * register of the specified channel to intiate the halt. If there is no free
1532 * request queue entry, sets only the Channel Disable bit of the HCCHARn
1533 * register to flush requests for this channel. In the latter case, sets a
1534 * flag to indicate that the host channel needs to be halted when a request
1535 * queue slot is open.
1536 *
1537 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1538 * HCCHARn register. The controller ensures there is space in the request
1539 * queue before submitting the halt request.
1540 *
1541 * Some time may elapse before the core flushes any posted requests for this
1542 * host channel and halts. The Channel Halted interrupt handler completes the
1543 * deactivation of the host channel.
1544 */
dwc2_hc_halt(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,enum dwc2_halt_status halt_status)1545 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
1546 enum dwc2_halt_status halt_status)
1547 {
1548 u32 nptxsts, hptxsts, hcchar;
1549
1550 if (dbg_hc(chan))
1551 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1552 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
1553 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
1554
1555 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1556 halt_status == DWC2_HC_XFER_AHB_ERR) {
1557 /*
1558 * Disable all channel interrupts except Ch Halted. The QTD
1559 * and QH state associated with this transfer has been cleared
1560 * (in the case of URB_DEQUEUE), so the channel needs to be
1561 * shut down carefully to prevent crashes.
1562 */
1563 u32 hcintmsk = HCINTMSK_CHHLTD;
1564
1565 dev_vdbg(hsotg->dev, "dequeue/error\n");
1566 DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
1567
1568 /*
1569 * Make sure no other interrupts besides halt are currently
1570 * pending. Handling another interrupt could cause a crash due
1571 * to the QTD and QH state.
1572 */
1573 DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), ~hcintmsk);
1574
1575 /*
1576 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1577 * even if the channel was already halted for some other
1578 * reason
1579 */
1580 chan->halt_status = halt_status;
1581
1582 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1583 if (!(hcchar & HCCHAR_CHENA)) {
1584 /*
1585 * The channel is either already halted or it hasn't
1586 * started yet. In DMA mode, the transfer may halt if
1587 * it finishes normally or a condition occurs that
1588 * requires driver intervention. Don't want to halt
1589 * the channel again. In either Slave or DMA mode,
1590 * it's possible that the transfer has been assigned
1591 * to a channel, but not started yet when an URB is
1592 * dequeued. Don't want to halt a channel that hasn't
1593 * started yet.
1594 */
1595 return;
1596 }
1597 }
1598 if (chan->halt_pending) {
1599 /*
1600 * A halt has already been issued for this channel. This might
1601 * happen when a transfer is aborted by a higher level in
1602 * the stack.
1603 */
1604 dev_vdbg(hsotg->dev,
1605 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1606 __func__, chan->hc_num);
1607 return;
1608 }
1609
1610 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1611
1612 /* No need to set the bit in DDMA for disabling the channel */
1613 /* TODO check it everywhere channel is disabled */
1614 if (hsotg->core_params->dma_desc_enable <= 0) {
1615 if (dbg_hc(chan))
1616 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1617 hcchar |= HCCHAR_CHENA;
1618 } else {
1619 if (dbg_hc(chan))
1620 dev_dbg(hsotg->dev, "desc DMA enabled\n");
1621 }
1622 hcchar |= HCCHAR_CHDIS;
1623
1624 if (hsotg->core_params->dma_enable <= 0) {
1625 if (dbg_hc(chan))
1626 dev_vdbg(hsotg->dev, "DMA not enabled\n");
1627 hcchar |= HCCHAR_CHENA;
1628
1629 /* Check for space in the request queue to issue the halt */
1630 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1631 chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1632 dev_vdbg(hsotg->dev, "control/bulk\n");
1633 nptxsts = DWC2_READ_4(hsotg, GNPTXSTS);
1634 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1635 dev_vdbg(hsotg->dev, "Disabling channel\n");
1636 hcchar &= ~HCCHAR_CHENA;
1637 }
1638 } else {
1639 if (dbg_perio())
1640 dev_vdbg(hsotg->dev, "isoc/intr\n");
1641 hptxsts = DWC2_READ_4(hsotg, HPTXSTS);
1642 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1643 hsotg->queuing_high_bandwidth) {
1644 if (dbg_perio())
1645 dev_vdbg(hsotg->dev, "Disabling channel\n");
1646 hcchar &= ~HCCHAR_CHENA;
1647 }
1648 }
1649 } else {
1650 if (dbg_hc(chan))
1651 dev_vdbg(hsotg->dev, "DMA enabled\n");
1652 }
1653
1654 DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1655 chan->halt_status = halt_status;
1656
1657 if (hcchar & HCCHAR_CHENA) {
1658 if (dbg_hc(chan))
1659 dev_vdbg(hsotg->dev, "Channel enabled\n");
1660 chan->halt_pending = 1;
1661 chan->halt_on_queue = 0;
1662 } else {
1663 if (dbg_hc(chan))
1664 dev_vdbg(hsotg->dev, "Channel disabled\n");
1665 chan->halt_on_queue = 1;
1666 }
1667
1668 if (dbg_hc(chan)) {
1669 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1670 chan->hc_num);
1671 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
1672 hcchar);
1673 dev_vdbg(hsotg->dev, " halt_pending: %d\n",
1674 chan->halt_pending);
1675 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
1676 chan->halt_on_queue);
1677 dev_vdbg(hsotg->dev, " halt_status: %d\n",
1678 chan->halt_status);
1679 }
1680 }
1681
1682 /**
1683 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1684 *
1685 * @hsotg: Programming view of DWC_otg controller
1686 * @chan: Identifies the host channel to clean up
1687 *
1688 * This function is normally called after a transfer is done and the host
1689 * channel is being released
1690 */
dwc2_hc_cleanup(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)1691 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1692 {
1693 u32 hcintmsk;
1694
1695 chan->xfer_started = 0;
1696
1697 /*
1698 * Clear channel interrupt enables and any unhandled channel interrupt
1699 * conditions
1700 */
1701 DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), 0);
1702 hcintmsk = 0xffffffff;
1703 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1704 DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), hcintmsk);
1705 }
1706
1707 /**
1708 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1709 * which frame a periodic transfer should occur
1710 *
1711 * @hsotg: Programming view of DWC_otg controller
1712 * @chan: Identifies the host channel to set up and its properties
1713 * @hcchar: Current value of the HCCHAR register for the specified host channel
1714 *
1715 * This function has no effect on non-periodic transfers
1716 */
dwc2_hc_set_even_odd_frame(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,u32 * hcchar)1717 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1718 struct dwc2_host_chan *chan, u32 *hcchar)
1719 {
1720 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1721 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1722 /* 1 if _next_ frame is odd, 0 if it's even */
1723 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
1724 *hcchar |= HCCHAR_ODDFRM;
1725 }
1726 }
1727
dwc2_set_pid_isoc(struct dwc2_host_chan * chan)1728 static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1729 {
1730 /* Set up the initial PID for the transfer */
1731 if (chan->speed == USB_SPEED_HIGH) {
1732 if (chan->ep_is_in) {
1733 if (chan->multi_count == 1)
1734 chan->data_pid_start = DWC2_HC_PID_DATA0;
1735 else if (chan->multi_count == 2)
1736 chan->data_pid_start = DWC2_HC_PID_DATA1;
1737 else
1738 chan->data_pid_start = DWC2_HC_PID_DATA2;
1739 } else {
1740 if (chan->multi_count == 1)
1741 chan->data_pid_start = DWC2_HC_PID_DATA0;
1742 else
1743 chan->data_pid_start = DWC2_HC_PID_MDATA;
1744 }
1745 } else {
1746 chan->data_pid_start = DWC2_HC_PID_DATA0;
1747 }
1748 }
1749
1750 /**
1751 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1752 * the Host Channel
1753 *
1754 * @hsotg: Programming view of DWC_otg controller
1755 * @chan: Information needed to initialize the host channel
1756 *
1757 * This function should only be called in Slave mode. For a channel associated
1758 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1759 * associated with a periodic EP, the periodic Tx FIFO is written.
1760 *
1761 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1762 * the number of bytes written to the Tx FIFO.
1763 */
dwc2_hc_write_packet(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)1764 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1765 struct dwc2_host_chan *chan)
1766 {
1767 u32 i;
1768 u32 remaining_count;
1769 u32 byte_count;
1770 u32 dword_count;
1771 u32 *data_buf = (u32 *)chan->xfer_buf;
1772 u32 data_fifo;
1773
1774 if (dbg_hc(chan))
1775 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1776
1777 data_fifo = HCFIFO(chan->hc_num);
1778
1779 remaining_count = chan->xfer_len - chan->xfer_count;
1780 if (remaining_count > chan->max_packet)
1781 byte_count = chan->max_packet;
1782 else
1783 byte_count = remaining_count;
1784
1785 dword_count = (byte_count + 3) / 4;
1786
1787 if (((unsigned long)data_buf & 0x3) == 0) {
1788 /* xfer_buf is DWORD aligned */
1789 for (i = 0; i < dword_count; i++, data_buf++)
1790 DWC2_WRITE_4(hsotg, data_fifo, *data_buf);
1791 } else {
1792 /* xfer_buf is not DWORD aligned */
1793 for (i = 0; i < dword_count; i++, data_buf++) {
1794 u32 data = data_buf[0] | data_buf[1] << 8 |
1795 data_buf[2] << 16 | data_buf[3] << 24;
1796 DWC2_WRITE_4(hsotg, data_fifo, data);
1797 }
1798 }
1799
1800 chan->xfer_count += byte_count;
1801 chan->xfer_buf += byte_count;
1802 }
1803
1804 /**
1805 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1806 * channel and starts the transfer
1807 *
1808 * @hsotg: Programming view of DWC_otg controller
1809 * @chan: Information needed to initialize the host channel. The xfer_len value
1810 * may be reduced to accommodate the max widths of the XferSize and
1811 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1812 * changed to reflect the final xfer_len value.
1813 *
1814 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1815 * the caller must ensure that there is sufficient space in the request queue
1816 * and Tx Data FIFO.
1817 *
1818 * For an OUT transfer in Slave mode, it loads a data packet into the
1819 * appropriate FIFO. If necessary, additional data packets are loaded in the
1820 * Host ISR.
1821 *
1822 * For an IN transfer in Slave mode, a data packet is requested. The data
1823 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1824 * additional data packets are requested in the Host ISR.
1825 *
1826 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1827 * register along with a packet count of 1 and the channel is enabled. This
1828 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1829 * simply set to 0 since no data transfer occurs in this case.
1830 *
1831 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1832 * all the information required to perform the subsequent data transfer. In
1833 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1834 * controller performs the entire PING protocol, then starts the data
1835 * transfer.
1836 */
dwc2_hc_start_transfer(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)1837 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1838 struct dwc2_host_chan *chan)
1839 {
1840 u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1841 u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1842 u32 hcchar;
1843 u32 hctsiz = 0;
1844 u16 num_packets;
1845 u32 ec_mc;
1846
1847 if (dbg_hc(chan))
1848 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1849
1850 if (chan->do_ping) {
1851 if (hsotg->core_params->dma_enable <= 0) {
1852 if (dbg_hc(chan))
1853 dev_vdbg(hsotg->dev, "ping, no DMA\n");
1854 dwc2_hc_do_ping(hsotg, chan);
1855 chan->xfer_started = 1;
1856 return;
1857 } else {
1858 if (dbg_hc(chan))
1859 dev_vdbg(hsotg->dev, "ping, DMA\n");
1860 hctsiz |= TSIZ_DOPNG;
1861 }
1862 }
1863
1864 if (chan->do_split) {
1865 if (dbg_hc(chan))
1866 dev_vdbg(hsotg->dev, "split\n");
1867 num_packets = 1;
1868
1869 if (chan->complete_split && !chan->ep_is_in)
1870 /*
1871 * For CSPLIT OUT Transfer, set the size to 0 so the
1872 * core doesn't expect any data written to the FIFO
1873 */
1874 chan->xfer_len = 0;
1875 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1876 chan->xfer_len = chan->max_packet;
1877 else if (!chan->ep_is_in && chan->xfer_len > 188)
1878 chan->xfer_len = 188;
1879
1880 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1881 TSIZ_XFERSIZE_MASK;
1882
1883 /* For split set ec_mc for immediate retries */
1884 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1885 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1886 ec_mc = 3;
1887 else
1888 ec_mc = 1;
1889 } else {
1890 if (dbg_hc(chan))
1891 dev_vdbg(hsotg->dev, "no split\n");
1892 /*
1893 * Ensure that the transfer length and packet count will fit
1894 * in the widths allocated for them in the HCTSIZn register
1895 */
1896 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1897 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1898 /*
1899 * Make sure the transfer size is no larger than one
1900 * (micro)frame's worth of data. (A check was done
1901 * when the periodic transfer was accepted to ensure
1902 * that a (micro)frame's worth of data can be
1903 * programmed into a channel.)
1904 */
1905 u32 max_periodic_len =
1906 chan->multi_count * chan->max_packet;
1907
1908 if (chan->xfer_len > max_periodic_len)
1909 chan->xfer_len = max_periodic_len;
1910 } else if (chan->xfer_len > max_hc_xfer_size) {
1911 /*
1912 * Make sure that xfer_len is a multiple of max packet
1913 * size
1914 */
1915 chan->xfer_len =
1916 max_hc_xfer_size - chan->max_packet + 1;
1917 }
1918
1919 if (chan->xfer_len > 0) {
1920 num_packets = (chan->xfer_len + chan->max_packet - 1) /
1921 chan->max_packet;
1922 if (num_packets > max_hc_pkt_count) {
1923 num_packets = max_hc_pkt_count;
1924 chan->xfer_len = num_packets * chan->max_packet;
1925 }
1926 } else {
1927 /* Need 1 packet for transfer length of 0 */
1928 num_packets = 1;
1929 }
1930
1931 if (chan->ep_is_in)
1932 /*
1933 * Always program an integral # of max packets for IN
1934 * transfers
1935 */
1936 chan->xfer_len = num_packets * chan->max_packet;
1937
1938 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1939 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1940 /*
1941 * Make sure that the multi_count field matches the
1942 * actual transfer length
1943 */
1944 chan->multi_count = num_packets;
1945
1946 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1947 dwc2_set_pid_isoc(chan);
1948
1949 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1950 TSIZ_XFERSIZE_MASK;
1951
1952 /* The ec_mc gets the multi_count for non-split */
1953 ec_mc = chan->multi_count;
1954 }
1955
1956 chan->start_pkt_count = num_packets;
1957 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1958 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1959 TSIZ_SC_MC_PID_MASK;
1960 DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
1961 if (dbg_hc(chan)) {
1962 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1963 hctsiz, chan->hc_num);
1964
1965 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1966 chan->hc_num);
1967 dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
1968 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1969 TSIZ_XFERSIZE_SHIFT);
1970 dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
1971 (hctsiz & TSIZ_PKTCNT_MASK) >>
1972 TSIZ_PKTCNT_SHIFT);
1973 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1974 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1975 TSIZ_SC_MC_PID_SHIFT);
1976 }
1977
1978 if (hsotg->core_params->dma_enable > 0) {
1979 dma_addr_t dma_addr;
1980
1981 if (chan->align_buf) {
1982 if (dbg_hc(chan))
1983 dev_vdbg(hsotg->dev, "align_buf\n");
1984 dma_addr = chan->align_buf;
1985 } else {
1986 dma_addr = chan->xfer_dma;
1987 }
1988 if (hsotg->hsotg_sc->sc_set_dma_addr == NULL) {
1989 DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num),
1990 (u32)dma_addr);
1991 if (dbg_hc(chan))
1992 dev_vdbg(hsotg->dev,
1993 "Wrote %08lx to HCDMA(%d)\n",
1994 (unsigned long)dma_addr,
1995 chan->hc_num);
1996 } else {
1997 (void)(*hsotg->hsotg_sc->sc_set_dma_addr)(
1998 hsotg->dev, dma_addr, chan->hc_num);
1999 }
2000 }
2001
2002 /* Start the split */
2003 if (chan->do_split) {
2004 u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chan->hc_num));
2005
2006 hcsplt |= HCSPLT_SPLTENA;
2007 DWC2_WRITE_4(hsotg, HCSPLT(chan->hc_num), hcsplt);
2008 }
2009
2010 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
2011 hcchar &= ~HCCHAR_MULTICNT_MASK;
2012 hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
2013 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
2014
2015 if (hcchar & HCCHAR_CHDIS)
2016 dev_warn(hsotg->dev,
2017 "%s: chdis set, channel %d, hcchar 0x%08x\n",
2018 __func__, chan->hc_num, hcchar);
2019
2020 /* Set host channel enable after all other setup is complete */
2021 hcchar |= HCCHAR_CHENA;
2022 hcchar &= ~HCCHAR_CHDIS;
2023
2024 if (dbg_hc(chan))
2025 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
2026 (hcchar & HCCHAR_MULTICNT_MASK) >>
2027 HCCHAR_MULTICNT_SHIFT);
2028
2029 DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
2030 if (dbg_hc(chan))
2031 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
2032 chan->hc_num);
2033
2034 chan->xfer_started = 1;
2035 chan->requests++;
2036
2037 if (hsotg->core_params->dma_enable <= 0 &&
2038 !chan->ep_is_in && chan->xfer_len > 0)
2039 /* Load OUT packet into the appropriate Tx FIFO */
2040 dwc2_hc_write_packet(hsotg, chan);
2041 }
2042
2043 /**
2044 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
2045 * host channel and starts the transfer in Descriptor DMA mode
2046 *
2047 * @hsotg: Programming view of DWC_otg controller
2048 * @chan: Information needed to initialize the host channel
2049 *
2050 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
2051 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
2052 * with micro-frame bitmap.
2053 *
2054 * Initializes HCDMA register with descriptor list address and CTD value then
2055 * starts the transfer via enabling the channel.
2056 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz = 0;

	/* For a PING transfer only the Do Ping bit is needed in HCTSIZ */
	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, " Start PID: %d\n",
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
	}

	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);

	/* Flush the descriptor list to memory before the core fetches it */
	usb_syncmem(&chan->desc_list_usbdma, 0, chan->desc_list_sz,
		    BUS_DMASYNC_PREWRITE);

	if (hsotg->hsotg_sc->sc_set_dma_addr == NULL) {
		/* Core takes the descriptor list base directly in HCDMA */
		DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num), chan->desc_list_addr);
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
				 &chan->desc_list_addr, chan->hc_num);
	} else {
		/* Platform hook for cores needing an external DMA address setup */
		(void)(*hsotg->hsotg_sc->sc_set_dma_addr)(
			hsotg->dev, chan->desc_list_addr, chan->hc_num);
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %pad to ext dma(%d)\n",
				 &chan->desc_list_addr, chan->hc_num);
	}

	/* Program the multi-count field, preserving the other HCCHAR bits */
	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}
2132
2133 /**
2134 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
2135 * a previous call to dwc2_hc_start_transfer()
2136 *
2137 * @hsotg: Programming view of DWC_otg controller
2138 * @chan: Information needed to initialize the host channel
2139 *
2140 * The caller must ensure there is sufficient space in the request queue and Tx
2141 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
2142 * the controller acts autonomously to complete transfers programmed to a host
2143 * channel.
2144 *
2145 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
2146 * if there is any data remaining to be queued. For an IN transfer, another
2147 * data packet is always requested. For the SETUP phase of a control transfer,
2148 * this function does nothing.
2149 *
2150 * Return: 1 if a new request is queued, 0 if no more requests are required
2151 * for this transfer
2152 */
int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen. So we can't depend on the NAK interrupt
		 * handler to requeue a NAK'd request. Instead, IN requests
		 * are issued each time this function is called. When the
		 * transfer completes, the extra requests for the channel will
		 * be flushed.
		 */
		u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));

		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
		/* Re-enable the channel to request the next IN packet */
		hcchar |= HCCHAR_CHENA;
		hcchar &= ~HCCHAR_CHDIS;
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
				 hcchar);
		DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
		chan->requests++;
		return 1;
	}

	/* OUT transfers */

	if (chan->xfer_count < chan->xfer_len) {
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/* Periodic OUT: refresh the even/odd frame selection */
			u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));

			dwc2_hc_set_even_odd_frame(hsotg, chan,
						   &hcchar);
		}

		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
		chan->requests++;
		return 1;
	}

	/* All OUT data already queued; nothing more to request */
	return 0;
}
2213
2214 /**
2215 * dwc2_hc_do_ping() - Starts a PING transfer
2216 *
2217 * @hsotg: Programming view of DWC_otg controller
2218 * @chan: Information needed to initialize the host channel
2219 *
2220 * This function should only be called in Slave mode. The Do Ping bit is set in
2221 * the HCTSIZ register, then the channel is enabled.
2222 */
dwc2_hc_do_ping(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)2223 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
2224 {
2225 u32 hcchar;
2226 u32 hctsiz;
2227
2228 if (dbg_hc(chan))
2229 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
2230 chan->hc_num);
2231
2232
2233 hctsiz = TSIZ_DOPNG;
2234 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
2235 DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
2236
2237 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
2238 hcchar |= HCCHAR_CHENA;
2239 hcchar &= ~HCCHAR_CHDIS;
2240 DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
2241 }
2242
2243 /**
2244 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
2245 * the HFIR register according to PHY type and speed
2246 *
2247 * @hsotg: Programming view of DWC_otg controller
2248 *
2249 * NOTE: The caller can modify the value of the HFIR register only after the
2250 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
2251 * has been set
2252 */
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;
	u32 hprt0;
	int clock = 60;	/* default value */

	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
	hprt0 = DWC2_READ_4(hsotg, HPRT0);

	/*
	 * Determine the PHY clock rate from the GUSBCFG PHY selection bits.
	 * The conditions below are mutually exclusive, so at most one of
	 * them overrides the 60 MHz default.
	 * NOTE(review): clock appears to be in MHz — confirm against the
	 * controller databook.
	 */

	/* HS PHY, ULPI/UTMI select set, 8-bit interface: 60 */
	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
	    !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	/* FS PHY select with a shared ULPI full-speed PHY: 48 */
	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
		clock = 48;
	/* HS PHY, no low-power clock, 16-bit interface: 30 */
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 30;
	/* HS PHY, no low-power clock, 8-bit interface: 60 */
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	/* HS PHY with low-power clock, 16-bit interface: 48 */
	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 48;
	/* FS PHY select, 8-bit, shared UTMI full-speed PHY: 48 */
	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
		clock = 48;
	/* FS PHY select with a dedicated full-speed PHY: 48 */
	if ((usbcfg & GUSBCFG_PHYSEL) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		clock = 48;

	/*
	 * Scale by the frame duration implied by the current port speed:
	 * 125 (presumably the 125 us HS microframe) versus 1000 (the 1 ms
	 * FS/LS frame) — TODO confirm units against the databook.
	 */
	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
		/* High speed case */
		return 125 * clock;
	else
		/* FS/LS case */
		return 1000 * clock;
}
2291
2292 /**
2293 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
2294 * buffer
2295 *
2296 * @core_if: Programming view of DWC_otg controller
2297 * @dest: Destination buffer for the packet
2298 * @bytes: Number of bytes to copy to the destination
2299 */
/*
 * Reads a packet from the Rx FIFO into dest.  The FIFO must be popped in
 * whole 32-bit words, so ceil(bytes/4) words are always read from the
 * hardware; only `bytes` bytes are stored into dest.
 *
 * Fix: the previous version stored word_count = (bytes + 3) / 4 whole
 * words straight into dest, writing up to 3 bytes past the end of the
 * buffer whenever bytes was not a multiple of 4.  The trailing partial
 * word is now staged in a local u32 and copied byte-by-byte.
 */
void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
{
	bus_size_t fifo = HCFIFO(0);
	u32 *data_buf = (u32 *)dest;
	int word_count = bytes / 4;
	int remainder = bytes & 3;
	int i;

	/*
	 * Todo: Account for the case where dest is not dword aligned. This
	 * requires reading data from the FIFO into a u32 temp buffer, then
	 * moving it into the data buffer.
	 */

	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);

	/* Copy the whole words directly */
	for (i = 0; i < word_count; i++, data_buf++)
		*data_buf = DWC2_READ_4(hsotg, fifo);

	/*
	 * Partial trailing word: still pop one word from the FIFO (keeping
	 * the total pop count at ceil(bytes/4)), but store only the bytes
	 * that fit in dest.
	 */
	if (remainder) {
		u32 last_word = DWC2_READ_4(hsotg, fifo);
		const u8 *src = (const u8 *)&last_word;
		u8 *tail = (u8 *)data_buf;

		while (remainder--)
			*tail++ = *src++;
	}
}
2318
2319 /**
2320 * dwc2_dump_host_registers() - Prints the host registers
2321 *
2322 * @hsotg: Programming view of DWC_otg controller
2323 *
2324 * NOTE: This function will be removed once the peripheral controller code
2325 * is integrated and the driver is stable
2326 */
void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DWC2_DEBUG
	bus_size_t addr;
	int i;

	/* Host-mode global registers */
	dev_dbg(hsotg->dev, "Host Global Registers\n");
	addr = HCFG;
	dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HFIR;
	dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HFNUM;
	dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HPTXSTS;
	dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HAINT;
	dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HAINTMSK;
	dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	/* Frame list base is only meaningful with descriptor DMA */
	if (hsotg->core_params->dma_desc_enable > 0) {
		addr = HFLBADDR;
		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	}

	addr = HPRT0;
	dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));

	/* Per-channel register sets */
	for (i = 0; i < hsotg->core_params->host_channels; i++) {
		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
		addr = HCCHAR(i);
		dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCSPLT(i);
		dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCINT(i);
		dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCINTMSK(i);
		dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCTSIZ(i);
		dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCDMA(i);
		dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		/* HCDMAB exists only in descriptor DMA mode */
		if (hsotg->core_params->dma_desc_enable > 0) {
			addr = HCDMAB(i);
			dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
				(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		}
	}
#endif
}
2390
2391 /**
2392 * dwc2_dump_global_registers() - Prints the core global registers
2393 *
2394 * @hsotg: Programming view of DWC_otg controller
2395 *
2396 * NOTE: This function will be removed once the peripheral controller code
2397 * is integrated and the driver is stable
2398 */
void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DWC2_DEBUG
	bus_size_t addr;

	/* Core global register block, dumped in address order */
	dev_dbg(hsotg->dev, "Core Global Registers\n");
	addr = GOTGCTL;
	dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GOTGINT;
	dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GAHBCFG;
	dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GUSBCFG;
	dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GRSTCTL;
	dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GINTSTS;
	dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GINTMSK;
	dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GRXSTSR;
	dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GRXFSIZ;
	dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GNPTXFSIZ;
	dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GNPTXSTS;
	dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GI2CCTL;
	dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GPVNDCTL;
	dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GGPIO;
	dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GUID;
	dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GSNPSID;
	dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GHWCFG1;
	dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GHWCFG2;
	dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GHWCFG3;
	dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GHWCFG4;
	dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GLPMCFG;
	dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GPWRDN;
	dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GDFIFOCFG;
	dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HPTXFSIZ;
	dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));

	/* Power and clock gating control lives outside the global block */
	addr = PCGCTL;
	dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
#endif
}
2483
2484 /**
2485 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
2486 *
2487 * @hsotg: Programming view of DWC_otg controller
2488 * @num: Tx FIFO to flush
2489 */
dwc2_flush_tx_fifo(struct dwc2_hsotg * hsotg,const int num)2490 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
2491 {
2492 u32 greset;
2493 int count = 0;
2494
2495 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
2496
2497 greset = GRSTCTL_TXFFLSH;
2498 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
2499 DWC2_WRITE_4(hsotg, GRSTCTL, greset);
2500
2501 do {
2502 greset = DWC2_READ_4(hsotg, GRSTCTL);
2503 if (++count > 10000) {
2504 dev_warn(hsotg->dev,
2505 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
2506 __func__, greset,
2507 DWC2_READ_4(hsotg, GNPTXSTS));
2508 break;
2509 }
2510 udelay(1);
2511 } while (greset & GRSTCTL_TXFFLSH);
2512
2513 /* Wait for at least 3 PHY Clocks */
2514 udelay(1);
2515 }
2516
2517 /**
2518 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
2519 *
2520 * @hsotg: Programming view of DWC_otg controller
2521 */
dwc2_flush_rx_fifo(struct dwc2_hsotg * hsotg)2522 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
2523 {
2524 u32 greset;
2525 int count = 0;
2526
2527 dev_vdbg(hsotg->dev, "%s()\n", __func__);
2528
2529 greset = GRSTCTL_RXFFLSH;
2530 DWC2_WRITE_4(hsotg, GRSTCTL, greset);
2531
2532 do {
2533 greset = DWC2_READ_4(hsotg, GRSTCTL);
2534 if (++count > 10000) {
2535 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
2536 __func__, greset);
2537 break;
2538 }
2539 udelay(1);
2540 } while (greset & GRSTCTL_RXFFLSH);
2541
2542 /* Wait for at least 3 PHY Clocks */
2543 udelay(1);
2544 }
2545
2546 #define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
2547
2548 /* Parameter access functions */
dwc2_set_param_otg_cap(struct dwc2_hsotg * hsotg,int val)2549 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
2550 {
2551 int valid = 1;
2552
2553 switch (val) {
2554 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
2555 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
2556 valid = 0;
2557 break;
2558 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
2559 switch (hsotg->hw_params.op_mode) {
2560 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2561 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2562 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2563 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2564 break;
2565 default:
2566 valid = 0;
2567 break;
2568 }
2569 break;
2570 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
2571 /* always valid */
2572 break;
2573 default:
2574 valid = 0;
2575 break;
2576 }
2577
2578 if (!valid) {
2579 if (val >= 0)
2580 dev_err(hsotg->dev,
2581 "%d invalid for otg_cap parameter. Check HW configuration.\n",
2582 val);
2583 switch (hsotg->hw_params.op_mode) {
2584 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2585 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
2586 break;
2587 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2588 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2589 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2590 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2591 break;
2592 default:
2593 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2594 break;
2595 }
2596 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
2597 }
2598
2599 hsotg->core_params->otg_cap = val;
2600 }
2601
dwc2_set_param_dma_enable(struct dwc2_hsotg * hsotg,int val)2602 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2603 {
2604 int valid = 1;
2605
2606 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
2607 valid = 0;
2608 if (val < 0)
2609 valid = 0;
2610
2611 if (!valid) {
2612 if (val >= 0)
2613 dev_err(hsotg->dev,
2614 "%d invalid for dma_enable parameter. Check HW configuration.\n",
2615 val);
2616 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
2617 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2618 }
2619
2620 hsotg->core_params->dma_enable = val;
2621 }
2622
dwc2_set_param_dma_desc_enable(struct dwc2_hsotg * hsotg,int val)2623 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2624 {
2625 int valid = 1;
2626
2627 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2628 !hsotg->hw_params.dma_desc_enable))
2629 valid = 0;
2630 if (val < 0)
2631 valid = 0;
2632
2633 if (!valid) {
2634 if (val >= 0)
2635 dev_err(hsotg->dev,
2636 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2637 val);
2638 val = (hsotg->core_params->dma_enable > 0 &&
2639 hsotg->hw_params.dma_desc_enable);
2640 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2641 }
2642
2643 hsotg->core_params->dma_desc_enable = val;
2644 }
2645
dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg * hsotg,int val)2646 void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
2647 {
2648 int valid = 1;
2649
2650 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2651 !hsotg->hw_params.dma_desc_enable))
2652 valid = 0;
2653 if (val < 0)
2654 valid = 0;
2655
2656 if (!valid) {
2657 if (val >= 0)
2658 dev_err(hsotg->dev,
2659 "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
2660 val);
2661 val = (hsotg->core_params->dma_enable > 0 &&
2662 hsotg->hw_params.dma_desc_enable);
2663 }
2664
2665 hsotg->core_params->dma_desc_fs_enable = val;
2666 dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
2667 }
2668
dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg * hsotg,int val)2669 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2670 int val)
2671 {
2672 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2673 if (val >= 0) {
2674 dev_err(hsotg->dev,
2675 "Wrong value for host_support_fs_low_power\n");
2676 dev_err(hsotg->dev,
2677 "host_support_fs_low_power must be 0 or 1\n");
2678 }
2679 val = 0;
2680 dev_dbg(hsotg->dev,
2681 "Setting host_support_fs_low_power to %d\n", val);
2682 }
2683
2684 hsotg->core_params->host_support_fs_ls_low_power = val;
2685 }
2686
dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg * hsotg,int val)2687 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2688 {
2689 int valid = 1;
2690
2691 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
2692 valid = 0;
2693 if (val < 0)
2694 valid = 0;
2695
2696 if (!valid) {
2697 if (val >= 0)
2698 dev_err(hsotg->dev,
2699 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2700 val);
2701 val = hsotg->hw_params.enable_dynamic_fifo;
2702 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2703 }
2704
2705 hsotg->core_params->enable_dynamic_fifo = val;
2706 }
2707
dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg * hsotg,int val)2708 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2709 {
2710 int valid = 1;
2711
2712 if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
2713 valid = 0;
2714
2715 if (!valid) {
2716 if (val >= 0)
2717 dev_err(hsotg->dev,
2718 "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2719 val);
2720 val = hsotg->hw_params.host_rx_fifo_size;
2721 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2722 }
2723
2724 hsotg->core_params->host_rx_fifo_size = val;
2725 }
2726
dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg * hsotg,int val)2727 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2728 {
2729 int valid = 1;
2730
2731 if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
2732 valid = 0;
2733
2734 if (!valid) {
2735 if (val >= 0)
2736 dev_err(hsotg->dev,
2737 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2738 val);
2739 val = hsotg->hw_params.host_nperio_tx_fifo_size;
2740 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2741 val);
2742 }
2743
2744 hsotg->core_params->host_nperio_tx_fifo_size = val;
2745 }
2746
dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg * hsotg,int val)2747 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2748 {
2749 int valid = 1;
2750
2751 if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
2752 valid = 0;
2753
2754 if (!valid) {
2755 if (val >= 0)
2756 dev_err(hsotg->dev,
2757 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2758 val);
2759 val = hsotg->hw_params.host_perio_tx_fifo_size;
2760 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2761 val);
2762 }
2763
2764 hsotg->core_params->host_perio_tx_fifo_size = val;
2765 }
2766
dwc2_set_param_max_transfer_size(struct dwc2_hsotg * hsotg,int val)2767 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2768 {
2769 int valid = 1;
2770
2771 if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
2772 valid = 0;
2773
2774 if (!valid) {
2775 if (val >= 0)
2776 dev_err(hsotg->dev,
2777 "%d invalid for max_transfer_size. Check HW configuration.\n",
2778 val);
2779 val = hsotg->hw_params.max_transfer_size;
2780 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2781 }
2782
2783 hsotg->core_params->max_transfer_size = val;
2784 }
2785
dwc2_set_param_max_packet_count(struct dwc2_hsotg * hsotg,int val)2786 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2787 {
2788 int valid = 1;
2789
2790 if (val < 15 || val > hsotg->hw_params.max_packet_count)
2791 valid = 0;
2792
2793 if (!valid) {
2794 if (val >= 0)
2795 dev_err(hsotg->dev,
2796 "%d invalid for max_packet_count. Check HW configuration.\n",
2797 val);
2798 val = hsotg->hw_params.max_packet_count;
2799 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2800 }
2801
2802 hsotg->core_params->max_packet_count = val;
2803 }
2804
dwc2_set_param_host_channels(struct dwc2_hsotg * hsotg,int val)2805 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2806 {
2807 int valid = 1;
2808
2809 if (val < 1 || val > hsotg->hw_params.host_channels)
2810 valid = 0;
2811
2812 if (!valid) {
2813 if (val >= 0)
2814 dev_err(hsotg->dev,
2815 "%d invalid for host_channels. Check HW configuration.\n",
2816 val);
2817 val = hsotg->hw_params.host_channels;
2818 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2819 }
2820
2821 hsotg->core_params->host_channels = val;
2822 }
2823
dwc2_set_param_phy_type(struct dwc2_hsotg * hsotg,int val)2824 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
2825 {
2826 int valid = 0;
2827 u32 hs_phy_type, fs_phy_type;
2828
2829 if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
2830 DWC2_PHY_TYPE_PARAM_ULPI)) {
2831 if (val >= 0) {
2832 dev_err(hsotg->dev, "Wrong value for phy_type\n");
2833 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2834 }
2835
2836 valid = 0;
2837 }
2838
2839 hs_phy_type = hsotg->hw_params.hs_phy_type;
2840 fs_phy_type = hsotg->hw_params.fs_phy_type;
2841 if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2842 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2843 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2844 valid = 1;
2845 else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2846 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2847 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2848 valid = 1;
2849 else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2850 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2851 valid = 1;
2852
2853 if (!valid) {
2854 if (val >= 0)
2855 dev_err(hsotg->dev,
2856 "%d invalid for phy_type. Check HW configuration.\n",
2857 val);
2858 val = DWC2_PHY_TYPE_PARAM_FS;
2859 if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2860 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2861 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2862 val = DWC2_PHY_TYPE_PARAM_UTMI;
2863 else
2864 val = DWC2_PHY_TYPE_PARAM_ULPI;
2865 }
2866 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2867 }
2868
2869 hsotg->core_params->phy_type = val;
2870 }
2871
/* Accessor for the currently-configured phy_type core parameter */
static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
{
	return hsotg->core_params->phy_type;
}
2876
dwc2_set_param_speed(struct dwc2_hsotg * hsotg,int val)2877 void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2878 {
2879 int valid = 1;
2880
2881 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2882 if (val >= 0) {
2883 dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2884 dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
2885 }
2886 valid = 0;
2887 }
2888
2889 if (val == DWC2_SPEED_PARAM_HIGH &&
2890 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2891 valid = 0;
2892
2893 if (!valid) {
2894 if (val >= 0)
2895 dev_err(hsotg->dev,
2896 "%d invalid for speed parameter. Check HW configuration.\n",
2897 val);
2898 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
2899 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
2900 dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2901 }
2902
2903 hsotg->core_params->speed = val;
2904 }
2905
dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg * hsotg,int val)2906 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2907 {
2908 int valid = 1;
2909
2910 if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2911 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2912 if (val >= 0) {
2913 dev_err(hsotg->dev,
2914 "Wrong value for host_ls_low_power_phy_clk parameter\n");
2915 dev_err(hsotg->dev,
2916 "host_ls_low_power_phy_clk must be 0 or 1\n");
2917 }
2918 valid = 0;
2919 }
2920
2921 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2922 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2923 valid = 0;
2924
2925 if (!valid) {
2926 if (val >= 0)
2927 dev_err(hsotg->dev,
2928 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2929 val);
2930 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2931 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2932 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2933 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2934 val);
2935 }
2936
2937 hsotg->core_params->host_ls_low_power_phy_clk = val;
2938 }
2939
dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg * hsotg,int val)2940 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2941 {
2942 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2943 if (val >= 0) {
2944 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2945 dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
2946 }
2947 val = 0;
2948 dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
2949 }
2950
2951 hsotg->core_params->phy_ulpi_ddr = val;
2952 }
2953
dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg * hsotg,int val)2954 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2955 {
2956 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2957 if (val >= 0) {
2958 dev_err(hsotg->dev,
2959 "Wrong value for phy_ulpi_ext_vbus\n");
2960 dev_err(hsotg->dev,
2961 "phy_ulpi_ext_vbus must be 0 or 1\n");
2962 }
2963 val = 0;
2964 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2965 }
2966
2967 hsotg->core_params->phy_ulpi_ext_vbus = val;
2968 }
2969
dwc2_set_param_phy_utmi_width(struct dwc2_hsotg * hsotg,int val)2970 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2971 {
2972 int valid = 0;
2973
2974 switch (hsotg->hw_params.utmi_phy_data_width) {
2975 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2976 valid = (val == 8);
2977 break;
2978 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2979 valid = (val == 16);
2980 break;
2981 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2982 valid = (val == 8 || val == 16);
2983 break;
2984 }
2985
2986 if (!valid) {
2987 if (val >= 0) {
2988 dev_err(hsotg->dev,
2989 "%d invalid for phy_utmi_width. Check HW configuration.\n",
2990 val);
2991 }
2992 val = (hsotg->hw_params.utmi_phy_data_width ==
2993 GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
2994 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2995 }
2996
2997 hsotg->core_params->phy_utmi_width = val;
2998 }
2999
dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg * hsotg,int val)3000 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
3001 {
3002 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3003 if (val >= 0) {
3004 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
3005 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
3006 }
3007 val = 0;
3008 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
3009 }
3010
3011 hsotg->core_params->ulpi_fs_ls = val;
3012 }
3013
dwc2_set_param_ts_dline(struct dwc2_hsotg * hsotg,int val)3014 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
3015 {
3016 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3017 if (val >= 0) {
3018 dev_err(hsotg->dev, "Wrong value for ts_dline\n");
3019 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
3020 }
3021 val = 0;
3022 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
3023 }
3024
3025 hsotg->core_params->ts_dline = val;
3026 }
3027
dwc2_set_param_i2c_enable(struct dwc2_hsotg * hsotg,int val)3028 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
3029 {
3030 int valid = 1;
3031
3032 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3033 if (val >= 0) {
3034 dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
3035 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
3036 }
3037
3038 valid = 0;
3039 }
3040
3041 if (val == 1 && !(hsotg->hw_params.i2c_enable))
3042 valid = 0;
3043
3044 if (!valid) {
3045 if (val >= 0)
3046 dev_err(hsotg->dev,
3047 "%d invalid for i2c_enable. Check HW configuration.\n",
3048 val);
3049 val = hsotg->hw_params.i2c_enable;
3050 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
3051 }
3052
3053 hsotg->core_params->i2c_enable = val;
3054 }
3055
dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg * hsotg,int val)3056 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
3057 {
3058 int valid = 1;
3059
3060 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3061 if (val >= 0) {
3062 dev_err(hsotg->dev,
3063 "Wrong value for en_multiple_tx_fifo,\n");
3064 dev_err(hsotg->dev,
3065 "en_multiple_tx_fifo must be 0 or 1\n");
3066 }
3067 valid = 0;
3068 }
3069
3070 if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
3071 valid = 0;
3072
3073 if (!valid) {
3074 if (val >= 0)
3075 dev_err(hsotg->dev,
3076 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
3077 val);
3078 val = hsotg->hw_params.en_multiple_tx_fifo;
3079 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
3080 }
3081
3082 hsotg->core_params->en_multiple_tx_fifo = val;
3083 }
3084
dwc2_set_param_reload_ctl(struct dwc2_hsotg * hsotg,int val)3085 void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
3086 {
3087 int valid = 1;
3088
3089 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3090 if (val >= 0) {
3091 dev_err(hsotg->dev,
3092 "'%d' invalid for parameter reload_ctl\n", val);
3093 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
3094 }
3095 valid = 0;
3096 }
3097
3098 if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
3099 valid = 0;
3100
3101 if (!valid) {
3102 if (val >= 0)
3103 dev_err(hsotg->dev,
3104 "%d invalid for parameter reload_ctl. Check HW configuration.\n",
3105 val);
3106 val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
3107 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
3108 }
3109
3110 hsotg->core_params->reload_ctl = val;
3111 }
3112
dwc2_set_param_ahbcfg(struct dwc2_hsotg * hsotg,int val)3113 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
3114 {
3115 if (val != -1)
3116 hsotg->core_params->ahbcfg = val;
3117 else
3118 hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
3119 GAHBCFG_HBSTLEN_SHIFT;
3120 }
3121
dwc2_set_param_otg_ver(struct dwc2_hsotg * hsotg,int val)3122 void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
3123 {
3124 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3125 if (val >= 0) {
3126 dev_err(hsotg->dev,
3127 "'%d' invalid for parameter otg_ver\n", val);
3128 dev_err(hsotg->dev,
3129 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
3130 }
3131 val = 0;
3132 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
3133 }
3134
3135 hsotg->core_params->otg_ver = val;
3136 }
3137
dwc2_set_param_uframe_sched(struct dwc2_hsotg * hsotg,int val)3138 static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
3139 {
3140 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3141 if (val >= 0) {
3142 dev_err(hsotg->dev,
3143 "'%d' invalid for parameter uframe_sched\n",
3144 val);
3145 dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
3146 }
3147 val = 1;
3148 dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
3149 }
3150
3151 hsotg->core_params->uframe_sched = val;
3152 }
3153
dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg * hsotg,int val)3154 static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
3155 int val)
3156 {
3157 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3158 if (val >= 0) {
3159 dev_err(hsotg->dev,
3160 "'%d' invalid for parameter external_id_pin_ctl\n",
3161 val);
3162 dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
3163 }
3164 val = 0;
3165 dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
3166 }
3167
3168 hsotg->core_params->external_id_pin_ctl = val;
3169 }
3170
dwc2_set_param_hibernation(struct dwc2_hsotg * hsotg,int val)3171 static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
3172 int val)
3173 {
3174 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3175 if (val >= 0) {
3176 dev_err(hsotg->dev,
3177 "'%d' invalid for parameter hibernation\n",
3178 val);
3179 dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
3180 }
3181 val = 0;
3182 dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
3183 }
3184
3185 hsotg->core_params->hibernation = val;
3186 }
3187
3188 /*
3189 * This function is called during module intialization to pass module parameters
3190 * for the DWC_otg core.
3191 */
void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
			 const struct dwc2_core_params *params)
{
	dev_dbg(hsotg->dev, "%s()\n", __func__);

	/*
	 * Each dwc2_set_param_*() call validates its value against the
	 * detected hardware capabilities and substitutes a default when
	 * the requested value is invalid (a negative value selects the
	 * default silently).
	 *
	 * NOTE(review): the call order is significant for some entries:
	 * phy_type is read back (via dwc2_get_param_phy_type()) by the
	 * speed and host_ls_low_power_phy_clk setters, so it must be
	 * set before them.  Do not reorder without checking such
	 * dependencies.
	 */
	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
	dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
			params->host_support_fs_ls_low_power);
	dwc2_set_param_enable_dynamic_fifo(hsotg,
			params->enable_dynamic_fifo);
	dwc2_set_param_host_rx_fifo_size(hsotg,
			params->host_rx_fifo_size);
	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
			params->host_nperio_tx_fifo_size);
	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
			params->host_perio_tx_fifo_size);
	dwc2_set_param_max_transfer_size(hsotg,
			params->max_transfer_size);
	dwc2_set_param_max_packet_count(hsotg,
			params->max_packet_count);
	dwc2_set_param_host_channels(hsotg, params->host_channels);
	dwc2_set_param_phy_type(hsotg, params->phy_type);
	dwc2_set_param_speed(hsotg, params->speed);
	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
			params->host_ls_low_power_phy_clk);
	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
			params->phy_ulpi_ext_vbus);
	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
	dwc2_set_param_en_multiple_tx_fifo(hsotg,
			params->en_multiple_tx_fifo);
	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
	dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
	dwc2_set_param_hibernation(hsotg, params->hibernation);
}
3236
3237 /*
3238 * Forces either host or device mode if the controller is not
3239 * currently in that mode.
3240 *
3241 * Returns true if the mode was forced.
3242 */
static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
{
	bool already_set = host ? dwc2_is_host_mode(hsotg)
				: dwc2_is_device_mode(hsotg);

	/* Nothing to do when the controller is already in the target mode */
	if (already_set)
		return false;

	return dwc2_force_mode(hsotg, host);
}
3252
3253 /*
3254 * Gets host hardware parameters. Forces host mode if not currently in
3255 * host mode. Should be called immediately after a core soft reset in
3256 * order to get the reset values.
3257 */
static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	u32 gnptxfsiz;
	u32 hptxfsiz;
	bool forced;

	/* A peripheral-only configuration never runs in host mode */
	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
		return;

	/* The register reads below must happen while in host mode */
	forced = dwc2_force_mode_if_needed(hsotg, true);

	gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
	hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ);
	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);

	/* Restore the controller's mode selection if we changed it */
	if (forced)
		dwc2_clear_force_mode(hsotg);

	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
				       FIFOSIZE_DEPTH_SHIFT;
	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
				      FIFOSIZE_DEPTH_SHIFT;
}
3283
3284 /*
3285 * Gets device hardware parameters. Forces device mode if not
3286 * currently in device mode. Should be called immediately after a core
3287 * soft reset in order to get the reset values.
3288 */
static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	bool forced;
	u32 gnptxfsiz;

	/* A host-only configuration never runs in device mode */
	if (hsotg->dr_mode == USB_DR_MODE_HOST)
		return;

	/* The register read below must happen while in device mode */
	forced = dwc2_force_mode_if_needed(hsotg, false);

	gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);

	/* Restore the controller's mode selection if we changed it */
	if (forced)
		dwc2_clear_force_mode(hsotg);

	hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
				      FIFOSIZE_DEPTH_SHIFT;
}
3309
3310 /**
3311 * During device initialization, read various hardware configuration
3312 * registers and interpret the contents.
3313 */
int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	unsigned width;
	u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
	u32 grxfsiz;

	/*
	 * Attempt to ensure this device is really a DWC_otg Controller.
	 * Read and verify the GSNPSID register contents. The value should be
	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or
	 * "OT3", as in "OTG version 2.xx" or "OTG version 3.xx".
	 * (Comment fixed to match the masks checked below; it previously
	 * said 0x45f42xxx/0x45f43xxx.)
	 */
	hw->snpsid = DWC2_READ_4(hsotg, GSNPSID);
	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
			hw->snpsid);
		return -ENODEV;
	}

	/* Print the release as individual nibbles, e.g. 2.9 4 a */
	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);

	hwcfg1 = DWC2_READ_4(hsotg, GHWCFG1);
	hwcfg2 = DWC2_READ_4(hsotg, GHWCFG2);
	hwcfg3 = DWC2_READ_4(hsotg, GHWCFG3);
	hwcfg4 = DWC2_READ_4(hsotg, GHWCFG4);
	grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);

	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);

	/*
	 * Host specific hardware parameters. Reading these parameters
	 * requires the controller to be in host mode. The mode will
	 * be forced, if necessary, to read these values.
	 */
	dwc2_get_host_hwparams(hsotg);
	dwc2_get_dev_hwparams(hsotg);

	/* hwcfg1 */
	hw->dev_ep_dirs = hwcfg1;

	/* hwcfg2 */
	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
		      GHWCFG2_OP_MODE_SHIFT;
	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
		   GHWCFG2_ARCHITECTURE_SHIFT;
	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
	/* Field is zero-based; +1 gives the actual channel count */
	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
				 GHWCFG2_NUM_HOST_CHAN_SHIFT);
	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
			  GHWCFG2_HS_PHY_TYPE_SHIFT;
	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
			  GHWCFG2_FS_PHY_TYPE_SHIFT;
	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
			 GHWCFG2_NUM_DEV_EP_SHIFT;
	/*
	 * NOTE(review): the extracted queue-depth fields are doubled
	 * (<< 1) below — presumably the register encodes depth/2;
	 * confirm against the DWC_otg databook.
	 */
	hw->nperio_tx_q_depth =
		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
	hw->host_perio_tx_q_depth =
		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
	hw->dev_token_q_depth =
		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;

	/* hwcfg3 */
	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
	hw->max_transfer_size = (1 << (width + 11)) - 1;
	/*
	 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
	 * coherent buffers with this size, and if it's too large we can
	 * exhaust the coherent DMA pool.
	 */
	if (hw->max_transfer_size > 65535)
		hw->max_transfer_size = 65535;
	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
	hw->max_packet_count = (1 << (width + 4)) - 1;
	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
			      GHWCFG3_DFIFO_DEPTH_SHIFT;

	/* hwcfg4 */
	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;

	/* fifo sizes */
	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
				GRXFSIZ_DEPTH_SHIFT;

	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
	dev_dbg(hsotg->dev, "  op_mode=%d\n",
		hw->op_mode);
	dev_dbg(hsotg->dev, "  arch=%d\n",
		hw->arch);
	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
		hw->dma_desc_enable);
	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
		hw->power_optimized);
	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
		hw->i2c_enable);
	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
		hw->hs_phy_type);
	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
		hw->fs_phy_type);
	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
		hw->utmi_phy_data_width);
	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
		hw->num_dev_ep);
	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
		hw->num_dev_perio_in_ep);
	dev_dbg(hsotg->dev, "  host_channels=%d\n",
		hw->host_channels);
	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
		hw->max_transfer_size);
	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
		hw->max_packet_count);
	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
		hw->nperio_tx_q_depth);
	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
		hw->host_perio_tx_q_depth);
	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
		hw->dev_token_q_depth);
	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
		hw->enable_dynamic_fifo);
	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
		hw->en_multiple_tx_fifo);
	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
		hw->total_fifo_size);
	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
		hw->host_rx_fifo_size);
	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
		hw->host_nperio_tx_fifo_size);
	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
		hw->host_perio_tx_fifo_size);
	dev_dbg(hsotg->dev, "\n");

	return 0;
}
3466
3467 /*
3468 * Sets all parameters to the given value.
3469 *
3470 * Assumes that the dwc2_core_params struct contains only integers.
3471 */
dwc2_set_all_params(struct dwc2_core_params * params,int value)3472 void dwc2_set_all_params(struct dwc2_core_params *params, int value)
3473 {
3474 int *p = (int *)params;
3475 size_t size = sizeof(*params) / sizeof(*p);
3476 int i;
3477
3478 for (i = 0; i < size; i++)
3479 p[i] = value;
3480 }
3481
3482
dwc2_get_otg_version(struct dwc2_hsotg * hsotg)3483 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
3484 {
3485 return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
3486 }
3487
dwc2_is_controller_alive(struct dwc2_hsotg * hsotg)3488 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
3489 {
3490 if (DWC2_READ_4(hsotg, GSNPSID) == 0xffffffff)
3491 return false;
3492 else
3493 return true;
3494 }
3495
3496 /**
3497 * dwc2_enable_global_interrupts() - Enables the controller's Global
3498 * Interrupt in the AHB Config register
3499 *
3500 * @hsotg: Programming view of DWC_otg controller
3501 */
dwc2_enable_global_interrupts(struct dwc2_hsotg * hsotg)3502 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
3503 {
3504 u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
3505
3506 ahbcfg |= GAHBCFG_GLBL_INTR_EN;
3507 DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
3508 }
3509
3510 /**
3511 * dwc2_disable_global_interrupts() - Disables the controller's Global
3512 * Interrupt in the AHB Config register
3513 *
3514 * @hsotg: Programming view of DWC_otg controller
3515 */
dwc2_disable_global_interrupts(struct dwc2_hsotg * hsotg)3516 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
3517 {
3518 u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
3519
3520 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
3521 DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
3522 }
3523
3524 /* Returns the controller's GHWCFG2.OTG_MODE. */
dwc2_op_mode(struct dwc2_hsotg * hsotg)3525 unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg)
3526 {
3527 u32 ghwcfg2 = DWC2_READ_4(hsotg, GHWCFG2);
3528
3529 return (ghwcfg2 & GHWCFG2_OP_MODE_MASK) >>
3530 GHWCFG2_OP_MODE_SHIFT;
3531 }
3532
3533 /* Returns true if the controller is capable of DRD. */
dwc2_hw_is_otg(struct dwc2_hsotg * hsotg)3534 bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg)
3535 {
3536 unsigned op_mode = dwc2_op_mode(hsotg);
3537
3538 return (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) ||
3539 (op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE) ||
3540 (op_mode == GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE);
3541 }
3542
3543 /* Returns true if the controller is host-only. */
dwc2_hw_is_host(struct dwc2_hsotg * hsotg)3544 bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg)
3545 {
3546 unsigned op_mode = dwc2_op_mode(hsotg);
3547
3548 return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) ||
3549 (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST);
3550 }
3551
3552 /* Returns true if the controller is device-only. */
dwc2_hw_is_device(struct dwc2_hsotg * hsotg)3553 bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg)
3554 {
3555 unsigned op_mode = dwc2_op_mode(hsotg);
3556
3557 return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) ||
3558 (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE);
3559 }
3560