1 /* $OpenBSD: dwc2_hcd.c,v 1.29 2022/09/18 21:12:19 mglocker Exp $ */
2 /* $NetBSD: dwc2_hcd.c,v 1.15 2014/11/24 10:14:14 skrll Exp $ */
3
4 /*
5 * hcd.c - DesignWare HS OTG Controller host-mode routines
6 *
7 * Copyright (C) 2004-2013 Synopsys, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") as published by the Free Software
24 * Foundation; either version 2 of the License, or (at your option) any
25 * later version.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
28 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
29 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * This file contains the core HCD code, and implements the Linux hc_driver
42 * API
43 */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/signal.h>
48 #include <sys/proc.h>
49 #include <sys/pool.h>
50 #include <sys/task.h>
51
52 #include <machine/bus.h>
53
54 #include <dev/usb/usb.h>
55 #include <dev/usb/usbdi.h>
56 #include <dev/usb/usbdivar.h>
57 #include <dev/usb/usb_mem.h>
58
59 #include <dev/usb/dwc2/dwc2.h>
60 #include <dev/usb/dwc2/dwc2var.h>
61
62 #include <dev/usb/dwc2/dwc2_core.h>
63 #include <dev/usb/dwc2/dwc2_hcd.h>
64
65 /*
66 * =========================================================================
67 * Host Core Layer Functions
68 * =========================================================================
69 */
70
71 /**
 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
73 * used in both device and host modes
74 *
75 * @hsotg: Programming view of the DWC_otg controller
76 */
dwc2_enable_common_interrupts(struct dwc2_hsotg * hsotg)77 STATIC void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
78 {
79 u32 intmsk;
80
81 /* Clear any pending OTG Interrupts */
82 dwc2_writel(hsotg, 0xffffffff, GOTGINT);
83
84 /* Clear any pending interrupts */
85 dwc2_writel(hsotg, 0xffffffff, GINTSTS);
86
87 /* Enable the interrupts in the GINTMSK */
88 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
89
90 if (!hsotg->params.host_dma)
91 intmsk |= GINTSTS_RXFLVL;
92 if (!hsotg->params.external_id_pin_ctl)
93 intmsk |= GINTSTS_CONIDSTSCHNG;
94
95 intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
96 GINTSTS_SESSREQINT;
97
98 if (dwc2_is_device_mode(hsotg) && hsotg->params.lpm)
99 intmsk |= GINTSTS_LPMTRANRCVD;
100
101 dwc2_writel(hsotg, intmsk, GINTMSK);
102 }
103
dwc2_gahbcfg_init(struct dwc2_hsotg * hsotg)104 STATIC int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
105 {
106 struct dwc2_softc *sc = hsotg->hsotg_sc;
107 u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
108
109 switch (hsotg->hw_params.arch) {
110 case GHWCFG2_EXT_DMA_ARCH:
111 dev_dbg(hsotg->dev, "External DMA Mode\n");
112 if (!sc->sc_set_dma_addr) {
113 dev_err(hsotg->dev,
114 "External DMA Mode not supported\n");
115 return -EINVAL;
116 }
117 if (hsotg->params.ahbcfg != -1) {
118 ahbcfg &= GAHBCFG_CTRL_MASK;
119 ahbcfg |= hsotg->params.ahbcfg &
120 ~GAHBCFG_CTRL_MASK;
121 }
122 break;
123
124 case GHWCFG2_INT_DMA_ARCH:
125 dev_dbg(hsotg->dev, "Internal DMA Mode\n");
126 if (hsotg->params.ahbcfg != -1) {
127 ahbcfg &= GAHBCFG_CTRL_MASK;
128 ahbcfg |= hsotg->params.ahbcfg &
129 ~GAHBCFG_CTRL_MASK;
130 }
131 break;
132
133 case GHWCFG2_SLAVE_ONLY_ARCH:
134 default:
135 dev_dbg(hsotg->dev, "Slave Only Mode\n");
136 break;
137 }
138
139 if (hsotg->params.host_dma)
140 ahbcfg |= GAHBCFG_DMA_EN;
141 else
142 hsotg->params.dma_desc_enable = false;
143
144 dwc2_writel(hsotg, ahbcfg, GAHBCFG);
145
146 return 0;
147 }
148
dwc2_gusbcfg_init(struct dwc2_hsotg * hsotg)149 STATIC void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
150 {
151 u32 usbcfg;
152
153 usbcfg = dwc2_readl(hsotg, GUSBCFG);
154 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
155
156 switch (hsotg->hw_params.op_mode) {
157 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
158 if (hsotg->params.otg_caps.hnp_support &&
159 hsotg->params.otg_caps.srp_support)
160 usbcfg |= GUSBCFG_HNPCAP;
161 // fallthrough;
162
163 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
164 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
165 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
166 if (hsotg->params.otg_caps.srp_support)
167 usbcfg |= GUSBCFG_SRPCAP;
168 break;
169
170 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
171 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
172 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
173 default:
174 break;
175 }
176
177 dwc2_writel(hsotg, usbcfg, GUSBCFG);
178 }
179
static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg)
{
	/*
	 * The Linux original enables an optional VBUS regulator here.
	 * This port has no regulator framework, so the call is a no-op
	 * that always reports success.
	 */
	return 0;
}
189
static int dwc2_vbus_supply_exit(struct dwc2_hsotg *hsotg)
{
	/*
	 * Counterpart of dwc2_vbus_supply_init(): Linux disables the
	 * VBUS regulator here; with no regulator support on this port
	 * there is nothing to tear down.
	 */
	return 0;
}
199
200 /**
201 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
202 *
203 * @hsotg: Programming view of DWC_otg controller
204 */
dwc2_enable_host_interrupts(struct dwc2_hsotg * hsotg)205 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
206 {
207 u32 intmsk;
208
209 dev_dbg(hsotg->dev, "%s()\n", __func__);
210
211 /* Disable all interrupts */
212 dwc2_writel(hsotg, 0, GINTMSK);
213 dwc2_writel(hsotg, 0, HAINTMSK);
214
215 /* Enable the common interrupts */
216 dwc2_enable_common_interrupts(hsotg);
217
218 /* Enable host mode interrupts without disturbing common interrupts */
219 intmsk = dwc2_readl(hsotg, GINTMSK);
220 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
221 dwc2_writel(hsotg, intmsk, GINTMSK);
222 }
223
224 /**
225 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
226 *
227 * @hsotg: Programming view of DWC_otg controller
228 */
dwc2_disable_host_interrupts(struct dwc2_hsotg * hsotg)229 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
230 {
231 u32 intmsk = dwc2_readl(hsotg, GINTMSK);
232
233 /* Disable host mode interrupts without disturbing common interrupts */
234 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
235 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
236 dwc2_writel(hsotg, intmsk, GINTMSK);
237 }
238
239 /*
240 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
 * for systems that have a total fifo depth that is smaller than the default
242 * RX + TX fifo size.
243 *
244 * @hsotg: Programming view of DWC_otg controller
245 */
dwc2_calculate_dynamic_fifo(struct dwc2_hsotg * hsotg)246 STATIC void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
247 {
248 struct dwc2_core_params *params = &hsotg->params;
249 struct dwc2_hw_params *hw = &hsotg->hw_params;
250 u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
251
252 total_fifo_size = hw->total_fifo_size;
253 rxfsiz = params->host_rx_fifo_size;
254 nptxfsiz = params->host_nperio_tx_fifo_size;
255 ptxfsiz = params->host_perio_tx_fifo_size;
256
257 /*
258 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
259 * allocation with support for high bandwidth endpoints. Synopsys
260 * defines MPS(Max Packet size) for a periodic EP=1024, and for
261 * non-periodic as 512.
262 */
263 if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
264 /*
265 * For Buffer DMA mode/Scatter Gather DMA mode
266 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
267 * with n = number of host channel.
268 * 2 * ((1024/4) + 2) = 516
269 */
270 rxfsiz = 516 + hw->host_channels;
271
272 /*
273 * min non-periodic tx fifo depth
274 * 2 * (largest non-periodic USB packet used / 4)
275 * 2 * (512/4) = 256
276 */
277 nptxfsiz = 256;
278
279 /*
280 * min periodic tx fifo depth
281 * (largest packet size*MC)/4
282 * (1024 * 3)/4 = 768
283 */
284 ptxfsiz = 768;
285
286 params->host_rx_fifo_size = rxfsiz;
287 params->host_nperio_tx_fifo_size = nptxfsiz;
288 params->host_perio_tx_fifo_size = ptxfsiz;
289 }
290
291 /*
292 * If the summation of RX, NPTX and PTX fifo sizes is still
293 * bigger than the total_fifo_size, then we have a problem.
294 *
295 * We won't be able to allocate as many endpoints. Right now,
296 * we're just printing an error message, but ideally this FIFO
297 * allocation algorithm would be improved in the future.
298 *
299 * FIXME improve this FIFO allocation algorithm.
300 */
301 if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
302 dev_err(hsotg->dev, "invalid fifo sizes\n");
303 }
304
dwc2_config_fifos(struct dwc2_hsotg * hsotg)305 STATIC void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
306 {
307 struct dwc2_core_params *params = &hsotg->params;
308 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
309
310 if (!params->enable_dynamic_fifo)
311 return;
312
313 dwc2_calculate_dynamic_fifo(hsotg);
314
315 /* Rx FIFO */
316 grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
317 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
318 grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
319 grxfsiz |= params->host_rx_fifo_size <<
320 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
321 dwc2_writel(hsotg, grxfsiz, GRXFSIZ);
322 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
323 dwc2_readl(hsotg, GRXFSIZ));
324
325 /* Non-periodic Tx FIFO */
326 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
327 dwc2_readl(hsotg, GNPTXFSIZ));
328 nptxfsiz = params->host_nperio_tx_fifo_size <<
329 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
330 nptxfsiz |= params->host_rx_fifo_size <<
331 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
332 dwc2_writel(hsotg, nptxfsiz, GNPTXFSIZ);
333 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
334 dwc2_readl(hsotg, GNPTXFSIZ));
335
336 /* Periodic Tx FIFO */
337 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
338 dwc2_readl(hsotg, HPTXFSIZ));
339 hptxfsiz = params->host_perio_tx_fifo_size <<
340 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
341 hptxfsiz |= (params->host_rx_fifo_size +
342 params->host_nperio_tx_fifo_size) <<
343 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
344 dwc2_writel(hsotg, hptxfsiz, HPTXFSIZ);
345 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
346 dwc2_readl(hsotg, HPTXFSIZ));
347
348 if (hsotg->params.en_multiple_tx_fifo &&
349 hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_91a) {
350 /*
351 * This feature was implemented in 2.91a version
352 * Global DFIFOCFG calculation for Host mode -
353 * include RxFIFO, NPTXFIFO and HPTXFIFO
354 */
355 dfifocfg = dwc2_readl(hsotg, GDFIFOCFG);
356 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
357 dfifocfg |= (params->host_rx_fifo_size +
358 params->host_nperio_tx_fifo_size +
359 params->host_perio_tx_fifo_size) <<
360 GDFIFOCFG_EPINFOBASE_SHIFT &
361 GDFIFOCFG_EPINFOBASE_MASK;
362 dwc2_writel(hsotg, dfifocfg, GDFIFOCFG);
363 }
364 }
365
366 /**
367 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
368 * the HFIR register according to PHY type and speed
369 *
370 * @hsotg: Programming view of DWC_otg controller
371 *
372 * NOTE: The caller can modify the value of the HFIR register only after the
373 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
374 * has been set
375 */
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;
	u32 hprt0;
	int clock = 60;	/* default value (MHz) */

	usbcfg = dwc2_readl(hsotg, GUSBCFG);
	hprt0 = dwc2_readl(hsotg, HPRT0);

	/*
	 * Derive the PHY clock from the PHY selection bits in GUSBCFG and
	 * the FS PHY type reported by the hardware.  These checks are
	 * deliberately sequential (not else-if): a later match overrides
	 * an earlier one, so their order must be preserved.
	 */
	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
	    !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;	/* ULPI, 8-bit interface */
	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
		clock = 48;	/* FS PHY shared with ULPI */
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 30;	/* UTMI+, 16-bit interface */
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;	/* UTMI+, 8-bit interface */
	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 48;	/* UTMI+, 16-bit, low-power clock select */
	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
		clock = 48;	/* FS PHY shared with UTMI+ */
	if ((usbcfg & GUSBCFG_PHYSEL) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		clock = 48;	/* dedicated FS PHY */

	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
		/* High speed case: 125 us per (micro)frame */
		return 125 * clock - 1;

	/* FS/LS case: 1 ms per frame */
	return 1000 * clock - 1;
}
414
415 /**
416 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
417 * buffer
418 *
419 * @hsotg: Programming view of DWC_otg controller
420 * @dest: Destination buffer for the packet
421 * @bytes: Number of bytes to copy to the destination
422 */
void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
{
	u32 *buf = (u32 *)dest;
	int words = (bytes + 3) / 4;

	/*
	 * Todo: Account for the case where dest is not dword aligned. This
	 * requires reading data from the FIFO into a u32 temp buffer, then
	 * moving it into the data buffer.
	 *
	 * Note the rounded-up word count may store up to 3 bytes past
	 * "bytes"; callers must size dest accordingly.
	 */
	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);

	while (words-- > 0)
		*buf++ = dwc2_readl(hsotg, HCFIFO(0));
}
440
441 /**
442 * dwc2_dump_channel_info() - Prints the state of a host channel
443 *
444 * @hsotg: Programming view of DWC_otg controller
445 * @chan: Pointer to the channel to dump
446 *
447 * Must be called with interrupt disabled and spinlock held
448 *
449 * NOTE: This function will be removed once the peripheral controller code
450 * is integrated and the driver is stable
451 */
dwc2_dump_channel_info(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)452 STATIC void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
453 struct dwc2_host_chan *chan)
454 {
455 #ifdef VERBOSE_DEBUG
456 int num_channels = hsotg->params.host_channels;
457 struct dwc2_qh *qh;
458 u32 hcchar;
459 u32 hcsplt;
460 u32 hctsiz;
461 u32 hc_dma;
462 int i;
463
464 if (!chan)
465 return;
466
467 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
468 hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));
469 hctsiz = dwc2_readl(hsotg, HCTSIZ(chan->hc_num));
470 hc_dma = dwc2_readl(hsotg, HCDMA(chan->hc_num));
471
472 dev_dbg(hsotg->dev, " Assigned to channel %p:\n", chan);
473 dev_dbg(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n",
474 hcchar, hcsplt);
475 dev_dbg(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n",
476 hctsiz, hc_dma);
477 dev_dbg(hsotg->dev, " dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
478 chan->dev_addr, chan->ep_num, chan->ep_is_in);
479 dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
480 dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
481 dev_dbg(hsotg->dev, " data_pid_start: %d\n", chan->data_pid_start);
482 dev_dbg(hsotg->dev, " xfer_started: %d\n", chan->xfer_started);
483 dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
484 dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
485 dev_dbg(hsotg->dev, " xfer_dma: %08lx\n",
486 (unsigned long)chan->xfer_dma);
487 dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
488 dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
489 dev_dbg(hsotg->dev, " NP inactive sched:\n");
490 list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive,
491 qh_list_entry)
492 dev_dbg(hsotg->dev, " %p\n", qh);
493 dev_dbg(hsotg->dev, " NP waiting sched:\n");
494 list_for_each_entry(qh, &hsotg->non_periodic_sched_waiting,
495 qh_list_entry)
496 dev_dbg(hsotg->dev, " %p\n", qh);
497 dev_dbg(hsotg->dev, " NP active sched:\n");
498 list_for_each_entry(qh, &hsotg->non_periodic_sched_active,
499 qh_list_entry)
500 dev_dbg(hsotg->dev, " %p\n", qh);
501 dev_dbg(hsotg->dev, " Channels:\n");
502 for (i = 0; i < num_channels; i++) {
503 struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
504
505 dev_dbg(hsotg->dev, " %2d: %p\n", i, chan);
506 }
507 #endif /* VERBOSE_DEBUG */
508 }
509
510 static int _dwc2_hcd_start(struct dwc2_hsotg *hsotg);
511
/* Bring the host controller up; thin wrapper around _dwc2_hcd_start(). */
static void dwc2_host_start(struct dwc2_hsotg *hsotg)
{
#if 0
	struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);

	hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
#endif
	/* Linux also records B-host status here; not ported (see #if 0) */
	_dwc2_hcd_start(hsotg);
}
521
/*
 * Host-side disconnect hook.  The Linux original only clears the B-host
 * flag, which has no equivalent on this port, so the body is empty.
 */
static void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
{
#if 0
	struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);

	hcd->self.is_b_host = 0;
#endif
}
530
dwc2_host_hub_info(struct dwc2_hsotg * hsotg,void * context,int * hub_addr,int * hub_port)531 void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
532 int *hub_addr, int *hub_port)
533 {
534 struct usbd_xfer *xfer = context;
535 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
536 struct usbd_device *dev = dpipe->pipe.device;
537
538 if (dev->myhsport->tt)
539 *hub_addr = dev->myhsport->parent->address;
540 else
541 *hub_addr = 0;
542 *hub_port = dev->myhsport->portno;
543 }
544
545 /*
546 * =========================================================================
547 * Low Level Host Channel Access Functions
548 * =========================================================================
549 */
550
dwc2_hc_enable_slave_ints(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)551 STATIC void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
552 struct dwc2_host_chan *chan)
553 {
554 u32 hcintmsk = HCINTMSK_CHHLTD;
555
556 switch (chan->ep_type) {
557 case USB_ENDPOINT_XFER_CONTROL:
558 case USB_ENDPOINT_XFER_BULK:
559 dev_vdbg(hsotg->dev, "control/bulk\n");
560 hcintmsk |= HCINTMSK_XFERCOMPL;
561 hcintmsk |= HCINTMSK_STALL;
562 hcintmsk |= HCINTMSK_XACTERR;
563 hcintmsk |= HCINTMSK_DATATGLERR;
564 if (chan->ep_is_in) {
565 hcintmsk |= HCINTMSK_BBLERR;
566 } else {
567 hcintmsk |= HCINTMSK_NAK;
568 hcintmsk |= HCINTMSK_NYET;
569 if (chan->do_ping)
570 hcintmsk |= HCINTMSK_ACK;
571 }
572
573 if (chan->do_split) {
574 hcintmsk |= HCINTMSK_NAK;
575 if (chan->complete_split)
576 hcintmsk |= HCINTMSK_NYET;
577 else
578 hcintmsk |= HCINTMSK_ACK;
579 }
580
581 if (chan->error_state)
582 hcintmsk |= HCINTMSK_ACK;
583 break;
584
585 case USB_ENDPOINT_XFER_INT:
586 if (dbg_perio())
587 dev_vdbg(hsotg->dev, "intr\n");
588 hcintmsk |= HCINTMSK_XFERCOMPL;
589 hcintmsk |= HCINTMSK_NAK;
590 hcintmsk |= HCINTMSK_STALL;
591 hcintmsk |= HCINTMSK_XACTERR;
592 hcintmsk |= HCINTMSK_DATATGLERR;
593 hcintmsk |= HCINTMSK_FRMOVRUN;
594
595 if (chan->ep_is_in)
596 hcintmsk |= HCINTMSK_BBLERR;
597 if (chan->error_state)
598 hcintmsk |= HCINTMSK_ACK;
599 if (chan->do_split) {
600 if (chan->complete_split)
601 hcintmsk |= HCINTMSK_NYET;
602 else
603 hcintmsk |= HCINTMSK_ACK;
604 }
605 break;
606
607 case USB_ENDPOINT_XFER_ISOC:
608 if (dbg_perio())
609 dev_vdbg(hsotg->dev, "isoc\n");
610 hcintmsk |= HCINTMSK_XFERCOMPL;
611 hcintmsk |= HCINTMSK_FRMOVRUN;
612 hcintmsk |= HCINTMSK_ACK;
613
614 if (chan->ep_is_in) {
615 hcintmsk |= HCINTMSK_XACTERR;
616 hcintmsk |= HCINTMSK_BBLERR;
617 }
618 break;
619 default:
620 dev_err(hsotg->dev, "## Unknown EP type ##\n");
621 break;
622 }
623
624 dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
625 if (dbg_hc(chan))
626 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
627 }
628
dwc2_hc_enable_dma_ints(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)629 STATIC void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
630 struct dwc2_host_chan *chan)
631 {
632 u32 hcintmsk = HCINTMSK_CHHLTD;
633
634 /*
635 * For Descriptor DMA mode core halts the channel on AHB error.
636 * Interrupt is not required.
637 */
638 if (!hsotg->params.dma_desc_enable) {
639 if (dbg_hc(chan))
640 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
641 hcintmsk |= HCINTMSK_AHBERR;
642 } else {
643 if (dbg_hc(chan))
644 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
645 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
646 hcintmsk |= HCINTMSK_XFERCOMPL;
647 }
648
649 if (chan->error_state && !chan->do_split &&
650 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
651 if (dbg_hc(chan))
652 dev_vdbg(hsotg->dev, "setting ACK\n");
653 hcintmsk |= HCINTMSK_ACK;
654 if (chan->ep_is_in) {
655 hcintmsk |= HCINTMSK_DATATGLERR;
656 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
657 hcintmsk |= HCINTMSK_NAK;
658 }
659 }
660
661 dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
662 if (dbg_hc(chan))
663 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
664 }
665
dwc2_hc_enable_ints(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)666 STATIC void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
667 struct dwc2_host_chan *chan)
668 {
669 u32 intmsk;
670
671 if (hsotg->params.host_dma) {
672 if (dbg_hc(chan))
673 dev_vdbg(hsotg->dev, "DMA enabled\n");
674 dwc2_hc_enable_dma_ints(hsotg, chan);
675 } else {
676 if (dbg_hc(chan))
677 dev_vdbg(hsotg->dev, "DMA disabled\n");
678 dwc2_hc_enable_slave_ints(hsotg, chan);
679 }
680
681 /* Enable the top level host channel interrupt */
682 intmsk = dwc2_readl(hsotg, HAINTMSK);
683 intmsk |= 1 << chan->hc_num;
684 dwc2_writel(hsotg, intmsk, HAINTMSK);
685 if (dbg_hc(chan))
686 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
687
688 /* Make sure host channel interrupts are enabled */
689 intmsk = dwc2_readl(hsotg, GINTMSK);
690 intmsk |= GINTSTS_HCHINT;
691 dwc2_writel(hsotg, intmsk, GINTMSK);
692 if (dbg_hc(chan))
693 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
694 }
695
696 /**
697 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
698 * a specific endpoint
699 *
700 * @hsotg: Programming view of DWC_otg controller
701 * @chan: Information needed to initialize the host channel
702 *
703 * The HCCHARn register is set up with the characteristics specified in chan.
704 * Host channel interrupts that may need to be serviced while this transfer is
705 * in progress are enabled.
706 */
void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u8 hc_num = chan->hc_num;
	u32 hcintmsk;
	u32 hcchar;
	u32 hcsplt = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/*
	 * Clear old interrupt conditions for this host channel (all bits
	 * except the reserved ones are write-to-clear)
	 */
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	dwc2_writel(hsotg, hcintmsk, HCINT(hc_num));

	/* Enable channel interrupts required for this transfer */
	dwc2_hc_enable_ints(hsotg, chan);

	/*
	 * Program the HCCHARn register with the endpoint characteristics for
	 * the current transfer.  Note the shift-then-mask expressions rely
	 * on << binding tighter than &.
	 */
	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
	if (chan->ep_is_in)
		hcchar |= HCCHAR_EPDIR;
	if (chan->speed == USB_SPEED_LOW)
		hcchar |= HCCHAR_LSPDDEV;
	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
	dwc2_writel(hsotg, hcchar, HCCHAR(hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
			 hc_num, hcchar);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
			 __func__, hc_num);
		dev_vdbg(hsotg->dev, "	 Dev Addr: %d\n",
			 chan->dev_addr);
		dev_vdbg(hsotg->dev, "	 Ep Num: %d\n",
			 chan->ep_num);
		dev_vdbg(hsotg->dev, "	 Is In: %d\n",
			 chan->ep_is_in);
		dev_vdbg(hsotg->dev, "	 Is Low Speed: %d\n",
			 chan->speed == USB_SPEED_LOW);
		dev_vdbg(hsotg->dev, "	 Ep Type: %d\n",
			 chan->ep_type);
		dev_vdbg(hsotg->dev, "	 Max Pkt: %d\n",
			 chan->max_packet);
	}

	/* Program the HCSPLT register for SPLITs */
	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev,
				 "Programming HC %d with split --> %s\n",
				 hc_num,
				 chan->complete_split ? "CSPLIT" : "SSPLIT");
		if (chan->complete_split)
			hcsplt |= HCSPLT_COMPSPLT;
		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
			  HCSPLT_XACTPOS_MASK;
		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
			  HCSPLT_HUBADDR_MASK;
		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
			  HCSPLT_PRTADDR_MASK;
		if (dbg_hc(chan)) {
			dev_vdbg(hsotg->dev, "	  comp split %d\n",
				 chan->complete_split);
			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
				 chan->xact_pos);
			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
				 chan->hub_addr);
			dev_vdbg(hsotg->dev, "	  hub port %d\n",
				 chan->hub_port);
			dev_vdbg(hsotg->dev, "	  is_in %d\n",
				 chan->ep_is_in);
			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
				 chan->max_packet);
			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
				 chan->xfer_len);
		}
	}

	/* Written unconditionally: clears any stale split configuration */
	dwc2_writel(hsotg, hcsplt, HCSPLT(hc_num));
}
793
794 /**
795 * dwc2_hc_halt() - Attempts to halt a host channel
796 *
797 * @hsotg: Controller register interface
798 * @chan: Host channel to halt
799 * @halt_status: Reason for halting the channel
800 *
801 * This function should only be called in Slave mode or to abort a transfer in
802 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
803 * controller halts the channel when the transfer is complete or a condition
804 * occurs that requires application intervention.
805 *
806 * In slave mode, checks for a free request queue entry, then sets the Channel
807 * Enable and Channel Disable bits of the Host Channel Characteristics
 * register of the specified channel to initiate the halt. If there is no free
809 * request queue entry, sets only the Channel Disable bit of the HCCHARn
810 * register to flush requests for this channel. In the latter case, sets a
811 * flag to indicate that the host channel needs to be halted when a request
812 * queue slot is open.
813 *
814 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
815 * HCCHARn register. The controller ensures there is space in the request
816 * queue before submitting the halt request.
817 *
818 * Some time may elapse before the core flushes any posted requests for this
819 * host channel and halts. The Channel Halted interrupt handler completes the
820 * deactivation of the host channel.
821 */
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		  enum dwc2_halt_status halt_status)
{
	u32 nptxsts, hptxsts, hcchar;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/*
	 * In buffer DMA or external DMA mode channel can't be halted
	 * for non-split periodic channels. At the end of the next
	 * uframe/frame (in the worst case), the core generates a channel
	 * halted and disables the channel automatically.
	 */
	if ((hsotg->params.g_dma && !hsotg->params.g_dma_desc) ||
	    hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) {
		if (!chan->do_split &&
		    (chan->ep_type == USB_ENDPOINT_XFER_ISOC ||
		     chan->ep_type == USB_ENDPOINT_XFER_INT)) {
			dev_err(hsotg->dev, "%s() Channel can't be halted\n",
				__func__);
			return;
		}
	}

	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);

	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    halt_status == DWC2_HC_XFER_AHB_ERR) {
		/*
		 * Disable all channel interrupts except Ch Halted. The QTD
		 * and QH state associated with this transfer has been cleared
		 * (in the case of URB_DEQUEUE), so the channel needs to be
		 * shut down carefully to prevent crashes.
		 */
		u32 hcintmsk = HCINTMSK_CHHLTD;

		dev_vdbg(hsotg->dev, "dequeue/error\n");
		dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));

		/*
		 * Make sure no other interrupts besides halt are currently
		 * pending. Handling another interrupt could cause a crash due
		 * to the QTD and QH state.
		 */
		dwc2_writel(hsotg, ~hcintmsk, HCINT(chan->hc_num));

		/*
		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
		 * even if the channel was already halted for some other
		 * reason
		 */
		chan->halt_status = halt_status;

		hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
		if (!(hcchar & HCCHAR_CHENA)) {
			/*
			 * The channel is either already halted or it hasn't
			 * started yet. In DMA mode, the transfer may halt if
			 * it finishes normally or a condition occurs that
			 * requires driver intervention. Don't want to halt
			 * the channel again. In either Slave or DMA mode,
			 * it's possible that the transfer has been assigned
			 * to a channel, but not started yet when an URB is
			 * dequeued. Don't want to halt a channel that hasn't
			 * started yet.
			 */
			return;
		}
	}
	if (chan->halt_pending) {
		/*
		 * A halt has already been issued for this channel. This might
		 * happen when a transfer is aborted by a higher level in
		 * the stack.
		 */
		dev_vdbg(hsotg->dev,
			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
			 __func__, chan->hc_num);
		return;
	}

	hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));

	/* No need to set the bit in DDMA for disabling the channel */
	/* TODO check it everywhere channel is disabled */
	if (!hsotg->params.dma_desc_enable) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcchar |= HCCHAR_CHENA;
	} else {
		if (dbg_hc(chan))
			dev_dbg(hsotg->dev, "desc DMA enabled\n");
	}
	/* Setting CHDIS (with CHENA) requests the core to halt the channel */
	hcchar |= HCCHAR_CHDIS;

	if (!hsotg->params.host_dma) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA not enabled\n");
		hcchar |= HCCHAR_CHENA;

		/*
		 * Slave mode: check for space in the request queue to issue
		 * the halt; with no free slot, only CHDIS is left set and the
		 * halt is deferred via halt_on_queue below.
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			nptxsts = dwc2_readl(hsotg, GNPTXSTS);
			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
				dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		} else {
			if (dbg_perio())
				dev_vdbg(hsotg->dev, "isoc/intr\n");
			hptxsts = dwc2_readl(hsotg, HPTXSTS);
			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
			    hsotg->queuing_high_bandwidth) {
				if (dbg_perio())
					dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		}
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
	}

	dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
	chan->halt_status = halt_status;

	if (hcchar & HCCHAR_CHENA) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel enabled\n");
		/* Halt submitted; completion arrives via Channel Halted irq */
		chan->halt_pending = 1;
		chan->halt_on_queue = 0;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel disabled\n");
		/* No queue space: halt again once a request slot opens */
		chan->halt_on_queue = 1;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
			 hcchar);
		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
			 chan->halt_pending);
		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
			 chan->halt_on_queue);
		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
			 chan->halt_status);
	}
}
976
977 /**
978 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
979 *
980 * @hsotg: Programming view of DWC_otg controller
981 * @chan: Identifies the host channel to clean up
982 *
983 * This function is normally called after a transfer is done and the host
984 * channel is being released
985 */
dwc2_hc_cleanup(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)986 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
987 {
988 u32 hcintmsk;
989
990 chan->xfer_started = 0;
991
992 list_del_init(&chan->split_order_list_entry);
993
994 /*
995 * Clear channel interrupt enables and any unhandled channel interrupt
996 * conditions
997 */
998 dwc2_writel(hsotg, 0, HCINTMSK(chan->hc_num));
999 hcintmsk = 0xffffffff;
1000 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1001 dwc2_writel(hsotg, hcintmsk, HCINT(chan->hc_num));
1002 }
1003
1004 /**
1005 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1006 * which frame a periodic transfer should occur
1007 *
1008 * @hsotg: Programming view of DWC_otg controller
1009 * @chan: Identifies the host channel to set up and its properties
1010 * @hcchar: Current value of the HCCHAR register for the specified host channel
1011 *
1012 * This function has no effect on non-periodic transfers
1013 */
STATIC void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, u32 *hcchar)
{
	/* Only periodic (INT/ISOC) transfers care about even/odd framing */
	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		int host_speed;
		int xfer_ns;
		int xfer_us;
		int bytes_in_fifo;
		u16 fifo_space;
		u16 frame_number;
		u16 wire_frame;

		/*
		 * Try to figure out if we're an even or odd frame. If we set
		 * even and the current frame number is even then the transfer
		 * will happen immediately. Similar if both are odd. If one is
		 * even and the other is odd then the transfer will happen when
		 * the frame number ticks.
		 *
		 * There's a bit of a balancing act to get this right.
		 * Sometimes we may want to send data in the current frame (AK
		 * right away). We might want to do this if the frame number
		 * _just_ ticked, but we might also want to do this in order
		 * to continue a split transaction that happened late in a
		 * microframe (so we didn't know to queue the next transfer
		 * until the frame number had ticked). The problem is that we
		 * need a lot of knowledge to know if there's actually still
		 * time to send things or if it would be better to wait until
		 * the next frame.
		 *
		 * We can look at how much time is left in the current frame
		 * and make a guess about whether we'll have time to transfer.
		 * We'll do that.
		 */

		/*
		 * Get speed host is running at; splits travel over the
		 * high-speed link to the hub, so treat them as high speed.
		 */
		host_speed = (chan->speed != USB_SPEED_HIGH &&
			      !chan->do_split) ? chan->speed : USB_SPEED_HIGH;

		/* See how many bytes are in the periodic FIFO right now */
		fifo_space = (dwc2_readl(hsotg, HPTXSTS) &
			      TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
		/* FIFO size is in 32-bit words; occupied = total - free */
		bytes_in_fifo = sizeof(u32) *
				(hsotg->params.host_perio_tx_fifo_size -
				 fifo_space);

		/*
		 * Roughly estimate bus time for everything in the periodic
		 * queue + our new transfer. This is "rough" because we're
		 * using a function that takes into account IN/OUT and
		 * INT/ISO and we're just slamming in one value for all
		 * transfers. This should be an over-estimate and that should
		 * be OK, but we can probably tighten it.
		 */
		xfer_ns = dwc2_usb_calc_bus_time(host_speed, false, false,
						 chan->xfer_len + bytes_in_fifo);
		xfer_us = NS_TO_US(xfer_ns);

		/* See what frame number we'll be at by the time we finish */
		frame_number = dwc2_hcd_get_future_frame_number(hsotg, xfer_us);

		/* This is when we were scheduled to be on the wire */
		wire_frame = dwc2_frame_num_inc(chan->qh->next_active_frame, 1);

		/*
		 * If we'd finish _after_ the frame we're scheduled in then
		 * it's hopeless. Just schedule right away and hope for the
		 * best. Note that it _might_ be wise to call back into the
		 * scheduler to pick a better frame, but this is better than
		 * nothing.
		 */
		if (dwc2_frame_num_gt(frame_number, wire_frame)) {
			dwc2_sch_vdbg(hsotg,
				      "QH=%p EO MISS fr=%04x=>%04x (%+d)\n",
				      chan->qh, wire_frame, frame_number,
				      dwc2_frame_num_dec(frame_number,
							 wire_frame));
			wire_frame = frame_number;

			/*
			 * We picked a different frame number; communicate this
			 * back to the scheduler so it doesn't try to schedule
			 * another in the same frame.
			 *
			 * Remember that next_active_frame is 1 before the wire
			 * frame.
			 */
			chan->qh->next_active_frame =
				dwc2_frame_num_dec(frame_number, 1);
		}

		/* Program the odd-frame bit to match the target frame parity */
		if (wire_frame & 1)
			*hcchar |= HCCHAR_ODDFRM;
		else
			*hcchar &= ~HCCHAR_ODDFRM;
	}
}
1112
dwc2_set_pid_isoc(struct dwc2_host_chan * chan)1113 STATIC void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1114 {
1115 /* Set up the initial PID for the transfer */
1116 if (chan->speed == USB_SPEED_HIGH) {
1117 if (chan->ep_is_in) {
1118 if (chan->multi_count == 1)
1119 chan->data_pid_start = DWC2_HC_PID_DATA0;
1120 else if (chan->multi_count == 2)
1121 chan->data_pid_start = DWC2_HC_PID_DATA1;
1122 else
1123 chan->data_pid_start = DWC2_HC_PID_DATA2;
1124 } else {
1125 if (chan->multi_count == 1)
1126 chan->data_pid_start = DWC2_HC_PID_DATA0;
1127 else
1128 chan->data_pid_start = DWC2_HC_PID_MDATA;
1129 }
1130 } else {
1131 chan->data_pid_start = DWC2_HC_PID_DATA0;
1132 }
1133 }
1134
1135 /**
1136 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1137 * the Host Channel
1138 *
1139 * @hsotg: Programming view of DWC_otg controller
1140 * @chan: Information needed to initialize the host channel
1141 *
1142 * This function should only be called in Slave mode. For a channel associated
1143 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1144 * associated with a periodic EP, the periodic Tx FIFO is written.
1145 *
1146 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1147 * the number of bytes written to the Tx FIFO.
1148 */
dwc2_hc_write_packet(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)1149 STATIC void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1150 struct dwc2_host_chan *chan)
1151 {
1152 u32 i;
1153 u32 remaining_count;
1154 u32 byte_count;
1155 u32 dword_count;
1156 u32 *data_buf = (u32 *)chan->xfer_buf;
1157
1158 if (dbg_hc(chan))
1159 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1160
1161 remaining_count = chan->xfer_len - chan->xfer_count;
1162 if (remaining_count > chan->max_packet)
1163 byte_count = chan->max_packet;
1164 else
1165 byte_count = remaining_count;
1166
1167 dword_count = (byte_count + 3) / 4;
1168
1169 if (((unsigned long)data_buf & 0x3) == 0) {
1170 /* xfer_buf is DWORD aligned */
1171 for (i = 0; i < dword_count; i++, data_buf++)
1172 dwc2_writel(hsotg, *data_buf, HCFIFO(chan->hc_num));
1173 } else {
1174 /* xfer_buf is not DWORD aligned */
1175 for (i = 0; i < dword_count; i++, data_buf++) {
1176 u32 data = data_buf[0] | data_buf[1] << 8 |
1177 data_buf[2] << 16 | data_buf[3] << 24;
1178 dwc2_writel(hsotg, data, HCFIFO(chan->hc_num));
1179 }
1180 }
1181
1182 chan->xfer_count += byte_count;
1183 chan->xfer_buf += byte_count;
1184 }
1185
1186 /**
1187 * dwc2_hc_do_ping() - Starts a PING transfer
1188 *
1189 * @hsotg: Programming view of DWC_otg controller
1190 * @chan: Information needed to initialize the host channel
1191 *
1192 * This function should only be called in Slave mode. The Do Ping bit is set in
1193 * the HCTSIZ register, then the channel is enabled.
1194 */
dwc2_hc_do_ping(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan)1195 STATIC void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
1196 struct dwc2_host_chan *chan)
1197 {
1198 u32 hcchar;
1199 u32 hctsiz;
1200
1201 if (dbg_hc(chan))
1202 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1203 chan->hc_num);
1204
1205 hctsiz = TSIZ_DOPNG;
1206 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1207 dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
1208
1209 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
1210 hcchar |= HCCHAR_CHENA;
1211 hcchar &= ~HCCHAR_CHDIS;
1212 dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
1213 }
1214
1215 /**
1216 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1217 * channel and starts the transfer
1218 *
1219 * @hsotg: Programming view of DWC_otg controller
1220 * @chan: Information needed to initialize the host channel. The xfer_len value
1221 * may be reduced to accommodate the max widths of the XferSize and
1222 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1223 * changed to reflect the final xfer_len value.
1224 *
1225 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1226 * the caller must ensure that there is sufficient space in the request queue
1227 * and Tx Data FIFO.
1228 *
1229 * For an OUT transfer in Slave mode, it loads a data packet into the
1230 * appropriate FIFO. If necessary, additional data packets are loaded in the
1231 * Host ISR.
1232 *
1233 * For an IN transfer in Slave mode, a data packet is requested. The data
1234 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1235 * additional data packets are requested in the Host ISR.
1236 *
1237 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1238 * register along with a packet count of 1 and the channel is enabled. This
1239 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1240 * simply set to 0 since no data transfer occurs in this case.
1241 *
1242 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1243 * all the information required to perform the subsequent data transfer. In
1244 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1245 * controller performs the entire PING protocol, then starts the data
1246 * transfer.
1247 */
STATIC void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
				   struct dwc2_host_chan *chan)
{
	u32 max_hc_xfer_size = hsotg->params.max_transfer_size;
	u16 max_hc_pkt_count = hsotg->params.max_packet_count;
	u32 hcchar;
	u32 hctsiz = 0;
	u16 num_packets = 0;
	u32 ec_mc = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (chan->do_ping) {
		if (!hsotg->params.host_dma) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, no DMA\n");
			/* Slave mode: issue the PING directly and we're done */
			dwc2_hc_do_ping(hsotg, chan);
			chan->xfer_started = 1;
			return;
		}

		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "ping, DMA\n");

		/* DMA mode: the core runs the PING protocol itself */
		hctsiz |= TSIZ_DOPNG;
	}

	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "split\n");
		/* A split carries at most one packet per start */
		num_packets = 1;

		if (chan->complete_split && !chan->ep_is_in)
			/*
			 * For CSPLIT OUT Transfer, set the size to 0 so the
			 * core doesn't expect any data written to the FIFO
			 */
			chan->xfer_len = 0;
		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
			chan->xfer_len = chan->max_packet;
		else if (!chan->ep_is_in && chan->xfer_len > 188)
			/* 188 is the most a start-split OUT may carry */
			chan->xfer_len = 188;

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;

		/* For split set ec_mc for immediate retries */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			ec_mc = 3;
		else
			ec_mc = 1;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "no split\n");
		/*
		 * Ensure that the transfer length and packet count will fit
		 * in the widths allocated for them in the HCTSIZn register
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * Make sure the transfer size is no larger than one
			 * (micro)frame's worth of data. (A check was done
			 * when the periodic transfer was accepted to ensure
			 * that a (micro)frame's worth of data can be
			 * programmed into a channel.)
			 */
			u32 max_periodic_len =
				chan->multi_count * chan->max_packet;

			if (chan->xfer_len > max_periodic_len)
				chan->xfer_len = max_periodic_len;
		} else if (chan->xfer_len > max_hc_xfer_size) {
			/*
			 * Make sure that xfer_len is a multiple of max packet
			 * size
			 */
			chan->xfer_len =
				max_hc_xfer_size - chan->max_packet + 1;
		}

		if (chan->xfer_len > 0) {
			/* Packet count, rounding up for a final short packet */
			num_packets = (chan->xfer_len + chan->max_packet - 1) /
					chan->max_packet;
			if (num_packets > max_hc_pkt_count) {
				/* Clamp to the hardware's packet-count field */
				num_packets = max_hc_pkt_count;
				chan->xfer_len = num_packets * chan->max_packet;
			} else if (chan->ep_is_in) {
				/*
				 * Always program an integral # of max packets
				 * for IN transfers.
				 * Note: This assumes that the input buffer is
				 * aligned and sized accordingly.
				 */
				chan->xfer_len = num_packets * chan->max_packet;
			}
		} else {
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;
		}

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Make sure that the multi_count field matches the
			 * actual transfer length
			 */
			chan->multi_count = num_packets;

		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			dwc2_set_pid_isoc(chan);

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;

		/* The ec_mc gets the multi_count for non-split */
		ec_mc = chan->multi_count;
	}

	chan->start_pkt_count = num_packets;
	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;
	dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
			 hctsiz, chan->hc_num);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
			 TSIZ_XFERSIZE_SHIFT);
		dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
			 (hctsiz & TSIZ_PKTCNT_MASK) >>
			 TSIZ_PKTCNT_SHIFT);
		dev_vdbg(hsotg->dev, " Start PID: %d\n",
			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
			 TSIZ_SC_MC_PID_SHIFT);
	}

	if (hsotg->params.host_dma) {
		dma_addr_t dma_addr;

		/* Use the bounce buffer when one was set up for this transfer */
		if (chan->align_buf) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "align_buf\n");
			dma_addr = chan->align_buf;
		} else {
			dma_addr = chan->xfer_dma;
		}

		/* Program HCDMA directly, or via a bus-specific hook if set */
		if (hsotg->hsotg_sc->sc_set_dma_addr == NULL) {
			dwc2_writel(hsotg, (u32)dma_addr, HCDMA(chan->hc_num));

			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev,
					 "Wrote %08lx to HCDMA(%d)\n",
					 (unsigned long)dma_addr, chan->hc_num);
		} else {
			(void)(*hsotg->hsotg_sc->sc_set_dma_addr)(
			    hsotg->dev, dma_addr, chan->hc_num);
		}
	}

	/* Start the split */
	if (chan->do_split) {
		u32 hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));

		hcsplt |= HCSPLT_SPLTENA;
		dwc2_writel(hsotg, hcsplt, HCSPLT(chan->hc_num));
	}

	hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
	/* Pick the even/odd frame bit for periodic transfers */
	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;

	if (!hsotg->params.host_dma &&
	    !chan->ep_is_in && chan->xfer_len > 0)
		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
}
1455
1456 /**
1457 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1458 * host channel and starts the transfer in Descriptor DMA mode
1459 *
1460 * @hsotg: Programming view of DWC_otg controller
1461 * @chan: Information needed to initialize the host channel
1462 *
1463 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1464 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1465 * with micro-frame bitmap.
1466 *
1467 * Initializes HCDMA register with descriptor list address and CTD value then
1468 * starts the transfer via enabling the channel.
1469 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz = 0;

	/* For PING the core runs the protocol before the data phase */
	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, " Start PID: %d\n",
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
	}

	dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));

	/* Flush the descriptor list to memory before the core reads it */
	usb_syncmem(&chan->desc_list_usbdma, 0, chan->desc_list_sz,
	    BUS_DMASYNC_PREWRITE);

	/* Program HCDMA directly, or via a bus-specific hook if one is set */
	if (hsotg->hsotg_sc->sc_set_dma_addr == NULL) {
		dwc2_writel(hsotg, chan->desc_list_addr, HCDMA(chan->hc_num));
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
				 &chan->desc_list_addr, chan->hc_num);
	} else {
		(void)(*hsotg->hsotg_sc->sc_set_dma_addr)(
		    hsotg->dev, chan->desc_list_addr, chan->hc_num);
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %pad to ext dma(%d)\n",
				 &chan->desc_list_addr, chan->hc_num);
	}

	hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	/* A set disable bit here means a halt is already in progress */
	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}
1545
1546 /**
1547 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1548 * a previous call to dwc2_hc_start_transfer()
1549 *
1550 * @hsotg: Programming view of DWC_otg controller
1551 * @chan: Information needed to initialize the host channel
1552 *
1553 * The caller must ensure there is sufficient space in the request queue and Tx
1554 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1555 * the controller acts autonomously to complete transfers programmed to a host
1556 * channel.
1557 *
1558 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1559 * if there is any data remaining to be queued. For an IN transfer, another
1560 * data packet is always requested. For the SETUP phase of a control transfer,
1561 * this function does nothing.
1562 *
1563 * Return: 1 if a new request is queued, 0 if no more requests are required
1564 * for this transfer
1565 */
STATIC int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
				     struct dwc2_host_chan *chan)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen. So we can't depend on the NAK interrupt
		 * handler to requeue a NAK'd request. Instead, IN requests
		 * are issued each time this function is called. When the
		 * transfer completes, the extra requests for the channel will
		 * be flushed.
		 */
		u32 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));

		/* Re-enable the channel for one more IN request */
		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
		hcchar |= HCCHAR_CHENA;
		hcchar &= ~HCCHAR_CHDIS;
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
				 hcchar);
		dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
		chan->requests++;
		return 1;
	}

	/* OUT transfers */

	if (chan->xfer_count < chan->xfer_len) {
		/* Periodic OUTs must target the correct (micro)frame parity */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			u32 hcchar = dwc2_readl(hsotg,
						HCCHAR(chan->hc_num));

			dwc2_hc_set_even_odd_frame(hsotg, chan,
						   &hcchar);
		}

		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
		chan->requests++;
		return 1;
	}

	/* All OUT data has been queued; no further requests needed */
	return 0;
}
1627
1628 /*
1629 * =========================================================================
1630 * HCD
1631 * =========================================================================
1632 */
1633
1634 /*
 * Processes all the URBs in a single list of QHs. Completes them with
 * -ECONNRESET and frees the QTD.
1637 *
1638 * Must be called with interrupt disabled and spinlock held
1639 */
dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg * hsotg,struct list_head * qh_list)1640 STATIC void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
1641 struct list_head *qh_list)
1642 {
1643 struct dwc2_qh *qh, *qh_tmp;
1644 struct dwc2_qtd *qtd, *qtd_tmp;
1645
1646 list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
1647 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
1648 qtd_list_entry) {
1649 dwc2_host_complete(hsotg, qtd, -ECONNRESET);
1650 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1651 }
1652 }
1653 }
1654
STATIC void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
			      struct list_head *qh_list)
{
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh, *qh_tmp;
	unsigned long flags;

	if (!qh_list->next)
		/* The list hasn't been initialized yet */
		return;

	spin_lock_irqsave(&hsotg->lock, flags);

	/* Ensure there are no QTDs or URBs left */
	dwc2_kill_urbs_in_qh_list(hsotg, qh_list);

	list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
		dwc2_hcd_qh_unlink(hsotg, qh);

		/* Free each QTD in the QH's QTD list */
		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry)
			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/* Break the channel's back-pointer so it isn't left dangling */
		if (qh->channel && qh->channel->qh == qh)
			qh->channel->qh = NULL;

		/*
		 * Drop the lock around dwc2_hcd_qh_free(); it is apparently
		 * unsafe to call with the lock held (NOTE(review): confirm —
		 * the _safe iterator keeps qh_tmp valid across the unlock).
		 */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		dwc2_hcd_qh_free(hsotg, qh);
		spin_lock_irqsave(&hsotg->lock, flags);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);
}
1689
1690 /*
 * Responds with an error status of -ECONNRESET to all URBs in the non-periodic
1692 * and periodic schedules. The QTD associated with each URB is removed from
1693 * the schedule and freed. This function may be called when a disconnect is
1694 * detected or when the HCD is being stopped.
1695 *
1696 * Must be called with interrupt disabled and spinlock held
1697 */
dwc2_kill_all_urbs(struct dwc2_hsotg * hsotg)1698 STATIC void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg)
1699 {
1700 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive);
1701 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_waiting);
1702 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active);
1703 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive);
1704 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready);
1705 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_assigned);
1706 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_queued);
1707 }
1708
1709 /**
1710 * dwc2_hcd_start() - Starts the HCD when switching to Host mode
1711 *
1712 * @hsotg: Pointer to struct dwc2_hsotg
1713 */
dwc2_hcd_start(struct dwc2_hsotg * hsotg)1714 void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
1715 {
1716 u32 hprt0;
1717
1718 if (hsotg->op_state == OTG_STATE_B_HOST) {
1719 /*
1720 * Reset the port. During a HNP mode switch the reset
1721 * needs to occur within 1ms and have a duration of at
1722 * least 50ms.
1723 */
1724 hprt0 = dwc2_read_hprt0(hsotg);
1725 hprt0 |= HPRT0_RST;
1726 dwc2_writel(hsotg, hprt0, HPRT0);
1727 }
1728
1729 queue_delayed_work(hsotg->wq_otg, &hsotg->start_work,
1730 msecs_to_jiffies(50));
1731 }
1732
1733 /* Must be called with interrupt disabled and spinlock held */
/* Must be called with interrupt disabled and spinlock held */
STATIC void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
{
	int num_channels = hsotg->params.host_channels;
	struct dwc2_host_chan *channel;
	u32 hcchar;
	int i;

	if (!hsotg->params.host_dma) {
		/* Flush out any channel requests in slave mode */
		for (i = 0; i < num_channels; i++) {
			channel = hsotg->hc_ptr_array[i];
			/* Skip channels already on a list (the free list) */
			if (!list_empty(&channel->hc_list_entry))
				continue;
			hcchar = dwc2_readl(hsotg, HCCHAR(i));
			if (hcchar & HCCHAR_CHENA) {
				/* Request disable, clearing enable and EPDIR */
				hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR);
				hcchar |= HCCHAR_CHDIS;
				dwc2_writel(hsotg, hcchar, HCCHAR(i));
			}
		}
	}

	for (i = 0; i < num_channels; i++) {
		channel = hsotg->hc_ptr_array[i];
		/* Only in-use channels (not on the free list) need cleanup */
		if (!list_empty(&channel->hc_list_entry))
			continue;
		hcchar = dwc2_readl(hsotg, HCCHAR(i));
		if (hcchar & HCCHAR_CHENA) {
			/* Halt the channel */
			hcchar |= HCCHAR_CHDIS;
			dwc2_writel(hsotg, hcchar, HCCHAR(i));
		}

		/* Reset software state and return the channel to the free list */
		dwc2_hc_cleanup(hsotg, channel);
		list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list);
		/*
		 * Added for Descriptor DMA to prevent channel double cleanup in
		 * release_channel_ddma(), which is called from ep_disable when
		 * device disconnects
		 */
		channel->qh = NULL;
	}
	/* All channels have been freed, mark them available */
	if (hsotg->params.uframe_sched) {
		hsotg->available_host_channels =
			hsotg->params.host_channels;
	} else {
		hsotg->non_periodic_channels = 0;
		hsotg->periodic_channels = 0;
	}
}
1785
1786 /**
1787 * dwc2_hcd_connect() - Handles connect of the HCD
1788 *
1789 * @hsotg: Pointer to struct dwc2_hsotg
1790 *
1791 * Must be called with interrupt disabled and spinlock held
1792 */
dwc2_hcd_connect(struct dwc2_hsotg * hsotg)1793 void dwc2_hcd_connect(struct dwc2_hsotg *hsotg)
1794 {
1795 if (hsotg->lx_state != DWC2_L0)
1796 usb_hcd_resume_root_hub(hsotg->priv);
1797
1798 hsotg->flags.b.port_connect_status_change = 1;
1799 hsotg->flags.b.port_connect_status = 1;
1800 }
1801
1802 /**
1803 * dwc2_hcd_disconnect() - Handles disconnect of the HCD
1804 *
1805 * @hsotg: Pointer to struct dwc2_hsotg
1806 * @force: If true, we won't try to reconnect even if we see device connected.
1807 *
1808 * Must be called with interrupt disabled and spinlock held
1809 */
void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force)
{
	u32 intr;
	u32 hprt0;

	/* Set status flags for the hub driver */
	hsotg->flags.b.port_connect_status_change = 1;
	hsotg->flags.b.port_connect_status = 0;

	/*
	 * Shutdown any transfers in process by clearing the Tx FIFO Empty
	 * interrupt mask and status bits and disabling subsequent host
	 * channel interrupts.
	 */
	intr = dwc2_readl(hsotg, GINTMSK);
	intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT);
	dwc2_writel(hsotg, intr, GINTMSK);
	intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT;
	dwc2_writel(hsotg, intr, GINTSTS);

	/*
	 * Turn off the vbus power only if the core has transitioned to device
	 * mode. If still in host mode, need to keep power on to detect a
	 * reconnection.
	 */
	if (dwc2_is_device_mode(hsotg)) {
		if (hsotg->op_state != OTG_STATE_A_SUSPEND) {
			dev_dbg(hsotg->dev, "Disconnect: PortPower off\n");
			dwc2_writel(hsotg, 0, HPRT0);
		}

		dwc2_disable_host_interrupts(hsotg);
	}

	/* Respond with an error status to all URBs in the schedule */
	dwc2_kill_all_urbs(hsotg);

	if (dwc2_is_host_mode(hsotg))
		/* Clean up any host channels that were in use */
		dwc2_hcd_cleanup_channels(hsotg);

	/* Notify the upper layers of the disconnect */
	dwc2_host_disconnect(hsotg);

	dwc2_root_intr(hsotg->hsotg_sc); /* Required for OpenBSD */

	/*
	 * Add an extra check here to see if we're actually connected but
	 * we don't have a detection interrupt pending. This can happen if:
	 * 1. hardware sees connect
	 * 2. hardware sees disconnect
	 * 3. hardware sees connect
	 * 4. dwc2_port_intr() - clears connect interrupt
	 * 5. dwc2_handle_common_intr() - calls here
	 *
	 * Without the extra check here we will end calling disconnect
	 * and won't get any future interrupts to handle the connect.
	 */
	if (!force) {
		hprt0 = dwc2_readl(hsotg, HPRT0);
		/* Connected with no connect-detect pending: report connect */
		if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS))
			dwc2_hcd_connect(hsotg);
	}
}
1873
1874 /**
1875 * dwc2_hcd_rem_wakeup() - Handles Remote Wakeup
1876 *
1877 * @hsotg: Pointer to struct dwc2_hsotg
1878 */
dwc2_hcd_rem_wakeup(struct dwc2_hsotg * hsotg)1879 STATIC void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
1880 {
1881 if (hsotg->bus_suspended) {
1882 hsotg->flags.b.port_suspend_change = 1;
1883 usb_hcd_resume_root_hub(hsotg->priv);
1884 }
1885
1886 if (hsotg->lx_state == DWC2_L1)
1887 hsotg->flags.b.port_l1_change = 1;
1888
1889 dwc2_root_intr(hsotg->hsotg_sc); /* Required for OpenBSD */
1890 }
1891
1892 /**
1893 * dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner
1894 *
1895 * @hsotg: Pointer to struct dwc2_hsotg
1896 *
1897 * Must be called with interrupt disabled and spinlock held
1898 */
dwc2_hcd_stop(struct dwc2_hsotg * hsotg)1899 void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
1900 {
1901 dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n");
1902
1903 /*
1904 * The root hub should be disconnected before this function is called.
1905 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
1906 * and the QH lists (via ..._hcd_endpoint_disable).
1907 */
1908
1909 /* Turn off all host-specific interrupts */
1910 dwc2_disable_host_interrupts(hsotg);
1911
1912 /* Turn off the vbus power */
1913 dev_dbg(hsotg->dev, "PortPower off\n");
1914 dwc2_writel(hsotg, 0, HPRT0);
1915 }
1916
1917 /* Caller must hold driver lock */
dwc2_hcd_urb_enqueue(struct dwc2_hsotg * hsotg,struct dwc2_hcd_urb * urb,struct dwc2_qh * qh,struct dwc2_qtd * qtd)1918 STATIC int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
1919 struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
1920 struct dwc2_qtd *qtd)
1921 {
1922 u32 intr_mask;
1923 int retval;
1924 int dev_speed;
1925
1926 if (!hsotg->flags.b.port_connect_status) {
1927 /* No longer connected */
1928 dev_err(hsotg->dev, "Not connected\n");
1929 return -ENODEV;
1930 }
1931
1932 dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
1933
1934 /* Some configurations cannot support LS traffic on a FS root port */
1935 if ((dev_speed == USB_SPEED_LOW) &&
1936 (hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) &&
1937 (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) {
1938 u32 hprt0 = dwc2_readl(hsotg, HPRT0);
1939 u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
1940
1941 if (prtspd == HPRT0_SPD_FULL_SPEED)
1942 return -ENODEV;
1943 }
1944
1945 if (!qtd)
1946 return -EINVAL;
1947
1948 dwc2_hcd_qtd_init(qtd, urb);
1949 retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
1950 if (retval) {
1951 dev_err(hsotg->dev,
1952 "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
1953 retval);
1954 return retval;
1955 }
1956
1957 intr_mask = dwc2_readl(hsotg, GINTMSK);
1958 if (!(intr_mask & GINTSTS_SOF)) {
1959 enum dwc2_transaction_type tr_type;
1960
1961 if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
1962 !(qtd->urb->flags & URB_GIVEBACK_ASAP))
1963 /*
1964 * Do not schedule SG transactions until qtd has
1965 * URB_GIVEBACK_ASAP set
1966 */
1967 return 0;
1968
1969 tr_type = dwc2_hcd_select_transactions(hsotg);
1970 if (tr_type != DWC2_TRANSACTION_NONE)
1971 dwc2_hcd_queue_transactions(hsotg, tr_type);
1972 }
1973
1974 return 0;
1975 }
1976
1977 /* Must be called with interrupt disabled and spinlock held */
dwc2_hcd_urb_dequeue(struct dwc2_hsotg * hsotg,struct dwc2_hcd_urb * urb)1978 STATIC int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
1979 struct dwc2_hcd_urb *urb)
1980 {
1981 struct dwc2_qh *qh;
1982 struct dwc2_qtd *urb_qtd;
1983
1984 urb_qtd = urb->qtd;
1985 if (!urb_qtd) {
1986 dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n");
1987 return -EINVAL;
1988 }
1989
1990 qh = urb_qtd->qh;
1991 if (!qh) {
1992 dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n");
1993 return -EINVAL;
1994 }
1995
1996 urb->priv = NULL;
1997
1998 if (urb_qtd->in_process && qh->channel) {
1999 dwc2_dump_channel_info(hsotg, qh->channel);
2000
2001 /* The QTD is in process (it has been assigned to a channel) */
2002 if (hsotg->flags.b.port_connect_status)
2003 /*
2004 * If still connected (i.e. in host mode), halt the
2005 * channel so it can be used for other transfers. If
2006 * no longer connected, the host registers can't be
2007 * written to halt the channel since the core is in
2008 * device mode.
2009 */
2010 dwc2_hc_halt(hsotg, qh->channel,
2011 DWC2_HC_XFER_URB_DEQUEUE);
2012 }
2013
2014 /*
2015 * Free the QTD and clean up the associated QH. Leave the QH in the
2016 * schedule if it has any remaining QTDs.
2017 */
2018 if (!hsotg->params.dma_desc_enable) {
2019 u8 in_process = urb_qtd->in_process;
2020
2021 dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
2022 if (in_process) {
2023 dwc2_hcd_qh_deactivate(hsotg, qh, 0);
2024 qh->channel = NULL;
2025 } else if (list_empty(&qh->qtd_list)) {
2026 dwc2_hcd_qh_unlink(hsotg, qh);
2027 }
2028 } else {
2029 dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
2030 }
2031
2032 return 0;
2033 }
2034
#if 0
/*
 * NOTE(review): the two endpoint helpers below come from the Linux driver
 * and are compiled out in this port; they depend on struct
 * usb_host_endpoint and Linux spinlock primitives with no local equivalent.
 */
/* Must NOT be called with interrupt disabled or spinlock held */
static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg,
				     struct usb_host_endpoint *ep, int retry)
{
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&hsotg->lock, flags);

	qh = ep->hcpriv;
	if (!qh) {
		rc = -EINVAL;
		goto err;
	}

	/* Wait (up to 'retry' 20 ms intervals) for queued QTDs to drain */
	while (!list_empty(&qh->qtd_list) && retry--) {
		if (retry == 0) {
			dev_err(hsotg->dev,
				"## timeout in dwc2_hcd_endpoint_disable() ##\n");
			rc = -EBUSY;
			goto err;
		}

		spin_unlock_irqrestore(&hsotg->lock, flags);
		dwc2_msleep(20);
		spin_lock_irqsave(&hsotg->lock, flags);
		/* Re-read after dropping the lock: the QH may have vanished */
		qh = ep->hcpriv;
		if (!qh) {
			rc = -EINVAL;
			goto err;
		}
	}

	dwc2_hcd_qh_unlink(hsotg, qh);

	/* Free each QTD in the QH's QTD list */
	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry)
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

	ep->hcpriv = NULL;

	/* Detach the channel's back-pointer before the QH is freed */
	if (qh->channel && qh->channel->qh == qh)
		qh->channel->qh = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	dwc2_hcd_qh_free(hsotg, qh);

	return 0;

err:
	ep->hcpriv = NULL;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return rc;
}

/* Must be called with interrupt disabled and spinlock held */
static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg,
				   struct usb_host_endpoint *ep)
{
	struct dwc2_qh *qh = ep->hcpriv;

	if (!qh)
		return -EINVAL;

	/* An endpoint reset restarts the data toggle at DATA0 */
	qh->data_toggle = DWC2_HC_PID_DATA0;

	return 0;
}
#endif
2109
2110 /**
2111 * dwc2_core_init() - Initializes the DWC_otg controller registers and
2112 * prepares the core for device mode or host mode operation
2113 *
2114 * @hsotg: Programming view of the DWC_otg controller
2115 * @initial_setup: If true then this is the first init for this instance.
2116 */
int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
{
	u32 usbcfg, otgctl;
	int retval;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	usbcfg = dwc2_readl(hsotg, GUSBCFG);

	/* Set ULPI External VBUS bit if needed */
	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
	if (hsotg->params.phy_ulpi_ext_vbus)
		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;

	/* Set external TS Dline pulsing bit if needed */
	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
	if (hsotg->params.ts_dline)
		usbcfg |= GUSBCFG_TERMSELDLPULSE;

	dwc2_writel(hsotg, usbcfg, GUSBCFG);

	/*
	 * Reset the Controller
	 *
	 * We only need to reset the controller if this is a re-init.
	 * For the first init we know for sure that earlier code reset us (it
	 * needed to in order to properly detect various parameters).
	 */
	if (!initial_setup) {
		retval = dwc2_core_reset(hsotg, false);
		if (retval) {
			dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
				__func__);
			return retval;
		}
	}

	/*
	 * This needs to happen in FS mode before any other programming occurs
	 */
	retval = dwc2_phy_init(hsotg, initial_setup);
	if (retval)
		return retval;

	/* Program the GAHBCFG Register */
	retval = dwc2_gahbcfg_init(hsotg);
	if (retval)
		return retval;

	/* Program the GUSBCFG register */
	dwc2_gusbcfg_init(hsotg);

	/* Program the GOTGCTL register (clear the OTG version bit) */
	otgctl = dwc2_readl(hsotg, GOTGCTL);
	otgctl &= ~GOTGCTL_OTGVER;
	dwc2_writel(hsotg, otgctl, GOTGCTL);

	/* Clear the SRP success bit for FS-I2c */
	hsotg->srp_success = 0;

	/* Enable common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/*
	 * Do device or host initialization based on mode during PCD and
	 * HCD initialization
	 */
	if (dwc2_is_host_mode(hsotg)) {
		dev_dbg(hsotg->dev, "Host Mode\n");
		hsotg->op_state = OTG_STATE_A_HOST;
	} else {
		dev_dbg(hsotg->dev, "Device Mode\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
	}

	return 0;
}
2194
2195 /**
2196 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
2197 * Host mode
2198 *
2199 * @hsotg: Programming view of DWC_otg controller
2200 *
2201 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
2202 * request queues. Host channels are reset to ensure that they are ready for
2203 * performing transfers.
2204 */
static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, hfir, otgctl, usbcfg;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Set HS/FS Timeout Calibration to 7 (max available value).
	 * The number of PHY clocks that the application programs in
	 * this field is added to the high/full speed interpacket timeout
	 * duration in the core to account for any additional delays
	 * introduced by the PHY. This can be required, because the delay
	 * introduced by the PHY in generating the linestate condition
	 * can vary from one PHY to another.
	 */
	usbcfg = dwc2_readl(hsotg, GUSBCFG);
	usbcfg |= GUSBCFG_TOUTCAL(7);
	dwc2_writel(hsotg, usbcfg, GUSBCFG);

	/* Restart the Phy Clock */
	dwc2_writel(hsotg, 0, PCGCTL);

	/* Initialize Host Configuration Register */
	dwc2_init_fs_ls_pclk_sel(hsotg);
	if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
	    hsotg->params.speed == DWC2_SPEED_PARAM_LOW) {
		/* Restrict the port to FS/LS-only operation */
		hcfg = dwc2_readl(hsotg, HCFG);
		hcfg |= HCFG_FSLSSUPP;
		dwc2_writel(hsotg, hcfg, HCFG);
	}

	/*
	 * This bit allows dynamic reloading of the HFIR register during
	 * runtime. This bit needs to be programmed during initial configuration
	 * and its value must not be changed during runtime.
	 */
	if (hsotg->params.reload_ctl) {
		hfir = dwc2_readl(hsotg, HFIR);
		hfir |= HFIR_RLDCTRL;
		dwc2_writel(hsotg, hfir, HFIR);
	}

	if (hsotg->params.dma_desc_enable) {
		u32 op_mode = hsotg->hw_params.op_mode;

		/*
		 * Descriptor DMA requires core rev >= 2.90a, hardware
		 * support, and a host-capable operating mode; otherwise
		 * fall back to buffer DMA.
		 */
		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
		    !hsotg->hw_params.dma_desc_enable ||
		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
			dev_err(hsotg->dev,
				"Hardware does not support descriptor DMA mode -\n");
			dev_err(hsotg->dev,
				"falling back to buffer DMA mode.\n");
			hsotg->params.dma_desc_enable = false;
		} else {
			hcfg = dwc2_readl(hsotg, HCFG);
			hcfg |= HCFG_DESCDMA;
			dwc2_writel(hsotg, hcfg, HCFG);
		}
	}

	/* Configure data FIFO sizes */
	dwc2_config_fifos(hsotg);

	/* TODO - check this */
	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = dwc2_readl(hsotg, GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	dwc2_writel(hsotg, otgctl, GOTGCTL);

	/* Make sure the FIFOs are flushed */
	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
	dwc2_flush_rx_fifo(hsotg);

	/*
	 * Clear Host Set HNP Enable in the OTG Control Register.
	 * NOTE(review): this repeats the clear done just before the FIFO
	 * flush above; the duplication is inherited from the original
	 * driver — confirm whether both are required.
	 */
	otgctl = dwc2_readl(hsotg, GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	dwc2_writel(hsotg, otgctl, GOTGCTL);

	if (!hsotg->params.dma_desc_enable) {
		int num_channels, i;
		u32 hcchar;

		/* Flush out any leftover queued requests */
		num_channels = hsotg->params.host_channels;
		for (i = 0; i < num_channels; i++) {
			hcchar = dwc2_readl(hsotg, HCCHAR(i));
			if (hcchar & HCCHAR_CHENA) {
				/* Disable the channel without re-enabling it */
				hcchar &= ~HCCHAR_CHENA;
				hcchar |= HCCHAR_CHDIS;
				hcchar &= ~HCCHAR_EPDIR;
				dwc2_writel(hsotg, hcchar, HCCHAR(i));
			}
		}

		/* Halt all channels to put them into a known state */
		for (i = 0; i < num_channels; i++) {
			hcchar = dwc2_readl(hsotg, HCCHAR(i));
			if (hcchar & HCCHAR_CHENA) {
				/* Enable+disable together requests a halt */
				hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
				hcchar &= ~HCCHAR_EPDIR;
				dwc2_writel(hsotg, hcchar, HCCHAR(i));
				dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
					__func__, i);

				if (dwc2_hsotg_wait_bit_clear(hsotg, HCCHAR(i),
							      HCCHAR_CHENA,
							      1000)) {
					dev_warn(hsotg->dev,
						 "Unable to clear enable on channel %d\n",
						 i);
				}
			}
		}
	}

	/* Enable ACG feature in host mode, if supported */
	dwc2_enable_acg(hsotg);

	/* Turn on the vbus power */
	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
	if (hsotg->op_state == OTG_STATE_A_HOST) {
		u32 hprt0 = dwc2_read_hprt0(hsotg);

		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
			!!(hprt0 & HPRT0_PWR));
		if (!(hprt0 & HPRT0_PWR)) {
			hprt0 |= HPRT0_PWR;
			dwc2_writel(hsotg, hprt0, HPRT0);
		}
	}

	dwc2_enable_host_interrupts(hsotg);
}
2339
2340 /*
2341 * Initializes dynamic portions of the DWC_otg HCD state
2342 *
2343 * Must be called with interrupt disabled and spinlock held
2344 */
dwc2_hcd_reinit(struct dwc2_hsotg * hsotg)2345 STATIC void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
2346 {
2347 struct dwc2_host_chan *chan, *chan_tmp;
2348 int num_channels;
2349 int i;
2350
2351 hsotg->flags.d32 = 0;
2352 hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
2353
2354 if (hsotg->params.uframe_sched) {
2355 hsotg->available_host_channels =
2356 hsotg->params.host_channels;
2357 } else {
2358 hsotg->non_periodic_channels = 0;
2359 hsotg->periodic_channels = 0;
2360 }
2361
2362 /*
2363 * Put all channels in the free channel list and clean up channel
2364 * states
2365 */
2366 list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list,
2367 hc_list_entry)
2368 list_del_init(&chan->hc_list_entry);
2369
2370 num_channels = hsotg->params.host_channels;
2371 for (i = 0; i < num_channels; i++) {
2372 chan = hsotg->hc_ptr_array[i];
2373 list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
2374 dwc2_hc_cleanup(hsotg, chan);
2375 }
2376
2377 /* Initialize the DWC core for host mode operation */
2378 dwc2_core_host_init(hsotg);
2379 }
2380
dwc2_hc_init_split(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,struct dwc2_qtd * qtd,struct dwc2_hcd_urb * urb)2381 STATIC void dwc2_hc_init_split(struct dwc2_hsotg *hsotg,
2382 struct dwc2_host_chan *chan,
2383 struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
2384 {
2385 int hub_addr, hub_port;
2386
2387 chan->do_split = 1;
2388 chan->xact_pos = qtd->isoc_split_pos;
2389 chan->complete_split = qtd->complete_split;
2390 dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
2391 chan->hub_addr = (u8)hub_addr;
2392 chan->hub_port = (u8)hub_port;
2393 }
2394
/*
 * dwc2_hc_init_xfer() - Sets the channel's transfer attributes (endpoint
 * type, starting PID, buffer/DMA address and length) from the QTD,
 * according to the transfer type of the URB's pipe.
 */
STATIC void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan,
			      struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
		chan->ep_type = USB_ENDPOINT_XFER_CONTROL;

		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			/* Setup stage: always OUT, SETUP PID, 8-byte packet */
			dev_vdbg(hsotg->dev, " Control setup transaction\n");
			chan->do_ping = 0;
			chan->ep_is_in = 0;
			chan->data_pid_start = DWC2_HC_PID_SETUP;
			if (hsotg->params.host_dma)
				chan->xfer_dma = urb->setup_dma;
			else
				chan->xfer_buf = urb->setup_packet;
			chan->xfer_len = 8;
			break;

		case DWC2_CONTROL_DATA:
			/* Data stage: continue with the saved data toggle */
			dev_vdbg(hsotg->dev, " Control data transaction\n");
			chan->data_pid_start = qtd->data_toggle;
			break;

		case DWC2_CONTROL_STATUS:
			/*
			 * Direction is opposite of data direction or IN if no
			 * data
			 */
			dev_vdbg(hsotg->dev, " Control status transaction\n");
			if (urb->length == 0)
				chan->ep_is_in = 1;
			else
				chan->ep_is_in =
					dwc2_hcd_is_pipe_out(&urb->pipe_info);
			if (chan->ep_is_in)
				chan->do_ping = 0;
			/* Status stage is a zero-length DATA1 packet */
			chan->data_pid_start = DWC2_HC_PID_DATA1;
			chan->xfer_len = 0;
			/* Received status data goes into the shared scratch buffer */
			if (hsotg->params.host_dma)
				chan->xfer_dma = hsotg->status_buf_dma;
			else
				chan->xfer_buf = hsotg->status_buf;
			break;
		}
		break;

	case USB_ENDPOINT_XFER_BULK:
		chan->ep_type = USB_ENDPOINT_XFER_BULK;
		break;

	case USB_ENDPOINT_XFER_INT:
		chan->ep_type = USB_ENDPOINT_XFER_INT;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		chan->ep_type = USB_ENDPOINT_XFER_ISOC;
		/* Descriptor DMA programs isoc frames itself */
		if (hsotg->params.dma_desc_enable)
			break;

		frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
		frame_desc->status = 0;

		/* Resume inside the frame if part of it was already split */
		if (hsotg->params.host_dma) {
			chan->xfer_dma = urb->dma;
			chan->xfer_dma += frame_desc->offset +
					qtd->isoc_split_offset;
		} else {
			chan->xfer_buf = urb->buf;
			chan->xfer_buf += frame_desc->offset +
					qtd->isoc_split_offset;
		}

		chan->xfer_len = frame_desc->length - qtd->isoc_split_offset;

		/*
		 * Up to 188 bytes fit in a single split (ALL); anything
		 * larger starts a BEGIN/... split sequence.
		 */
		if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) {
			if (chan->xfer_len <= 188)
				chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL;
			else
				chan->xact_pos = DWC2_HCSPLT_XACTPOS_BEGIN;
		}
		break;
	}
}
2484
/*
 * Would allocate a DMA-aligned bounce buffer for split IN transfers with a
 * non-aligned DMA address. NOTE(review): the Linux implementation is
 * compiled out in this port, so this is currently a no-op that always
 * reports success.
 */
static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
					    struct dwc2_qh *qh,
					    struct dwc2_host_chan *chan)
{
#if 0
	if (!hsotg->unaligned_cache ||
	    chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
		return -ENOMEM;

	if (!qh->dw_align_buf) {
		qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
						    GFP_ATOMIC | GFP_DMA);
		if (!qh->dw_align_buf)
			return -ENOMEM;
	}

	qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
					      DWC2_KMEM_UNALIGNED_BUF_SIZE,
					      DMA_FROM_DEVICE);

	if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
		dev_err(hsotg->dev, "can't map align_buf\n");
		chan->align_buf = 0;
		return -EINVAL;
	}

	chan->align_buf = qh->dw_align_buf_dma;
#endif
	return 0;
}
2515
/* Required DMA alignment, in bytes, for URB transfer buffers */
#define DWC2_USB_DMA_ALIGN 4

#if 0
/*
 * NOTE(review): the helpers below are the Linux bounce-buffer path for
 * URB transfer buffers that are not DWC2_USB_DMA_ALIGN-aligned; they are
 * compiled out in this port (no struct urb / usb_hcd here).
 */
static void dwc2_free_dma_aligned_buffer(struct urb *urb)
{
	void *stored_xfer_buffer;
	size_t length;

	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
		return;

	/* Restore urb->transfer_buffer from the end of the allocated area */
	memcpy(&stored_xfer_buffer,
	       PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length,
			 dma_get_cache_alignment()),
	       sizeof(urb->transfer_buffer));

	if (usb_urb_dir_in(urb)) {
		if (usb_pipeisoc(urb->pipe))
			length = urb->transfer_buffer_length;
		else
			length = urb->actual_length;

		/* Copy received data back into the caller's buffer */
		memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
	}
	kfree(urb->transfer_buffer);
	urb->transfer_buffer = stored_xfer_buffer;

	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}

static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
{
	void *kmalloc_ptr;
	size_t kmalloc_size;

	/* Nothing to do for SG URBs, empty buffers or aligned buffers */
	if (urb->num_sgs || urb->sg ||
	    urb->transfer_buffer_length == 0 ||
	    !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
		return 0;

	/*
	 * Allocate a buffer with enough padding for original transfer_buffer
	 * pointer. This allocation is guaranteed to be aligned properly for
	 * DMA
	 */
	kmalloc_size = urb->transfer_buffer_length +
		(dma_get_cache_alignment() - 1) +
		sizeof(urb->transfer_buffer);

	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
	if (!kmalloc_ptr)
		return -ENOMEM;

	/*
	 * Position value of original urb->transfer_buffer pointer to the end
	 * of allocation for later referencing
	 */
	memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length,
			 dma_get_cache_alignment()),
	       &urb->transfer_buffer, sizeof(urb->transfer_buffer));

	if (usb_urb_dir_out(urb))
		memcpy(kmalloc_ptr, urb->transfer_buffer,
		       urb->transfer_buffer_length);
	urb->transfer_buffer = kmalloc_ptr;

	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;

	return 0;
}

static int dwc2_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	int ret;

	/* We assume setup_dma is always aligned; warn if not */
	WARN_ON_ONCE(urb->setup_dma &&
		     (urb->setup_dma & (DWC2_USB_DMA_ALIGN - 1)));

	ret = dwc2_alloc_dma_aligned_buffer(urb, mem_flags);
	if (ret)
		return ret;

	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
	if (ret)
		dwc2_free_dma_aligned_buffer(urb);

	return ret;
}

static void dwc2_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	usb_hcd_unmap_urb_for_dma(hcd, urb);
	dwc2_free_dma_aligned_buffer(urb);
}
#endif
2614
2615 /**
2616 * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host
2617 * channel and initializes the host channel to perform the transactions. The
2618 * host channel is removed from the free list.
2619 *
2620 * @hsotg: The HCD state structure
2621 * @qh: Transactions from the first QTD for this QH are selected and assigned
2622 * to a free host channel
2623 */
STATIC int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan;
	struct dwc2_hcd_urb *urb;
	struct dwc2_qtd *qtd;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh);

	if (list_empty(&qh->qtd_list)) {
		dev_dbg(hsotg->dev, "No QTDs in QH list\n");
		return -ENOMEM;
	}

	if (list_empty(&hsotg->free_hc_list)) {
		dev_dbg(hsotg->dev, "No free channel to assign\n");
		return -ENOMEM;
	}

	chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan,
				hc_list_entry);

	/* Remove host channel from free list */
	list_del_init(&chan->hc_list_entry);

	/* The first QTD of the QH drives this channel assignment */
	qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
	urb = qtd->urb;
	qh->channel = chan;
	qtd->in_process = 1;

	/*
	 * Use usb_pipedevice to determine device address. This address is
	 * 0 before the SET_ADDRESS command and the correct address afterward.
	 */
	chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
	chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
	chan->speed = qh->dev_speed;
	chan->max_packet = qh->maxp;

	/* Reset per-transfer channel state */
	chan->xfer_started = 0;
	chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
	chan->error_state = (qtd->error_count > 0);
	chan->halt_on_queue = 0;
	chan->halt_pending = 0;
	chan->requests = 0;

	/*
	 * The following values may be modified in the transfer type section
	 * below. The xfer_len value may be reduced when the transfer is
	 * started to accommodate the max widths of the XferSize and PktCnt
	 * fields in the HCTSIZn register.
	 */

	chan->ep_is_in = (dwc2_hcd_is_pipe_in(&urb->pipe_info) != 0);
	if (chan->ep_is_in)
		chan->do_ping = 0;
	else
		chan->do_ping = qh->ping_state;

	chan->data_pid_start = qh->data_toggle;
	chan->multi_count = 1;

	/* Clamp actual_length for OUT so xfer_len below cannot go negative */
	if (urb->actual_length > urb->length &&
	    !dwc2_hcd_is_pipe_in(&urb->pipe_info))
		urb->actual_length = urb->length;

	/* Start the transfer where the previous attempt left off */
	if (hsotg->params.host_dma)
		chan->xfer_dma = urb->dma + urb->actual_length;
	else
		chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;

	chan->xfer_len = urb->length - urb->actual_length;
	chan->xfer_count = 0;

	/* Set the split attributes if required */
	if (qh->do_split)
		dwc2_hc_init_split(hsotg, chan, qtd, urb);
	else
		chan->do_split = 0;

	/* Set the transfer attributes */
	dwc2_hc_init_xfer(hsotg, chan, qtd);

	/* For non-dword aligned buffers */
	if (hsotg->params.host_dma && qh->do_split &&
	    chan->ep_is_in && (chan->xfer_dma & 0x3)) {
		dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
		if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
			dev_err(hsotg->dev,
				"Failed to allocate memory to handle non-aligned buffer\n");
			/* Add channel back to free list */
			chan->align_buf = 0;
			chan->multi_count = 0;
			list_add_tail(&chan->hc_list_entry,
				      &hsotg->free_hc_list);
			qtd->in_process = 0;
			qh->channel = NULL;
			return -ENOMEM;
		}
	} else {
		/*
		 * We assume that DMA is always aligned in non-split
		 * case or split out case. Warn if not.
		 */
		WARN_ON_ONCE(hsotg->params.host_dma &&
			     (chan->xfer_dma & 0x3));
		chan->align_buf = 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
	    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		/*
		 * This value may be modified when the transfer is started
		 * to reflect the actual transfer length
		 */
		chan->multi_count = qh->maxp_mult;

	if (hsotg->params.dma_desc_enable) {
		chan->desc_list_addr = qh->desc_list_dma;
		chan->desc_list_sz = qh->desc_list_sz;
	}

	/* Program the channel registers and attach the channel to the QH */
	dwc2_hc_init(hsotg, chan);
	chan->qh = qh;

	return 0;
}
2751
2752 /**
2753 * dwc2_hcd_select_transactions() - Selects transactions from the HCD transfer
2754 * schedule and assigns them to available host channels. Called from the HCD
2755 * interrupt handler functions.
2756 *
2757 * @hsotg: The HCD state structure
2758 *
2759 * Return: The types of new transactions that were assigned to host channels
2760 */
enum dwc2_transaction_type dwc2_hcd_select_transactions(
		struct dwc2_hsotg *hsotg)
{
	enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE;
	struct list_head *qh_ptr;
	struct dwc2_qh *qh;
	int num_channels;

#ifdef DWC2_DEBUG_SOF
	dev_vdbg(hsotg->dev, " Select Transactions\n");
#endif

	/* Process entries in the periodic ready list */
	qh_ptr = hsotg->periodic_sched_ready.next;
	while (qh_ptr != &hsotg->periodic_sched_ready) {
		if (list_empty(&hsotg->free_hc_list))
			break;
		if (hsotg->params.uframe_sched) {
			/*
			 * "<= 1" here (vs "< 1" in the non-periodic loop
			 * below) leaves one channel unused by periodic
			 * transfers.
			 */
			if (hsotg->available_host_channels <= 1)
				break;
			hsotg->available_host_channels--;
		}
		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
		if (dwc2_assign_and_init_hc(hsotg, qh))
			break;

		/*
		 * Move the QH from the periodic ready schedule to the
		 * periodic assigned schedule
		 */
		qh_ptr = qh_ptr->next;
		list_move_tail(&qh->qh_list_entry,
			       &hsotg->periodic_sched_assigned);
		ret_val = DWC2_TRANSACTION_PERIODIC;
	}

	/*
	 * Process entries in the inactive portion of the non-periodic
	 * schedule. Some free host channels may not be used if they are
	 * reserved for periodic transfers.
	 */
	num_channels = hsotg->params.host_channels;
	qh_ptr = hsotg->non_periodic_sched_inactive.next;
	while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
		/* Without uframe_sched, honor the periodic reservation */
		if (!hsotg->params.uframe_sched &&
		    hsotg->non_periodic_channels >= num_channels -
		    hsotg->periodic_channels)
			break;
		if (list_empty(&hsotg->free_hc_list))
			break;
		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
		if (hsotg->params.uframe_sched) {
			if (hsotg->available_host_channels < 1)
				break;
			hsotg->available_host_channels--;
		}

		if (dwc2_assign_and_init_hc(hsotg, qh))
			break;

		/*
		 * Move the QH from the non-periodic inactive schedule to the
		 * non-periodic active schedule
		 */
		qh_ptr = qh_ptr->next;
		list_move_tail(&qh->qh_list_entry,
			       &hsotg->non_periodic_sched_active);

		if (ret_val == DWC2_TRANSACTION_NONE)
			ret_val = DWC2_TRANSACTION_NON_PERIODIC;
		else
			ret_val = DWC2_TRANSACTION_ALL;

		if (!hsotg->params.uframe_sched)
			hsotg->non_periodic_channels++;
	}

	return ret_val;
}
2840
2841 /**
2842 * dwc2_queue_transaction() - Attempts to queue a single transaction request for
2843 * a host channel associated with either a periodic or non-periodic transfer
2844 *
2845 * @hsotg: The HCD state structure
2846 * @chan: Host channel descriptor associated with either a periodic or
2847 * non-periodic transfer
2848 * @fifo_dwords_avail: Number of DWORDs available in the periodic Tx FIFO
2849 * for periodic transfers or the non-periodic Tx FIFO
2850 * for non-periodic transfers
2851 *
2852 * Return: 1 if a request is queued and more requests may be needed to
2853 * complete the transfer, 0 if no more requests are required for this
2854 * transfer, -1 if there is insufficient space in the Tx FIFO
2855 *
2856 * This function assumes that there is space available in the appropriate
2857 * request queue. For an OUT transfer or SETUP transaction in Slave mode,
2858 * it checks whether space is available in the appropriate Tx FIFO.
2859 *
2860 * Must be called with interrupt disabled and spinlock held
2861 */
dwc2_queue_transaction(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,u16 fifo_dwords_avail)2862 STATIC int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
2863 struct dwc2_host_chan *chan,
2864 u16 fifo_dwords_avail)
2865 {
2866 int retval = 0;
2867
2868 if (chan->do_split)
2869 /* Put ourselves on the list to keep order straight */
2870 list_move_tail(&chan->split_order_list_entry,
2871 &hsotg->split_order);
2872
2873 if (hsotg->params.host_dma && chan->qh) {
2874 if (hsotg->params.dma_desc_enable) {
2875 if (!chan->xfer_started ||
2876 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
2877 dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
2878 chan->qh->ping_state = 0;
2879 }
2880 } else if (!chan->xfer_started) {
2881 dwc2_hc_start_transfer(hsotg, chan);
2882 chan->qh->ping_state = 0;
2883 }
2884 } else if (chan->halt_pending) {
2885 /* Don't queue a request if the channel has been halted */
2886 } else if (chan->halt_on_queue) {
2887 dwc2_hc_halt(hsotg, chan, chan->halt_status);
2888 } else if (chan->do_ping) {
2889 if (!chan->xfer_started)
2890 dwc2_hc_start_transfer(hsotg, chan);
2891 } else if (!chan->ep_is_in ||
2892 chan->data_pid_start == DWC2_HC_PID_SETUP) {
2893 if ((fifo_dwords_avail * 4) >= chan->max_packet) {
2894 if (!chan->xfer_started) {
2895 dwc2_hc_start_transfer(hsotg, chan);
2896 retval = 1;
2897 } else {
2898 retval = dwc2_hc_continue_transfer(hsotg, chan);
2899 }
2900 } else {
2901 retval = -1;
2902 }
2903 } else {
2904 if (!chan->xfer_started) {
2905 dwc2_hc_start_transfer(hsotg, chan);
2906 retval = 1;
2907 } else {
2908 retval = dwc2_hc_continue_transfer(hsotg, chan);
2909 }
2910 }
2911
2912 return retval;
2913 }
2914
2915 /*
2916 * Processes periodic channels for the next frame and queues transactions for
2917 * these channels to the DWC_otg controller. After queueing transactions, the
2918 * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
2919 * to queue as Periodic Tx FIFO or request queue space becomes available.
2920 * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
2921 *
2922 * Must be called with interrupt disabled and spinlock held
2923 */
STATIC void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
{
	struct list_head *qh_ptr;
	struct dwc2_qh *qh;
	u32 tx_status;
	u32 fspcavail;
	u32 gintmsk;
	int status;
	bool no_queue_space = false;
	bool no_fifo_space = false;
	u32 qspcavail;

	/* If empty list then just adjust interrupt enables */
	if (list_empty(&hsotg->periodic_sched_assigned))
		goto exit;

	if (dbg_perio())
		dev_vdbg(hsotg->dev, "Queue periodic transactions\n");

	/*
	 * HPTXSTS reports free periodic request-queue entries (QSPCAVAIL)
	 * and free periodic Tx FIFO space (FSPCAVAIL, in 32-bit words).
	 */
	tx_status = dwc2_readl(hsotg, HPTXSTS);
	qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
		    TXSTS_QSPCAVAIL_SHIFT;
	fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
		    TXSTS_FSPCAVAIL_SHIFT;

	if (dbg_perio()) {
		dev_vdbg(hsotg->dev, " P Tx Req Queue Space Avail (before queue): %d\n",
			 qspcavail);
		dev_vdbg(hsotg->dev, " P Tx FIFO Space Avail (before queue): %d\n",
			 fspcavail);
	}

	/* Walk the assigned schedule, queuing a request per ready QH */
	qh_ptr = hsotg->periodic_sched_assigned.next;
	while (qh_ptr != &hsotg->periodic_sched_assigned) {
		tx_status = dwc2_readl(hsotg, HPTXSTS);
		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
			    TXSTS_QSPCAVAIL_SHIFT;
		if (qspcavail == 0) {
			/* Periodic request queue is full; retry later */
			no_queue_space = true;
			break;
		}

		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
		if (!qh->channel) {
			/* No host channel allocated yet; skip this QH */
			qh_ptr = qh_ptr->next;
			continue;
		}

		/* Make sure EP's TT buffer is clean before queueing qtds */
		if (qh->tt_buffer_dirty) {
			qh_ptr = qh_ptr->next;
			continue;
		}

		/*
		 * Set a flag if we're queuing high-bandwidth in slave mode.
		 * The flag prevents any halts to get into the request queue in
		 * the middle of multiple high-bandwidth packets getting queued.
		 */
		if (!hsotg->params.host_dma &&
		    qh->channel->multi_count > 1)
			hsotg->queuing_high_bandwidth = 1;

		fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
			    TXSTS_FSPCAVAIL_SHIFT;
		status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
		if (status < 0) {
			/* Tx FIFO can't hold a full packet; stop queuing */
			no_fifo_space = true;
			break;
		}

		/*
		 * In Slave mode, stay on the current transfer until there is
		 * nothing more to do or the high-bandwidth request count is
		 * reached. In DMA mode, only need to queue one request. The
		 * controller automatically handles multiple packets for
		 * high-bandwidth transfers.
		 */
		if (hsotg->params.host_dma || status == 0 ||
		    qh->channel->requests == qh->channel->multi_count) {
			qh_ptr = qh_ptr->next;
			/*
			 * Move the QH from the periodic assigned schedule to
			 * the periodic queued schedule
			 */
			list_move_tail(&qh->qh_list_entry,
				       &hsotg->periodic_sched_queued);

			/* done queuing high bandwidth */
			hsotg->queuing_high_bandwidth = 0;
		}
	}

exit:
	if (no_queue_space || no_fifo_space ||
	    (!hsotg->params.host_dma &&
	     !list_empty(&hsotg->periodic_sched_assigned))) {
		/*
		 * May need to queue more transactions as the request
		 * queue or Tx FIFO empties. Enable the periodic Tx
		 * FIFO empty interrupt. (Always use the half-empty
		 * level to ensure that new requests are loaded as
		 * soon as possible.)
		 */
		gintmsk = dwc2_readl(hsotg, GINTMSK);
		if (!(gintmsk & GINTSTS_PTXFEMP)) {
			gintmsk |= GINTSTS_PTXFEMP;
			dwc2_writel(hsotg, gintmsk, GINTMSK);
		}
	} else {
		/*
		 * Disable the Tx FIFO empty interrupt since there are
		 * no more transactions that need to be queued right
		 * now. This function is called from interrupt
		 * handlers to queue more transactions as transfer
		 * states change.
		 */
		gintmsk = dwc2_readl(hsotg, GINTMSK);
		if (gintmsk & GINTSTS_PTXFEMP) {
			gintmsk &= ~GINTSTS_PTXFEMP;
			dwc2_writel(hsotg, gintmsk, GINTMSK);
		}
	}
}
3048
3049 /*
3050 * Processes active non-periodic channels and queues transactions for these
3051 * channels to the DWC_otg controller. After queueing transactions, the NP Tx
3052 * FIFO Empty interrupt is enabled if there are more transactions to queue as
3053 * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
3054 * FIFO Empty interrupt is disabled.
3055 *
3056 * Must be called with interrupt disabled and spinlock held
3057 */
STATIC void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
{
	struct list_head *orig_qh_ptr;
	struct dwc2_qh *qh;
	u32 tx_status;
	u32 qspcavail;
	u32 fspcavail;
	u32 gintmsk;
	int status;
	int no_queue_space = 0;
	int no_fifo_space = 0;
	int more_to_do = 0;

	dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n");

	/*
	 * GNPTXSTS reports free non-periodic request-queue entries
	 * (QSPCAVAIL) and free non-periodic Tx FIFO space (FSPCAVAIL,
	 * in 32-bit words).
	 */
	tx_status = dwc2_readl(hsotg, GNPTXSTS);
	qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
		    TXSTS_QSPCAVAIL_SHIFT;
	fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
		    TXSTS_FSPCAVAIL_SHIFT;
	dev_vdbg(hsotg->dev, " NP Tx Req Queue Space Avail (before queue): %d\n",
		 qspcavail);
	dev_vdbg(hsotg->dev, " NP Tx FIFO Space Avail (before queue): %d\n",
		 fspcavail);

	/*
	 * Keep track of the starting point. Skip over the start-of-list
	 * entry.
	 *
	 * non_periodic_qh_ptr persists across calls, so successive
	 * invocations resume where the last one left off, giving
	 * round-robin service among the active non-periodic QHs.
	 */
	if (hsotg->non_periodic_qh_ptr == &hsotg->non_periodic_sched_active)
		hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
	orig_qh_ptr = hsotg->non_periodic_qh_ptr;

	/*
	 * Process once through the active list or until no more space is
	 * available in the request queue or the Tx FIFO
	 */
	do {
		tx_status = dwc2_readl(hsotg, GNPTXSTS);
		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
			    TXSTS_QSPCAVAIL_SHIFT;
		if (!hsotg->params.host_dma && qspcavail == 0) {
			/* Request queue full (only matters in Slave mode) */
			no_queue_space = 1;
			break;
		}

		qh = list_entry(hsotg->non_periodic_qh_ptr, struct dwc2_qh,
				qh_list_entry);
		if (!qh->channel)
			goto next;

		/* Make sure EP's TT buffer is clean before queueing qtds */
		if (qh->tt_buffer_dirty)
			goto next;

		fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
			    TXSTS_FSPCAVAIL_SHIFT;
		status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);

		if (status > 0) {
			/* Transfer started; it may need more requests later */
			more_to_do = 1;
		} else if (status < 0) {
			/* Tx FIFO can't hold a full packet; stop queuing */
			no_fifo_space = 1;
			break;
		}
next:
		/* Advance to next QH, skipping start-of-list entry */
		hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
		if (hsotg->non_periodic_qh_ptr ==
		    &hsotg->non_periodic_sched_active)
			hsotg->non_periodic_qh_ptr =
				hsotg->non_periodic_qh_ptr->next;
	} while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);

	/* NP Tx FIFO empty interrupt management is Slave-mode only */
	if (!hsotg->params.host_dma) {
		tx_status = dwc2_readl(hsotg, GNPTXSTS);
		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
			    TXSTS_QSPCAVAIL_SHIFT;
		fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
			    TXSTS_FSPCAVAIL_SHIFT;
		dev_vdbg(hsotg->dev,
			 " NP Tx Req Queue Space Avail (after queue): %d\n",
			 qspcavail);
		dev_vdbg(hsotg->dev,
			 " NP Tx FIFO Space Avail (after queue): %d\n",
			 fspcavail);

		if (more_to_do || no_queue_space || no_fifo_space) {
			/*
			 * May need to queue more transactions as the request
			 * queue or Tx FIFO empties. Enable the non-periodic
			 * Tx FIFO empty interrupt. (Always use the half-empty
			 * level to ensure that new requests are loaded as
			 * soon as possible.)
			 */
			gintmsk = dwc2_readl(hsotg, GINTMSK);
			gintmsk |= GINTSTS_NPTXFEMP;
			dwc2_writel(hsotg, gintmsk, GINTMSK);
		} else {
			/*
			 * Disable the Tx FIFO empty interrupt since there are
			 * no more transactions that need to be queued right
			 * now. This function is called from interrupt
			 * handlers to queue more transactions as transfer
			 * states change.
			 */
			gintmsk = dwc2_readl(hsotg, GINTMSK);
			gintmsk &= ~GINTSTS_NPTXFEMP;
			dwc2_writel(hsotg, gintmsk, GINTMSK);
		}
	}
}
3170
3171 /**
3172 * dwc2_hcd_queue_transactions() - Processes the currently active host channels
3173 * and queues transactions for these channels to the DWC_otg controller. Called
3174 * from the HCD interrupt handler functions.
3175 *
3176 * @hsotg: The HCD state structure
3177 * @tr_type: The type(s) of transactions to queue (non-periodic, periodic,
3178 * or both)
3179 *
3180 * Must be called with interrupt disabled and spinlock held
3181 */
dwc2_hcd_queue_transactions(struct dwc2_hsotg * hsotg,enum dwc2_transaction_type tr_type)3182 void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
3183 enum dwc2_transaction_type tr_type)
3184 {
3185 #ifdef DWC2_DEBUG_SOF
3186 dev_vdbg(hsotg->dev, "Queue Transactions\n");
3187 #endif
3188 /* Process host channels associated with periodic transfers */
3189 if (tr_type == DWC2_TRANSACTION_PERIODIC ||
3190 tr_type == DWC2_TRANSACTION_ALL)
3191 dwc2_process_periodic_channels(hsotg);
3192
3193 /* Process host channels associated with non-periodic transfers */
3194 if (tr_type == DWC2_TRANSACTION_NON_PERIODIC ||
3195 tr_type == DWC2_TRANSACTION_ALL) {
3196 if (!list_empty(&hsotg->non_periodic_sched_active)) {
3197 dwc2_process_non_periodic_channels(hsotg);
3198 } else {
3199 /*
3200 * Ensure NP Tx FIFO empty interrupt is disabled when
3201 * there are no non-periodic transfers to process
3202 */
3203 u32 gintmsk = dwc2_readl(hsotg, GINTMSK);
3204
3205 gintmsk &= ~GINTSTS_NPTXFEMP;
3206 dwc2_writel(hsotg, gintmsk, GINTMSK);
3207 }
3208 }
3209 }
3210
/*
 * Deferred callback run after a Connector ID status change.  Re-reads
 * GOTGCTL and switches the controller into device (B-connector) or host
 * (A-connector) role.  Sleeps via dwc2_msleep() while polling for the
 * hardware mode switch, so it must run in a context that may sleep.
 */
STATIC void dwc2_conn_id_status_change(void *data)
{
	struct dwc2_hsotg *hsotg = data;

	u32 count = 0;
	u32 gotgctl;
	unsigned long flags;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	gotgctl = dwc2_readl(hsotg, GOTGCTL);
	dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl);
	dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n",
		!!(gotgctl & GOTGCTL_CONID_B));

	/* B-Device connector (Device Mode) */
	if (gotgctl & GOTGCTL_CONID_B) {
		dwc2_vbus_supply_exit(hsotg);
		/* Wait for switch to device mode */
		dev_dbg(hsotg->dev, "connId B\n");
		if (hsotg->bus_suspended) {
			dev_info(hsotg->dev,
				 "Do port resume before switching to device mode\n");
			dwc2_port_resume(hsotg);
		}
		/* Poll up to ~5 s (250 * 20 ms) for the core to switch */
		while (!dwc2_is_device_mode(hsotg)) {
			dev_info(hsotg->dev,
				 "Waiting for Peripheral Mode, Mode=%s\n",
				 dwc2_is_host_mode(hsotg) ? "Host" :
				 "Peripheral");
			dwc2_msleep(20);
			/*
			 * Sometimes the initial GOTGCTRL read is wrong, so
			 * check it again and jump to host mode if that was
			 * the case.
			 */
			gotgctl = dwc2_readl(hsotg, GOTGCTL);
			if (!(gotgctl & GOTGCTL_CONID_B))
				goto host;
			if (++count > 250)
				break;
		}
		if (count > 250)
			dev_err(hsotg->dev,
				"Connection id status change timed out\n");

		/*
		 * Exit Partial Power Down without restoring registers.
		 * No need to check the return value as registers
		 * are not being restored.
		 */
		if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
			dwc2_exit_partial_power_down(hsotg, 0, false);

		/* Reinitialize the core for peripheral operation */
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
		dwc2_core_init(hsotg, false);
		dwc2_enable_global_interrupts(hsotg);
		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_hsotg_core_init_disconnected(hsotg, false);
		spin_unlock_irqrestore(&hsotg->lock, flags);
		/* Enable ACG feature in device mode,if supported */
		dwc2_enable_acg(hsotg);
		dwc2_hsotg_core_connect(hsotg);
	} else {
host:
		/* A-Device connector (Host Mode) */
		dev_dbg(hsotg->dev, "connId A\n");
		/* Poll up to ~5 s (250 * 20 ms) for the core to switch */
		while (!dwc2_is_host_mode(hsotg)) {
			dev_info(hsotg->dev, "Waiting for Host Mode, Mode=%s\n",
				 dwc2_is_host_mode(hsotg) ?
				 "Host" : "Peripheral");
			dwc2_msleep(20);
			if (++count > 250)
				break;
		}
		if (count > 250)
			dev_err(hsotg->dev,
				"Connection id status change timed out\n");

		/* Drop any device-mode state before becoming host */
		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_hsotg_disconnect(hsotg);
		spin_unlock_irqrestore(&hsotg->lock, flags);

		hsotg->op_state = OTG_STATE_A_HOST;
		/* Initialize the Core for Host mode */
		dwc2_core_init(hsotg, false);
		dwc2_enable_global_interrupts(hsotg);
		dwc2_hcd_start(hsotg);
	}
}
3301
dwc2_wakeup_detected(void * data)3302 void dwc2_wakeup_detected(void *data)
3303 {
3304 struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
3305 u32 hprt0;
3306
3307 dev_dbg(hsotg->dev, "%s()\n", __func__);
3308
3309 /*
3310 * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
3311 * so that OPT tests pass with all PHYs.)
3312 */
3313 hprt0 = dwc2_read_hprt0(hsotg);
3314 dev_dbg(hsotg->dev, "Resume: HPRT0=%0x\n", hprt0);
3315 hprt0 &= ~HPRT0_RES;
3316 dwc2_writel(hsotg, hprt0, HPRT0);
3317 dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
3318 dwc2_readl(hsotg, HPRT0));
3319
3320 dwc2_hcd_rem_wakeup(hsotg);
3321 hsotg->bus_suspended = false;
3322
3323 /* Change to L0 state */
3324 hsotg->lx_state = DWC2_L0;
3325 }
3326
/*
 * Reports whether HNP has been enabled on the B-device.  The real query
 * (via the Linux-style usb_hcd) is compiled out in the OpenBSD port, so
 * this stub always reports "not enabled".
 */
static int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
{
#if 0
	struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);

	return hcd->self.b_hnp_enable;
#endif
	return 0;
}
3336
3337 /**
3338 * dwc2_port_suspend() - Put controller in suspend mode for host.
3339 *
3340 * @hsotg: Programming view of the DWC_otg controller
3341 * @windex: The control request wIndex field
3342 *
3343 * Return: non-zero if failed to enter suspend mode for host.
3344 *
3345 * This function is for entering Host mode suspend.
3346 * Must NOT be called with interrupt disabled or spinlock held.
3347 */
int dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
{
	unsigned long flags;
	u32 pcgctl;
	u32 gotgctl;
	int ret = 0;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	spin_lock_irqsave(&hsotg->lock, flags);

	/* If suspending the OTG port with HNP enabled, arm host HNP */
	if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) {
		gotgctl = dwc2_readl(hsotg, GOTGCTL);
		gotgctl |= GOTGCTL_HSTSETHNPEN;
		dwc2_writel(hsotg, gotgctl, GOTGCTL);
		hsotg->op_state = OTG_STATE_A_SUSPEND;
	}

	/* Pick the power-saving mechanism configured for this controller */
	switch (hsotg->params.power_down) {
	case DWC2_POWER_DOWN_PARAM_PARTIAL:
		ret = dwc2_enter_partial_power_down(hsotg);
		if (ret)
			dev_err(hsotg->dev,
				"enter partial_power_down failed.\n");
		break;
	case DWC2_POWER_DOWN_PARAM_HIBERNATION:
		/*
		 * Perform spin unlock and lock because in
		 * "dwc2_host_enter_hibernation()" function there is a spinlock
		 * logic which prevents servicing of any IRQ during entering
		 * hibernation.
		 */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		ret = dwc2_enter_hibernation(hsotg, 1);
		if (ret)
			dev_err(hsotg->dev, "enter hibernation failed.\n");
		spin_lock_irqsave(&hsotg->lock, flags);
		break;
	case DWC2_POWER_DOWN_PARAM_NONE:
		/*
		 * If not hibernation nor partial power down are supported,
		 * clock gating is used to save power.
		 */
		if (!hsotg->params.no_clock_gating)
			dwc2_host_enter_clock_gating(hsotg);
		break;
	}

	/* For HNP the bus must be suspended for at least 200ms */
	if (dwc2_host_is_b_hnp_enabled(hsotg)) {
		/* Keep the PHY clock running while the bus idles */
		pcgctl = dwc2_readl(hsotg, PCGCTL);
		pcgctl &= ~PCGCTL_STOPPCLK;
		dwc2_writel(hsotg, pcgctl, PCGCTL);

		spin_unlock_irqrestore(&hsotg->lock, flags);

		dwc2_msleep(200);
	} else {
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	return ret;
}
3411
3412 /**
3413 * dwc2_port_resume() - Exit controller from suspend mode for host.
3414 *
3415 * @hsotg: Programming view of the DWC_otg controller
3416 *
3417 * Return: non-zero if failed to exit suspend mode for host.
3418 *
3419 * This function is for exiting Host mode suspend.
3420 * Must NOT be called with interrupt disabled or spinlock held.
3421 */
int dwc2_port_resume(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsotg->lock, flags);

	/* Undo whichever power-saving mode dwc2_port_suspend() entered */
	switch (hsotg->params.power_down) {
	case DWC2_POWER_DOWN_PARAM_PARTIAL:
		ret = dwc2_exit_partial_power_down(hsotg, 0, true);
		if (ret)
			dev_err(hsotg->dev,
				"exit partial_power_down failed.\n");
		break;
	case DWC2_POWER_DOWN_PARAM_HIBERNATION:
		/* Exit host hibernation. */
		ret = dwc2_exit_hibernation(hsotg, 0, 0, 1);
		if (ret)
			dev_err(hsotg->dev, "exit hibernation failed.\n");
		break;
	case DWC2_POWER_DOWN_PARAM_NONE:
		/*
		 * If not hibernation nor partial power down are supported,
		 * port resume is done using the clock gating programming flow.
		 */
		/* Drop the lock: the clock-gating exit path may not hold it */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		dwc2_host_exit_clock_gating(hsotg, 0);
		spin_lock_irqsave(&hsotg->lock, flags);
		break;
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);

	return ret;
}
3457
3458 /* Handles hub class-specific requests */
/*
 * Root-hub emulation: services hub class-specific control requests for
 * the controller's single root port.  'buf' receives descriptor/status
 * data for the Get* requests.  Returns 0 on success or -EINVAL for an
 * unknown/invalid request.
 */
int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
			 u16 wvalue, u16 windex, char *buf, u16 wlength)
{
	usb_hub_descriptor_t *hub_desc;
	usb_port_status_t ps;
	int retval = 0;
	u32 hprt0;
	u32 port_status;
	u32 speed;
	u32 pcgctl;
	u32 pwr;

	switch (typereq) {
	case ClearHubFeature:
		dev_dbg(hsotg->dev, "ClearHubFeature %1xh\n", wvalue);

		switch (wvalue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* Nothing required here */
			break;

		default:
			retval = -EINVAL;
			dev_err(hsotg->dev,
				"ClearHubFeature request %1xh unknown\n",
				wvalue);
		}
		break;

	case ClearPortFeature:
//		if (wvalue != USB_PORT_FEAT_L1)
		/* Only port 1 exists on this root hub */
		if (!windex || windex > 1)
			goto error;
		switch (wvalue) {
		case USB_PORT_FEAT_ENABLE:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_ENABLE\n");
			/*
			 * NOTE(review): setting HPRT0_ENA here relies on the
			 * port-enable bit being write-1-to-clear in this
			 * core, so this write disables the port.
			 */
			hprt0 = dwc2_read_hprt0(hsotg);
			hprt0 |= HPRT0_ENA;
			dwc2_writel(hsotg, hprt0, HPRT0);
			break;

		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_SUSPEND\n");

			if (hsotg->bus_suspended)
				retval = dwc2_port_resume(hsotg);
			break;

		case USB_PORT_FEAT_POWER:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_POWER\n");
			hprt0 = dwc2_read_hprt0(hsotg);
			/* Only tear down the external supply if it was on */
			pwr = hprt0 & HPRT0_PWR;
			hprt0 &= ~HPRT0_PWR;
			dwc2_writel(hsotg, hprt0, HPRT0);
			if (pwr)
				dwc2_vbus_supply_exit(hsotg);
			break;

		case USB_PORT_FEAT_INDICATOR:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
			/* Port indicator not supported */
			break;

		case USB_PORT_FEAT_C_CONNECTION:
			/*
			 * Clears driver's internal Connect Status Change flag
			 */
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
			hsotg->flags.b.port_connect_status_change = 0;
			break;

		case USB_PORT_FEAT_C_RESET:
			/* Clears driver's internal Port Reset Change flag */
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_RESET\n");
			hsotg->flags.b.port_reset_change = 0;
			break;

		case USB_PORT_FEAT_C_ENABLE:
			/*
			 * Clears the driver's internal Port Enable/Disable
			 * Change flag
			 */
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
			hsotg->flags.b.port_enable_change = 0;
			break;

		case USB_PORT_FEAT_C_SUSPEND:
			/*
			 * Clears the driver's internal Port Suspend Change
			 * flag, which is set when resume signaling on the host
			 * port is complete
			 */
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
			hsotg->flags.b.port_suspend_change = 0;
			break;

		case USB_PORT_FEAT_C_PORT_L1:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_PORT_L1\n");
			hsotg->flags.b.port_l1_change = 0;
			break;

		case USB_PORT_FEAT_C_OVER_CURRENT:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
			hsotg->flags.b.port_over_current_change = 0;
			break;

		default:
			retval = -EINVAL;
			dev_err(hsotg->dev,
				"ClearPortFeature request %1xh unknown or unsupported\n",
				wvalue);
		}
		break;

	case GetHubDescriptor:
		/* Describe a single-port hub with per-port overcurrent */
		dev_dbg(hsotg->dev, "GetHubDescriptor\n");
		hub_desc = (usb_hub_descriptor_t *)buf;
		hub_desc->bDescLength = 9;
		hub_desc->bDescriptorType = USB_DT_HUB;
		hub_desc->bNbrPorts = 1;
		USETW(hub_desc->wHubCharacteristics, HUB_CHAR_COMMON_LPSM |
		      HUB_CHAR_INDV_PORT_OCPM);
		hub_desc->bPwrOn2PwrGood = 1;
		hub_desc->bHubContrCurrent = 0;
		/*
		 * Byte 0: removable-device bitmap (port removable);
		 * byte 1: presumably the legacy PortPwrCtrlMask (all
		 * ones per USB 2.0) -- TODO confirm field layout.
		 */
		hub_desc->DeviceRemovable[0] = 0;
		hub_desc->DeviceRemovable[1] = 0xff;
		break;

	case GetHubStatus:
		/* No hub-level status conditions to report */
		dev_dbg(hsotg->dev, "GetHubStatus\n");
		memset(buf, 0, 4);
		break;

	case GetPortStatus:
		dev_vdbg(hsotg->dev,
			 "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex,
			 hsotg->flags.d32);
		if (!windex || windex > 1)
			goto error;

		/* First build wPortChange from the driver's change flags */
		port_status = 0;
		if (hsotg->flags.b.port_connect_status_change)
			port_status |= USB_PORT_STAT_C_CONNECTION;
		if (hsotg->flags.b.port_enable_change)
			port_status |= USB_PORT_STAT_C_ENABLE;
		if (hsotg->flags.b.port_suspend_change)
			port_status |= USB_PORT_STAT_C_SUSPEND;
		if (hsotg->flags.b.port_l1_change)
			port_status |= USB_PORT_STAT_C_L1;
		if (hsotg->flags.b.port_reset_change)
			port_status |= USB_PORT_STAT_C_RESET;
		if (hsotg->flags.b.port_over_current_change) {
			dev_warn(hsotg->dev, "Overcurrent change detected\n");
			port_status |= USB_PORT_STAT_C_OVERCURRENT;
		}
		USETW(ps.wPortChange, port_status);
		dev_vdbg(hsotg->dev, "wPortChange=%04x\n", port_status);

		if (!hsotg->flags.b.port_connect_status) {
			/*
			 * The port is disconnected, which means the core is
			 * either in device mode or it soon will be. Just
			 * return 0's for the remainder of the port status
			 * since the port register can't be read if the core
			 * is in device mode.
			 */
			USETW(ps.wPortStatus, 0);
			memcpy(buf, &ps, sizeof(ps));
			break;
		}

		/* Then build wPortStatus from the live HPRT0 register */
		port_status = 0;
		hprt0 = dwc2_readl(hsotg, HPRT0);
		dev_vdbg(hsotg->dev, " HPRT0: 0x%08x\n", hprt0);

		if (hprt0 & HPRT0_CONNSTS)
			port_status |= USB_PORT_STAT_CONNECTION;
		if (hprt0 & HPRT0_ENA)
			port_status |= USB_PORT_STAT_ENABLE;
		if (hprt0 & HPRT0_SUSP)
			port_status |= USB_PORT_STAT_SUSPEND;
		if (hprt0 & HPRT0_OVRCURRACT)
			port_status |= USB_PORT_STAT_OVERCURRENT;
		if (hprt0 & HPRT0_RST)
			port_status |= USB_PORT_STAT_RESET;
		if (hprt0 & HPRT0_PWR)
			port_status |= USB_PORT_STAT_POWER;

		speed = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		if (speed == HPRT0_SPD_HIGH_SPEED)
			port_status |= USB_PORT_STAT_HIGH_SPEED;
		else if (speed == HPRT0_SPD_LOW_SPEED)
			port_status |= USB_PORT_STAT_LOW_SPEED;

		if (hprt0 & HPRT0_TSTCTL_MASK)
			port_status |= USB_PORT_STAT_TEST;
		/* USB_PORT_FEAT_INDICATOR unsupported always 0 */
		USETW(ps.wPortStatus, port_status);

		if (hsotg->params.dma_desc_fs_enable) {
			/*
			 * Enable descriptor DMA only if a full speed
			 * device is connected.
			 */
			if (hsotg->new_connection &&
			    ((port_status &
			      (USB_PORT_STAT_CONNECTION |
			       USB_PORT_STAT_HIGH_SPEED |
			       USB_PORT_STAT_LOW_SPEED)) ==
			       USB_PORT_STAT_CONNECTION)) {
				u32 hcfg;

				dev_info(hsotg->dev, "Enabling descriptor DMA mode\n");
				hsotg->params.dma_desc_enable = true;
				hcfg = dwc2_readl(hsotg, HCFG);
				hcfg |= HCFG_DESCDMA;
				dwc2_writel(hsotg, hcfg, HCFG);
				hsotg->new_connection = false;
			}
		}

		dev_vdbg(hsotg->dev, "port_status=%08x\n", port_status);
		memcpy(buf, &ps, sizeof(ps));
		break;

	case SetHubFeature:
		dev_dbg(hsotg->dev, "SetHubFeature\n");
		/* No HUB features supported */
		break;

	case SetPortFeature:
		dev_dbg(hsotg->dev, "SetPortFeature\n");
		/* Port test mode carries its selector in wIndex's high byte */
		if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
			goto error;

		if (!hsotg->flags.b.port_connect_status) {
			/*
			 * The port is disconnected, which means the core is
			 * either in device mode or it soon will be. Just
			 * return without doing anything since the port
			 * register can't be written if the core is in device
			 * mode.
			 */
			break;
		}

		switch (wvalue) {
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
			if (windex != hsotg->otg_port)
				goto error;
			if (!hsotg->bus_suspended)
				retval = dwc2_port_suspend(hsotg, windex);
			break;

		case USB_PORT_FEAT_POWER:
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_POWER\n");
			hprt0 = dwc2_read_hprt0(hsotg);
			/* Only bring up the external supply if it was off */
			pwr = hprt0 & HPRT0_PWR;
			hprt0 |= HPRT0_PWR;
			dwc2_writel(hsotg, hprt0, HPRT0);
			if (!pwr)
				dwc2_vbus_supply_init(hsotg);
			break;

		case USB_PORT_FEAT_RESET:
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_RESET\n");

			hprt0 = dwc2_read_hprt0(hsotg);

			/* Wake the core fully before touching the port */
			if (hsotg->hibernated) {
				retval = dwc2_exit_hibernation(hsotg, 0, 1, 1);
				if (retval)
					dev_err(hsotg->dev,
						"exit hibernation failed\n");
			}

			if (hsotg->in_ppd) {
				retval = dwc2_exit_partial_power_down(hsotg, 1,
								      true);
				if (retval)
					dev_err(hsotg->dev,
						"exit partial_power_down failed\n");
			}

			if (hsotg->params.power_down ==
			    DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
				dwc2_host_exit_clock_gating(hsotg, 0);

			/* Ensure PHY clocks are running during the reset */
			pcgctl = dwc2_readl(hsotg, PCGCTL);
			pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK);
			dwc2_writel(hsotg, pcgctl, PCGCTL);
			/* ??? Original driver does this */
			dwc2_writel(hsotg, 0, PCGCTL);

			hprt0 = dwc2_read_hprt0(hsotg);
			pwr = hprt0 & HPRT0_PWR;
			/* Clear suspend bit if resetting from suspend state */
			hprt0 &= ~HPRT0_SUSP;

			/*
			 * When B-Host the Port reset bit is set in the Start
			 * HCD Callback function, so that the reset is started
			 * within 1ms of the HNP success interrupt
			 */
			if (!dwc2_hcd_is_b_host(hsotg)) {
				hprt0 |= HPRT0_PWR | HPRT0_RST;
				dev_dbg(hsotg->dev,
					"In host mode, hprt0=%08x\n", hprt0);
				dwc2_writel(hsotg, hprt0, HPRT0);
				if (!pwr)
					dwc2_vbus_supply_init(hsotg);
			}

			/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
			dwc2_msleep(50);
			hprt0 &= ~HPRT0_RST;
			dwc2_writel(hsotg, hprt0, HPRT0);
			hsotg->lx_state = DWC2_L0; /* Now back to On state */
			break;

		case USB_PORT_FEAT_INDICATOR:
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
			/* Not supported */
			break;

		case USB_PORT_FEAT_TEST:
			/* Test selector is the high byte of wIndex */
			hprt0 = dwc2_read_hprt0(hsotg);
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_TEST\n");
			hprt0 &= ~HPRT0_TSTCTL_MASK;
			hprt0 |= (windex >> 8) << HPRT0_TSTCTL_SHIFT;
			dwc2_writel(hsotg, hprt0, HPRT0);
			break;

		default:
			retval = -EINVAL;
			dev_err(hsotg->dev,
				"SetPortFeature %1xh unknown or unsupported\n",
				wvalue);
			break;
		}
		break;

	default:
error:
		retval = -EINVAL;
		dev_dbg(hsotg->dev,
			"Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n",
			typereq, windex, wvalue);
		break;
	}

	return retval;
}
3829
#if 0
/*
 * Returns nonzero if any of the driver's root-port change flags are set
 * for the (single) root port.  Currently compiled out: nothing in the
 * OpenBSD port calls it.
 */
static int dwc2_hcd_is_status_changed(struct dwc2_hsotg *hsotg, int port)
{
	int retval;

	/* This controller exposes exactly one root port */
	if (port != 1)
		return -EINVAL;

	retval = (hsotg->flags.b.port_connect_status_change ||
		  hsotg->flags.b.port_reset_change ||
		  hsotg->flags.b.port_enable_change ||
		  hsotg->flags.b.port_suspend_change ||
		  hsotg->flags.b.port_over_current_change);

	if (retval) {
		dev_dbg(hsotg->dev,
			"DWC OTG HCD HUB STATUS DATA: Root port status changed\n");
		dev_dbg(hsotg->dev, " port_connect_status_change: %d\n",
			hsotg->flags.b.port_connect_status_change);
		dev_dbg(hsotg->dev, " port_reset_change: %d\n",
			hsotg->flags.b.port_reset_change);
		dev_dbg(hsotg->dev, " port_enable_change: %d\n",
			hsotg->flags.b.port_enable_change);
		dev_dbg(hsotg->dev, " port_suspend_change: %d\n",
			hsotg->flags.b.port_suspend_change);
		dev_dbg(hsotg->dev, " port_over_current_change: %d\n",
			hsotg->flags.b.port_over_current_change);
	}

	return retval;
}
#endif
3862
dwc2_hcd_get_frame_number(struct dwc2_hsotg * hsotg)3863 int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
3864 {
3865 u32 hfnum = dwc2_readl(hsotg, HFNUM);
3866
3867 #ifdef DWC2_DEBUG_SOF
3868 dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
3869 (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT);
3870 #endif
3871 return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
3872 }
3873
/*
 * Predicts which (micro)frame number the controller will be in 'us'
 * microseconds from now, based on the current frame number, the frame
 * interval (HFIR, in PHY clocks), and the time remaining in the current
 * frame (HFNUM.FRREM, also in PHY clocks).
 */
int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us)
{
	u32 hprt = dwc2_readl(hsotg, HPRT0);
	u32 hfir = dwc2_readl(hsotg, HFIR);
	u32 hfnum = dwc2_readl(hsotg, HFNUM);
	unsigned int us_per_frame;
	unsigned int frame_number;
	unsigned int remaining;
	unsigned int interval;
	unsigned int phy_clks;

	/* High speed has 125 us per (micro) frame; others are 1 ms per */
	us_per_frame = (hprt & HPRT0_SPD_MASK) ? 1000 : 125;

	/* Extract fields */
	frame_number = (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
	remaining = (hfnum & HFNUM_FRREM_MASK) >> HFNUM_FRREM_SHIFT;
	interval = (hfir & HFIR_FRINT_MASK) >> HFIR_FRINT_SHIFT;

	/*
	 * Number of phy clocks since the last tick of the frame number after
	 * "us" has passed.
	 */
	phy_clks = (interval - remaining) +
		   DIV_ROUND_UP(interval * us, us_per_frame);

	/* Convert elapsed PHY clocks to whole frames past 'frame_number' */
	return dwc2_frame_num_inc(frame_number, phy_clks / interval);
}
3902
dwc2_hcd_is_b_host(struct dwc2_hsotg * hsotg)3903 int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
3904 {
3905 return hsotg->op_state == OTG_STATE_B_HOST;
3906 }
3907
dwc2_hcd_urb_alloc(struct dwc2_hsotg * hsotg,int iso_desc_count,gfp_t mem_flags)3908 STATIC struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
3909 int iso_desc_count,
3910 gfp_t mem_flags)
3911 {
3912 struct dwc2_hcd_urb *urb;
3913
3914 u32 size = sizeof(*urb) + iso_desc_count *
3915 sizeof(struct dwc2_hcd_iso_packet_desc);
3916 urb = malloc(size, M_USBHC, M_ZERO | mem_flags);
3917 if (urb)
3918 urb->packet_count = iso_desc_count;
3919 return urb;
3920 }
3921
3922 /* Required for OpenBSD */
dwc2_hcd_urb_free(struct dwc2_hsotg * hsotg,struct dwc2_hcd_urb * urb,int iso_desc_count)3923 void dwc2_hcd_urb_free(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb,
3924 int iso_desc_count)
3925 {
3926 u32 size = sizeof(*urb) + iso_desc_count *
3927 sizeof(struct dwc2_hcd_iso_packet_desc);
3928
3929 free(urb, M_USBHC, size);
3930 }
3931
/*
 * Record the endpoint/pipe parameters for an urb: device address,
 * endpoint number, transfer type, direction and max packet size.
 */
void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
			       struct dwc2_hcd_urb *urb, u8 dev_addr,
			       u8 ep_num, u8 ep_type, u8 ep_dir,
			       u16 maxp, u16 maxp_mult)
{
	/*
	 * Bulk/control traffic is always traced; periodic traffic only
	 * when periodic debugging is enabled.
	 */
	if (ep_type == USB_ENDPOINT_XFER_BULK ||
	    ep_type == USB_ENDPOINT_XFER_CONTROL ||
	    dbg_perio())
		dev_vdbg(hsotg->dev,
			 "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n",
			 dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult);

	urb->pipe_info.dev_addr = dev_addr;
	urb->pipe_info.ep_num = ep_num;
	urb->pipe_info.pipe_type = ep_type;
	urb->pipe_info.pipe_dir = ep_dir;
	urb->pipe_info.maxp = maxp;
	urb->pipe_info.maxp_mult = maxp_mult;
}
3950
3951 /*
3952 * NOTE: This function will be removed once the peripheral controller code
3953 * is integrated and the driver is stable
3954 */
dwc2_hcd_dump_state(struct dwc2_hsotg * hsotg)3955 void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
3956 {
3957 #ifdef DWC2_DEBUG
3958 struct dwc2_host_chan *chan;
3959 struct dwc2_hcd_urb *urb;
3960 struct dwc2_qtd *qtd;
3961 int num_channels;
3962 u32 np_tx_status;
3963 u32 p_tx_status;
3964 int i;
3965
3966 num_channels = hsotg->params.host_channels;
3967 dev_dbg(hsotg->dev, "\n");
3968 dev_dbg(hsotg->dev,
3969 "************************************************************\n");
3970 dev_dbg(hsotg->dev, "HCD State:\n");
3971 dev_dbg(hsotg->dev, " Num channels: %d\n", num_channels);
3972
3973 for (i = 0; i < num_channels; i++) {
3974 chan = hsotg->hc_ptr_array[i];
3975 dev_dbg(hsotg->dev, " Channel %d:\n", i);
3976 dev_dbg(hsotg->dev,
3977 " dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
3978 chan->dev_addr, chan->ep_num, chan->ep_is_in);
3979 dev_dbg(hsotg->dev, " speed: %d\n", chan->speed);
3980 dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
3981 dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
3982 dev_dbg(hsotg->dev, " data_pid_start: %d\n",
3983 chan->data_pid_start);
3984 dev_dbg(hsotg->dev, " multi_count: %d\n", chan->multi_count);
3985 dev_dbg(hsotg->dev, " xfer_started: %d\n",
3986 chan->xfer_started);
3987 dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
3988 dev_dbg(hsotg->dev, " xfer_dma: %08lx\n",
3989 (unsigned long)chan->xfer_dma);
3990 dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
3991 dev_dbg(hsotg->dev, " xfer_count: %d\n", chan->xfer_count);
3992 dev_dbg(hsotg->dev, " halt_on_queue: %d\n",
3993 chan->halt_on_queue);
3994 dev_dbg(hsotg->dev, " halt_pending: %d\n",
3995 chan->halt_pending);
3996 dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
3997 dev_dbg(hsotg->dev, " do_split: %d\n", chan->do_split);
3998 dev_dbg(hsotg->dev, " complete_split: %d\n",
3999 chan->complete_split);
4000 dev_dbg(hsotg->dev, " hub_addr: %d\n", chan->hub_addr);
4001 dev_dbg(hsotg->dev, " hub_port: %d\n", chan->hub_port);
4002 dev_dbg(hsotg->dev, " xact_pos: %d\n", chan->xact_pos);
4003 dev_dbg(hsotg->dev, " requests: %d\n", chan->requests);
4004 dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
4005
4006 if (chan->xfer_started) {
4007 u32 hfnum, hcchar, hctsiz, hcint, hcintmsk;
4008
4009 hfnum = dwc2_readl(hsotg, HFNUM);
4010 hcchar = dwc2_readl(hsotg, HCCHAR(i));
4011 hctsiz = dwc2_readl(hsotg, HCTSIZ(i));
4012 hcint = dwc2_readl(hsotg, HCINT(i));
4013 hcintmsk = dwc2_readl(hsotg, HCINTMSK(i));
4014 dev_dbg(hsotg->dev, " hfnum: 0x%08x\n", hfnum);
4015 dev_dbg(hsotg->dev, " hcchar: 0x%08x\n", hcchar);
4016 dev_dbg(hsotg->dev, " hctsiz: 0x%08x\n", hctsiz);
4017 dev_dbg(hsotg->dev, " hcint: 0x%08x\n", hcint);
4018 dev_dbg(hsotg->dev, " hcintmsk: 0x%08x\n", hcintmsk);
4019 }
4020
4021 if (!(chan->xfer_started && chan->qh))
4022 continue;
4023
4024 list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) {
4025 if (!qtd->in_process)
4026 break;
4027 urb = qtd->urb;
4028 dev_dbg(hsotg->dev, " URB Info:\n");
4029 dev_dbg(hsotg->dev, " qtd: %p, urb: %p\n",
4030 qtd, urb);
4031 if (urb) {
4032 dev_dbg(hsotg->dev,
4033 " Dev: %d, EP: %d %s\n",
4034 dwc2_hcd_get_dev_addr(&urb->pipe_info),
4035 dwc2_hcd_get_ep_num(&urb->pipe_info),
4036 dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
4037 "IN" : "OUT");
4038 dev_dbg(hsotg->dev,
4039 " Max packet size: %d (%d mult)\n",
4040 dwc2_hcd_get_maxp(&urb->pipe_info),
4041 dwc2_hcd_get_maxp_mult(&urb->pipe_info));
4042 dev_dbg(hsotg->dev,
4043 " transfer_buffer: %p\n",
4044 urb->buf);
4045 dev_dbg(hsotg->dev,
4046 " transfer_dma: %08lx\n",
4047 (unsigned long)urb->dma);
4048 dev_dbg(hsotg->dev,
4049 " transfer_buffer_length: %d\n",
4050 urb->length);
4051 dev_dbg(hsotg->dev, " actual_length: %d\n",
4052 urb->actual_length);
4053 }
4054 }
4055 }
4056
4057 dev_dbg(hsotg->dev, " non_periodic_channels: %d\n",
4058 hsotg->non_periodic_channels);
4059 dev_dbg(hsotg->dev, " periodic_channels: %d\n",
4060 hsotg->periodic_channels);
4061 dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs);
4062 np_tx_status = dwc2_readl(hsotg, GNPTXSTS);
4063 dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n",
4064 (np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
4065 dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n",
4066 (np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
4067 p_tx_status = dwc2_readl(hsotg, HPTXSTS);
4068 dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n",
4069 (p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
4070 dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n",
4071 (p_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
4072 dwc2_dump_global_registers(hsotg);
4073 dwc2_dump_host_registers(hsotg);
4074 dev_dbg(hsotg->dev,
4075 "************************************************************\n");
4076 dev_dbg(hsotg->dev, "\n");
4077 #endif
4078 }
4079
/*
 * Per-HCD private data as used by the Linux usb_hcd wrapper; on OpenBSD
 * it is only referenced from disabled (#if 0) Linux-compat code.
 */
struct wrapper_priv_data {
	struct dwc2_hsotg *hsotg;
};
4083
#if 0
/* Gets the dwc2_hsotg from a usb_hcd */
/* Disabled: usb_hcd is a Linux construct not present in this port. */
static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd)
{
	struct wrapper_priv_data *p;

	p = (struct wrapper_priv_data *)&hcd->hcd_priv;
	return p->hsotg;
}
#endif
4094
4095 /**
4096 * dwc2_host_get_tt_info() - Get the dwc2_tt associated with context
4097 *
4098 * This will get the dwc2_tt structure (and ttport) associated with the given
4099 * context (which is really just a struct urb pointer).
4100 *
4101 * The first time this is called for a given TT we allocate memory for our
4102 * structure. When everyone is done and has called dwc2_host_put_tt_info()
4103 * then the refcount for the structure will go to 0 and we'll free it.
4104 *
4105 * @hsotg: The HCD state structure for the DWC OTG controller.
4106 * @context: The priv pointer from a struct dwc2_hcd_urb.
4107 * @mem_flags: Flags for allocating memory.
4108 * @ttport: We'll return this device's port number here. That's used to
4109 * reference into the bitmap if we're on a multi_tt hub.
4110 *
4111 * Return: a pointer to a struct dwc2_tt. Don't forget to call
4112 * dwc2_host_put_tt_info()! Returns NULL upon memory alloc failure.
4113 */
4114
dwc2_host_get_tt_info(struct dwc2_hsotg * hsotg,void * context,gfp_t mem_flags,int * ttport)4115 struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context,
4116 gfp_t mem_flags, int *ttport)
4117 {
4118 struct usbd_xfer *xfer = context;
4119 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
4120 struct usbd_device *dev = dpipe->pipe.device;
4121 struct dwc2_tt *dwc_tt = NULL;
4122
4123 if (dev->myhsport->tt) {
4124 *ttport = dev->myhsport->portno;
4125
4126 dwc_tt = dev->myhsport->tt->hcpriv;
4127 if (!dwc_tt) {
4128 size_t bitmap_size;
4129
4130 /*
4131 * For single_tt we need one schedule. For multi_tt
4132 * we need one per port.
4133 */
4134 bitmap_size = DWC2_ELEMENTS_PER_LS_BITMAP *
4135 sizeof(dwc_tt->periodic_bitmaps[0]);
4136 if (dev->myhsport->tt->hub->multi)
4137 bitmap_size *= USB_MAXCHILDREN; /* XXX */
4138
4139 dwc_tt = malloc(sizeof(*dwc_tt) + bitmap_size, M_USBHC,
4140 mem_flags);
4141
4142 if (!dwc_tt)
4143 return NULL;
4144
4145 dwc_tt->usb_tt = dev->myhsport->tt;
4146 dwc_tt->usb_tt->hcpriv = dwc_tt;
4147 }
4148
4149 dwc_tt->refcount++;
4150 }
4151
4152 return dwc_tt;
4153 }
4154
4155 /**
4156 * dwc2_host_put_tt_info() - Put the dwc2_tt from dwc2_host_get_tt_info()
4157 *
4158 * Frees resources allocated by dwc2_host_get_tt_info() if all current holders
4159 * of the structure are done.
4160 *
4161 * It's OK to call this with NULL.
4162 *
4163 * @hsotg: The HCD state structure for the DWC OTG controller.
4164 * @dwc_tt: The pointer returned by dwc2_host_get_tt_info.
4165 */
dwc2_host_put_tt_info(struct dwc2_hsotg * hsotg,struct dwc2_tt * dwc_tt)4166 void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, struct dwc2_tt *dwc_tt)
4167 {
4168 /* Model kfree and make put of NULL a no-op */
4169 if (!dwc_tt)
4170 return;
4171
4172 WARN_ON(dwc_tt->refcount < 1);
4173
4174 dwc_tt->refcount--;
4175 if (!dwc_tt->refcount) {
4176 dwc_tt->usb_tt->hcpriv = NULL;
4177 free(dwc_tt, M_USBHC, 0);
4178 }
4179 }
4180
dwc2_host_get_speed(struct dwc2_hsotg * hsotg,void * context)4181 int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
4182 {
4183 struct usbd_xfer *xfer = context;
4184 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
4185 struct usbd_device *dev = dpipe->pipe.device;
4186
4187 return dev->speed;
4188 }
4189
/*
 * Bus-bandwidth accounting stub.  The Linux driver charges periodic
 * bandwidth to the usb_hcd here; this port does no such bookkeeping,
 * so the function is intentionally empty.
 */
STATIC void dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
					struct usbd_xfer *xfer)
{
}
4194
/*
 * Counterpart to dwc2_allocate_bus_bandwidth(); intentionally empty on
 * this port (no host-stack bandwidth bookkeeping).
 */
STATIC void dwc2_free_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
				    struct usbd_xfer *xfer)
{
}
4199
4200 /*
4201 * Sets the final status of an URB and returns it to the upper layer. Any
4202 * required cleanup of the URB is performed.
4203 *
4204 * Must be called with interrupt disabled and spinlock held
4205 */
dwc2_host_complete(struct dwc2_hsotg * hsotg,struct dwc2_qtd * qtd,int status)4206 void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
4207 int status)
4208 {
4209 struct usbd_xfer *xfer;
4210 struct dwc2_xfer *dxfer;
4211 struct dwc2_softc *sc;
4212 usb_endpoint_descriptor_t *ed;
4213 uint8_t xfertype;
4214
4215 if (!qtd) {
4216 dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
4217 return;
4218 }
4219
4220 if (!qtd->urb) {
4221 dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
4222 return;
4223 }
4224
4225 xfer = qtd->urb->priv;
4226 if (!xfer) {
4227 dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
4228 return;
4229 }
4230
4231 dxfer = DWC2_XFER2DXFER(xfer);
4232 sc = DWC2_XFER2SC(xfer);
4233 ed = xfer->pipe->endpoint->edesc;
4234 xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
4235
4236 struct dwc2_hcd_urb *urb = qtd->urb;
4237 xfer->actlen = dwc2_hcd_urb_get_actual_length(urb);
4238 #if 0
4239 DPRINTFN(3, "xfer=%p actlen=%d\n", xfer, xfer->actlen);
4240 #endif
4241 if (xfertype == UE_ISOCHRONOUS) {
4242 xfer->actlen = 0;
4243 for (size_t i = 0; i < xfer->nframes; ++i) {
4244 xfer->frlengths[i] =
4245 dwc2_hcd_urb_get_iso_desc_actual_length(urb, i);
4246 #if 0
4247 DPRINTFN(1, "xfer=%p frame=%zu length=%d\n", xfer, i,
4248 xfer->frlengths[i]);
4249 #endif
4250 xfer->actlen += xfer->frlengths[i];
4251 }
4252 #if 0
4253 DPRINTFN(1, "xfer=%p actlen=%d (isoc)\n", xfer, xfer->actlen);
4254 #endif
4255 }
4256
4257 if (xfertype == UE_ISOCHRONOUS && dbg_perio()) {
4258 for (size_t i = 0; i < xfer->nframes; i++)
4259 dev_vdbg(hsotg->dev, " ISO Desc %zu status %d\n",
4260 i, urb->iso_descs[i].status);
4261 }
4262
4263 if (!status) {
4264 if (!(xfer->flags & USBD_SHORT_XFER_OK) &&
4265 xfer->actlen < xfer->length)
4266 status = -EIO;
4267 }
4268
4269 switch (status) {
4270 case 0:
4271 dxfer->intr_status = USBD_NORMAL_COMPLETION;
4272 break;
4273 case -EPIPE:
4274 dxfer->intr_status = USBD_STALLED;
4275 break;
4276 case -EPROTO:
4277 dxfer->intr_status = USBD_INVAL;
4278 break;
4279 case -EIO:
4280 dxfer->intr_status = USBD_IOERROR;
4281 break;
4282 case -EOVERFLOW:
4283 dxfer->intr_status = USBD_IOERROR;
4284 break;
4285 default:
4286 dxfer->intr_status = USBD_IOERROR;
4287 printf("%s: unknown error status %d\n", __func__, status);
4288 }
4289
4290 if (dxfer->intr_status == USBD_NORMAL_COMPLETION) {
4291 /*
4292 * control transfers with no data phase don't touch dmabuf, but
4293 * everything else does.
4294 */
4295 if (!(xfertype == UE_CONTROL &&
4296 UGETW(xfer->request.wLength) == 0) &&
4297 xfer->actlen > 0 /* XXX PR/53503 */
4298 ) {
4299 int rd = usbd_xfer_isread(xfer);
4300
4301 usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
4302 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4303 }
4304 }
4305
4306 if (xfertype == UE_ISOCHRONOUS ||
4307 xfertype == UE_INTERRUPT) {
4308 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
4309
4310 dwc2_free_bus_bandwidth(hsotg,
4311 dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
4312 xfer);
4313 }
4314
4315 dwc2_hcd_urb_free(sc->sc_hsotg, dxfer->urb, xfer->nframes);
4316 qtd->urb = NULL;
4317 timeout_del(&xfer->timeout_handle);
4318 usb_rem_task(xfer->device, &xfer->abort_task);
4319 MUTEX_ASSERT_LOCKED(&hsotg->lock);
4320
4321 TAILQ_INSERT_TAIL(&sc->sc_complete, dxfer, xnext);
4322
4323 mtx_leave(&hsotg->lock);
4324 usb_schedsoftintr(&sc->sc_bus);
4325 mtx_enter(&hsotg->lock);
4326 }
4327
4328 /*
4329 * Work queue function for starting the HCD when A-Cable is connected
4330 */
dwc2_hcd_start_func(void * data)4331 STATIC void dwc2_hcd_start_func(void *data)
4332 {
4333 struct dwc2_hsotg *hsotg = data;
4334
4335 dev_dbg(hsotg->dev, "%s() %p\n", __func__, hsotg);
4336 dwc2_host_start(hsotg);
4337 }
4338
4339 /*
4340 * Reset work queue function
4341 */
dwc2_hcd_reset_func(void * data)4342 STATIC void dwc2_hcd_reset_func(void *data)
4343 {
4344 struct dwc2_hsotg *hsotg = data;
4345 unsigned long flags;
4346 u32 hprt0;
4347
4348 dev_dbg(hsotg->dev, "USB RESET function called\n");
4349
4350 spin_lock_irqsave(&hsotg->lock, flags);
4351
4352 hprt0 = dwc2_read_hprt0(hsotg);
4353 hprt0 &= ~HPRT0_RST;
4354 dwc2_writel(hsotg, hprt0, HPRT0);
4355 hsotg->flags.b.port_reset_change = 1;
4356
4357 dwc2_root_intr(hsotg->hsotg_sc); /* Required for OpenBSD */
4358
4359 spin_unlock_irqrestore(&hsotg->lock, flags);
4360 }
4361
#if 0
/* Disabled: Linux workqueue/phy API not available in this port. */
static void dwc2_hcd_phy_reset_func(struct work_struct *work)
{
	struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
						phy_reset_work);
	int ret;

	ret = phy_reset(hsotg->phy);
	if (ret)
		dev_warn(hsotg->dev, "PHY reset failed\n");
}
#endif
4374
4375 /*
4376 * =========================================================================
4377 * Linux HC Driver Functions
4378 * =========================================================================
4379 */
4380
4381 static int
_dwc2_hcd_start(struct dwc2_hsotg * hsotg)4382 _dwc2_hcd_start(struct dwc2_hsotg *hsotg)
4383 {
4384 unsigned long flags;
4385
4386 dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
4387
4388 spin_lock_irqsave(&hsotg->lock, flags);
4389
4390 hsotg->lx_state = DWC2_L0;
4391
4392 if (dwc2_is_device_mode(hsotg)) {
4393 spin_unlock_irqrestore(&hsotg->lock, flags);
4394 return 0; /* why 0 ?? */
4395 }
4396
4397 dwc2_hcd_reinit(hsotg);
4398
4399 spin_unlock_irqrestore(&hsotg->lock, flags);
4400
4401 return 0;
4402 }
4403 #if 0
4404 /*
4405 * Initializes the DWC_otg controller and its root hub and prepares it for host
4406 * mode operation. Activates the root port. Returns 0 on success and a negative
4407 * error code on failure.
4408 */
4409 static int _dwc2_hcd_start(struct dwc2_hsotg *hsotg)
4410 {
4411 struct usb_bus *bus = hcd_to_bus(hcd);
4412 unsigned long flags;
4413 u32 hprt0;
4414 int ret;
4415
4416 dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
4417
4418 spin_lock_irqsave(&hsotg->lock, flags);
4419 hsotg->lx_state = DWC2_L0;
4420 hcd->state = HC_STATE_RUNNING;
4421 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4422
4423 if (dwc2_is_device_mode(hsotg)) {
4424 spin_unlock_irqrestore(&hsotg->lock, flags);
4425 return 0; /* why 0 ?? */
4426 }
4427
4428 dwc2_hcd_reinit(hsotg);
4429
4430 hprt0 = dwc2_read_hprt0(hsotg);
4431 /* Has vbus power been turned on in dwc2_core_host_init ? */
4432 if (hprt0 & HPRT0_PWR) {
4433 /* Enable external vbus supply before resuming root hub */
4434 spin_unlock_irqrestore(&hsotg->lock, flags);
4435 ret = dwc2_vbus_supply_init(hsotg);
4436 if (ret)
4437 return ret;
4438 spin_lock_irqsave(&hsotg->lock, flags);
4439 }
4440
4441 /* Initialize and connect root hub if one is not already attached */
4442 if (bus->root_hub) {
4443 dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n");
4444 /* Inform the HUB driver to resume */
4445 usb_hcd_resume_root_hub(hcd);
4446 }
4447
4448 spin_unlock_irqrestore(&hsotg->lock, flags);
4449
4450 return 0;
4451 }
4452
4453 /*
4454 * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
4455 * stopped.
4456 */
4457 static void _dwc2_hcd_stop(struct usb_hcd *hcd)
4458 {
4459 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4460 unsigned long flags;
4461 u32 hprt0;
4462
4463 /* Turn off all host-specific interrupts */
4464 dwc2_disable_host_interrupts(hsotg);
4465
4466 /* Wait for interrupt processing to finish */
4467 synchronize_irq(hcd->irq);
4468
4469 spin_lock_irqsave(&hsotg->lock, flags);
4470 hprt0 = dwc2_read_hprt0(hsotg);
4471 /* Ensure hcd is disconnected */
4472 dwc2_hcd_disconnect(hsotg, true);
4473 dwc2_hcd_stop(hsotg);
4474 hsotg->lx_state = DWC2_L3;
4475 hcd->state = HC_STATE_HALT;
4476 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4477 spin_unlock_irqrestore(&hsotg->lock, flags);
4478
4479 /* keep balanced supply init/exit by checking HPRT0_PWR */
4480 if (hprt0 & HPRT0_PWR)
4481 dwc2_vbus_supply_exit(hsotg);
4482
4483 usleep_range(1000, 3000);
4484 }
4485
4486 static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
4487 {
4488 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4489 unsigned long flags;
4490 int ret = 0;
4491
4492 spin_lock_irqsave(&hsotg->lock, flags);
4493
4494 if (dwc2_is_device_mode(hsotg))
4495 goto unlock;
4496
4497 if (hsotg->lx_state != DWC2_L0)
4498 goto unlock;
4499
4500 if (!HCD_HW_ACCESSIBLE(hcd))
4501 goto unlock;
4502
4503 if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
4504 goto unlock;
4505
4506 if (hsotg->bus_suspended)
4507 goto skip_power_saving;
4508
4509 if (hsotg->flags.b.port_connect_status == 0)
4510 goto skip_power_saving;
4511
4512 switch (hsotg->params.power_down) {
4513 case DWC2_POWER_DOWN_PARAM_PARTIAL:
4514 /* Enter partial_power_down */
4515 ret = dwc2_enter_partial_power_down(hsotg);
4516 if (ret)
4517 dev_err(hsotg->dev,
4518 "enter partial_power_down failed\n");
4519 /* After entering suspend, hardware is not accessible */
4520 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4521 break;
4522 case DWC2_POWER_DOWN_PARAM_HIBERNATION:
4523 /* Enter hibernation */
4524 spin_unlock_irqrestore(&hsotg->lock, flags);
4525 ret = dwc2_enter_hibernation(hsotg, 1);
4526 if (ret)
4527 dev_err(hsotg->dev, "enter hibernation failed\n");
4528 spin_lock_irqsave(&hsotg->lock, flags);
4529
4530 /* After entering suspend, hardware is not accessible */
4531 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4532 break;
4533 case DWC2_POWER_DOWN_PARAM_NONE:
4534 /*
4535 * If not hibernation nor partial power down are supported,
4536 * clock gating is used to save power.
4537 */
4538 if (!hsotg->params.no_clock_gating) {
4539 dwc2_host_enter_clock_gating(hsotg);
4540
4541 /* After entering suspend, hardware is not accessible */
4542 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4543 }
4544 break;
4545 default:
4546 goto skip_power_saving;
4547 }
4548
4549 spin_unlock_irqrestore(&hsotg->lock, flags);
4550 dwc2_vbus_supply_exit(hsotg);
4551 spin_lock_irqsave(&hsotg->lock, flags);
4552
4553 /* Ask phy to be suspended */
4554 if (!IS_ERR_OR_NULL(hsotg->uphy)) {
4555 spin_unlock_irqrestore(&hsotg->lock, flags);
4556 usb_phy_set_suspend(hsotg->uphy, true);
4557 spin_lock_irqsave(&hsotg->lock, flags);
4558 }
4559
4560 skip_power_saving:
4561 hsotg->lx_state = DWC2_L2;
4562 unlock:
4563 spin_unlock_irqrestore(&hsotg->lock, flags);
4564
4565 return ret;
4566 }
4567
4568 static int _dwc2_hcd_resume(struct usb_hcd *hcd)
4569 {
4570 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4571 unsigned long flags;
4572 u32 hprt0;
4573 int ret = 0;
4574
4575 spin_lock_irqsave(&hsotg->lock, flags);
4576
4577 if (dwc2_is_device_mode(hsotg))
4578 goto unlock;
4579
4580 if (hsotg->lx_state != DWC2_L2)
4581 goto unlock;
4582
4583 hprt0 = dwc2_read_hprt0(hsotg);
4584
4585 /*
4586 * Added port connection status checking which prevents exiting from
4587 * Partial Power Down mode from _dwc2_hcd_resume() if not in Partial
4588 * Power Down mode.
4589 */
4590 if (hprt0 & HPRT0_CONNSTS) {
4591 hsotg->lx_state = DWC2_L0;
4592 goto unlock;
4593 }
4594
4595 switch (hsotg->params.power_down) {
4596 case DWC2_POWER_DOWN_PARAM_PARTIAL:
4597 ret = dwc2_exit_partial_power_down(hsotg, 0, true);
4598 if (ret)
4599 dev_err(hsotg->dev,
4600 "exit partial_power_down failed\n");
4601 /*
4602 * Set HW accessible bit before powering on the controller
4603 * since an interrupt may rise.
4604 */
4605 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4606 break;
4607 case DWC2_POWER_DOWN_PARAM_HIBERNATION:
4608 ret = dwc2_exit_hibernation(hsotg, 0, 0, 1);
4609 if (ret)
4610 dev_err(hsotg->dev, "exit hibernation failed.\n");
4611
4612 /*
4613 * Set HW accessible bit before powering on the controller
4614 * since an interrupt may rise.
4615 */
4616 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4617 break;
4618 case DWC2_POWER_DOWN_PARAM_NONE:
4619 /*
4620 * If not hibernation nor partial power down are supported,
4621 * port resume is done using the clock gating programming flow.
4622 */
4623 spin_unlock_irqrestore(&hsotg->lock, flags);
4624 dwc2_host_exit_clock_gating(hsotg, 0);
4625
4626 /*
4627 * Initialize the Core for Host mode, as after system resume
4628 * the global interrupts are disabled.
4629 */
4630 dwc2_core_init(hsotg, false);
4631 dwc2_enable_global_interrupts(hsotg);
4632 dwc2_hcd_reinit(hsotg);
4633 spin_lock_irqsave(&hsotg->lock, flags);
4634
4635 /*
4636 * Set HW accessible bit before powering on the controller
4637 * since an interrupt may rise.
4638 */
4639 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4640 break;
4641 default:
4642 hsotg->lx_state = DWC2_L0;
4643 goto unlock;
4644 }
4645
4646 /* Change Root port status, as port status change occurred after resume.*/
4647 hsotg->flags.b.port_suspend_change = 1;
4648
4649 /*
4650 * Enable power if not already done.
4651 * This must not be spinlocked since duration
4652 * of this call is unknown.
4653 */
4654 if (!IS_ERR_OR_NULL(hsotg->uphy)) {
4655 spin_unlock_irqrestore(&hsotg->lock, flags);
4656 usb_phy_set_suspend(hsotg->uphy, false);
4657 spin_lock_irqsave(&hsotg->lock, flags);
4658 }
4659
4660 /* Enable external vbus supply after resuming the port. */
4661 spin_unlock_irqrestore(&hsotg->lock, flags);
4662 dwc2_vbus_supply_init(hsotg);
4663
4664 /* Wait for controller to correctly update D+/D- level */
4665 usleep_range(3000, 5000);
4666 spin_lock_irqsave(&hsotg->lock, flags);
4667
4668 /*
4669 * Clear Port Enable and Port Status changes.
4670 * Enable Port Power.
4671 */
4672 dwc2_writel(hsotg, HPRT0_PWR | HPRT0_CONNDET |
4673 HPRT0_ENACHG, HPRT0);
4674
4675 /* Wait for controller to detect Port Connect */
4676 spin_unlock_irqrestore(&hsotg->lock, flags);
4677 usleep_range(5000, 7000);
4678 spin_lock_irqsave(&hsotg->lock, flags);
4679 unlock:
4680 spin_unlock_irqrestore(&hsotg->lock, flags);
4681
4682 return ret;
4683 }
4684
4685 /* Returns the current frame number */
4686 static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd)
4687 {
4688 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4689
4690 return dwc2_hcd_get_frame_number(hsotg);
4691 }
4692
4693 static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
4694 char *fn_name)
4695 {
4696 #ifdef VERBOSE_DEBUG
4697 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4698 char *pipetype = NULL;
4699 char *speed = NULL;
4700
4701 dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
4702 dev_vdbg(hsotg->dev, " Device address: %d\n",
4703 usb_pipedevice(urb->pipe));
4704 dev_vdbg(hsotg->dev, " Endpoint: %d, %s\n",
4705 usb_pipeendpoint(urb->pipe),
4706 usb_pipein(urb->pipe) ? "IN" : "OUT");
4707
4708 switch (usb_pipetype(urb->pipe)) {
4709 case PIPE_CONTROL:
4710 pipetype = "CONTROL";
4711 break;
4712 case PIPE_BULK:
4713 pipetype = "BULK";
4714 break;
4715 case PIPE_INTERRUPT:
4716 pipetype = "INTERRUPT";
4717 break;
4718 case PIPE_ISOCHRONOUS:
4719 pipetype = "ISOCHRONOUS";
4720 break;
4721 }
4722
4723 dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype,
4724 usb_urb_dir_in(urb) ? "IN" : "OUT", usb_pipein(urb->pipe) ?
4725 "IN" : "OUT");
4726
4727 switch (urb->dev->speed) {
4728 case USB_SPEED_HIGH:
4729 speed = "HIGH";
4730 break;
4731 case USB_SPEED_FULL:
4732 speed = "FULL";
4733 break;
4734 case USB_SPEED_LOW:
4735 speed = "LOW";
4736 break;
4737 default:
4738 speed = "UNKNOWN";
4739 break;
4740 }
4741
4742 dev_vdbg(hsotg->dev, " Speed: %s\n", speed);
4743 dev_vdbg(hsotg->dev, " Max packet size: %d (%d mult)\n",
4744 usb_endpoint_maxp(&urb->ep->desc),
4745 usb_endpoint_maxp_mult(&urb->ep->desc));
4746
4747 dev_vdbg(hsotg->dev, " Data buffer length: %d\n",
4748 urb->transfer_buffer_length);
4749 dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
4750 urb->transfer_buffer, (unsigned long)urb->transfer_dma);
4751 dev_vdbg(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
4752 urb->setup_packet, (unsigned long)urb->setup_dma);
4753 dev_vdbg(hsotg->dev, " Interval: %d\n", urb->interval);
4754
4755 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
4756 int i;
4757
4758 for (i = 0; i < urb->number_of_packets; i++) {
4759 dev_vdbg(hsotg->dev, " ISO Desc %d:\n", i);
4760 dev_vdbg(hsotg->dev, " offset: %d, length %d\n",
4761 urb->iso_frame_desc[i].offset,
4762 urb->iso_frame_desc[i].length);
4763 }
4764 }
4765 #endif
4766 }
4767
4768 /*
4769 * Starts processing a USB transfer request specified by a USB Request Block
4770 * (URB). mem_flags indicates the type of memory allocation to use while
4771 * processing this URB.
4772 */
4773 static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
4774 gfp_t mem_flags)
4775 {
4776 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4777 struct usb_host_endpoint *ep = urb->ep;
4778 struct dwc2_hcd_urb *dwc2_urb;
4779 int i;
4780 int retval;
4781 int alloc_bandwidth = 0;
4782 u8 ep_type = 0;
4783 u32 tflags = 0;
4784 void *buf;
4785 unsigned long flags;
4786 struct dwc2_qh *qh;
4787 bool qh_allocated = false;
4788 struct dwc2_qtd *qtd;
4789 struct dwc2_gregs_backup *gr;
4790
4791 gr = &hsotg->gr_backup;
4792
4793 if (dbg_urb(urb)) {
4794 dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n");
4795 dwc2_dump_urb_info(hcd, urb, "urb_enqueue");
4796 }
4797
4798 if (hsotg->hibernated) {
4799 if (gr->gotgctl & GOTGCTL_CURMODE_HOST)
4800 retval = dwc2_exit_hibernation(hsotg, 0, 0, 1);
4801 else
4802 retval = dwc2_exit_hibernation(hsotg, 0, 0, 0);
4803
4804 if (retval)
4805 dev_err(hsotg->dev,
4806 "exit hibernation failed.\n");
4807 }
4808
4809 if (hsotg->in_ppd) {
4810 retval = dwc2_exit_partial_power_down(hsotg, 0, true);
4811 if (retval)
4812 dev_err(hsotg->dev,
4813 "exit partial_power_down failed\n");
4814 }
4815
4816 if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
4817 hsotg->bus_suspended) {
4818 if (dwc2_is_device_mode(hsotg))
4819 dwc2_gadget_exit_clock_gating(hsotg, 0);
4820 else
4821 dwc2_host_exit_clock_gating(hsotg, 0);
4822 }
4823
4824 if (!ep)
4825 return -EINVAL;
4826
4827 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
4828 usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
4829 spin_lock_irqsave(&hsotg->lock, flags);
4830 if (!dwc2_hcd_is_bandwidth_allocated(hsotg, ep))
4831 alloc_bandwidth = 1;
4832 spin_unlock_irqrestore(&hsotg->lock, flags);
4833 }
4834
4835 switch (usb_pipetype(urb->pipe)) {
4836 case PIPE_CONTROL:
4837 ep_type = USB_ENDPOINT_XFER_CONTROL;
4838 break;
4839 case PIPE_ISOCHRONOUS:
4840 ep_type = USB_ENDPOINT_XFER_ISOC;
4841 break;
4842 case PIPE_BULK:
4843 ep_type = USB_ENDPOINT_XFER_BULK;
4844 break;
4845 case PIPE_INTERRUPT:
4846 ep_type = USB_ENDPOINT_XFER_INT;
4847 break;
4848 }
4849
4850 dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
4851 mem_flags);
4852 if (!dwc2_urb)
4853 return -ENOMEM;
4854
4855 dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
4856 usb_pipeendpoint(urb->pipe), ep_type,
4857 usb_pipein(urb->pipe),
4858 usb_endpoint_maxp(&ep->desc),
4859 usb_endpoint_maxp_mult(&ep->desc));
4860
4861 buf = urb->transfer_buffer;
4862
4863 if (hcd_uses_dma(hcd)) {
4864 if (!buf && (urb->transfer_dma & 3)) {
4865 dev_err(hsotg->dev,
4866 "%s: unaligned transfer with no transfer_buffer",
4867 __func__);
4868 retval = -EINVAL;
4869 goto fail0;
4870 }
4871 }
4872
4873 if (!(urb->transfer_flags & URB_NO_INTERRUPT))
4874 tflags |= URB_GIVEBACK_ASAP;
4875 if (urb->transfer_flags & URB_ZERO_PACKET)
4876 tflags |= URB_SEND_ZERO_PACKET;
4877
4878 dwc2_urb->priv = urb;
4879 dwc2_urb->buf = buf;
4880 dwc2_urb->dma = urb->transfer_dma;
4881 dwc2_urb->length = urb->transfer_buffer_length;
4882 dwc2_urb->setup_packet = urb->setup_packet;
4883 dwc2_urb->setup_dma = urb->setup_dma;
4884 dwc2_urb->flags = tflags;
4885 dwc2_urb->interval = urb->interval;
4886 dwc2_urb->status = -EINPROGRESS;
4887
4888 for (i = 0; i < urb->number_of_packets; ++i)
4889 dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i,
4890 urb->iso_frame_desc[i].offset,
4891 urb->iso_frame_desc[i].length);
4892
4893 urb->hcpriv = dwc2_urb;
4894 qh = (struct dwc2_qh *)ep->hcpriv;
4895 /* Create QH for the endpoint if it doesn't exist */
4896 if (!qh) {
4897 qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags);
4898 if (!qh) {
4899 retval = -ENOMEM;
4900 goto fail0;
4901 }
4902 ep->hcpriv = qh;
4903 qh_allocated = true;
4904 }
4905
4906 qtd = kzalloc(sizeof(*qtd), mem_flags);
4907 if (!qtd) {
4908 retval = -ENOMEM;
4909 goto fail1;
4910 }
4911
4912 spin_lock_irqsave(&hsotg->lock, flags);
4913 retval = usb_hcd_link_urb_to_ep(hcd, urb);
4914 if (retval)
4915 goto fail2;
4916
4917 retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
4918 if (retval)
4919 goto fail3;
4920
4921 if (alloc_bandwidth) {
4922 dwc2_allocate_bus_bandwidth(hcd,
4923 dwc2_hcd_get_ep_bandwidth(hsotg, ep),
4924 urb);
4925 }
4926
4927 spin_unlock_irqrestore(&hsotg->lock, flags);
4928
4929 return 0;
4930
4931 fail3:
4932 dwc2_urb->priv = NULL;
4933 usb_hcd_unlink_urb_from_ep(hcd, urb);
4934 if (qh_allocated && qh->channel && qh->channel->qh == qh)
4935 qh->channel->qh = NULL;
4936 fail2:
4937 spin_unlock_irqrestore(&hsotg->lock, flags);
4938 urb->hcpriv = NULL;
4939 kfree(qtd);
4940 fail1:
4941 if (qh_allocated) {
4942 struct dwc2_qtd *qtd2, *qtd2_tmp;
4943
4944 ep->hcpriv = NULL;
4945 dwc2_hcd_qh_unlink(hsotg, qh);
4946 /* Free each QTD in the QH's QTD list */
4947 list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list,
4948 qtd_list_entry)
4949 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh);
4950 dwc2_hcd_qh_free(hsotg, qh);
4951 }
4952 fail0:
4953 kfree(dwc2_urb);
4954
4955 return retval;
4956 }
4957
4958 /*
4959 * Aborts/cancels a USB transfer request. Always returns 0 to indicate success.
4960 */
static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
				 int status)
{
	struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
	int rc;
	unsigned long flags;

	dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n");
	dwc2_dump_urb_info(hcd, urb, "urb_dequeue");

	spin_lock_irqsave(&hsotg->lock, flags);

	/* Verify the URB is still linked and eligible for unlinking */
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto out;

	if (!urb->hcpriv) {
		/* URB never made it into the controller schedule */
		dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n");
		goto out;
	}

	rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv);

	usb_hcd_unlink_urb_from_ep(hcd, urb);

	/* Free the dwc2_hcd_urb allocated at enqueue time */
	kfree(urb->hcpriv);
	urb->hcpriv = NULL;

	/* Higher layer software sets URB status */
	/*
	 * Drop the lock (but stay at raised IPL) across the giveback so the
	 * completion callback may re-enter the HCD, e.g. to resubmit.
	 */
	spin_unlock(&hsotg->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&hsotg->lock);

	dev_dbg(hsotg->dev, "Called usb_hcd_giveback_urb()\n");
	dev_dbg(hsotg->dev, " urb->status = %d\n", urb->status);
out:
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return rc;
}
5001
5002 /*
5003 * Frees resources in the DWC_otg controller related to a given endpoint. Also
5004 * clears state in the HCD related to the endpoint. Any URBs for the endpoint
5005 * must already be dequeued.
5006 */
5007 static void _dwc2_hcd_endpoint_disable(struct usb_hcd *hcd,
5008 struct usb_host_endpoint *ep)
5009 {
5010 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
5011
5012 dev_dbg(hsotg->dev,
5013 "DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, ep->hcpriv=%p\n",
5014 ep->desc.bEndpointAddress, ep->hcpriv);
5015 dwc2_hcd_endpoint_disable(hsotg, ep, 250);
5016 }
5017
5018 /*
5019 * Resets endpoint specific parameter values, in current version used to reset
5020 * the data toggle (as a WA). This function can be called from usb_clear_halt
5021 * routine.
5022 */
5023 static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
5024 struct usb_host_endpoint *ep)
5025 {
5026 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
5027 unsigned long flags;
5028
5029 dev_dbg(hsotg->dev,
5030 "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
5031 ep->desc.bEndpointAddress);
5032
5033 spin_lock_irqsave(&hsotg->lock, flags);
5034 dwc2_hcd_endpoint_reset(hsotg, ep);
5035 spin_unlock_irqrestore(&hsotg->lock, flags);
5036 }
5037
5038 /*
5039 * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
5040 * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
5041 * interrupt.
5042 *
5043 * This function is called by the USB core when an interrupt occurs
5044 */
5045 static irqreturn_t _dwc2_hcd_irq(struct usb_hcd *hcd)
5046 {
5047 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
5048
5049 return dwc2_handle_hcd_intr(hsotg);
5050 }
5051
5052 /*
5053 * Creates Status Change bitmap for the root hub and root port. The bitmap is
5054 * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
5055 * is the status change indicator for the single root port. Returns 1 if either
5056 * change indicator is 1, otherwise returns 0.
5057 */
static int _dwc2_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct dwc2_hsotg *sc = dwc2_hcd_to_hsotg(hcd);

	/* Bit 1 = root-port change; bit 0 (root hub itself) is never set */
	buf[0] = dwc2_hcd_is_status_changed(sc, 1) << 1;

	return buf[0] != 0;
}
5065
5066 /* Handles hub class-specific requests */
5067 static int _dwc2_hcd_hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue,
5068 u16 windex, char *buf, u16 wlength)
5069 {
5070 int retval = dwc2_hcd_hub_control(dwc2_hcd_to_hsotg(hcd), typereq,
5071 wvalue, windex, buf, wlength);
5072 return retval;
5073 }
5074
5075 /* Handles hub TT buffer clear completions */
5076 static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd,
5077 struct usb_host_endpoint *ep)
5078 {
5079 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
5080 struct dwc2_qh *qh;
5081 unsigned long flags;
5082
5083 qh = ep->hcpriv;
5084 if (!qh)
5085 return;
5086
5087 spin_lock_irqsave(&hsotg->lock, flags);
5088 qh->tt_buffer_dirty = 0;
5089
5090 if (hsotg->flags.b.port_connect_status)
5091 dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_ALL);
5092
5093 spin_unlock_irqrestore(&hsotg->lock, flags);
5094 }
5095
5096 /*
5097 * HPRT0_SPD_HIGH_SPEED: high speed
5098 * HPRT0_SPD_FULL_SPEED: full speed
5099 */
5100 static void dwc2_change_bus_speed(struct usb_hcd *hcd, int speed)
5101 {
5102 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
5103
5104 if (hsotg->params.speed == speed)
5105 return;
5106
5107 hsotg->params.speed = speed;
5108 queue_work(hsotg->wq_otg, &hsotg->wf_otg);
5109 }
5110
5111 static void dwc2_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
5112 {
5113 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
5114
5115 if (!hsotg->params.change_speed_quirk)
5116 return;
5117
5118 /*
5119 * On removal, set speed to default high-speed.
5120 */
5121 if (udev->parent && udev->parent->speed > USB_SPEED_UNKNOWN &&
5122 udev->parent->speed < USB_SPEED_HIGH) {
5123 dev_info(hsotg->dev, "Set speed to default high-speed\n");
5124 dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED);
5125 }
5126 }
5127
5128 static int dwc2_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
5129 {
5130 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
5131
5132 if (!hsotg->params.change_speed_quirk)
5133 return 0;
5134
5135 if (udev->speed == USB_SPEED_HIGH) {
5136 dev_info(hsotg->dev, "Set speed to high-speed\n");
5137 dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED);
5138 } else if ((udev->speed == USB_SPEED_FULL ||
5139 udev->speed == USB_SPEED_LOW)) {
5140 /*
5141 * Change speed setting to full-speed if there's
5142 * a full-speed or low-speed device plugged in.
5143 */
5144 dev_info(hsotg->dev, "Set speed to full-speed\n");
5145 dwc2_change_bus_speed(hcd, HPRT0_SPD_FULL_SPEED);
5146 }
5147
5148 return 0;
5149 }
5150
5151 /* XXX: Linux USB Stack */
/* Host controller driver operations handed to the USB stack */
static struct hc_driver dwc2_hc_driver = {
	.description = "dwc2_hsotg",
	.product_desc = "DWC OTG Controller",
	.hcd_priv_size = sizeof(struct wrapper_priv_data),

	.irq = _dwc2_hcd_irq,
	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,

	/* Lifecycle */
	.start = _dwc2_hcd_start,
	.stop = _dwc2_hcd_stop,
	/* URB processing */
	.urb_enqueue = _dwc2_hcd_urb_enqueue,
	.urb_dequeue = _dwc2_hcd_urb_dequeue,
	.endpoint_disable = _dwc2_hcd_endpoint_disable,
	.endpoint_reset = _dwc2_hcd_endpoint_reset,
	.get_frame_number = _dwc2_hcd_get_frame_number,

	/* Root hub */
	.hub_status_data = _dwc2_hcd_hub_status_data,
	.hub_control = _dwc2_hcd_hub_control,
	.clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete,

	/* Power management */
	.bus_suspend = _dwc2_hcd_suspend,
	.bus_resume = _dwc2_hcd_resume,

	.map_urb_for_dma = dwc2_map_urb_for_dma,
	.unmap_urb_for_dma = dwc2_unmap_urb_for_dma,
};
5178 #endif
5179
5180 /*
5181 * Frees secondary storage associated with the dwc2_hsotg structure contained
5182 * in the struct usb_hcd field
5183 */
STATIC void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
{
	u32 ahbcfg;
	u32 dctl;
	int i;

	dev_dbg(hsotg->dev, "DWC OTG HCD FREE\n");

	/* Free memory for QH/QTD lists */
	dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive);
	dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_waiting);
	dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active);
	dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive);
	dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready);
	dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_assigned);
	dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_queued);

	/* Free memory for the host channels */
	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
		struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];

		if (chan) {
			dev_dbg(hsotg->dev, "HCD Free channel #%i, chan=%p\n",
				i, chan);
			hsotg->hc_ptr_array[i] = NULL;
			free(chan, M_USBHC, sizeof(*chan));
		}
	}

	/*
	 * The status buffer was allocated as USB DMA memory when host DMA
	 * is in use, otherwise from regular kernel memory — release it the
	 * matching way.
	 */
	if (hsotg->params.host_dma) {
		if (hsotg->status_buf) {
			usb_freemem(&hsotg->hsotg_sc->sc_bus,
				    &hsotg->status_buf_dma_usb);
			hsotg->status_buf = NULL;
		}
	} else {
		free(hsotg->status_buf, M_USBHC, DWC2_HCD_STATUS_BUF_SIZE);
		hsotg->status_buf = NULL;
	}

	ahbcfg = dwc2_readl(hsotg, GAHBCFG);

	/* Disable all interrupts */
	ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
	dwc2_writel(hsotg, ahbcfg, GAHBCFG);
	dwc2_writel(hsotg, 0, GINTMSK);

	/* On 3.00a and newer cores, assert soft disconnect on the way out */
	if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a) {
		dctl = dwc2_readl(hsotg, DCTL);
		dctl |= DCTL_SFTDISCON;
		dwc2_writel(hsotg, dctl, DCTL);
	}

	if (hsotg->wq_otg) {
		taskq_destroy(hsotg->wq_otg);
	}

	//free(hsotg->core_params, M_USBHC, sizeof(*hsotg->core_params));
	//hsotg->core_params = NULL;
	timeout_del(&hsotg->wkp_timer);
}
5245
5246
/* Quiesce host interrupts, then release all HCD resources. */
STATIC void dwc2_hcd_release(struct dwc2_hsotg *hsotg)
{
	/* Turn off all host-specific interrupts */
	dwc2_disable_host_interrupts(hsotg);

	/* Tear down schedules, channels and buffers */
	dwc2_hcd_free(hsotg);
}
5254
5255 /*
5256 * Initializes the HCD. This function allocates memory for and initializes the
5257 * static parts of the usb_hcd and dwc2_hsotg structures. It also registers the
5258 * USB bus with the core and calls the hc_driver->start() function. It returns
5259 * a negative error on failure.
5260 */
dwc2_hcd_init(struct dwc2_hsotg * hsotg)5261 int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
5262 {
5263 struct dwc2_host_chan *channel;
5264 u32 hcfg;
5265 int i, num_channels;
5266 int retval;
5267
5268 if (usb_disabled())
5269 return -ENODEV;
5270
5271 dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n");
5272
5273 retval = -ENOMEM;
5274
5275 hcfg = dwc2_readl(hsotg, HCFG);
5276 dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg);
5277
5278 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
5279 hsotg->frame_num_array = malloc(sizeof(*hsotg->frame_num_array) *
5280 FRAME_NUM_ARRAY_SIZE, M_USBHC,
5281 M_ZERO | M_WAITOK);
5282 if (!hsotg->frame_num_array)
5283 goto error1;
5284 hsotg->last_frame_num_array = malloc(
5285 sizeof(*hsotg->last_frame_num_array) *
5286 FRAME_NUM_ARRAY_SIZE, M_USBHC, M_ZERO | M_WAITOK);
5287 if (!hsotg->last_frame_num_array)
5288 goto error1;
5289 #endif
5290 hsotg->last_frame_num = HFNUM_MAX_FRNUM;
5291
5292 spin_lock_init(&hsotg->lock);
5293
5294 /*
5295 * Disable the global interrupt until all the interrupt handlers are
5296 * installed
5297 */
5298 dwc2_disable_global_interrupts(hsotg);
5299
5300 /* Initialize the DWC_otg core, and select the Phy type */
5301 retval = dwc2_core_init(hsotg, true);
5302 if (retval)
5303 goto error2;
5304
5305 /* Create new workqueue and init work */
5306 retval = -ENOMEM;
5307 hsotg->wq_otg = taskq_create("dwc2", 1, IPL_VM, 0);
5308 if (!hsotg->wq_otg) {
5309 dev_err(hsotg->dev, "Failed to create workqueue\n");
5310 goto error2;
5311 }
5312 task_set(&hsotg->wf_otg, dwc2_conn_id_status_change, hsotg);
5313
5314 timeout_set(&hsotg->wkp_timer, dwc2_wakeup_detected, hsotg);
5315
5316 /* Initialize the non-periodic schedule */
5317 INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
5318 INIT_LIST_HEAD(&hsotg->non_periodic_sched_waiting);
5319 INIT_LIST_HEAD(&hsotg->non_periodic_sched_active);
5320
5321 /* Initialize the periodic schedule */
5322 INIT_LIST_HEAD(&hsotg->periodic_sched_inactive);
5323 INIT_LIST_HEAD(&hsotg->periodic_sched_ready);
5324 INIT_LIST_HEAD(&hsotg->periodic_sched_assigned);
5325 INIT_LIST_HEAD(&hsotg->periodic_sched_queued);
5326
5327 INIT_LIST_HEAD(&hsotg->split_order);
5328
5329 /*
5330 * Create a host channel descriptor for each host channel implemented
5331 * in the controller. Initialize the channel descriptor array.
5332 */
5333 INIT_LIST_HEAD(&hsotg->free_hc_list);
5334 num_channels = hsotg->params.host_channels;
5335 memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array));
5336
5337 for (i = 0; i < num_channels; i++) {
5338 channel = malloc(sizeof(*channel), M_USBHC, M_ZERO | M_WAITOK);
5339 if (!channel)
5340 goto error3;
5341 channel->hc_num = i;
5342 INIT_LIST_HEAD(&channel->split_order_list_entry);
5343 hsotg->hc_ptr_array[i] = channel;
5344 }
5345
5346 /* Initialize hsotg start work */
5347 INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func, hsotg);
5348
5349 /* Initialize port reset work */
5350 INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func, hsotg);
5351
5352 /*
5353 * Allocate space for storing data on status transactions. Normally no
5354 * data is sent, but this space acts as a bit bucket. This must be
5355 * done after usb_add_hcd since that function allocates the DMA buffer
5356 * pool.
5357 */
5358 hsotg->status_buf = NULL;
5359 if (hsotg->params.host_dma) {
5360 int error = usb_allocmem(&hsotg->hsotg_sc->sc_bus,
5361 DWC2_HCD_STATUS_BUF_SIZE, 0, USB_DMA_COHERENT,
5362 &hsotg->status_buf_dma_usb);
5363 if (!error) {
5364 hsotg->status_buf =
5365 KERNADDR(&hsotg->status_buf_dma_usb, 0);
5366 hsotg->status_buf_dma =
5367 DMAADDR(&hsotg->status_buf_dma_usb, 0);
5368 }
5369 } else
5370 hsotg->status_buf = malloc(DWC2_HCD_STATUS_BUF_SIZE, M_USBHC,
5371 M_ZERO | M_WAITOK);
5372
5373 /* retval is already -ENOMEM */
5374 if (!hsotg->status_buf)
5375 goto error3;
5376
5377 hsotg->otg_port = 1;
5378 hsotg->frame_list = NULL;
5379 hsotg->frame_list_dma = 0;
5380 hsotg->periodic_qh_count = 0;
5381
5382 /* Initiate lx_state to L3 disconnected state */
5383 hsotg->lx_state = DWC2_L3;
5384
5385 _dwc2_hcd_start(hsotg);
5386
5387 dwc2_hcd_dump_state(hsotg);
5388
5389 dwc2_enable_global_interrupts(hsotg);
5390
5391 return 0;
5392
5393 error3:
5394 dwc2_hcd_release(hsotg);
5395 error2:
5396 #if 0
5397 if (hsotg->core_params != NULL)
5398 free(hsotg->core_params, M_USBHC, sizeof(*hsotg->core_params));
5399 #endif
5400 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
5401 if (hsotg->last_frame_num_array != NULL)
5402 free(hsotg->last_frame_num_array, M_USBHC,
5403 sizeof(*hsotg->last_frame_num_array) * FRAME_NUM_ARRAY_SIZE);
5404 if (hsotg->frame_num_array != NULL)
5405 free(hsotg->frame_num_array, M_USBHC,
5406 sizeof(*hsotg->frame_num_array) * FRAME_NUM_ARRAY_SIZE);
5407 #endif
5408
5409 dev_err(hsotg->dev, "%s() FAILED, returning %d\n", __func__, retval);
5410 return retval;
5411 }
5412
5413 #if 0
5414 /*
5415 * Removes the HCD.
5416 * Frees memory and resources associated with the HCD and deregisters the bus.
5417 */
/* NOTE: this function is compiled out (#if 0) in this port. */
void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
{
	struct usb_hcd *hcd;

	dev_dbg(hsotg->dev, "DWC OTG HCD REMOVE\n");

	hcd = dwc2_hsotg_to_hcd(hsotg);
	dev_dbg(hsotg->dev, "hsotg->hcd = %p\n", hcd);

	/* Nothing to tear down if the HCD was never attached */
	if (!hcd) {
		dev_dbg(hsotg->dev, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n",
			__func__);
		return;
	}
	hsotg->priv = NULL;

	dwc2_hcd_release(hsotg);

#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
	free(hsotg->last_frame_num_array, M_USBHC, sizeof(*hsotg->last_frame_num_array) * FRAME_NUM_ARRAY_SIZE);
	free(hsotg->frame_num_array, M_USBHC, sizeof(*hsotg->frame_num_array) * FRAME_NUM_ARRAY_SIZE);
#endif
}
5441 #endif
5442
/**
 * dwc2_backup_host_registers() - Backup controller host registers.
 * When suspending the USB bus, the registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
STATIC int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup Host regs */
	hr = &hsotg->hr_backup;
	hr->hcfg = dwc2_readl(hsotg, HCFG);
	hr->haintmsk = dwc2_readl(hsotg, HAINTMSK);
	/* Save the per-channel interrupt masks */
	for (i = 0; i < hsotg->params.host_channels; ++i)
		hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i));

	hr->hprt0 = dwc2_read_hprt0(hsotg);
	hr->hfir = dwc2_readl(hsotg, HFIR);
	hr->hptxfsiz = dwc2_readl(hsotg, HPTXFSIZ);
	/* Mark the snapshot usable for a later restore */
	hr->valid = true;

	return 0;
}
5471
/**
 * dwc2_restore_host_registers() - Restore controller host registers.
 * When resuming the USB bus, the host registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
STATIC int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore host regs */
	hr = &hsotg->hr_backup;
	if (!hr->valid) {
		/* No matching dwc2_backup_host_registers() call was made */
		dev_err(hsotg->dev, "%s: no host registers to restore\n",
			__func__);
		return -EINVAL;
	}
	/* Snapshot is consumed; a second restore needs a fresh backup */
	hr->valid = false;

	dwc2_writel(hsotg, hr->hcfg, HCFG);
	dwc2_writel(hsotg, hr->haintmsk, HAINTMSK);

	/* Restore the per-channel interrupt masks */
	for (i = 0; i < hsotg->params.host_channels; ++i)
		dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i));

	dwc2_writel(hsotg, hr->hprt0, HPRT0);
	dwc2_writel(hsotg, hr->hfir, HFIR);
	dwc2_writel(hsotg, hr->hptxfsiz, HPTXFSIZ);
	hsotg->frame_number = 0;

	return 0;
}
5508
5509 /**
5510 * dwc2_host_enter_hibernation() - Put controller in Hibernation.
5511 *
5512 * @hsotg: Programming view of the DWC_otg controller
5513 */
int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;
	int ret = 0;
	u32 hprt0;
	u32 pcgcctl;
	u32 gusbcfg;
	u32 gpwrdn;

	dev_dbg(hsotg->dev, "Preparing host for hibernation\n");
	/* Registers must be saved first; power is cut at the end */
	ret = dwc2_backup_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
			__func__);
		return ret;
	}
	ret = dwc2_backup_host_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup host registers\n",
			__func__);
		return ret;
	}

	/* Enter USB Suspend Mode */
	hprt0 = dwc2_readl(hsotg, HPRT0);
	hprt0 |= HPRT0_SUSP;
	hprt0 &= ~HPRT0_ENA;
	dwc2_writel(hsotg, hprt0, HPRT0);

	/* Wait for the HPRT0.PrtSusp register field to be set */
	if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000))
		dev_warn(hsotg->dev, "Suspend wasn't generated\n");

	/*
	 * We need to disable interrupts to prevent servicing of any IRQ
	 * during going to hibernation
	 */
	spin_lock_irqsave(&hsotg->lock, flags);
	hsotg->lx_state = DWC2_L2;

	/*
	 * ULPI and UTMI+ PHYs need the clock stop and power-management unit
	 * activation in opposite order.
	 */
	gusbcfg = dwc2_readl(hsotg, GUSBCFG);
	if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) {
		/* ULPI interface */
		/* Suspend the Phy Clock */
		pcgcctl = dwc2_readl(hsotg, PCGCTL);
		pcgcctl |= PCGCTL_STOPPCLK;
		dwc2_writel(hsotg, pcgcctl, PCGCTL);
		udelay(10);

		gpwrdn = dwc2_readl(hsotg, GPWRDN);
		gpwrdn |= GPWRDN_PMUACTV;
		dwc2_writel(hsotg, gpwrdn, GPWRDN);
		udelay(10);
	} else {
		/* UTMI+ Interface */
		gpwrdn = dwc2_readl(hsotg, GPWRDN);
		gpwrdn |= GPWRDN_PMUACTV;
		dwc2_writel(hsotg, gpwrdn, GPWRDN);
		udelay(10);

		pcgcctl = dwc2_readl(hsotg, PCGCTL);
		pcgcctl |= PCGCTL_STOPPCLK;
		dwc2_writel(hsotg, pcgcctl, PCGCTL);
		udelay(10);
	}

	/* Enable interrupts from wake up logic */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn |= GPWRDN_PMUINTSEL;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Unmask host mode interrupts in GPWRDN */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn |= GPWRDN_DISCONN_DET_MSK;
	gpwrdn |= GPWRDN_LNSTSCHG_MSK;
	gpwrdn |= GPWRDN_STS_CHGINT_MSK;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Enable Power Down Clamp */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn |= GPWRDN_PWRDNCLMP;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Switch off VDD */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn |= GPWRDN_PWRDNSWTCH;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);

	hsotg->hibernated = 1;
	hsotg->bus_suspended = 1;
	dev_dbg(hsotg->dev, "Host hibernation completed\n");
	spin_unlock_irqrestore(&hsotg->lock, flags);
	return ret;
}
5611
/*
 * dwc2_host_exit_hibernation()
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @rem_wakeup: indicates whether resume is initiated by Device or Host.
 * @reset: indicates whether resume is initiated by Reset.
 *
 * Return: non-zero if failed to exit hibernation.
 *
 * This function is for exiting from Host mode hibernation by
 * Host Initiated Resume/Reset and Device Initiated Remote-Wakeup.
 */
int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
			       int reset)
{
	u32 gpwrdn;
	u32 hprt0;
	int ret = 0;
	struct dwc2_gregs_backup *gr;
	struct dwc2_hregs_backup *hr;

	gr = &hsotg->gr_backup;
	hr = &hsotg->hr_backup;

	dev_dbg(hsotg->dev,
		"%s: called with rem_wakeup = %d reset = %d\n",
		__func__, rem_wakeup, reset);

	dwc2_hib_restore_common(hsotg, rem_wakeup, 1);
	hsotg->hibernated = 0;

	/*
	 * This step is not described in functional spec but if not wait for
	 * this delay, mismatch interrupts occurred because just after restore
	 * core is in Device mode(gintsts.curmode == 0)
	 */
	mdelay(100);

	/* Clear all pending interrupts */
	dwc2_writel(hsotg, 0xffffffff, GINTSTS);

	/* De-assert Restore */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn &= ~GPWRDN_RESTORE;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Restore GUSBCFG, HCFG */
	dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
	dwc2_writel(hsotg, hr->hcfg, HCFG);

	/* De-assert Wakeup Logic */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn &= ~GPWRDN_PMUACTV;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Power the port and take it out of enabled/suspend */
	hprt0 = hr->hprt0;
	hprt0 |= HPRT0_PWR;
	hprt0 &= ~HPRT0_ENA;
	hprt0 &= ~HPRT0_SUSP;
	dwc2_writel(hsotg, hprt0, HPRT0);

	/*
	 * NOTE(review): the same value is rebuilt here before the
	 * reset/resume pulse below; the duplication mirrors the upstream
	 * sequence — confirm before merging the two blocks.
	 */
	hprt0 = hr->hprt0;
	hprt0 |= HPRT0_PWR;
	hprt0 &= ~HPRT0_ENA;
	hprt0 &= ~HPRT0_SUSP;

	if (reset) {
		hprt0 |= HPRT0_RST;
		dwc2_writel(hsotg, hprt0, HPRT0);

		/* Wait for Resume time and then program HPRT again */
		mdelay(60);
		hprt0 &= ~HPRT0_RST;
		dwc2_writel(hsotg, hprt0, HPRT0);
	} else {
		hprt0 |= HPRT0_RES;
		dwc2_writel(hsotg, hprt0, HPRT0);

		/* Wait for Resume time and then program HPRT again */
		mdelay(100);
		hprt0 &= ~HPRT0_RES;
		dwc2_writel(hsotg, hprt0, HPRT0);
	}
	/* Clear all interrupt status */
	hprt0 = dwc2_readl(hsotg, HPRT0);
	hprt0 |= HPRT0_CONNDET;
	hprt0 |= HPRT0_ENACHG;
	hprt0 &= ~HPRT0_ENA;
	dwc2_writel(hsotg, hprt0, HPRT0);

	hprt0 = dwc2_readl(hsotg, HPRT0);

	/* Clear all pending interrupts */
	dwc2_writel(hsotg, 0xffffffff, GINTSTS);

	/* Restore global registers */
	ret = dwc2_restore_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to restore registers\n",
			__func__);
		return ret;
	}

	/* Restore host registers */
	ret = dwc2_restore_host_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to restore host registers\n",
			__func__);
		return ret;
	}

	if (rem_wakeup) {
		dwc2_hcd_rem_wakeup(hsotg);
		/*
		 * Change "port_connect_status_change" flag to re-enumerate,
		 * because after exit from hibernation port connection status
		 * is not detected.
		 */
		hsotg->flags.b.port_connect_status_change = 1;
	}

	hsotg->hibernated = 0;
	hsotg->bus_suspended = 0;
	hsotg->lx_state = DWC2_L0;
	dev_dbg(hsotg->dev, "Host hibernation restore complete\n");
	return ret;
}
5741
/*
 * Decide whether the PHY may be powered off during suspend. The Linux
 * wakeup-descendant checks are compiled out in this port, so poweroff is
 * always permitted.
 */
bool dwc2_host_can_poweroff_phy(struct dwc2_hsotg *dwc2)
{
#if 0
	struct usb_device *root_hub = dwc2_hsotg_to_hcd(dwc2)->self.root_hub;

	/* If the controller isn't allowed to wakeup then we can power off. */
	if (!device_may_wakeup(dwc2->dev))
		return true;

	/*
	 * We don't want to power off the PHY if something under the
	 * root hub has wakeup enabled.
	 */
	if (usb_wakeup_enabled_descendants(root_hub))
		return false;
#endif

	/* No reason to keep the PHY powered, so allow poweroff */
	return true;
}
5762
5763 /**
5764 * dwc2_host_enter_partial_power_down() - Put controller in partial
5765 * power down.
5766 *
5767 * @hsotg: Programming view of the DWC_otg controller
5768 *
5769 * Return: non-zero if failed to enter host partial power down.
5770 *
5771 * This function is for entering Host mode partial power down.
5772 */
int dwc2_host_enter_partial_power_down(struct dwc2_hsotg *hsotg)
{
	u32 pcgcctl;
	u32 hprt0;
	int ret = 0;

	dev_dbg(hsotg->dev, "Entering host partial power down started.\n");

	/* Put this port in suspend mode. */
	hprt0 = dwc2_read_hprt0(hsotg);
	hprt0 |= HPRT0_SUSP;
	dwc2_writel(hsotg, hprt0, HPRT0);
	udelay(5);

	/* Wait for the HPRT0.PrtSusp register field to be set */
	if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
		dev_warn(hsotg->dev, "Suspend wasn't generated\n");

	/* Backup all registers */
	ret = dwc2_backup_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
			__func__);
		return ret;
	}

	ret = dwc2_backup_host_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup host registers\n",
			__func__);
		return ret;
	}

	/*
	 * Clear any pending interrupts since dwc2 will not be able to
	 * clear them after entering partial_power_down.
	 */
	dwc2_writel(hsotg, 0xffffffff, GINTSTS);

	/* Put the controller in low power state */
	pcgcctl = dwc2_readl(hsotg, PCGCTL);

	/* Clamp power, reset the power-down module, then stop the PHY clock */
	pcgcctl |= PCGCTL_PWRCLMP;
	dwc2_writel(hsotg, pcgcctl, PCGCTL);
	udelay(5);

	pcgcctl |= PCGCTL_RSTPDWNMODULE;
	dwc2_writel(hsotg, pcgcctl, PCGCTL);
	udelay(5);

	pcgcctl |= PCGCTL_STOPPCLK;
	dwc2_writel(hsotg, pcgcctl, PCGCTL);

	/* Set in_ppd flag to 1 as here core enters suspend. */
	hsotg->in_ppd = 1;
	hsotg->lx_state = DWC2_L2;
	hsotg->bus_suspended = true;

	dev_dbg(hsotg->dev, "Entering host partial power down completed.\n");

	return ret;
}
5835
/*
 * dwc2_host_exit_partial_power_down() - Exit controller from host partial
 * power down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @rem_wakeup: indicates whether resume is initiated by remote wakeup.
 * @restore: indicates whether need to restore the registers or not.
 *
 * Return: non-zero if failed to exit host partial power down.
 *
 * This function is for exiting from Host mode partial power down.
 */
int dwc2_host_exit_partial_power_down(struct dwc2_hsotg *hsotg,
				      int rem_wakeup, bool restore)
{
	u32 pcgcctl;
	int ret = 0;
	u32 hprt0;

	dev_dbg(hsotg->dev, "Exiting host partial power down started.\n");

	/* Undo the entry sequence in reverse: clock, clamp, reset module */
	pcgcctl = dwc2_readl(hsotg, PCGCTL);
	pcgcctl &= ~PCGCTL_STOPPCLK;
	dwc2_writel(hsotg, pcgcctl, PCGCTL);
	udelay(5);

	pcgcctl = dwc2_readl(hsotg, PCGCTL);
	pcgcctl &= ~PCGCTL_PWRCLMP;
	dwc2_writel(hsotg, pcgcctl, PCGCTL);
	udelay(5);

	pcgcctl = dwc2_readl(hsotg, PCGCTL);
	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
	dwc2_writel(hsotg, pcgcctl, PCGCTL);

	udelay(100);
	if (restore) {
		ret = dwc2_restore_global_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to restore registers\n",
				__func__);
			return ret;
		}

		ret = dwc2_restore_host_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to restore host registers\n",
				__func__);
			return ret;
		}
	}

	/* Drive resume signaling and exit suspend mode on the port. */
	hprt0 = dwc2_read_hprt0(hsotg);
	hprt0 |= HPRT0_RES;
	hprt0 &= ~HPRT0_SUSP;
	dwc2_writel(hsotg, hprt0, HPRT0);
	udelay(5);

	if (!rem_wakeup) {
		/* Stop driving resume signaling on the port. */
		hprt0 = dwc2_read_hprt0(hsotg);
		hprt0 &= ~HPRT0_RES;
		dwc2_writel(hsotg, hprt0, HPRT0);

		hsotg->bus_suspended = false;
	} else {
		/* Turn on the port power bit. */
		hprt0 = dwc2_read_hprt0(hsotg);
		hprt0 |= HPRT0_PWR;
		dwc2_writel(hsotg, hprt0, HPRT0);

		/* Connect hcd. */
		dwc2_hcd_connect(hsotg);

		/* Let the wakeup-detected timeout finish resume signaling */
		timeout_add_msec(&hsotg->wkp_timer, 71);
	}

	/* Set in_ppd to 0 and lx_state to L0 as the core exits suspend. */
	hsotg->in_ppd = 0;
	hsotg->lx_state = DWC2_L0;

	dev_dbg(hsotg->dev, "Exiting host partial power down completed.\n");
	return ret;
}
5921
5922 /**
5923 * dwc2_host_enter_clock_gating() - Put controller in clock gating.
5924 *
5925 * @hsotg: Programming view of the DWC_otg controller
5926 *
5927 * This function is for entering Host mode clock gating.
5928 */
void dwc2_host_enter_clock_gating(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;
	u32 pcgctl;

	dev_dbg(hsotg->dev, "Entering host clock gating.\n");

	/* Put this port in suspend mode. */
	hprt0 = dwc2_read_hprt0(hsotg);
	hprt0 |= HPRT0_SUSP;
	dwc2_writel(hsotg, hprt0, HPRT0);

	/* Stop the PHY clock as suspend is received. */
	pcgctl = dwc2_readl(hsotg, PCGCTL);
	pcgctl |= PCGCTL_STOPPCLK;
	dwc2_writel(hsotg, pcgctl, PCGCTL);
	udelay(5);

	/* Gate hclk as suspend is received. */
	pcgctl = dwc2_readl(hsotg, PCGCTL);
	pcgctl |= PCGCTL_GATEHCLK;
	dwc2_writel(hsotg, pcgctl, PCGCTL);
	udelay(5);

	/* Record the suspended state for the resume path */
	hsotg->bus_suspended = true;
	hsotg->lx_state = DWC2_L2;
}
5956
5957 /**
5958 * dwc2_host_exit_clock_gating() - Exit controller from clock gating.
5959 *
5960 * @hsotg: Programming view of the DWC_otg controller
5961 * @rem_wakeup: indicates whether resume is initiated by remote wakeup
5962 *
5963 * This function is for exiting Host mode clock gating.
5964 */
void dwc2_host_exit_clock_gating(struct dwc2_hsotg *hsotg, int rem_wakeup)
{
	u32 hprt0;
	u32 pcgctl;

	dev_dbg(hsotg->dev, "Exiting host clock gating.\n");

	/* Clear the Gate hclk. */
	pcgctl = dwc2_readl(hsotg, PCGCTL);
	pcgctl &= ~PCGCTL_GATEHCLK;
	dwc2_writel(hsotg, pcgctl, PCGCTL);
	udelay(5);

	/* Restart the PHY clock. */
	pcgctl = dwc2_readl(hsotg, PCGCTL);
	pcgctl &= ~PCGCTL_STOPPCLK;
	dwc2_writel(hsotg, pcgctl, PCGCTL);
	udelay(5);

	/* Drive resume signaling and exit suspend mode on the port. */
	hprt0 = dwc2_read_hprt0(hsotg);
	hprt0 |= HPRT0_RES;
	hprt0 &= ~HPRT0_SUSP;
	dwc2_writel(hsotg, hprt0, HPRT0);
	udelay(5);

	if (!rem_wakeup) {
		/* In case of port resume need to wait for 40 ms */
		dwc2_msleep(USB_RESUME_TIMEOUT);

		/* Stop driving resume signaling on the port. */
		hprt0 = dwc2_read_hprt0(hsotg);
		hprt0 &= ~HPRT0_RES;
		dwc2_writel(hsotg, hprt0, HPRT0);

		hsotg->bus_suspended = false;
		hsotg->lx_state = DWC2_L0;
	} else {
		/* Remote wakeup: let the wakeup timeout finish the resume */
		timeout_add_msec(&hsotg->wkp_timer, 71);
	}
}
6006