1 /* $OpenBSD: dwc2_hcdintr.c,v 1.14 2022/09/04 08:42:40 mglocker Exp $ */
2 /* $NetBSD: dwc2_hcdintr.c,v 1.11 2014/11/24 10:14:14 skrll Exp $ */
3
4 /*
5 * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
6 *
7 * Copyright (C) 2004-2013 Synopsys, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") as published by the Free Software
24 * Foundation; either version 2 of the License, or (at your option) any
25 * later version.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
28 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
29 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * This file contains the interrupt handlers for Host mode
42 */
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/pool.h>
47
48 #include <machine/bus.h>
49
50 #include <dev/usb/usb.h>
51 #include <dev/usb/usbdi.h>
52 #include <dev/usb/usbdivar.h>
53 #include <dev/usb/usb_mem.h>
54
55 #include <dev/usb/dwc2/dwc2.h>
56 #include <dev/usb/dwc2/dwc2var.h>
57
58 #include <dev/usb/dwc2/dwc2_core.h>
59 #include <dev/usb/dwc2/dwc2_hcd.h>
60
61 /*
62 * If we get this many NAKs on a split transaction we'll slow down
63 * retransmission. A 1 here means delay after the first NAK.
64 */
65 #define DWC2_NAKS_BEFORE_DELAY 3
66
67 /* This function is for debug only */
/*
 * dwc2_track_missed_sofs() - Debug-only bookkeeping for start-of-frame
 * interrupts. Compares the frame number latched by the SOF handler against
 * the value expected from the previously seen frame and logs a message when
 * one or more SOFs were missed. With CONFIG_USB_DWC2_TRACK_MISSED_SOFS it
 * also records (actual, previous) pairs and dumps them once the array fills.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
STATIC void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
	u16 curr_frame_number = hsotg->frame_number;
	/* Frame numbers wrap; dwc2_frame_num_inc() handles the modulo */
	u16 expected = dwc2_frame_num_inc(hsotg->last_frame_num, 1);

	if (expected != curr_frame_number)
		dwc2_sch_vdbg(hsotg, "MISSED SOF %04x != %04x\n",
			      expected, curr_frame_number);

#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
	if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
		/* Still collecting: record only the frames that were missed */
		if (expected != curr_frame_number) {
			hsotg->frame_num_array[hsotg->frame_num_idx] =
				curr_frame_number;
			hsotg->last_frame_num_array[hsotg->frame_num_idx] =
				hsotg->last_frame_num;
			hsotg->frame_num_idx++;
		}
	} else if (!hsotg->dumped_frame_num_array) {
		/* Array full: dump the collected pairs exactly once */
		int i;

		dev_info(hsotg->dev, "Frame     Last Frame\n");
		dev_info(hsotg->dev, "-----     ----------\n");
		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
			dev_info(hsotg->dev, "0x%04x    0x%04x\n",
				 hsotg->frame_num_array[i],
				 hsotg->last_frame_num_array[i]);
		}
		hsotg->dumped_frame_num_array = 1;
	}
#endif
	hsotg->last_frame_num = curr_frame_number;
}
101
/*
 * dwc2_hc_handle_tt_clear() - Marks the transaction translator (TT) buffer
 * of a FS/LS device behind a HS hub as dirty after a failed split, so it
 * can be cleared before the QH is reused. The Linux usb_hub_clear_tt_buffer()
 * call is compiled out in this port (#if 0), so tt_buffer_dirty is set and
 * then immediately cleared again — effectively a no-op kept for structure.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @chan:  Host channel the split transaction ran on
 * @qtd:   QTD whose URB status decides whether a clear is needed
 */
STATIC void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd)
{
	//struct usb_device *root_hub = dwc2_hsotg_to_hcd(hsotg)->self.root_hub;
	//struct urb *usb_urb;

	/* Nothing to do without a QH, on high-speed (no TT), or without an URB */
	if (!chan->qh)
		return;

	if (chan->qh->dev_speed == USB_SPEED_HIGH)
		return;

	if (!qtd->urb)
		return;

#if 0
	usb_urb = qtd->urb->priv;
	if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
		return;

	/*
	 * The root hub doesn't really have a TT, but Linux thinks it
	 * does because how could you have a "high speed hub" that
	 * talks directly to low speed devices without a TT? It's all
	 * lies. Lies, I tell you.
	 */
	if (usb_urb->dev->tt->hub == root_hub)
		return;
#endif

	/* STALL (-EPIPE) and short-read (-EREMOTEIO) don't dirty the TT */
	if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
		chan->qh->tt_buffer_dirty = 1;
#if 0
		if (usb_hub_clear_tt_buffer(usb_urb))
			/* Clear failed; let's hope things work anyway */
#endif
			chan->qh->tt_buffer_dirty = 0;
	}
}
142
143 /*
144 * Handles the start-of-frame interrupt in host mode. Non-periodic
145 * transactions may be queued to the DWC_otg controller for the current
146 * (micro)frame. Periodic transactions may be queued to the controller
147 * for the next (micro)frame.
148 */
STATIC void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
{
	struct list_head *qh_entry;
	struct dwc2_qh *qh;
	enum dwc2_transaction_type tr_type;

	/* Clear interrupt (GINTSTS is write-1-to-clear) */
	dwc2_writel(hsotg, GINTSTS_SOF, GINTSTS);

#ifdef DEBUG_SOF
	dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
#endif

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	dwc2_track_missed_sofs(hsotg);

	/* Determine whether any periodic QHs should be executed */
	qh_entry = hsotg->periodic_sched_inactive.next;
	while (qh_entry != &hsotg->periodic_sched_inactive) {
		qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
		/* Advance first: list_move_tail() below unlinks qh */
		qh_entry = qh_entry->next;
		if (dwc2_frame_num_le(qh->next_active_frame,
				      hsotg->frame_number)) {
			dwc2_sch_vdbg(hsotg, "QH=%p ready fn=%04x, nxt=%04x\n",
				      qh, hsotg->frame_number,
				      qh->next_active_frame);

			/*
			 * Move QH to the ready list to be executed next
			 * (micro)frame
			 */
			list_move_tail(&qh->qh_list_entry,
				       &hsotg->periodic_sched_ready);
		}
	}
	/* Kick the scheduler in case anything became runnable */
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);
}
189
190 /*
191 * Handles the Rx FIFO Level Interrupt, which indicates that there is
192 * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
193 * memory if the DWC_otg controller is operating in Slave mode.
194 */
STATIC void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
{
	u32 grxsts, chnum, bcnt, dpid, pktsts;
	struct dwc2_host_chan *chan;

	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");

	/*
	 * GRXSTSP is a destructive "pop" read: it removes the status entry
	 * from the Rx queue, so it must be read exactly once per interrupt.
	 */
	grxsts = dwc2_readl(hsotg, GRXSTSP);
	chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
	chan = hsotg->hc_ptr_array[chnum];
	if (!chan) {
		dev_err(hsotg->dev, "Unable to get corresponding channel\n");
		return;
	}

	bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
	dpid = (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT;
	pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;

	/* Packet Status */
	if (dbg_perio()) {
		dev_vdbg(hsotg->dev, " Ch num = %d\n", chnum);
		dev_vdbg(hsotg->dev, " Count = %d\n", bcnt);
		dev_vdbg(hsotg->dev, " DPID = %d, chan.dpid = %d\n", dpid,
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, " PStatus = %d\n", pktsts);
	}

	switch (pktsts) {
	case GRXSTS_PKTSTS_HCHIN:
		/* Read the data into the host buffer (Slave mode only) */
		if (bcnt > 0) {
			dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);

			/* Update the HC fields for the next packet received */
			chan->xfer_count += bcnt;
			chan->xfer_buf += bcnt;
		}
		break;
	case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
	case GRXSTS_PKTSTS_DATATOGGLEERR:
	case GRXSTS_PKTSTS_HCHHALTED:
		/* Handled in interrupt, just ignore data */
		break;
	default:
		dev_err(hsotg->dev,
			"RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
		break;
	}
}
246
247 /*
248 * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
249 * data packets may be written to the FIFO for OUT transfers. More requests
250 * may be written to the non-periodic request queue for IN transfers. This
251 * interrupt is enabled only in Slave mode.
252 */
/*
 * dwc2_np_tx_fifo_empty_intr() - Non-periodic Tx FIFO half-empty handler
 * (Slave mode only): asks the scheduler to push more non-periodic
 * transactions into the freed FIFO space.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
STATIC void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
}
258
259 /*
260 * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
261 * packets may be written to the FIFO for OUT transfers. More requests may be
262 * written to the periodic request queue for IN transfers. This interrupt is
263 * enabled only in Slave mode.
264 */
/*
 * dwc2_perio_tx_fifo_empty_intr() - Periodic Tx FIFO half-empty handler
 * (Slave mode only): asks the scheduler to push more periodic transactions
 * into the freed FIFO space.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
STATIC void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
}
271
/*
 * dwc2_hprt0_enable() - Handles the port transitioning to the enabled state.
 * Reprograms HFIR.FrInterval for the negotiated speed and, when FS/LS
 * low-power PHY clocking is supported, selects the proper PHY clock
 * (48 MHz, or 6 MHz for LS devices if configured). Any clock change
 * requires a new port reset, which is scheduled on the reset work queue;
 * otherwise the port-reset-change flag is set for the hub driver.
 *
 * @hsotg:        Programming view of the DWC_otg controller
 * @hprt0:        Current HPRT0 register value
 * @hprt0_modify: HPRT0 value with the write-to-clear bits masked out;
 *                HPRT0_RST is OR'd in here if a reset is issued
 */
STATIC void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
			      u32 *hprt0_modify)
{
	struct dwc2_core_params *params = &hsotg->params;
	int do_reset = 0;
	u32 usbcfg;
	u32 prtspd;
	u32 hcfg;
	u32 fslspclksel;
	u32 hfir;

	dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Every time when port enables calculate HFIR.FrInterval */
	hfir = dwc2_readl(hsotg, HFIR);
	hfir &= ~HFIR_FRINT_MASK;
	/* << binds tighter than &, so this is (interval << shift) & mask */
	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
		HFIR_FRINT_MASK;
	dwc2_writel(hsotg, hfir, HFIR);

	/* Check if we need to adjust the PHY clock speed for low power */
	if (!params->host_support_fs_ls_low_power) {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;

		dwc2_root_intr(hsotg->hsotg_sc); /* Required for OpenBSD */
		return;
	}

	usbcfg = dwc2_readl(hsotg, GUSBCFG);
	prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;

	if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
		/* Low power */
		if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
			/* Set PHY low power clock select for FS/LS devices */
			usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
			dwc2_writel(hsotg, usbcfg, GUSBCFG);
			do_reset = 1;
		}

		hcfg = dwc2_readl(hsotg, HCFG);
		fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
			      HCFG_FSLSPCLKSEL_SHIFT;

		if (prtspd == HPRT0_SPD_LOW_SPEED &&
		    params->host_ls_low_power_phy_clk) {
			/* 6 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 6 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				dwc2_writel(hsotg, hcfg, HCFG);
				do_reset = 1;
			}
		} else {
			/* 48 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 48 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				dwc2_writel(hsotg, hcfg, HCFG);
				do_reset = 1;
			}
		}
	} else {
		/* Not low power */
		if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
			usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
			dwc2_writel(hsotg, usbcfg, GUSBCFG);
			do_reset = 1;
		}
	}

	if (do_reset) {
		/* PHY clock changed: reset the port again after 60 ms */
		*hprt0_modify |= HPRT0_RST;
		dwc2_writel(hsotg, *hprt0_modify, HPRT0);
		queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
				   msecs_to_jiffies(60));
	} else {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
		dwc2_root_intr(hsotg->hsotg_sc); /* Required for OpenBSD */
	}
}
361
362 /*
363 * There are multiple conditions that can cause a port interrupt. This function
364 * determines which interrupt conditions have occurred and handles them
365 * appropriately.
366 */
STATIC void dwc2_port_intr(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;
	u32 hprt0_modify;

	dev_vdbg(hsotg->dev, "--Port Interrupt--\n");

	hprt0 = dwc2_readl(hsotg, HPRT0);
	hprt0_modify = hprt0;

	/*
	 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
	 * GINTSTS. These bits are write-1-to-clear, and HPRT0_ENA is
	 * write-1-to-disable, so they must be masked out of any value
	 * written back to avoid clearing conditions unintentionally.
	 */
	hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
			  HPRT0_OVRCURRCHG);

	/*
	 * Port Connect Detected
	 * Set flag and clear if detected
	 */
	if (hprt0 & HPRT0_CONNDET) {
		dwc2_writel(hsotg, hprt0_modify | HPRT0_CONNDET, HPRT0);

		dev_vdbg(hsotg->dev,
			 "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
			 hprt0);
		dwc2_hcd_connect(hsotg);

		/*
		 * The Hub driver asserts a reset when it sees port connect
		 * status change flag
		 */
	}

	/*
	 * Port Enable Changed
	 * Clear if detected - Set internal flag if disabled
	 */
	if (hprt0 & HPRT0_ENACHG) {
		dwc2_writel(hsotg, hprt0_modify | HPRT0_ENACHG, HPRT0);
		dev_vdbg(hsotg->dev,
			 " --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
			 hprt0, !!(hprt0 & HPRT0_ENA));
		if (hprt0 & HPRT0_ENA) {
			hsotg->new_connection = true;
			dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
		} else {
			hsotg->flags.b.port_enable_change = 1;
			if (hsotg->params.dma_desc_fs_enable) {
				u32 hcfg;

				/*
				 * Port disabled with FS descriptor DMA
				 * configured: fall back to buffer DMA until
				 * the next connection.
				 */
				hsotg->params.dma_desc_enable = false;
				hsotg->new_connection = false;
				hcfg = dwc2_readl(hsotg, HCFG);
				hcfg &= ~HCFG_DESCDMA;
				dwc2_writel(hsotg, hcfg, HCFG);
			}
		}
	}

	/* Overcurrent Change Interrupt */
	if (hprt0 & HPRT0_OVRCURRCHG) {
		dwc2_writel(hsotg, hprt0_modify | HPRT0_OVRCURRCHG,
			    HPRT0);
		dev_vdbg(hsotg->dev,
			 " --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
			 hprt0);
		hsotg->flags.b.port_over_current_change = 1;
	}

	/* Required for OpenBSD: propagate status changes to the root hub */
	if (hsotg->flags.b.port_connect_status_change ||
	    hsotg->flags.b.port_enable_change ||
	    hsotg->flags.b.port_over_current_change)
		dwc2_root_intr(hsotg->hsotg_sc);
}
444
445 /*
446 * Gets the actual length of a transfer after the transfer halts. halt_status
447 * holds the reason for the halt.
448 *
449 * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
450 * is set to 1 upon return if less than the requested number of bytes were
451 * transferred. short_read may also be NULL on entry, in which case it remains
452 * unchanged.
453 */
dwc2_get_actual_xfer_length(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,int chnum,struct dwc2_qtd * qtd,enum dwc2_halt_status halt_status,int * short_read)454 STATIC u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
455 struct dwc2_host_chan *chan, int chnum,
456 struct dwc2_qtd *qtd,
457 enum dwc2_halt_status halt_status,
458 int *short_read)
459 {
460 u32 hctsiz, count, length;
461
462 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
463
464 if (halt_status == DWC2_HC_XFER_COMPLETE) {
465 if (chan->ep_is_in) {
466 count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
467 TSIZ_XFERSIZE_SHIFT;
468 length = chan->xfer_len - count;
469 if (short_read)
470 *short_read = (count != 0);
471 } else if (chan->qh->do_split) {
472 length = qtd->ssplit_out_xfer_count;
473 } else {
474 length = chan->xfer_len;
475 }
476 } else {
477 /*
478 * Must use the hctsiz.pktcnt field to determine how much data
479 * has been transferred. This field reflects the number of
480 * packets that have been transferred via the USB. This is
481 * always an integral number of packets if the transfer was
482 * halted before its normal completion. (Can't use the
483 * hctsiz.xfersize field because that reflects the number of
484 * bytes transferred via the AHB, not the USB).
485 */
486 count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
487 length = (chan->start_pkt_count - count) * chan->max_packet;
488 }
489
490 return length;
491 }
492
493 /**
494 * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
495 * Complete interrupt on the host channel. Updates the actual_length field
496 * of the URB based on the number of bytes transferred via the host channel.
497 * Sets the URB status if the data transfer is finished.
498 *
499 * @hsotg: Programming view of the DWC_otg controller
500 * @chan: Programming view of host channel
501 * @chnum: Channel number
502 * @urb: Processing URB
503 * @qtd: Queue transfer descriptor
504 *
505 * Return: 1 if the data transfer specified by the URB is completely finished,
506 * 0 otherwise
507 */
STATIC int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 struct dwc2_hcd_urb *urb,
				 struct dwc2_qtd *qtd)
{
	u32 hctsiz;
	int xfer_done = 0;
	int short_read = 0;
	int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
						      DWC2_HC_XFER_COMPLETE,
						      &short_read);

	/* Never let actual_length exceed the requested length */
	if (urb->actual_length + xfer_length > urb->length) {
		dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
		xfer_length = urb->length - urb->actual_length;
	}

	dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
		 urb->actual_length, xfer_length);
	urb->actual_length += xfer_length;

	/*
	 * A bulk OUT that filled the request exactly to a multiple of
	 * max-packet with URB_SEND_ZERO_PACKET set still owes a zero-length
	 * packet, so it is not done yet.
	 */
	if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
	    (urb->flags & URB_SEND_ZERO_PACKET) &&
	    urb->actual_length >= urb->length &&
	    !(urb->length % chan->max_packet)) {
		xfer_done = 0;
	} else if (short_read || urb->actual_length >= urb->length) {
		/* Short read or full length transferred: URB is finished */
		xfer_done = 1;
		urb->status = 0;
	}

	hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
	dev_vdbg(hsotg->dev, " chan->xfer_len %d\n", chan->xfer_len);
	dev_vdbg(hsotg->dev, " hctsiz.xfersize %d\n",
		 (hctsiz & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
	dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n", urb->length);
	dev_vdbg(hsotg->dev, " urb->actual_length %d\n", urb->actual_length);
	dev_vdbg(hsotg->dev, " short_read %d, xfer_done %d\n", short_read,
		 xfer_done);

	return xfer_done;
}
552
553 /*
554 * Save the starting data toggle for the next transfer. The data toggle is
555 * saved in the QH for non-control transfers and it's saved in the QTD for
556 * control transfers.
557 */
dwc2_hcd_save_data_toggle(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,int chnum,struct dwc2_qtd * qtd)558 void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
559 struct dwc2_host_chan *chan, int chnum,
560 struct dwc2_qtd *qtd)
561 {
562 u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
563 u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
564
565 if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
566 #if 0
567 if (WARN(!chan || !chan->qh,
568 "chan->qh must be specified for non-control eps\n"))
569 return;
570 #endif
571
572 if (pid == TSIZ_SC_MC_PID_DATA0)
573 chan->qh->data_toggle = DWC2_HC_PID_DATA0;
574 else
575 chan->qh->data_toggle = DWC2_HC_PID_DATA1;
576 } else {
577 #if 0
578 if (WARN(!qtd,
579 "qtd must be specified for control eps\n"))
580 return;
581 #endif
582
583 if (pid == TSIZ_SC_MC_PID_DATA0)
584 qtd->data_toggle = DWC2_HC_PID_DATA0;
585 else
586 qtd->data_toggle = DWC2_HC_PID_DATA1;
587 }
588 }
589
590 /**
591 * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
592 * the transfer is stopped for any reason. The fields of the current entry in
593 * the frame descriptor array are set based on the transfer state and the input
594 * halt_status. Completes the Isochronous URB if all the URB frames have been
595 * completed.
596 *
597 * @hsotg: Programming view of the DWC_otg controller
598 * @chan: Programming view of host channel
599 * @chnum: Channel number
600 * @halt_status: Reason for halting a host channel
601 * @qtd: Queue transfer descriptor
602 *
603 * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
604 * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
605 */
STATIC enum dwc2_halt_status dwc2_update_isoc_urb_state(
		struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		int chnum, struct dwc2_qtd *qtd,
		enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_hcd_urb *urb = qtd->urb;

	if (!urb)
		return DWC2_HC_XFER_NO_HALT_STATUS;

	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];

	switch (halt_status) {
	case DWC2_HC_XFER_COMPLETE:
		frame_desc->status = 0;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);
		break;
	case DWC2_HC_XFER_FRAME_OVERRUN:
		urb->error_count++;
		/* -ENOSR: IN buffer overrun; -ECOMM: OUT buffer underrun */
		if (chan->ep_is_in)
			frame_desc->status = -ENOSR;
		else
			frame_desc->status = -ECOMM;
		frame_desc->actual_length = 0;
		break;
	case DWC2_HC_XFER_BABBLE_ERR:
		urb->error_count++;
		frame_desc->status = -EOVERFLOW;
		/* Don't need to update actual_length in this case */
		break;
	case DWC2_HC_XFER_XACT_ERR:
		urb->error_count++;
		frame_desc->status = -EPROTO;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Skip whole frame */
		if (chan->qh->do_split &&
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->params.host_dma) {
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
		}

		break;
	default:
		dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
			halt_status);
		break;
	}

	if (++qtd->isoc_frame_index == urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers. The individual
		 * frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		halt_status = DWC2_HC_XFER_URB_COMPLETE;
	} else {
		halt_status = DWC2_HC_XFER_COMPLETE;
	}

	return halt_status;
}
672
673 /*
674 * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
675 * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
676 * still linked to the QH, the QH is added to the end of the inactive
677 * non-periodic schedule. For periodic QHs, removes the QH from the periodic
678 * schedule if no more QTDs are linked to the QH.
679 */
dwc2_deactivate_qh(struct dwc2_hsotg * hsotg,struct dwc2_qh * qh,int free_qtd)680 STATIC void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
681 int free_qtd)
682 {
683 int continue_split = 0;
684 struct dwc2_qtd *qtd;
685
686 if (dbg_qh(qh))
687 dev_vdbg(hsotg->dev, " %s(%p,%p,%d)\n", __func__,
688 hsotg, qh, free_qtd);
689
690 if (list_empty(&qh->qtd_list)) {
691 dev_dbg(hsotg->dev, "## QTD list empty ##\n");
692 goto no_qtd;
693 }
694
695 qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
696
697 if (qtd->complete_split)
698 continue_split = 1;
699 else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
700 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
701 continue_split = 1;
702
703 if (free_qtd) {
704 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
705 continue_split = 0;
706 }
707
708 no_qtd:
709 qh->channel = NULL;
710 dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
711 }
712
713 /**
714 * dwc2_release_channel() - Releases a host channel for use by other transfers
715 *
716 * @hsotg: The HCD state structure
717 * @chan: The host channel to release
718 * @qtd: The QTD associated with the host channel. This QTD may be
719 * freed if the transfer is complete or an error has occurred.
720 * @halt_status: Reason the channel is being released. This status
721 * determines the actions taken by this function.
722 *
723 * Also attempts to select and queue more transactions since at least one host
724 * channel is available.
725 */
STATIC void dwc2_release_channel(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan,
				 struct dwc2_qtd *qtd,
				 enum dwc2_halt_status halt_status)
{
	enum dwc2_transaction_type tr_type;
	u32 haintmsk;
	int free_qtd = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " %s: channel %d, halt_status %d\n",
			 __func__, chan->hc_num, halt_status);

	/* Decide whether the QTD is finished and should be freed */
	switch (halt_status) {
	case DWC2_HC_XFER_URB_COMPLETE:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_AHB_ERR:
	case DWC2_HC_XFER_STALL:
	case DWC2_HC_XFER_BABBLE_ERR:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_XACT_ERR:
		/* Give up after three consecutive transaction errors */
		if (qtd && qtd->error_count >= 3) {
			dev_vdbg(hsotg->dev,
				 " Complete URB with transaction error\n");
			free_qtd = 1;
			dwc2_host_complete(hsotg, qtd, -EPROTO);
		}
		break;
	case DWC2_HC_XFER_URB_DEQUEUE:
		/*
		 * The QTD has already been removed and the QH has been
		 * deactivated. Don't want to do anything except release the
		 * host channel and try to queue more transfers.
		 */
		goto cleanup;
	case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
		dev_vdbg(hsotg->dev, " Complete URB with I/O error\n");
		free_qtd = 1;
		dwc2_host_complete(hsotg, qtd, -EIO);
		break;
	case DWC2_HC_XFER_NO_HALT_STATUS:
	default:
		break;
	}

	dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);

cleanup:
	/*
	 * Release the host channel for use by other transfers. The cleanup
	 * function clears the channel interrupt enables and conditions, so
	 * there's no need to clear the Channel Halted interrupt separately.
	 */
	if (!list_empty(&chan->hc_list_entry))
		list_del(&chan->hc_list_entry);
	dwc2_hc_cleanup(hsotg, chan);
	list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);

	if (hsotg->params.uframe_sched) {
		hsotg->available_host_channels++;
	} else {
		switch (chan->ep_type) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			hsotg->non_periodic_channels--;
			break;
		default:
			/*
			 * Don't release reservations for periodic channels
			 * here. That's done when a periodic transfer is
			 * descheduled (i.e. when the QH is removed from the
			 * periodic schedule).
			 */
			break;
		}
	}

	/* Mask this channel's interrupt in the Host All Channels mask */
	haintmsk = dwc2_readl(hsotg, HAINTMSK);
	haintmsk &= ~(1 << chan->hc_num);
	dwc2_writel(hsotg, haintmsk, HAINTMSK);

	/* Try to queue more transfers now that there's a free channel */
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);
}
814
815 /*
816 * Halts a host channel. If the channel cannot be halted immediately because
817 * the request queue is full, this function ensures that the FIFO empty
818 * interrupt for the appropriate queue is enabled so that the halt request can
819 * be queued when there is space in the request queue.
820 *
821 * This function may also be called in DMA mode. In that case, the channel is
822 * simply released since the core always halts the channel automatically in
823 * DMA mode.
824 */
STATIC void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
			      enum dwc2_halt_status halt_status)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (hsotg->params.host_dma) {
		/* In DMA mode the core halts the channel itself */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
		return;
	}

	/* Slave mode processing */
	dwc2_hc_halt(hsotg, chan, halt_status);

	if (chan->halt_on_queue) {
		/* Request queue was full: the halt is queued for later */
		u32 gintmsk;

		dev_vdbg(hsotg->dev, "Halt on queue\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			/*
			 * Make sure the Non-periodic Tx FIFO empty interrupt
			 * is enabled so that the non-periodic schedule will
			 * be processed
			 */
			gintmsk = dwc2_readl(hsotg, GINTMSK);
			gintmsk |= GINTSTS_NPTXFEMP;
			dwc2_writel(hsotg, gintmsk, GINTMSK);
		} else {
			dev_vdbg(hsotg->dev, "isoc/intr\n");
			/*
			 * Move the QH from the periodic queued schedule to
			 * the periodic assigned schedule. This allows the
			 * halt to be queued when the periodic schedule is
			 * processed.
			 */
			list_move_tail(&chan->qh->qh_list_entry,
				       &hsotg->periodic_sched_assigned);

			/*
			 * Make sure the Periodic Tx FIFO Empty interrupt is
			 * enabled so that the periodic schedule will be
			 * processed
			 */
			gintmsk = dwc2_readl(hsotg, GINTMSK);
			gintmsk |= GINTSTS_PTXFEMP;
			dwc2_writel(hsotg, gintmsk, GINTMSK);
		}
	}
}
879
880 /*
881 * Performs common cleanup for non-periodic transfers after a Transfer
882 * Complete interrupt. This function should be called after any endpoint type
883 * specific handling is finished to release the host channel.
884 */
dwc2_complete_non_periodic_xfer(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,int chnum,struct dwc2_qtd * qtd,enum dwc2_halt_status halt_status)885 STATIC void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
886 struct dwc2_host_chan *chan,
887 int chnum, struct dwc2_qtd *qtd,
888 enum dwc2_halt_status halt_status)
889 {
890 dev_vdbg(hsotg->dev, "%s()\n", __func__);
891
892 qtd->error_count = 0;
893
894 if (chan->hcint & HCINTMSK_NYET) {
895 /*
896 * Got a NYET on the last transaction of the transfer. This
897 * means that the endpoint should be in the PING state at the
898 * beginning of the next transfer.
899 */
900 dev_vdbg(hsotg->dev, "got NYET\n");
901 chan->qh->ping_state = 1;
902 }
903
904 /*
905 * Always halt and release the host channel to make it available for
906 * more transfers. There may still be more phases for a control
907 * transfer or more data packets for a bulk transfer at this point,
908 * but the host channel is still halted. A channel will be reassigned
909 * to the transfer when the non-periodic schedule is processed after
910 * the channel is released. This allows transactions to be queued
911 * properly via dwc2_hcd_queue_transactions, which also enables the
912 * Tx FIFO Empty interrupt if necessary.
913 */
914 if (chan->ep_is_in) {
915 /*
916 * IN transfers in Slave mode require an explicit disable to
917 * halt the channel. (In DMA mode, this call simply releases
918 * the channel.)
919 */
920 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
921 } else {
922 /*
923 * The channel is automatically disabled by the core for OUT
924 * transfers in Slave mode
925 */
926 dwc2_release_channel(hsotg, chan, qtd, halt_status);
927 }
928 }
929
930 /*
931 * Performs common cleanup for periodic transfers after a Transfer Complete
932 * interrupt. This function should be called after any endpoint type specific
933 * handling is finished to release the host channel.
934 */
dwc2_complete_periodic_xfer(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,int chnum,struct dwc2_qtd * qtd,enum dwc2_halt_status halt_status)935 STATIC void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
936 struct dwc2_host_chan *chan, int chnum,
937 struct dwc2_qtd *qtd,
938 enum dwc2_halt_status halt_status)
939 {
940 u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
941
942 qtd->error_count = 0;
943
944 if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
945 /* Core halts channel in these cases */
946 dwc2_release_channel(hsotg, chan, qtd, halt_status);
947 else
948 /* Flush any outstanding requests from the Tx queue */
949 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
950 }
951
/*
 * Handles Transfer Complete for an isochronous IN complete-split (CSPLIT).
 * Accumulates received bytes into the current iso frame descriptor, copies
 * data out of the bounce buffer when an unaligned buffer was used, and
 * advances to the next frame when the current one is finished.
 *
 * Return: 1 if the host channel was released here, 0 if the caller should
 * continue its own handling (no URB, or nothing received yet).
 */
STATIC int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u32 len;
	u32 hctsiz;
	u32 pid;

	/* URB may already be gone (e.g. dequeued); nothing to account */
	if (!qtd->urb)
		return 0;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
					  DWC2_HC_XFER_COMPLETE, NULL);
	if (!len && !qtd->isoc_split_offset) {
		/* Nothing at all received for this frame; retry the split */
		qtd->complete_split = 0;
		return 0;
	}

	frame_desc->actual_length += len;

	if (chan->align_buf) {
		/*
		 * DMA landed in the aligned bounce buffer; sync for CPU,
		 * copy the received bytes into the URB buffer at the
		 * current split offset, then re-arm the buffer for reading.
		 */
		dev_vdbg(hsotg->dev, "non-aligned buffer\n");
		usb_syncmem(qtd->urb->usbdma, chan->qh->dw_align_buf_dma,
		    chan->qh->dw_align_buf_size, BUS_DMASYNC_POSTREAD);
		memcpy(qtd->urb->buf + frame_desc->offset +
		       qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
		usb_syncmem(qtd->urb->usbdma, chan->qh->dw_align_buf_dma,
		    chan->qh->dw_align_buf_size, BUS_DMASYNC_PREREAD);
	}

	qtd->isoc_split_offset += len;

	hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
	pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;

	/*
	 * Frame done when the descriptor is full or the HCTSIZ PID field
	 * reads 0; reset per-frame split state and move to the next frame.
	 */
	if (frame_desc->actual_length >= frame_desc->length || pid == 0) {
		frame_desc->status = 0;
		qtd->isoc_frame_index++;
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
	}

	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
		/* All frames received; complete the URB with success */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_URB_COMPLETE);
	} else {
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_NO_HALT_STATUS);
	}

	return 1;	/* Indicates that channel released */
}
1007
/*
 * Handles a host channel Transfer Complete interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 *
 * Updates QTD/URB state according to the endpoint type, completes the URB
 * towards the stack when the whole transfer is done, then halts or releases
 * the host channel via the endpoint-type specific completion helper.
 * Disables the per-channel XFERCOMPL interrupt before returning (except on
 * the ISOC descriptor-DMA early return).
 */
STATIC void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
				  struct dwc2_host_chan *chan, int chnum,
				  struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
	int pipe_type;
	int urb_xfer_done;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev,
			 "--Host Channel %d Interrupt: Transfer Complete--\n",
			 chnum);

	/* URB may already have been dequeued */
	if (!urb)
		goto handle_xfercomp_done;

	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);

	/* Descriptor DMA mode has its own completion path */
	if (hsotg->params.dma_desc_enable) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
		if (pipe_type == USB_ENDPOINT_XFER_ISOC)
			/* Do not disable the interrupt, just clear it */
			return;
		goto handle_xfercomp_done;
	}

	/* Handle xfer complete on CSPLIT */
	if (chan->qh->do_split) {
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->params.host_dma) {
			/*
			 * ISOC IN split in DMA mode: a non-zero return means
			 * the helper has already released the channel.
			 */
			if (qtd->complete_split &&
			    dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
							qtd))
				goto handle_xfercomp_done;
		} else {
			qtd->complete_split = 0;
		}
	}

	/* Update the QTD and URB states */
	switch (pipe_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* Advance the SETUP -> DATA -> STATUS phase machine */
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			/* Zero-length requests skip the DATA phase */
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 " Control setup transaction done\n");
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_DATA:
			urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
							      chnum, urb, qtd);
			if (urb_xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 " Control data transfer done\n");
			} else {
				/* More DATA packets to go; keep the toggle */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_STATUS:
			dev_vdbg(hsotg->dev, " Control transfer complete\n");
			if (urb->status == -EINPROGRESS)
				urb->status = 0;
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
			break;
		}

		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
						halt_status);
		break;
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, " Bulk transfer complete\n");
		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
						      qtd);
		if (urb_xfer_done) {
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
		} else {
			halt_status = DWC2_HC_XFER_COMPLETE;
		}

		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
						halt_status);
		break;
	case USB_ENDPOINT_XFER_INT:
		dev_vdbg(hsotg->dev, " Interrupt transfer complete\n");
		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
						      qtd);

		/*
		 * Interrupt URB is done on the first transfer complete
		 * interrupt
		 */
		if (urb_xfer_done) {
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
		} else {
			halt_status = DWC2_HC_XFER_COMPLETE;
		}

		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
					    halt_status);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, " Isochronous transfer complete\n");
		/* Only account the frame once the whole split is done */
		if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
							chnum, qtd,
							DWC2_HC_XFER_COMPLETE);
		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
					    halt_status);
		break;
	}

handle_xfercomp_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
}
1140
1141 /*
1142 * Handles a host channel STALL interrupt. This handler may be called in
1143 * either DMA mode or Slave mode.
1144 */
dwc2_hc_stall_intr(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,int chnum,struct dwc2_qtd * qtd)1145 STATIC void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1146 struct dwc2_host_chan *chan, int chnum,
1147 struct dwc2_qtd *qtd)
1148 {
1149 struct dwc2_hcd_urb *urb = qtd->urb;
1150 int pipe_type;
1151
1152 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1153 chnum);
1154
1155 if (hsotg->params.dma_desc_enable) {
1156 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1157 DWC2_HC_XFER_STALL);
1158 goto handle_stall_done;
1159 }
1160
1161 if (!urb)
1162 goto handle_stall_halt;
1163
1164 pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1165
1166 if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
1167 dwc2_host_complete(hsotg, qtd, -EPIPE);
1168
1169 if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1170 pipe_type == USB_ENDPOINT_XFER_INT) {
1171 dwc2_host_complete(hsotg, qtd, -EPIPE);
1172 /*
1173 * USB protocol requires resetting the data toggle for bulk
1174 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1175 * setup command is issued to the endpoint. Anticipate the
1176 * CLEAR_FEATURE command since a STALL has occurred and reset
1177 * the data toggle now.
1178 */
1179 chan->qh->data_toggle = 0;
1180 }
1181
1182 handle_stall_halt:
1183 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1184
1185 handle_stall_done:
1186 disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1187 }
1188
1189 /*
1190 * Updates the state of the URB when a transfer has been stopped due to an
1191 * abnormal condition before the transfer completes. Modifies the
1192 * actual_length field of the URB to reflect the number of bytes that have
1193 * actually been transferred via the host channel.
1194 */
dwc2_update_urb_state_abn(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,int chnum,struct dwc2_hcd_urb * urb,struct dwc2_qtd * qtd,enum dwc2_halt_status halt_status)1195 STATIC void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1196 struct dwc2_host_chan *chan, int chnum,
1197 struct dwc2_hcd_urb *urb,
1198 struct dwc2_qtd *qtd,
1199 enum dwc2_halt_status halt_status)
1200 {
1201 u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
1202 qtd, halt_status, NULL);
1203 u32 hctsiz;
1204
1205 if (urb->actual_length + xfer_length > urb->length) {
1206 dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
1207 xfer_length = urb->length - urb->actual_length;
1208 }
1209
1210 urb->actual_length += xfer_length;
1211
1212 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
1213 dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
1214 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
1215 dev_vdbg(hsotg->dev, " chan->start_pkt_count %d\n",
1216 chan->start_pkt_count);
1217 dev_vdbg(hsotg->dev, " hctsiz.pktcnt %d\n",
1218 (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
1219 dev_vdbg(hsotg->dev, " chan->max_packet %d\n", chan->max_packet);
1220 dev_vdbg(hsotg->dev, " bytes_transferred %d\n",
1221 xfer_length);
1222 dev_vdbg(hsotg->dev, " urb->actual_length %d\n",
1223 urb->actual_length);
1224 dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n",
1225 urb->length);
1226 }
1227
/*
 * Handles a host channel NAK interrupt. This handler may be called in either
 * DMA mode or Slave mode.
 *
 * For split transactions the SSPLIT is restarted (possibly delayed after
 * repeated NAKs). Otherwise the error count is reset and, depending on the
 * endpoint type and direction, URB state is rewound and the channel halted
 * so the transfer can restart or the PING protocol can run.
 */
STATIC void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
			     struct dwc2_host_chan *chan, int chnum,
			     struct dwc2_qtd *qtd)
{
	/* Defensive: the channel may have lost its qtd/urb already */
	if (!qtd) {
		dev_dbg(hsotg->dev, "%s: qtd is NULL\n", __func__);
		return;
	}

	if (!qtd->urb) {
		dev_dbg(hsotg->dev, "%s: qtd->urb is NULL\n", __func__);
		return;
	}

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
			 chnum);

	/*
	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
	 * interrupt. Re-start the SSPLIT transfer.
	 *
	 * Normally for non-periodic transfers we'll retry right away, but to
	 * avoid interrupt storms we'll wait before retrying if we've got
	 * several NAKs. If we didn't do this we'd retry directly from the
	 * interrupt handler and could end up quickly getting another
	 * interrupt (another NAK), which we'd retry. Note that we do not
	 * delay retries for IN parts of control requests, as those are expected
	 * to complete fairly quickly, and if we delay them we risk confusing
	 * the device and cause it issue STALL.
	 *
	 * Note that in DMA mode software only gets involved to re-send NAKed
	 * transfers for split transactions, so we only need to apply this
	 * delaying logic when handling splits. In non-DMA mode presumably we
	 * might want a similar delay if someone can demonstrate this problem
	 * affects that code path too.
	 */
	if (chan->do_split) {
		/* A NAKed CSPLIT means the whole split must be redone */
		if (chan->complete_split)
			qtd->error_count = 0;
		qtd->complete_split = 0;
		qtd->num_naks++;
		/* Delay the retry once enough NAKs pile up (except ctrl IN) */
		qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
				!(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
				  chan->ep_is_in);
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
		goto handle_nak_done;
	}

	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		if (hsotg->params.host_dma && chan->ep_is_in) {
			/*
			 * NAK interrupts are enabled on bulk/control IN
			 * transfers in DMA mode for the sole purpose of
			 * resetting the error count after a transaction error
			 * occurs. The core will continue transferring data.
			 */
			qtd->error_count = 0;
			break;
		}

		/*
		 * NAK interrupts normally occur during OUT transfers in DMA
		 * or Slave mode. For IN transfers, more requests will be
		 * queued as request queue space is available.
		 */
		qtd->error_count = 0;

		if (!chan->qh->ping_state) {
			/* Rewind URB state to the last completed packet */
			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
						  qtd, DWC2_HC_XFER_NAK);
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);

			if (chan->speed == USB_SPEED_HIGH)
				chan->qh->ping_state = 1;
		}

		/*
		 * Halt the channel so the transfer can be re-started from
		 * the appropriate point or the PING protocol will
		 * start/continue
		 */
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
		break;
	case USB_ENDPOINT_XFER_INT:
		qtd->error_count = 0;
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Should never get called for isochronous transfers */
		dev_err(hsotg->dev, "NACK interrupt for ISOC transfer\n");
		break;
	}

handle_nak_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
}
1331
/*
 * Handles a host channel ACK interrupt. This interrupt is enabled when
 * performing the PING protocol in Slave mode, when errors occur during
 * either Slave mode or DMA mode, and during Start Split transactions.
 *
 * For SSPLITs this advances the split state machine (scheduling the CSPLIT,
 * or choosing the next ISOC OUT transaction position). Otherwise it resets
 * the error count and, in Slave-mode PING state, halts the channel so the
 * transfer can restart.
 */
STATIC void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
			     struct dwc2_host_chan *chan, int chnum,
			     struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
			 chnum);

	if (chan->do_split) {
		/* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
		if (!chan->ep_is_in &&
		    chan->data_pid_start != DWC2_HC_PID_SETUP)
			qtd->ssplit_out_xfer_count = chan->xfer_len;

		if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
			/* SSPLIT acknowledged; next do the CSPLIT */
			qtd->complete_split = 1;
			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
		} else {
			/* ISOC OUT */
			switch (chan->xact_pos) {
			case DWC2_HCSPLT_XACTPOS_ALL:
				break;
			case DWC2_HCSPLT_XACTPOS_END:
				/* Frame finished; reset for the next one */
				qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
				qtd->isoc_split_offset = 0;
				break;
			case DWC2_HCSPLT_XACTPOS_BEGIN:
			case DWC2_HCSPLT_XACTPOS_MID:
				/*
				 * For BEGIN or MID, calculate the length for
				 * the next microframe to determine the correct
				 * SSPLIT token, either MID or END
				 */
				frame_desc = &qtd->urb->iso_descs[
						qtd->isoc_frame_index];
				qtd->isoc_split_offset += 188;

				if (frame_desc->length - qtd->isoc_split_offset
							<= 188)
					qtd->isoc_split_pos =
							DWC2_HCSPLT_XACTPOS_END;
				else
					qtd->isoc_split_pos =
							DWC2_HCSPLT_XACTPOS_MID;
				break;
			}
		}
	} else {
		qtd->error_count = 0;

		if (chan->qh->ping_state) {
			chan->qh->ping_state = 0;
			/*
			 * Halt the channel so the transfer can be re-started
			 * from the appropriate point. This only happens in
			 * Slave mode. In DMA mode, the ping_state is cleared
			 * when the transfer is started because the core
			 * automatically executes the PING, then the transfer.
			 */
			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
		}
	}

	/*
	 * If the ACK occurred when _not_ in the PING state, let the channel
	 * continue transferring data after clearing the error count
	 */
	disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
}
1408
/*
 * Handles a host channel NYET interrupt. This interrupt should only occur on
 * Bulk and Control OUT endpoints and for complete split transactions. If a
 * NYET occurs at the same time as a Transfer Complete interrupt, it is
 * handled in the xfercomp interrupt handler, not here. This handler may be
 * called in either DMA mode or Slave mode.
 *
 * For CSPLITs the complete split is retried (or abandoned as a transaction
 * error once the scheduled window has passed). For plain bulk/control OUT,
 * the endpoint enters the PING state and the channel is halted so the PING
 * protocol can start.
 */
STATIC void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, int chnum,
			      struct dwc2_qtd *qtd)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
			 chnum);

	/*
	 * NYET on CSPLIT
	 * re-do the CSPLIT immediately on non-periodic
	 */
	if (chan->do_split && chan->complete_split) {
		if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
		    hsotg->params.host_dma) {
			/* ISOC IN split in DMA mode: skip to the next frame */
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
			qtd->isoc_frame_index++;
			if (qtd->urb &&
			    qtd->isoc_frame_index == qtd->urb->packet_count) {
				dwc2_host_complete(hsotg, qtd, 0);
				dwc2_release_channel(hsotg, chan, qtd,
						     DWC2_HC_XFER_URB_COMPLETE);
			} else {
				dwc2_release_channel(hsotg, chan, qtd,
						     DWC2_HC_XFER_NO_HALT_STATUS);
			}
			goto handle_nyet_done;
		}

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			struct dwc2_qh *qh = chan->qh;
			bool past_end;

			if (!hsotg->params.uframe_sched) {
				int frnum = dwc2_hcd_get_frame_number(hsotg);

				/* Don't have num_hs_transfers; simple logic */
				past_end = dwc2_full_frame_num(frnum) !=
				     dwc2_full_frame_num(qh->next_active_frame);
			} else {
				int end_frnum;

				/*
				 * Figure out the end frame based on
				 * schedule.
				 *
				 * We don't want to go on trying again
				 * and again forever. Let's stop when
				 * we've done all the transfers that
				 * were scheduled.
				 *
				 * We're going to be comparing
				 * start_active_frame and
				 * next_active_frame, both of which
				 * are 1 before the time the packet
				 * goes on the wire, so that cancels
				 * out. Basically if had 1 transfer
				 * and we saw 1 NYET then we're done.
				 * We're getting a NYET here so if
				 * next >= (start + num_transfers)
				 * we're done. The complexity is that
				 * for all but ISOC_OUT we skip one
				 * slot.
				 */
				end_frnum = dwc2_frame_num_inc(
					qh->start_active_frame,
					qh->num_hs_transfers);

				if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
				    qh->ep_is_in)
					end_frnum =
					       dwc2_frame_num_inc(end_frnum, 1);

				past_end = dwc2_frame_num_le(
					end_frnum, qh->next_active_frame);
			}

			if (past_end) {
				/* Treat this as a transaction error. */
#if 0
				/*
				 * Todo: Fix system performance so this can
				 * be treated as an error. Right now complete
				 * splits cannot be scheduled precisely enough
				 * due to other system activity, so this error
				 * occurs regularly in Slave mode.
				 */
				qtd->error_count++;
#endif
				qtd->complete_split = 0;
				dwc2_halt_channel(hsotg, chan, qtd,
						  DWC2_HC_XFER_XACT_ERR);
				/* Todo: add support for isoc release */
				goto handle_nyet_done;
			}
		}

		/* Still within the window; retry the CSPLIT */
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
		goto handle_nyet_done;
	}

	/* Plain NYET: device not ready; switch to the PING protocol */
	chan->qh->ping_state = 1;
	qtd->error_count = 0;

	dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
				  DWC2_HC_XFER_NYET);
	dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);

	/*
	 * Halt the channel and re-start the transfer so the PING protocol
	 * will start
	 */
	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);

handle_nyet_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
}
1535
1536 /*
1537 * Handles a host channel babble interrupt. This handler may be called in
1538 * either DMA mode or Slave mode.
1539 */
dwc2_hc_babble_intr(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,int chnum,struct dwc2_qtd * qtd)1540 STATIC void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1541 struct dwc2_host_chan *chan, int chnum,
1542 struct dwc2_qtd *qtd)
1543 {
1544 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1545 chnum);
1546
1547 dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1548
1549 if (hsotg->params.dma_desc_enable) {
1550 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1551 DWC2_HC_XFER_BABBLE_ERR);
1552 goto disable_int;
1553 }
1554
1555 if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1556 dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
1557 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1558 } else {
1559 enum dwc2_halt_status halt_status;
1560
1561 halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1562 qtd, DWC2_HC_XFER_BABBLE_ERR);
1563 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1564 }
1565
1566 disable_int:
1567 disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1568 }
1569
/*
 * Handles a host channel AHB error interrupt. This handler is only called in
 * DMA mode.
 *
 * Dumps the channel registers and URB details for diagnosis, completes the
 * URB with -EIO (buffer DMA mode), and forces a channel halt. In descriptor
 * DMA mode the core halts the channel, so only the DDMA cleanup runs.
 */
STATIC void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	char *pipetype, *speed;
	u32 hcchar;
	u32 hcsplt;
	u32 hctsiz;
	u32 hc_dma;

	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
		chnum);

	/* Without an URB there is nothing to report or complete; just halt */
	if (!urb)
		goto handle_ahberr_halt;

	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);

	/* Snapshot the channel registers for the error dump */
	hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
	hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
	hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
	hc_dma = dwc2_readl(hsotg, HCDMA(chnum));

	dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
	dev_err(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
	dev_err(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
	dev_err(hsotg->dev, " Device address: %d\n",
		dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_err(hsotg->dev, " Endpoint: %d, %s\n",
		dwc2_hcd_get_ep_num(&urb->pipe_info),
		dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	/* Translate the pipe type to a printable name */
	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
		pipetype = "CONTROL";
		break;
	case USB_ENDPOINT_XFER_BULK:
		pipetype = "BULK";
		break;
	case USB_ENDPOINT_XFER_INT:
		pipetype = "INTERRUPT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		pipetype = "ISOCHRONOUS";
		break;
	default:
		pipetype = "UNKNOWN";
		break;
	}

	dev_err(hsotg->dev, " Endpoint type: %s\n", pipetype);

	/* Translate the channel speed to a printable name */
	switch (chan->speed) {
	case USB_SPEED_HIGH:
		speed = "HIGH";
		break;
	case USB_SPEED_FULL:
		speed = "FULL";
		break;
	case USB_SPEED_LOW:
		speed = "LOW";
		break;
	default:
		speed = "UNKNOWN";
		break;
	}

	dev_err(hsotg->dev, " Speed: %s\n", speed);

	dev_err(hsotg->dev, " Max packet size: %d (mult %d)\n",
		dwc2_hcd_get_maxp(&urb->pipe_info),
		dwc2_hcd_get_maxp_mult(&urb->pipe_info));
	dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length);
	dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
		urb->buf, (unsigned long)urb->dma);
	dev_err(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
		urb->setup_packet, (unsigned long)urb->setup_dma);
	dev_err(hsotg->dev, " Interval: %d\n", urb->interval);

	/* Core halts the channel for Descriptor DMA mode */
	if (hsotg->params.dma_desc_enable) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_AHB_ERR);
		goto handle_ahberr_done;
	}

	dwc2_host_complete(hsotg, qtd, -EIO);

handle_ahberr_halt:
	/*
	 * Force a channel halt. Don't call dwc2_halt_channel because that won't
	 * write to the HCCHARn register in DMA mode to force the halt.
	 */
	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);

handle_ahberr_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
}
1673
1674 /*
1675 * Handles a host channel transaction error interrupt. This handler may be
1676 * called in either DMA mode or Slave mode.
1677 */
dwc2_hc_xacterr_intr(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,int chnum,struct dwc2_qtd * qtd)1678 STATIC void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
1679 struct dwc2_host_chan *chan, int chnum,
1680 struct dwc2_qtd *qtd)
1681 {
1682 dev_dbg(hsotg->dev,
1683 "--Host Channel %d Interrupt: Transaction Error--\n", chnum);
1684
1685 dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1686
1687 if (hsotg->params.dma_desc_enable) {
1688 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1689 DWC2_HC_XFER_XACT_ERR);
1690 goto handle_xacterr_done;
1691 }
1692
1693 switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1694 case USB_ENDPOINT_XFER_CONTROL:
1695 case USB_ENDPOINT_XFER_BULK:
1696 qtd->error_count++;
1697 if (!chan->qh->ping_state) {
1698 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1699 qtd, DWC2_HC_XFER_XACT_ERR);
1700 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1701 if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
1702 chan->qh->ping_state = 1;
1703 }
1704
1705 /*
1706 * Halt the channel so the transfer can be re-started from
1707 * the appropriate point or the PING protocol will start
1708 */
1709 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1710 break;
1711 case USB_ENDPOINT_XFER_INT:
1712 qtd->error_count++;
1713 if (chan->do_split && chan->complete_split)
1714 qtd->complete_split = 0;
1715 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1716 break;
1717 case USB_ENDPOINT_XFER_ISOC:
1718 {
1719 enum dwc2_halt_status halt_status;
1720
1721 halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1722 chnum, qtd, DWC2_HC_XFER_XACT_ERR);
1723 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1724 }
1725 break;
1726 }
1727
1728 handle_xacterr_done:
1729 disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
1730 }
1731
1732 /*
1733 * Handles a host channel frame overrun interrupt. This handler may be called
1734 * in either DMA mode or Slave mode.
1735 */
dwc2_hc_frmovrun_intr(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,int chnum,struct dwc2_qtd * qtd)1736 STATIC void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1737 struct dwc2_host_chan *chan, int chnum,
1738 struct dwc2_qtd *qtd)
1739 {
1740 enum dwc2_halt_status halt_status;
1741
1742 if (dbg_hc(chan))
1743 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1744 chnum);
1745
1746 dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1747
1748 switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1749 case USB_ENDPOINT_XFER_CONTROL:
1750 case USB_ENDPOINT_XFER_BULK:
1751 break;
1752 case USB_ENDPOINT_XFER_INT:
1753 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1754 break;
1755 case USB_ENDPOINT_XFER_ISOC:
1756 halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1757 qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1758 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1759 break;
1760 }
1761
1762 disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1763 }
1764
1765 /*
1766 * Handles a host channel data toggle error interrupt. This handler may be
1767 * called in either DMA mode or Slave mode.
1768 */
dwc2_hc_datatglerr_intr(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,int chnum,struct dwc2_qtd * qtd)1769 STATIC void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1770 struct dwc2_host_chan *chan, int chnum,
1771 struct dwc2_qtd *qtd)
1772 {
1773 dev_dbg(hsotg->dev,
1774 "--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1775
1776 if (chan->ep_is_in)
1777 qtd->error_count = 0;
1778 else
1779 dev_err(hsotg->dev,
1780 "Data Toggle Error on OUT transfer, channel %d\n",
1781 chnum);
1782
1783 dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1784 disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1785 }
1786
/*
 * For debug only. It checks that a valid halt status is set and that
 * HCCHARn.chdis is clear. If there's a problem, corrective action is
 * taken and a warning is issued.
 *
 * The whole body is compiled out unless DWC2_DEBUG is defined, in which
 * case the function always returns true.
 *
 * Return: true if halt status is ok, false otherwise
 */
STATIC bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
#ifdef DWC2_DEBUG
	u32 hcchar;
	u32 hctsiz;
	u32 hcintmsk;
	u32 hcsplt;

	if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
		/*
		 * This code is here only as a check. This condition should
		 * never happen. Ignore the halt if it does occur.
		 */
		hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
		hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
		hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
		hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
		dev_dbg(hsotg->dev,
			"%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
			__func__);
		dev_dbg(hsotg->dev,
			"channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
			chnum, hcchar, hctsiz);
		dev_dbg(hsotg->dev,
			"hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
			chan->hcint, hcintmsk, hcsplt);
		/* qtd may be NULL here; only dump it when present */
		if (qtd)
			dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
				qtd->complete_split);
		dev_warn(hsotg->dev,
			 "%s: no halt status, channel %d, ignoring interrupt\n",
			 __func__, chnum);
		return false;
	}

	/*
	 * This code is here only as a check. hcchar.chdis should never be set
	 * when the halt interrupt occurs. Halt the channel again if it does
	 * occur.
	 */
	hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
	if (hcchar & HCCHAR_CHDIS) {
		dev_warn(hsotg->dev,
			 "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
			 __func__, hcchar);
		chan->halt_pending = 0;
		dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
		return false;
	}
#endif

	return true;
}
1849
1850 /*
1851 * Handles a host Channel Halted interrupt in DMA mode. This handler
1852 * determines the reason the channel halted and proceeds accordingly.
1853 */
STATIC void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan, int chnum,
				    struct dwc2_qtd *qtd)
{
	u32 hcintmsk;
	int out_nak_enh = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev,
			 "--Host Channel %d Interrupt: DMA Channel Halted--\n",
			 chnum);

	/*
	 * For core with OUT NAK enhancement, the flow for high-speed
	 * CONTROL/BULK OUT is handled a little differently
	 */
	if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
		if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
		    (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		     chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
			out_nak_enh = 1;
		}
	}

	/*
	 * Fast path: a URB dequeue, or an AHB error when descriptor DMA is
	 * not in use, needs no further decoding of the halt reason below.
	 */
	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
	     !hsotg->params.dma_desc_enable)) {
		if (hsotg->params.dma_desc_enable)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			/*
			 * Just release the channel. A dequeue can happen on a
			 * transfer timeout. In the case of an AHB Error, the
			 * channel was forced to halt because there's no way to
			 * gracefully recover.
			 */
			dwc2_release_channel(hsotg, chan, qtd,
					     chan->halt_status);
		return;
	}

	hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));

	/*
	 * Decode why the core halted the channel from the raw HCINT snapshot
	 * saved in chan->hcint. The checks below are ordered by precedence;
	 * see the inline comments where the ordering matters.
	 */
	if (chan->hcint & HCINTMSK_XFERCOMPL) {
		/*
		 * Todo: This is here because of a possible hardware bug. Spec
		 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
		 * interrupt w/ACK bit set should occur, but I only see the
		 * XFERCOMP bit, even with it masked out. This is a workaround
		 * for that behavior. Should fix this when hardware is fixed.
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
	} else if (chan->hcint & HCINTMSK_STALL) {
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
	} else if ((chan->hcint & HCINTMSK_XACTERR) &&
		   !hsotg->params.dma_desc_enable) {
		if (out_nak_enh) {
			if (chan->hcint &
			    (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
				dev_vdbg(hsotg->dev,
					 "XactErr with NYET/NAK/ACK\n");
				qtd->error_count = 0;
			} else {
				dev_vdbg(hsotg->dev,
					 "XactErr without NYET/NAK/ACK\n");
			}
		}

		/*
		 * Must handle xacterr before nak or ack. Could get a xacterr
		 * at the same time as either of these on a BULK/CONTROL OUT
		 * that started with a PING. The xacterr takes precedence.
		 */
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
	} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
		   hsotg->params.dma_desc_enable) {
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
	} else if ((chan->hcint & HCINTMSK_AHBERR) &&
		   hsotg->params.dma_desc_enable) {
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
	} else if (chan->hcint & HCINTMSK_BBLERR) {
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
	} else if (chan->hcint & HCINTMSK_FRMOVRUN) {
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
	} else if (!out_nak_enh) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Must handle nyet before nak or ack. Could get a nyet
			 * at the same time as either of those on a BULK/CONTROL
			 * OUT that started with a PING. The nyet takes
			 * precedence.
			 */
			dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
		} else if ((chan->hcint & HCINTMSK_NAK) &&
			   !(hcintmsk & HCINTMSK_NAK)) {
			/*
			 * If nak is not masked, it's because a non-split IN
			 * transfer is in an error state. In that case, the nak
			 * is handled by the nak interrupt handler, not here.
			 * Handle nak here for BULK/CONTROL OUT transfers, which
			 * halt on a NAK to allow rewinding the buffer pointer.
			 */
			dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
		} else if ((chan->hcint & HCINTMSK_ACK) &&
			   !(hcintmsk & HCINTMSK_ACK)) {
			/*
			 * If ack is not masked, it's because a non-split IN
			 * transfer is in an error state. In that case, the ack
			 * is handled by the ack interrupt handler, not here.
			 * Handle ack here for split transfers. Start splits
			 * halt on ACK.
			 */
			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		} else {
			if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
				/*
				 * A periodic transfer halted with no other
				 * channel interrupts set. Assume it was halted
				 * by the core because it could not be completed
				 * in its scheduled (micro)frame.
				 */
				dev_dbg(hsotg->dev,
					"%s: Halt channel %d (assume incomplete periodic transfer)\n",
					__func__, chnum);
				dwc2_halt_channel(hsotg, chan, qtd,
					DWC2_HC_XFER_PERIODIC_INCOMPLETE);
			} else {
				dev_err(hsotg->dev,
					"%s: Channel %d - ChHltd set, but reason is unknown\n",
					__func__, chnum);
				dev_err(hsotg->dev,
					"hcint 0x%08x, intsts 0x%08x\n",
					chan->hcint,
					dwc2_readl(hsotg, GINTSTS));
				goto error;
			}
		}
	} else {
		dev_info(hsotg->dev,
			 "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
			 chan->hcint);
error:
		/* Fallthrough: use 3-strikes rule */
		qtd->error_count++;
		dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
					  qtd, DWC2_HC_XFER_XACT_ERR);
		/*
		 * We can get here after a completed transaction
		 * (urb->actual_length >= urb->length) which was not reported
		 * as completed. If that is the case, and we do not abort
		 * the transfer, a transfer of size 0 will be enqueued
		 * subsequently. If urb->actual_length is not DMA-aligned,
		 * the buffer will then point to an unaligned address, and
		 * the resulting behavior is undefined. Bail out in that
		 * situation.
		 */
		if (qtd->urb->actual_length >= qtd->urb->length)
			qtd->error_count = 3;	/* force the 3-strikes abort */
		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
	}
}
2020
2021 /*
2022 * Handles a host channel Channel Halted interrupt
2023 *
2024 * In slave mode, this handler is called only when the driver specifically
2025 * requests a halt. This occurs during handling other host channel interrupts
2026 * (e.g. nak, xacterr, stall, nyet, etc.).
2027 *
2028 * In DMA mode, this is the interrupt that occurs when the core has finished
2029 * processing a transfer on a channel. Other host channel interrupts (except
2030 * ahberr) are disabled in DMA mode.
2031 */
dwc2_hc_chhltd_intr(struct dwc2_hsotg * hsotg,struct dwc2_host_chan * chan,int chnum,struct dwc2_qtd * qtd)2032 STATIC void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
2033 struct dwc2_host_chan *chan, int chnum,
2034 struct dwc2_qtd *qtd)
2035 {
2036 if (dbg_hc(chan))
2037 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
2038 chnum);
2039
2040 if (hsotg->params.host_dma) {
2041 dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
2042 } else {
2043 if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
2044 return;
2045 dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
2046 }
2047 }
2048
2049 /*
2050 * Check if the given qtd is still the top of the list (and thus valid).
2051 *
2052 * If dwc2_hcd_qtd_unlink_and_free() has been called since we grabbed
2053 * the qtd from the top of the list, this will return false (otherwise true).
2054 */
dwc2_check_qtd_still_ok(struct dwc2_qtd * qtd,struct dwc2_qh * qh)2055 STATIC bool dwc2_check_qtd_still_ok(struct dwc2_qtd *qtd, struct dwc2_qh *qh)
2056 {
2057 struct dwc2_qtd *cur_head;
2058
2059 if (!qh)
2060 return false;
2061
2062 cur_head = list_first_entry(&qh->qtd_list, struct dwc2_qtd,
2063 qtd_list_entry);
2064 return (cur_head == qtd);
2065 }
2066
2067 /* Handles interrupt for a specific Host Channel */
STATIC void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan;
	u32 hcint, hcintmsk;

	chan = hsotg->hc_ptr_array[chnum];

	hcint = dwc2_readl(hsotg, HCINT(chnum));
	hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
	if (!chan) {
		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
		/* Still ack the interrupt so it does not fire again */
		dwc2_writel(hsotg, hcint, HCINT(chnum));
		return;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
			 chnum);
		dev_vdbg(hsotg->dev,
			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			 hcint, hcintmsk, hcint & hcintmsk);
	}

	/* Acknowledge everything that was pending */
	dwc2_writel(hsotg, hcint, HCINT(chnum));

	/*
	 * If we got an interrupt after someone called
	 * dwc2_hcd_endpoint_disable() we don't want to crash below
	 */
	if (!chan->qh) {
		dev_warn(hsotg->dev, "Interrupt on disabled channel\n");
		return;
	}

	/* Save the raw (unmasked) status for the halt-reason decode in
	 * dwc2_hc_chhltd_intr_dma(); only masked bits are dispatched here. */
	chan->hcint = hcint;
	hcint &= hcintmsk;

	/*
	 * If the channel was halted due to a dequeue, the qtd list might
	 * be empty or at least the first entry will not be the active qtd.
	 * In this case, take a shortcut and just release the channel.
	 */
	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		/*
		 * If the channel was halted, this should be the only
		 * interrupt unmasked
		 */
		WARN_ON(hcint != HCINTMSK_CHHLTD);
		if (hsotg->params.dma_desc_enable)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			dwc2_release_channel(hsotg, chan, NULL,
					     chan->halt_status);
		return;
	}

	if (list_empty(&chan->qh->qtd_list)) {
		/*
		 * TODO: Will this ever happen with the
		 * DWC2_HC_XFER_URB_DEQUEUE handling above?
		 */
		dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
			chnum);
		dev_dbg(hsotg->dev,
			"  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			chan->hcint, hcintmsk, hcint);
		chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
		disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
		chan->hcint = 0;
		return;
	}

	/* The active transfer descriptor is the head of the QH's qtd list */
	qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
			       qtd_list_entry);

	if (!hsotg->params.host_dma) {
		/*
		 * Slave mode: when Channel Halted arrives together with other
		 * channel interrupts, drop the halt bit here so the causing
		 * events are serviced first. NOTE(review): presumably those
		 * handlers re-request the halt via dwc2_halt_channel() —
		 * confirm against the slave-mode halt flow.
		 */
		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
			hcint &= ~HCINTMSK_CHHLTD;
	}

	if (hcint & HCINTMSK_XFERCOMPL) {
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
		/*
		 * If NYET occurred at same time as Xfer Complete, the NYET is
		 * handled by the Xfer Complete interrupt handler. Don't want
		 * to call the NYET interrupt handler in this case.
		 */
		hcint &= ~HCINTMSK_NYET;
	}

	/*
	 * Each handler below may free the qtd; re-check after every call
	 * that qtd is still the head of the list before passing it on.
	 */
	if (hcint & HCINTMSK_CHHLTD) {
		dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_AHBERR) {
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_STALL) {
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_NAK) {
		dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_ACK) {
		dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_NYET) {
		dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_XACTERR) {
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_BBLERR) {
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_FRMOVRUN) {
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_DATATGLERR) {
		dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}

exit:
	chan->hcint = 0;
}
2214
2215 /*
2216 * This interrupt indicates that one or more host channels has a pending
2217 * interrupt. There are multiple conditions that can cause each host channel
2218 * interrupt. This function determines which conditions have occurred for each
2219 * host channel interrupt and handles them appropriately.
2220 */
dwc2_hc_intr(struct dwc2_hsotg * hsotg)2221 STATIC void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2222 {
2223 u32 haint;
2224 int i;
2225 struct dwc2_host_chan *chan, *chan_tmp;
2226
2227 haint = dwc2_readl(hsotg, HAINT);
2228 if (dbg_perio()) {
2229 dev_vdbg(hsotg->dev, "%s()\n", __func__);
2230
2231 dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2232 }
2233
2234 /*
2235 * According to USB 2.0 spec section 11.18.8, a host must
2236 * issue complete-split transactions in a microframe for a
2237 * set of full-/low-speed endpoints in the same relative
2238 * order as the start-splits were issued in a microframe for.
2239 */
2240 list_for_each_entry_safe(chan, chan_tmp, &hsotg->split_order,
2241 split_order_list_entry) {
2242 int hc_num = chan->hc_num;
2243
2244 if (haint & (1 << hc_num)) {
2245 dwc2_hc_n_intr(hsotg, hc_num);
2246 haint &= ~(1 << hc_num);
2247 }
2248 }
2249
2250 for (i = 0; i < hsotg->params.host_channels; i++) {
2251 if (haint & (1 << i))
2252 dwc2_hc_n_intr(hsotg, i);
2253 }
2254 }
2255
2256 /* This function handles interrupts for the HCD */
irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
{
	u32 gintsts, dbg_gintsts;
	irqreturn_t retval = IRQ_NONE;

	if (!dwc2_is_controller_alive(hsotg)) {
		dev_warn(hsotg->dev, "Controller is dead\n");
		return retval;
	}

	spin_lock(&hsotg->lock);

	/* Check if HOST Mode */
	if (dwc2_is_host_mode(hsotg)) {
		gintsts = dwc2_read_core_intr(hsotg);
		if (!gintsts) {
			/* No enabled interrupt pending: not ours */
			spin_unlock(&hsotg->lock);
			return retval;
		}

		retval = IRQ_HANDLED;

		/*
		 * Build a copy of the status with the noisiest bits masked
		 * out, used only to decide whether to emit debug output.
		 */
		dbg_gintsts = gintsts;
#ifndef DEBUG_SOF
		dbg_gintsts &= ~GINTSTS_SOF;
#endif
		if (!dbg_perio())
			dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
					 GINTSTS_PTXFEMP);

		/* Only print if there are any non-suppressed interrupts left */
		if (dbg_gintsts)
			dev_vdbg(hsotg->dev,
				 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
				 gintsts);

		/* Dispatch each pending core interrupt to its handler */
		if (gintsts & GINTSTS_SOF)
			dwc2_sof_intr(hsotg);
		if (gintsts & GINTSTS_RXFLVL)
			dwc2_rx_fifo_level_intr(hsotg);
		if (gintsts & GINTSTS_NPTXFEMP)
			dwc2_np_tx_fifo_empty_intr(hsotg);
		if (gintsts & GINTSTS_PRTINT)
			dwc2_port_intr(hsotg);
		if (gintsts & GINTSTS_HCHINT)
			dwc2_hc_intr(hsotg);
		if (gintsts & GINTSTS_PTXFEMP)
			dwc2_perio_tx_fifo_empty_intr(hsotg);

		if (dbg_gintsts) {
			dev_vdbg(hsotg->dev,
				 "DWC OTG HCD Finished Servicing Interrupts\n");
			dev_vdbg(hsotg->dev,
				 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
				 dwc2_readl(hsotg, GINTSTS),
				 dwc2_readl(hsotg, GINTMSK));
		}
	}

	spin_unlock(&hsotg->lock);

	return retval;
}
2320