xref: /openbsd/sys/dev/usb/dwc2/dwc2_hcdintr.c (revision e5dd7070)
1 /*	$OpenBSD: dwc2_hcdintr.c,v 1.10 2017/09/08 05:36:53 deraadt Exp $	*/
2 /*	$NetBSD: dwc2_hcdintr.c,v 1.11 2014/11/24 10:14:14 skrll Exp $	*/
3 
4 /*
5  * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
6  *
7  * Copyright (C) 2004-2013 Synopsys, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions, and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The names of the above-listed copyright holders may not be used
19  *    to endorse or promote products derived from this software without
20  *    specific prior written permission.
21  *
22  * ALTERNATIVELY, this software may be distributed under the terms of the
23  * GNU General Public License ("GPL") as published by the Free Software
24  * Foundation; either version 2 of the License, or (at your option) any
25  * later version.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
28  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
29  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * This file contains the interrupt handlers for Host mode
42  */
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/pool.h>
47 
48 #include <machine/bus.h>
49 
50 #include <dev/usb/usb.h>
51 #include <dev/usb/usbdi.h>
52 #include <dev/usb/usbdivar.h>
53 #include <dev/usb/usb_mem.h>
54 
55 #include <dev/usb/dwc2/dwc2.h>
56 #include <dev/usb/dwc2/dwc2var.h>
57 
58 #include <dev/usb/dwc2/dwc2_core.h>
59 #include <dev/usb/dwc2/dwc2_hcd.h>
60 
61 /* This function is for debug only */
62 STATIC void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
63 {
64 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
65 	u16 curr_frame_number = hsotg->frame_number;
66 
67 	if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
68 		if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) !=
69 		    curr_frame_number) {
70 			hsotg->frame_num_array[hsotg->frame_num_idx] =
71 					curr_frame_number;
72 			hsotg->last_frame_num_array[hsotg->frame_num_idx] =
73 					hsotg->last_frame_num;
74 			hsotg->frame_num_idx++;
75 		}
76 	} else if (!hsotg->dumped_frame_num_array) {
77 		int i;
78 
79 		dev_info(hsotg->dev, "Frame     Last Frame\n");
80 		dev_info(hsotg->dev, "-----     ----------\n");
81 		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
82 			dev_info(hsotg->dev, "0x%04x    0x%04x\n",
83 				 hsotg->frame_num_array[i],
84 				 hsotg->last_frame_num_array[i]);
85 		}
86 		hsotg->dumped_frame_num_array = 1;
87 	}
88 	hsotg->last_frame_num = curr_frame_number;
89 #endif
90 }
91 
92 STATIC void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
93 				    struct dwc2_host_chan *chan,
94 				    struct dwc2_qtd *qtd)
95 {
96 // 	struct urb *usb_urb;
97 
98 	if (!chan->qh)
99 		return;
100 
101 	if (chan->qh->dev_speed == USB_SPEED_HIGH)
102 		return;
103 
104 	if (!qtd->urb)
105 		return;
106 
107 
108 	if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
109 		chan->qh->tt_buffer_dirty = 1;
110 			chan->qh->tt_buffer_dirty = 0;
111 	}
112 }
113 
114 /*
115  * Handles the start-of-frame interrupt in host mode. Non-periodic
116  * transactions may be queued to the DWC_otg controller for the current
117  * (micro)frame. Periodic transactions may be queued to the controller
118  * for the next (micro)frame.
119  */
120 STATIC void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
121 {
122 	struct dwc2_qh *qh, *qhn;
123 	enum dwc2_transaction_type tr_type;
124 
125 #ifdef DEBUG_SOF
126 	dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
127 #endif
128 
129 	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
130 
131 	dwc2_track_missed_sofs(hsotg);
132 
133 	/* Determine whether any periodic QHs should be executed */
134 	qh = TAILQ_FIRST(&hsotg->periodic_sched_inactive);
135 	while (qh != NULL) {
136 		qhn = TAILQ_NEXT(qh, qh_list_entry);
137 		if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number)) {
138 			/*
139 			 * Move QH to the ready list to be executed next
140 			 * (micro)frame
141 			 */
142 			TAILQ_REMOVE(&hsotg->periodic_sched_inactive, qh, qh_list_entry);
143 			TAILQ_INSERT_TAIL(&hsotg->periodic_sched_ready, qh, qh_list_entry);
144 		}
145 		qh = qhn;
146 	}
147 	tr_type = dwc2_hcd_select_transactions(hsotg);
148 	if (tr_type != DWC2_TRANSACTION_NONE)
149 		dwc2_hcd_queue_transactions(hsotg, tr_type);
150 
151 	/* Clear interrupt */
152 	DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_SOF);
153 }
154 
155 /*
156  * Handles the Rx FIFO Level Interrupt, which indicates that there is
157  * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
158  * memory if the DWC_otg controller is operating in Slave mode.
159  */
STATIC void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
{
	u32 grxsts, chnum, bcnt, pktsts;
	struct dwc2_host_chan *chan;

	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");

	/*
	 * NOTE(review): GRXSTSP appears to be the pop-on-read form of the
	 * Rx status register, i.e. this one read both fetches and consumes
	 * the top FIFO entry -- confirm against the databook; it must not
	 * be read more than once per entry.
	 */
	grxsts = DWC2_READ_4(hsotg, GRXSTSP);
	chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
	chan = hsotg->hc_ptr_array[chnum];
	if (!chan) {
		/* No channel object for this number; drop the entry */
		dev_err(hsotg->dev, "Unable to get corresponding channel\n");
		return;
	}

	/* Byte count and packet status of the entry just popped */
	bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
	pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;

	/* Packet Status */
	if (dbg_perio()) {
		dev_vdbg(hsotg->dev, "    Ch num = %d\n", chnum);
		dev_vdbg(hsotg->dev, "    Count = %d\n", bcnt);
		dev_vdbg(hsotg->dev, "    DPID = %d, chan.dpid = %d\n",
			 (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT,
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, "    PStatus = %d\n", pktsts);
	}

	switch (pktsts) {
	case GRXSTS_PKTSTS_HCHIN:
		/* Read the data into the host buffer */
		if (bcnt > 0) {
			dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);

			/* Update the HC fields for the next packet received */
			chan->xfer_count += bcnt;
			chan->xfer_buf += bcnt;
		}
		break;
	case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
	case GRXSTS_PKTSTS_DATATOGGLEERR:
	case GRXSTS_PKTSTS_HCHHALTED:
		/* Handled in interrupt, just ignore data */
		break;
	default:
		dev_err(hsotg->dev,
			"RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
		break;
	}
}
211 
212 /*
213  * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
214  * data packets may be written to the FIFO for OUT transfers. More requests
215  * may be written to the non-periodic request queue for IN transfers. This
216  * interrupt is enabled only in Slave mode.
217  */
218 STATIC void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
219 {
220 	dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
221 	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
222 }
223 
224 /*
225  * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
226  * packets may be written to the FIFO for OUT transfers. More requests may be
227  * written to the periodic request queue for IN transfers. This interrupt is
228  * enabled only in Slave mode.
229  */
230 STATIC void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
231 {
232 	if (dbg_perio())
233 		dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
234 	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
235 }
236 
/*
 * Called from dwc2_port_intr() when the root port transitions to enabled.
 * Recomputes HFIR.FrInterval for the negotiated speed and, if FS/LS
 * low-power PHY clock support is configured, selects the matching PHY
 * clock.  Any clock change requires another port reset, which is requested
 * by ORing HPRT0_RST into *hprt0_modify and scheduling the deferred reset
 * work; otherwise the port-reset-change flag is raised for the root hub.
 */
STATIC void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
			      u32 *hprt0_modify)
{
	struct dwc2_core_params *params = hsotg->core_params;
	int do_reset = 0;	/* set when a PHY clock change needs a reset */
	u32 usbcfg;
	u32 prtspd;
	u32 hcfg;
	u32 fslspclksel;
	u32 hfir;

	dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Every time when port enables calculate HFIR.FrInterval */
	hfir = DWC2_READ_4(hsotg, HFIR);
	hfir &= ~HFIR_FRINT_MASK;
	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
		HFIR_FRINT_MASK;
	DWC2_WRITE_4(hsotg, HFIR, hfir);

	/* Check if we need to adjust the PHY clock speed for low power */
	if (!params->host_support_fs_ls_low_power) {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;

		/* Notify the root-hub code of the status change */
		dwc2_root_intr(hsotg->hsotg_sc);
		return;
	}

	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
	prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;

	if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
		/* Low power */
		if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
			/* Set PHY low power clock select for FS/LS devices */
			usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
			do_reset = 1;
		}

		hcfg = DWC2_READ_4(hsotg, HCFG);
		fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
			      HCFG_FSLSPCLKSEL_SHIFT;

		/* 6 MHz PHY clock only applies to LS devices when configured */
		if (prtspd == HPRT0_SPD_LOW_SPEED &&
		    params->host_ls_low_power_phy_clk ==
		    DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
			/* 6 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 6 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
				do_reset = 1;
			}
		} else {
			/* 48 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 48 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
				do_reset = 1;
			}
		}
	} else {
		/* Not low power */
		if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
			usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
			do_reset = 1;
		}
	}

	if (do_reset) {
		/* Reset the port; the caller writes *hprt0_modify to HPRT0 */
		*hprt0_modify |= HPRT0_RST;
		queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
				   msecs_to_jiffies(60));
	} else {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
		dwc2_root_intr(hsotg->hsotg_sc);

	}
}
327 
328 /*
329  * There are multiple conditions that can cause a port interrupt. This function
330  * determines which interrupt conditions have occurred and handles them
331  * appropriately.
332  */
STATIC void dwc2_port_intr(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;
	u32 hprt0_modify;

	dev_vdbg(hsotg->dev, "--Port Interrupt--\n");

	hprt0 = DWC2_READ_4(hsotg, HPRT0);
	hprt0_modify = hprt0;

	/*
	 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
	 * GINTSTS
	 */
	/*
	 * NOTE(review): the masked bits appear to be write-1-to-clear (or,
	 * for HPRT0_ENA, write-1-to-disable), so they are stripped here and
	 * only the ones actually seen asserted are OR'd back in below --
	 * confirm against the databook.
	 */
	hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
			  HPRT0_OVRCURRCHG);

	/*
	 * Port Connect Detected
	 * Set flag and clear if detected
	 */
	if (hprt0 & HPRT0_CONNDET) {
		dev_vdbg(hsotg->dev,
			 "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
			 hprt0);
		hsotg->flags.b.port_connect_status_change = 1;
		hsotg->flags.b.port_connect_status = 1;
		hprt0_modify |= HPRT0_CONNDET;

		/*
		 * The Hub driver asserts a reset when it sees port connect
		 * status change flag
		 */
	}

	/*
	 * Port Enable Changed
	 * Clear if detected - Set internal flag if disabled
	 */
	if (hprt0 & HPRT0_ENACHG) {
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
			 hprt0, !!(hprt0 & HPRT0_ENA));
		hprt0_modify |= HPRT0_ENACHG;
		/* On enable, reprogram the PHY clock; on disable just flag it */
		if (hprt0 & HPRT0_ENA)
			dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
		else
			hsotg->flags.b.port_enable_change = 1;
	}

	/* Overcurrent Change Interrupt */
	if (hprt0 & HPRT0_OVRCURRCHG) {
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
			 hprt0);
		hsotg->flags.b.port_over_current_change = 1;
		hprt0_modify |= HPRT0_OVRCURRCHG;
	}

	/* Clear Port Interrupts */
	DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify);

	/* Propagate any recorded status change to the root-hub code */
	if (hsotg->flags.b.port_connect_status_change ||
	    hsotg->flags.b.port_enable_change ||
	    hsotg->flags.b.port_over_current_change)
		dwc2_root_intr(hsotg->hsotg_sc);
}
400 
401 /*
402  * Gets the actual length of a transfer after the transfer halts. halt_status
403  * holds the reason for the halt.
404  *
405  * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
406  * is set to 1 upon return if less than the requested number of bytes were
407  * transferred. short_read may also be NULL on entry, in which case it remains
408  * unchanged.
409  */
410 STATIC u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
411 				       struct dwc2_host_chan *chan, int chnum,
412 				       struct dwc2_qtd *qtd,
413 				       enum dwc2_halt_status halt_status,
414 				       int *short_read)
415 {
416 	u32 hctsiz, count, length;
417 
418 	hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
419 
420 	if (halt_status == DWC2_HC_XFER_COMPLETE) {
421 		if (chan->ep_is_in) {
422 			count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
423 				TSIZ_XFERSIZE_SHIFT;
424 			length = chan->xfer_len - count;
425 			if (short_read != NULL)
426 				*short_read = (count != 0);
427 		} else if (chan->qh->do_split) {
428 			length = qtd->ssplit_out_xfer_count;
429 		} else {
430 			length = chan->xfer_len;
431 		}
432 	} else {
433 		/*
434 		 * Must use the hctsiz.pktcnt field to determine how much data
435 		 * has been transferred. This field reflects the number of
436 		 * packets that have been transferred via the USB. This is
437 		 * always an integral number of packets if the transfer was
438 		 * halted before its normal completion. (Can't use the
439 		 * hctsiz.xfersize field because that reflects the number of
440 		 * bytes transferred via the AHB, not the USB).
441 		 */
442 		count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
443 		length = (chan->start_pkt_count - count) * chan->max_packet;
444 	}
445 
446 	return length;
447 }
448 
449 /**
450  * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
451  * Complete interrupt on the host channel. Updates the actual_length field
452  * of the URB based on the number of bytes transferred via the host channel.
453  * Sets the URB status if the data transfer is finished.
454  *
455  * Return: 1 if the data transfer specified by the URB is completely finished,
456  * 0 otherwise
457  */
458 STATIC int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
459 				 struct dwc2_host_chan *chan, int chnum,
460 				 struct dwc2_hcd_urb *urb,
461 				 struct dwc2_qtd *qtd)
462 {
463 	int xfer_done = 0;
464 	int short_read = 0;
465 	int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
466 						      DWC2_HC_XFER_COMPLETE,
467 						      &short_read);
468 
469 	if (urb->actual_length + xfer_length > urb->length) {
470 		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
471 		xfer_length = urb->length - urb->actual_length;
472 	}
473 
474 	/* Non DWORD-aligned buffer case handling */
475 	if (chan->align_buf && xfer_length && chan->ep_is_in) {
476 		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
477 		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_POSTREAD);
478 		memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
479 		       xfer_length);
480 		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_PREREAD);
481 	}
482 
483 	dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
484 		 urb->actual_length, xfer_length);
485 	urb->actual_length += xfer_length;
486 
487 	if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
488 	    (urb->flags & URB_SEND_ZERO_PACKET) &&
489 	    urb->actual_length >= urb->length &&
490 	    !(urb->length % chan->max_packet)) {
491 		xfer_done = 0;
492 	} else if (short_read || urb->actual_length >= urb->length) {
493 		xfer_done = 1;
494 		urb->status = 0;
495 	}
496 
497 	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
498 		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
499 	dev_vdbg(hsotg->dev, "  chan->xfer_len %d\n", chan->xfer_len);
500 	dev_vdbg(hsotg->dev, "  hctsiz.xfersize %d\n",
501 		 (DWC2_READ_4(hsotg, HCTSIZ(chnum)) & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
502 	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n", urb->length);
503 	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n", urb->actual_length);
504 	dev_vdbg(hsotg->dev, "  short_read %d, xfer_done %d\n", short_read,
505 		 xfer_done);
506 
507 	return xfer_done;
508 }
509 
510 /*
511  * Save the starting data toggle for the next transfer. The data toggle is
512  * saved in the QH for non-control transfers and it's saved in the QTD for
513  * control transfers.
514  */
515 void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
516 			       struct dwc2_host_chan *chan, int chnum,
517 			       struct dwc2_qtd *qtd)
518 {
519 	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
520 	u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
521 
522 	if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
523 		if (pid == TSIZ_SC_MC_PID_DATA0)
524 			chan->qh->data_toggle = DWC2_HC_PID_DATA0;
525 		else
526 			chan->qh->data_toggle = DWC2_HC_PID_DATA1;
527 	} else {
528 		if (pid == TSIZ_SC_MC_PID_DATA0)
529 			qtd->data_toggle = DWC2_HC_PID_DATA0;
530 		else
531 			qtd->data_toggle = DWC2_HC_PID_DATA1;
532 	}
533 }
534 
535 /**
536  * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
537  * the transfer is stopped for any reason. The fields of the current entry in
538  * the frame descriptor array are set based on the transfer state and the input
539  * halt_status. Completes the Isochronous URB if all the URB frames have been
540  * completed.
541  *
542  * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
543  * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
544  */
STATIC enum dwc2_halt_status dwc2_update_isoc_urb_state(
		struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		int chnum, struct dwc2_qtd *qtd,
		enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_hcd_urb *urb = qtd->urb;

	if (!urb)
		return DWC2_HC_XFER_NO_HALT_STATUS;

	/* Descriptor for the frame this halt applies to */
	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];

	switch (halt_status) {
	case DWC2_HC_XFER_COMPLETE:
		frame_desc->status = 0;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		/* IN data arrived in the bounce buffer; copy it back */
		if (chan->align_buf && frame_desc->actual_length &&
		    chan->ep_is_in) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_POSTREAD);
			memcpy(urb->buf + frame_desc->offset +
			       qtd->isoc_split_offset, chan->qh->dw_align_buf,
			       frame_desc->actual_length);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_PREREAD);
		}
		break;
	case DWC2_HC_XFER_FRAME_OVERRUN:
		urb->error_count++;
		if (chan->ep_is_in)
			frame_desc->status = -ENOSR;
		else
			frame_desc->status = -ECOMM;
		frame_desc->actual_length = 0;
		break;
	case DWC2_HC_XFER_BABBLE_ERR:
		urb->error_count++;
		frame_desc->status = -EOVERFLOW;
		/* Don't need to update actual_length in this case */
		break;
	case DWC2_HC_XFER_XACT_ERR:
		urb->error_count++;
		frame_desc->status = -EPROTO;
		/* Record whatever part of the frame did transfer */
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length &&
		    chan->ep_is_in) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_POSTREAD);
			memcpy(urb->buf + frame_desc->offset +
			       qtd->isoc_split_offset, chan->qh->dw_align_buf,
			       frame_desc->actual_length);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_PREREAD);
		}

		/* Skip whole frame */
		/* For a failed split IN under DMA, restart the split state */
		if (chan->qh->do_split &&
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
		}

		break;
	default:
		dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
			halt_status);
		break;
	}

	/* Advance to the next frame; complete the URB when all are done */
	if (++qtd->isoc_frame_index == urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers. The individual
		 * frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		halt_status = DWC2_HC_XFER_URB_COMPLETE;
	} else {
		halt_status = DWC2_HC_XFER_COMPLETE;
	}

	return halt_status;
}
639 
640 /*
641  * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
642  * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
643  * still linked to the QH, the QH is added to the end of the inactive
644  * non-periodic schedule. For periodic QHs, removes the QH from the periodic
645  * schedule if no more QTDs are linked to the QH.
646  */
647 STATIC void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
648 			       int free_qtd)
649 {
650 	int continue_split = 0;
651 	struct dwc2_qtd *qtd;
652 
653 	if (dbg_qh(qh))
654 		dev_vdbg(hsotg->dev, "  %s(%p,%p,%d)\n", __func__,
655 			 hsotg, qh, free_qtd);
656 
657 	if (TAILQ_EMPTY(&qh->qtd_list)) {
658 		dev_dbg(hsotg->dev, "## QTD list empty ##\n");
659 		goto no_qtd;
660 	}
661 
662 	qtd = TAILQ_FIRST(&qh->qtd_list);
663 
664 	if (qtd->complete_split)
665 		continue_split = 1;
666 	else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
667 		 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
668 		continue_split = 1;
669 
670 	if (free_qtd) {
671 		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
672 		continue_split = 0;
673 	}
674 
675 no_qtd:
676 	if (qh->channel)
677 		qh->channel->align_buf = 0;
678 	dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
679 	qh->channel = NULL;
680 }
681 
682 /**
683  * dwc2_release_channel() - Releases a host channel for use by other transfers
684  *
685  * @hsotg:       The HCD state structure
686  * @chan:        The host channel to release
687  * @qtd:         The QTD associated with the host channel. This QTD may be
688  *               freed if the transfer is complete or an error has occurred.
689  * @halt_status: Reason the channel is being released. This status
690  *               determines the actions taken by this function.
691  *
692  * Also attempts to select and queue more transactions since at least one host
693  * channel is available.
694  */
STATIC void dwc2_release_channel(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan,
				 struct dwc2_qtd *qtd,
				 enum dwc2_halt_status halt_status)
{
	enum dwc2_transaction_type tr_type;
	u32 haintmsk;
	int free_qtd = 0;	/* retire the QTD as part of deactivation? */

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "  %s: channel %d, halt_status %d\n",
			 __func__, chan->hc_num, halt_status);

	switch (halt_status) {
	case DWC2_HC_XFER_URB_COMPLETE:
		free_qtd = 1;
		break;
	/* Fatal per-transfer errors: retire the QTD */
	case DWC2_HC_XFER_AHB_ERR:
	case DWC2_HC_XFER_STALL:
	case DWC2_HC_XFER_BABBLE_ERR:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_XACT_ERR:
		/* Give up after three consecutive transaction errors */
		if (qtd && qtd->error_count >= 3) {
			dev_vdbg(hsotg->dev,
				 "  Complete URB with transaction error\n");
			free_qtd = 1;
			dwc2_host_complete(hsotg, qtd, -EPROTO);
		}
		break;
	case DWC2_HC_XFER_URB_DEQUEUE:
		/*
		 * The QTD has already been removed and the QH has been
		 * deactivated. Don't want to do anything except release the
		 * host channel and try to queue more transfers.
		 */
		goto cleanup;
	case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
		dev_vdbg(hsotg->dev, "  Complete URB with I/O error\n");
		free_qtd = 1;
		dwc2_host_complete(hsotg, qtd, -EIO);
		break;
	case DWC2_HC_XFER_NO_HALT_STATUS:
	default:
		break;
	}

	dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);

cleanup:
	/*
	 * Release the host channel for use by other transfers. The cleanup
	 * function clears the channel interrupt enables and conditions, so
	 * there's no need to clear the Channel Halted interrupt separately.
	 */
	/* Move the channel (back) to the head of the free list */
	if (chan->in_freelist != 0)
		LIST_REMOVE(chan, hc_list_entry);
	dwc2_hc_cleanup(hsotg, chan);
	LIST_INSERT_HEAD(&hsotg->free_hc_list, chan, hc_list_entry);
	chan->in_freelist = 1;

	/* Return the channel reservation to the appropriate accounting */
	if (hsotg->core_params->uframe_sched > 0) {
		hsotg->available_host_channels++;
	} else {
		switch (chan->ep_type) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			hsotg->non_periodic_channels--;
			break;
		default:
			/*
			 * Don't release reservations for periodic channels
			 * here. That's done when a periodic transfer is
			 * descheduled (i.e. when the QH is removed from the
			 * periodic schedule).
			 */
			break;
		}
	}

	/* Mask this channel's interrupt in the host-all interrupt mask */
	haintmsk = DWC2_READ_4(hsotg, HAINTMSK);
	haintmsk &= ~(1 << chan->hc_num);
	DWC2_WRITE_4(hsotg, HAINTMSK, haintmsk);

	/* Try to queue more transfers now that there's a free channel */
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);
}
784 
785 /*
786  * Halts a host channel. If the channel cannot be halted immediately because
787  * the request queue is full, this function ensures that the FIFO empty
788  * interrupt for the appropriate queue is enabled so that the halt request can
789  * be queued when there is space in the request queue.
790  *
791  * This function may also be called in DMA mode. In that case, the channel is
792  * simply released since the core always halts the channel automatically in
793  * DMA mode.
794  */
STATIC void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
			      enum dwc2_halt_status halt_status)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (hsotg->core_params->dma_enable > 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		/* In DMA mode the core halts automatically; just release */
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
		return;
	}

	/* Slave mode processing */
	dwc2_hc_halt(hsotg, chan, halt_status);

	/* Halt could not be issued yet: request queue was full */
	if (chan->halt_on_queue) {
		u32 gintmsk;

		dev_vdbg(hsotg->dev, "Halt on queue\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			/*
			 * Make sure the Non-periodic Tx FIFO empty interrupt
			 * is enabled so that the non-periodic schedule will
			 * be processed
			 */
			gintmsk = DWC2_READ_4(hsotg, GINTMSK);
			gintmsk |= GINTSTS_NPTXFEMP;
			DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
		} else {
			dev_vdbg(hsotg->dev, "isoc/intr\n");
			/*
			 * Move the QH from the periodic queued schedule to
			 * the periodic assigned schedule. This allows the
			 * halt to be queued when the periodic schedule is
			 * processed.
			 */
			TAILQ_REMOVE(&hsotg->periodic_sched_queued, chan->qh, qh_list_entry);
			TAILQ_INSERT_TAIL(&hsotg->periodic_sched_assigned, chan->qh, qh_list_entry);

			/*
			 * Make sure the Periodic Tx FIFO Empty interrupt is
			 * enabled so that the periodic schedule will be
			 * processed
			 */
			gintmsk = DWC2_READ_4(hsotg, GINTMSK);
			gintmsk |= GINTSTS_PTXFEMP;
			DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
		}
	}
}
849 
850 /*
851  * Performs common cleanup for non-periodic transfers after a Transfer
852  * Complete interrupt. This function should be called after any endpoint type
853  * specific handling is finished to release the host channel.
854  */
855 STATIC void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
856 					    struct dwc2_host_chan *chan,
857 					    int chnum, struct dwc2_qtd *qtd,
858 					    enum dwc2_halt_status halt_status)
859 {
860 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
861 
862 	qtd->error_count = 0;
863 
864 	if (chan->hcint & HCINTMSK_NYET) {
865 		/*
866 		 * Got a NYET on the last transaction of the transfer. This
867 		 * means that the endpoint should be in the PING state at the
868 		 * beginning of the next transfer.
869 		 */
870 		dev_vdbg(hsotg->dev, "got NYET\n");
871 		chan->qh->ping_state = 1;
872 	}
873 
874 	/*
875 	 * Always halt and release the host channel to make it available for
876 	 * more transfers. There may still be more phases for a control
877 	 * transfer or more data packets for a bulk transfer at this point,
878 	 * but the host channel is still halted. A channel will be reassigned
879 	 * to the transfer when the non-periodic schedule is processed after
880 	 * the channel is released. This allows transactions to be queued
881 	 * properly via dwc2_hcd_queue_transactions, which also enables the
882 	 * Tx FIFO Empty interrupt if necessary.
883 	 */
884 	if (chan->ep_is_in) {
885 		/*
886 		 * IN transfers in Slave mode require an explicit disable to
887 		 * halt the channel. (In DMA mode, this call simply releases
888 		 * the channel.)
889 		 */
890 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
891 	} else {
892 		/*
893 		 * The channel is automatically disabled by the core for OUT
894 		 * transfers in Slave mode
895 		 */
896 		dwc2_release_channel(hsotg, chan, qtd, halt_status);
897 	}
898 }
899 
900 /*
901  * Performs common cleanup for periodic transfers after a Transfer Complete
902  * interrupt. This function should be called after any endpoint type specific
903  * handling is finished to release the host channel.
904  */
905 STATIC void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
906 					struct dwc2_host_chan *chan, int chnum,
907 					struct dwc2_qtd *qtd,
908 					enum dwc2_halt_status halt_status)
909 {
910 	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
911 
912 	qtd->error_count = 0;
913 
914 	if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
915 		/* Core halts channel in these cases */
916 		dwc2_release_channel(hsotg, chan, qtd, halt_status);
917 	else
918 		/* Flush any outstanding requests from the Tx queue */
919 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
920 }
921 
/*
 * Handles a Transfer Complete on the IN path of an isochronous split
 * transfer. Accumulates received data into the current iso frame
 * descriptor, copying out of the DMA bounce buffer when one is in use,
 * and advances the QTD's isoc frame bookkeeping.
 *
 * Return: 1 if the host channel was released, 0 otherwise
 */
STATIC int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u32 len;

	/* URB may already have been dequeued */
	if (!qtd->urb)
		return 0;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
					  DWC2_HC_XFER_COMPLETE, NULL);
	if (!len) {
		/* Nothing received: restart this split from the beginning */
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
		return 0;
	}

	frame_desc->actual_length += len;

	if (chan->align_buf) {
		/* Non-aligned buffer: copy received bytes out of the bounce buffer */
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		usb_syncmem(qtd->urb->usbdma, 0, qtd->urb->length,
			    BUS_DMASYNC_POSTREAD);
		memcpy(qtd->urb->buf + frame_desc->offset +
		       qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
		usb_syncmem(qtd->urb->usbdma, 0, qtd->urb->length,
			    BUS_DMASYNC_PREREAD);
	}

	qtd->isoc_split_offset += len;

	if (frame_desc->actual_length >= frame_desc->length) {
		/* This frame descriptor is full; advance to the next frame */
		frame_desc->status = 0;
		qtd->isoc_frame_index++;
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
	}

	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
		/* All frames transferred; complete the URB */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_URB_COMPLETE);
	} else {
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_NO_HALT_STATUS);
	}

	return 1;	/* Indicates that channel released */
}
973 
/*
 * Handles a host channel Transfer Complete interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 *
 * Updates the QTD/URB state by endpoint type, completes the URB when the
 * whole transfer is done, and then hands off to the periodic or
 * non-periodic completion helper to halt/release the channel.
 */
STATIC void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
				  struct dwc2_host_chan *chan, int chnum,
				  struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
	int pipe_type;
	int urb_xfer_done;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev,
			 "--Host Channel %d Interrupt: Transfer Complete--\n",
			 chnum);

	/* Nothing to update if the URB is already gone */
	if (!urb)
		goto handle_xfercomp_done;

	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);

	if (hsotg->core_params->dma_desc_enable > 0) {
		/* Descriptor DMA mode: the DDMA code completes the transfer */
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
		if (pipe_type == USB_ENDPOINT_XFER_ISOC)
			/* Do not disable the interrupt, just clear it */
			return;
		goto handle_xfercomp_done;
	}

	/* Handle xfer complete on CSPLIT */
	if (chan->qh->do_split) {
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			/*
			 * Isoc IN split in DMA mode: if the helper released
			 * the channel there is nothing more to do here
			 */
			if (qtd->complete_split &&
			    dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
							qtd))
				goto handle_xfercomp_done;
		} else {
			qtd->complete_split = 0;
		}
	}

	/* Update the QTD and URB states */
	switch (pipe_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			/* DATA phase only when the request moves data */
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 "  Control setup transaction done\n");
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_DATA:
			urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
							      chnum, urb, qtd);
			if (urb_xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 "  Control data transfer done\n");
			} else {
				/* More DATA transactions; remember the toggle */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_STATUS:
			dev_vdbg(hsotg->dev, "  Control transfer complete\n");
			if (urb->status == -EINPROGRESS)
				urb->status = 0;
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
			break;
		}

		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
						halt_status);
		break;
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, "  Bulk transfer complete\n");
		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
						      qtd);
		if (urb_xfer_done) {
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
		} else {
			halt_status = DWC2_HC_XFER_COMPLETE;
		}

		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
						halt_status);
		break;
	case USB_ENDPOINT_XFER_INT:
		dev_vdbg(hsotg->dev, "  Interrupt transfer complete\n");
		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
						      qtd);

		/*
		 * Interrupt URB is done on the first transfer complete
		 * interrupt
		 */
		if (urb_xfer_done) {
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
		} else {
			halt_status = DWC2_HC_XFER_COMPLETE;
		}

		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
					    halt_status);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "  Isochronous transfer complete\n");
		if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
					chnum, qtd, DWC2_HC_XFER_COMPLETE);
		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
					    halt_status);
		break;
	}

handle_xfercomp_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
}
1105 
1106 /*
1107  * Handles a host channel STALL interrupt. This handler may be called in
1108  * either DMA mode or Slave mode.
1109  */
1110 STATIC void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1111 			       struct dwc2_host_chan *chan, int chnum,
1112 			       struct dwc2_qtd *qtd)
1113 {
1114 	struct dwc2_hcd_urb *urb = qtd->urb;
1115 	int pipe_type;
1116 
1117 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1118 		chnum);
1119 
1120 	if (hsotg->core_params->dma_desc_enable > 0) {
1121 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1122 					    DWC2_HC_XFER_STALL);
1123 		goto handle_stall_done;
1124 	}
1125 
1126 	if (!urb)
1127 		goto handle_stall_halt;
1128 
1129 	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1130 
1131 	if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
1132 		dwc2_host_complete(hsotg, qtd, -EPIPE);
1133 
1134 	if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1135 	    pipe_type == USB_ENDPOINT_XFER_INT) {
1136 		dwc2_host_complete(hsotg, qtd, -EPIPE);
1137 		/*
1138 		 * USB protocol requires resetting the data toggle for bulk
1139 		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1140 		 * setup command is issued to the endpoint. Anticipate the
1141 		 * CLEAR_FEATURE command since a STALL has occurred and reset
1142 		 * the data toggle now.
1143 		 */
1144 		chan->qh->data_toggle = 0;
1145 	}
1146 
1147 handle_stall_halt:
1148 	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1149 
1150 handle_stall_done:
1151 	disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1152 }
1153 
1154 /*
1155  * Updates the state of the URB when a transfer has been stopped due to an
1156  * abnormal condition before the transfer completes. Modifies the
1157  * actual_length field of the URB to reflect the number of bytes that have
1158  * actually been transferred via the host channel.
1159  */
1160 STATIC void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1161 				      struct dwc2_host_chan *chan, int chnum,
1162 				      struct dwc2_hcd_urb *urb,
1163 				      struct dwc2_qtd *qtd,
1164 				      enum dwc2_halt_status halt_status)
1165 {
1166 	u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
1167 						      qtd, halt_status, NULL);
1168 
1169 	if (urb->actual_length + xfer_length > urb->length) {
1170 		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
1171 		xfer_length = urb->length - urb->actual_length;
1172 	}
1173 
1174 	/* Non DWORD-aligned buffer case handling */
1175 	if (chan->align_buf && xfer_length && chan->ep_is_in) {
1176 		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
1177 		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_POSTREAD);
1178 		memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
1179 		       xfer_length);
1180 		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_PREREAD);
1181 	}
1182 
1183 	urb->actual_length += xfer_length;
1184 
1185 	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
1186 		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
1187 	dev_vdbg(hsotg->dev, "  chan->start_pkt_count %d\n",
1188 		 chan->start_pkt_count);
1189 	dev_vdbg(hsotg->dev, "  hctsiz.pktcnt %d\n",
1190 		 (DWC2_READ_4(hsotg, HCTSIZ(chnum)) & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
1191 	dev_vdbg(hsotg->dev, "  chan->max_packet %d\n", chan->max_packet);
1192 	dev_vdbg(hsotg->dev, "  bytes_transferred %d\n",
1193 		 xfer_length);
1194 	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n",
1195 		 urb->actual_length);
1196 	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n",
1197 		 urb->length);
1198 }
1199 
/*
 * Handles a host channel NAK interrupt. This handler may be called in either
 * DMA mode or Slave mode.
 */
STATIC void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
			     struct dwc2_host_chan *chan, int chnum,
			     struct dwc2_qtd *qtd)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
			 chnum);

	/*
	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
	 * interrupt. Re-start the SSPLIT transfer.
	 */
	if (chan->do_split) {
		/*
		 * When we get control/bulk NAKs then remember this so we holdoff on
		 * this qh until the beginning of the next frame
		 */
		switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			chan->qh->nak_frame = dwc2_hcd_get_frame_number(hsotg);
			break;
		}

		/* A NAK on the CSPLIT means the split restarts from SSPLIT */
		if (chan->complete_split)
			qtd->error_count = 0;
		qtd->complete_split = 0;
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
		goto handle_nak_done;
	}

	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
			/*
			 * NAK interrupts are enabled on bulk/control IN
			 * transfers in DMA mode for the sole purpose of
			 * resetting the error count after a transaction error
			 * occurs. The core will continue transferring data.
			 */
			qtd->error_count = 0;
			break;
		}

		/*
		 * NAK interrupts normally occur during OUT transfers in DMA
		 * or Slave mode. For IN transfers, more requests will be
		 * queued as request queue space is available.
		 */
		qtd->error_count = 0;

		if (!chan->qh->ping_state) {
			/* Rewind the URB to the last confirmed position */
			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
						  qtd, DWC2_HC_XFER_NAK);
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);

			/* High-speed OUT resumes with the PING protocol */
			if (chan->speed == USB_SPEED_HIGH)
				chan->qh->ping_state = 1;
		}

		/*
		 * Halt the channel so the transfer can be re-started from
		 * the appropriate point or the PING protocol will
		 * start/continue
		 */
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
		break;
	case USB_ENDPOINT_XFER_INT:
		qtd->error_count = 0;
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Should never get called for isochronous transfers */
		dev_err(hsotg->dev, "NACK interrupt for ISOC transfer\n");
		break;
	}

handle_nak_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
}
1285 
/*
 * Handles a host channel ACK interrupt. This interrupt is enabled when
 * performing the PING protocol in Slave mode, when errors occur during
 * either Slave mode or DMA mode, and during Start Split transactions.
 */
STATIC void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
			     struct dwc2_host_chan *chan, int chnum,
			     struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
			 chnum);

	if (chan->do_split) {
		/* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
		if (!chan->ep_is_in &&
		    chan->data_pid_start != DWC2_HC_PID_SETUP)
			/* Record how much the SSPLIT OUT actually sent */
			qtd->ssplit_out_xfer_count = chan->xfer_len;

		if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
			/* SSPLIT done; a CSPLIT follows next */
			qtd->complete_split = 1;
			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
		} else {
			/* ISOC OUT */
			switch (chan->xact_pos) {
			case DWC2_HCSPLT_XACTPOS_ALL:
				break;
			case DWC2_HCSPLT_XACTPOS_END:
				/* Final chunk sent; reset split state */
				qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
				qtd->isoc_split_offset = 0;
				break;
			case DWC2_HCSPLT_XACTPOS_BEGIN:
			case DWC2_HCSPLT_XACTPOS_MID:
				/*
				 * For BEGIN or MID, calculate the length for
				 * the next microframe to determine the correct
				 * SSPLIT token, either MID or END
				 */
				frame_desc = &qtd->urb->iso_descs[
						qtd->isoc_frame_index];
				/* 188 bytes is one microframe's worth of split data */
				qtd->isoc_split_offset += 188;

				if (frame_desc->length - qtd->isoc_split_offset
							<= 188)
					qtd->isoc_split_pos =
							DWC2_HCSPLT_XACTPOS_END;
				else
					qtd->isoc_split_pos =
							DWC2_HCSPLT_XACTPOS_MID;
				break;
			}
		}
	} else {
		/* Non-split transfer: an ACK clears the error count */
		qtd->error_count = 0;

		if (chan->qh->ping_state) {
			chan->qh->ping_state = 0;
			/*
			 * Halt the channel so the transfer can be re-started
			 * from the appropriate point. This only happens in
			 * Slave mode. In DMA mode, the ping_state is cleared
			 * when the transfer is started because the core
			 * automatically executes the PING, then the transfer.
			 */
			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
		}
	}

	/*
	 * If the ACK occurred when _not_ in the PING state, let the channel
	 * continue transferring data after clearing the error count
	 */
	disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
}
1362 
/*
 * Handles a host channel NYET interrupt. This interrupt should only occur on
 * Bulk and Control OUT endpoints and for complete split transactions. If a
 * NYET occurs at the same time as a Transfer Complete interrupt, it is
 * handled in the xfercomp interrupt handler, not here. This handler may be
 * called in either DMA mode or Slave mode.
 */
STATIC void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, int chnum,
			      struct dwc2_qtd *qtd)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
			 chnum);

	/*
	 * NYET on CSPLIT
	 * re-do the CSPLIT immediately on non-periodic
	 */
	if (chan->do_split && chan->complete_split) {
		if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
		    hsotg->core_params->dma_enable > 0) {
			/* Isoc IN split in DMA mode: give up on this frame */
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
			qtd->isoc_frame_index++;
			if (qtd->urb &&
			    qtd->isoc_frame_index == qtd->urb->packet_count) {
				/* Last frame reached; complete the URB */
				dwc2_host_complete(hsotg, qtd, 0);
				dwc2_release_channel(hsotg, chan, qtd,
						     DWC2_HC_XFER_URB_COMPLETE);
			} else {
				dwc2_release_channel(hsotg, chan, qtd,
						DWC2_HC_XFER_NO_HALT_STATUS);
			}
			goto handle_nyet_done;
		}

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			int frnum = dwc2_hcd_get_frame_number(hsotg);

			if (dwc2_full_frame_num(frnum) !=
			    dwc2_full_frame_num(chan->qh->sched_frame)) {
				/*
				 * No longer in the same full speed frame.
				 * Treat this as a transaction error.
				 */
#if 0
				/*
				 * Todo: Fix system performance so this can
				 * be treated as an error. Right now complete
				 * splits cannot be scheduled precisely enough
				 * due to other system activity, so this error
				 * occurs regularly in Slave mode.
				 */
				qtd->error_count++;
#endif
				qtd->complete_split = 0;
				dwc2_halt_channel(hsotg, chan, qtd,
						  DWC2_HC_XFER_XACT_ERR);
				/* Todo: add support for isoc release */
				goto handle_nyet_done;
			}
		}

		/* Still in the same frame: retry the CSPLIT */
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
		goto handle_nyet_done;
	}

	/* NYET on bulk/control OUT: switch to the PING protocol */
	chan->qh->ping_state = 1;
	qtd->error_count = 0;

	dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
				  DWC2_HC_XFER_NYET);
	dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);

	/*
	 * Halt the channel and re-start the transfer so the PING protocol
	 * will start
	 */
	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);

handle_nyet_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
}
1448 
1449 /*
1450  * Handles a host channel babble interrupt. This handler may be called in
1451  * either DMA mode or Slave mode.
1452  */
1453 STATIC void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1454 				struct dwc2_host_chan *chan, int chnum,
1455 				struct dwc2_qtd *qtd)
1456 {
1457 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1458 		chnum);
1459 
1460 // 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1461 
1462 	if (hsotg->core_params->dma_desc_enable > 0) {
1463 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1464 					    DWC2_HC_XFER_BABBLE_ERR);
1465 		goto disable_int;
1466 	}
1467 
1468 	if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1469 		dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
1470 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1471 	} else {
1472 		enum dwc2_halt_status halt_status;
1473 
1474 		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1475 						qtd, DWC2_HC_XFER_BABBLE_ERR);
1476 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1477 	}
1478 
1479 disable_int:
1480 	disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1481 }
1482 
/*
 * Handles a host channel AHB error interrupt. This handler is only called in
 * DMA mode.
 *
 * Dumps channel/URB state when built with DWC2_DEBUG, completes the URB
 * with -EIO, and forces a channel halt.
 */
STATIC void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;

	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
		chnum);

	/* No URB: nothing to complete, but the channel must still be halted */
	if (!urb)
		goto handle_ahberr_halt;

// 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);

#ifdef DWC2_DEBUG
	/* Debug-only register and URB state dump */
	const char *pipetype, *speed;

	u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
	u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum));
	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
	u32 hc_dma = DWC2_READ_4(hsotg, HCDMA(chnum));

	dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
	dev_err(hsotg->dev, "  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
	dev_err(hsotg->dev, "  hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
	dev_err(hsotg->dev, "  Device address: %d\n",
		dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_err(hsotg->dev, "  Endpoint: %d, %s\n",
		dwc2_hcd_get_ep_num(&urb->pipe_info),
		dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
		pipetype = "CONTROL";
		break;
	case USB_ENDPOINT_XFER_BULK:
		pipetype = "BULK";
		break;
	case USB_ENDPOINT_XFER_INT:
		pipetype = "INTERRUPT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		pipetype = "ISOCHRONOUS";
		break;
	default:
		pipetype = "UNKNOWN";
		break;
	}

	dev_err(hsotg->dev, "  Endpoint type: %s\n", pipetype);

	switch (chan->speed) {
	case USB_SPEED_HIGH:
		speed = "HIGH";
		break;
	case USB_SPEED_FULL:
		speed = "FULL";
		break;
	case USB_SPEED_LOW:
		speed = "LOW";
		break;
	default:
		speed = "UNKNOWN";
		break;
	}

	dev_err(hsotg->dev, "  Speed: %s\n", speed);

	dev_err(hsotg->dev, "  Max packet size: %d\n",
		dwc2_hcd_get_mps(&urb->pipe_info));
	dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
	dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
		urb->buf, (unsigned long)urb->dma);
	dev_err(hsotg->dev, "  Setup buffer: %p, Setup DMA: %08lx\n",
		urb->setup_packet, (unsigned long)urb->setup_dma);
	dev_err(hsotg->dev, "  Interval: %d\n", urb->interval);
#endif

	/* Core halts the channel for Descriptor DMA mode */
	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_AHB_ERR);
		goto handle_ahberr_done;
	}

	dwc2_host_complete(hsotg, qtd, -EIO);

handle_ahberr_halt:
	/*
	 * Force a channel halt. Don't call dwc2_halt_channel because that won't
	 * write to the HCCHARn register in DMA mode to force the halt.
	 */
	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);

handle_ahberr_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
}
1584 
/*
 * Handles a host channel transaction error interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 *
 * Increments the QTD error count for control/bulk/interrupt endpoints and
 * halts the channel so the transfer can be retried.
 */
STATIC void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 struct dwc2_qtd *qtd)
{
	dev_dbg(hsotg->dev,
		"--Host Channel %d Interrupt: Transaction Error--\n", chnum);

// 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);

	if (hsotg->core_params->dma_desc_enable > 0) {
		/* Descriptor DMA mode: the DDMA code finishes the transfer */
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_XACT_ERR);
		goto handle_xacterr_done;
	}

	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		qtd->error_count++;
		if (!chan->qh->ping_state) {

			/* Rewind the URB to the last confirmed position */
			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
						  qtd, DWC2_HC_XFER_XACT_ERR);
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
			/* High-speed OUT resumes with the PING protocol */
			if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
				chan->qh->ping_state = 1;
		}

		/*
		 * Halt the channel so the transfer can be re-started from
		 * the appropriate point or the PING protocol will start
		 */
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
		break;
	case USB_ENDPOINT_XFER_INT:
		qtd->error_count++;
		/* A failed CSPLIT means the split restarts from SSPLIT */
		if (chan->do_split && chan->complete_split)
			qtd->complete_split = 0;
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		{
			enum dwc2_halt_status halt_status;

			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
					chnum, qtd, DWC2_HC_XFER_XACT_ERR);
			dwc2_halt_channel(hsotg, chan, qtd, halt_status);
		}
		break;
	}

handle_xacterr_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
}
1643 
1644 /*
1645  * Handles a host channel frame overrun interrupt. This handler may be called
1646  * in either DMA mode or Slave mode.
1647  */
1648 STATIC void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1649 				  struct dwc2_host_chan *chan, int chnum,
1650 				  struct dwc2_qtd *qtd)
1651 {
1652 	enum dwc2_halt_status halt_status;
1653 
1654 	if (dbg_hc(chan))
1655 		dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1656 			chnum);
1657 
1658 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1659 
1660 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1661 	case USB_ENDPOINT_XFER_CONTROL:
1662 	case USB_ENDPOINT_XFER_BULK:
1663 		break;
1664 	case USB_ENDPOINT_XFER_INT:
1665 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1666 		break;
1667 	case USB_ENDPOINT_XFER_ISOC:
1668 		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1669 					qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1670 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1671 		break;
1672 	}
1673 
1674 	disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1675 }
1676 
1677 /*
1678  * Handles a host channel data toggle error interrupt. This handler may be
1679  * called in either DMA mode or Slave mode.
1680  */
1681 STATIC void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1682 				    struct dwc2_host_chan *chan, int chnum,
1683 				    struct dwc2_qtd *qtd)
1684 {
1685 	dev_dbg(hsotg->dev,
1686 		"--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1687 
1688 	if (chan->ep_is_in)
1689 		qtd->error_count = 0;
1690 	else
1691 		dev_err(hsotg->dev,
1692 			"Data Toggle Error on OUT transfer, channel %d\n",
1693 			chnum);
1694 
1695 // 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1696 	disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1697 }
1698 
/*
 * For debug only. It checks that a valid halt status is set and that
 * HCCHARn.chdis is clear. If there's a problem, corrective action is
 * taken and a warning is issued.
 *
 * Return: true if halt status is ok, false otherwise
 */
STATIC bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
#ifdef DWC2_DEBUG
	u32 hcchar;
	u32 hctsiz;
	u32 hcintmsk;
	u32 hcsplt;

	if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
		/*
		 * This code is here only as a check. This condition should
		 * never happen. Ignore the halt if it does occur.
		 */
		hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
		hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
		hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
		hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum));
		dev_dbg(hsotg->dev,
			"%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
			 __func__);
		dev_dbg(hsotg->dev,
			"channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
			chnum, hcchar, hctsiz);
		dev_dbg(hsotg->dev,
			"hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
			chan->hcint, hcintmsk, hcsplt);
		if (qtd)
			dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
				qtd->complete_split);
		dev_warn(hsotg->dev,
			 "%s: no halt status, channel %d, ignoring interrupt\n",
			 __func__, chnum);
		return false;
	}

	/*
	 * This code is here only as a check. hcchar.chdis should never be set
	 * when the halt interrupt occurs. Halt the channel again if it does
	 * occur.
	 */
	hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
	if (hcchar & HCCHAR_CHDIS) {
		dev_warn(hsotg->dev,
			 "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
			 __func__, hcchar);
		chan->halt_pending = 0;
		dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
		return false;
	}
#endif

	/* Non-debug builds: the checks are compiled out, always report OK */
	return true;
}
1761 
1762 /*
1763  * Handles a host Channel Halted interrupt in DMA mode. This handler
1764  * determines the reason the channel halted and proceeds accordingly.
1765  */
1766 STATIC void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
1767 				    struct dwc2_host_chan *chan, int chnum,
1768 				    struct dwc2_qtd *qtd)
1769 {
1770 	u32 hcintmsk;
1771 	int out_nak_enh = 0;
1772 
1773 	if (dbg_hc(chan))
1774 		dev_vdbg(hsotg->dev,
1775 			 "--Host Channel %d Interrupt: DMA Channel Halted--\n",
1776 			 chnum);
1777 
1778 	/*
1779 	 * For core with OUT NAK enhancement, the flow for high-speed
1780 	 * CONTROL/BULK OUT is handled a little differently
1781 	 */
1782 	if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
1783 		if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
1784 		    (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1785 		     chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
1786 			out_nak_enh = 1;
1787 		}
1788 	}
1789 
1790 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1791 	    (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
1792 	     hsotg->core_params->dma_desc_enable <= 0)) {
1793 		if (hsotg->core_params->dma_desc_enable > 0)
1794 			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1795 						    chan->halt_status);
1796 		else
1797 			/*
1798 			 * Just release the channel. A dequeue can happen on a
1799 			 * transfer timeout. In the case of an AHB Error, the
1800 			 * channel was forced to halt because there's no way to
1801 			 * gracefully recover.
1802 			 */
1803 			dwc2_release_channel(hsotg, chan, qtd,
1804 					     chan->halt_status);
1805 		return;
1806 	}
1807 
1808 	hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
1809 
1810 	if (chan->hcint & HCINTMSK_XFERCOMPL) {
1811 		/*
1812 		 * Todo: This is here because of a possible hardware bug. Spec
1813 		 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
1814 		 * interrupt w/ACK bit set should occur, but I only see the
1815 		 * XFERCOMP bit, even with it masked out. This is a workaround
1816 		 * for that behavior. Should fix this when hardware is fixed.
1817 		 */
1818 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
1819 			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1820 		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
1821 	} else if (chan->hcint & HCINTMSK_STALL) {
1822 		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
1823 	} else if ((chan->hcint & HCINTMSK_XACTERR) &&
1824 		   hsotg->core_params->dma_desc_enable <= 0) {
1825 		if (out_nak_enh) {
1826 			if (chan->hcint &
1827 			    (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
1828 				dev_vdbg(hsotg->dev,
1829 					 "XactErr with NYET/NAK/ACK\n");
1830 				qtd->error_count = 0;
1831 			} else {
1832 				dev_vdbg(hsotg->dev,
1833 					 "XactErr without NYET/NAK/ACK\n");
1834 			}
1835 		}
1836 
1837 		/*
1838 		 * Must handle xacterr before nak or ack. Could get a xacterr
1839 		 * at the same time as either of these on a BULK/CONTROL OUT
1840 		 * that started with a PING. The xacterr takes precedence.
1841 		 */
1842 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1843 	} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
1844 		   hsotg->core_params->dma_desc_enable > 0) {
1845 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1846 	} else if ((chan->hcint & HCINTMSK_AHBERR) &&
1847 		   hsotg->core_params->dma_desc_enable > 0) {
1848 		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
1849 	} else if (chan->hcint & HCINTMSK_BBLERR) {
1850 		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
1851 	} else if (chan->hcint & HCINTMSK_FRMOVRUN) {
1852 		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
1853 	} else if (!out_nak_enh) {
1854 		if (chan->hcint & HCINTMSK_NYET) {
1855 			/*
1856 			 * Must handle nyet before nak or ack. Could get a nyet
1857 			 * at the same time as either of those on a BULK/CONTROL
1858 			 * OUT that started with a PING. The nyet takes
1859 			 * precedence.
1860 			 */
1861 			dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
1862 		} else if ((chan->hcint & HCINTMSK_NAK) &&
1863 			   !(hcintmsk & HCINTMSK_NAK)) {
1864 			/*
1865 			 * If nak is not masked, it's because a non-split IN
1866 			 * transfer is in an error state. In that case, the nak
1867 			 * is handled by the nak interrupt handler, not here.
1868 			 * Handle nak here for BULK/CONTROL OUT transfers, which
1869 			 * halt on a NAK to allow rewinding the buffer pointer.
1870 			 */
1871 			dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
1872 		} else if ((chan->hcint & HCINTMSK_ACK) &&
1873 			   !(hcintmsk & HCINTMSK_ACK)) {
1874 			/*
1875 			 * If ack is not masked, it's because a non-split IN
1876 			 * transfer is in an error state. In that case, the ack
1877 			 * is handled by the ack interrupt handler, not here.
1878 			 * Handle ack here for split transfers. Start splits
1879 			 * halt on ACK.
1880 			 */
1881 			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1882 		} else {
1883 			if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1884 			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1885 				/*
1886 				 * A periodic transfer halted with no other
1887 				 * channel interrupts set. Assume it was halted
1888 				 * by the core because it could not be completed
1889 				 * in its scheduled (micro)frame.
1890 				 */
1891 				dev_dbg(hsotg->dev,
1892 					"%s: Halt channel %d (assume incomplete periodic transfer)\n",
1893 					__func__, chnum);
1894 				dwc2_halt_channel(hsotg, chan, qtd,
1895 					DWC2_HC_XFER_PERIODIC_INCOMPLETE);
1896 			} else {
1897 				dev_err(hsotg->dev,
1898 					"%s: Channel %d - ChHltd set, but reason is unknown\n",
1899 					__func__, chnum);
1900 				dev_err(hsotg->dev,
1901 					"hcint 0x%08x, intsts 0x%08x\n",
1902 					chan->hcint,
1903 					DWC2_READ_4(hsotg, GINTSTS));
1904 				goto error;
1905 			}
1906 		}
1907 	} else {
1908 		dev_info(hsotg->dev,
1909 			 "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
1910 			 chan->hcint);
1911 error:
1912 		/* use the 3-strikes rule */
1913 		qtd->error_count++;
1914 		dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1915 					    qtd, DWC2_HC_XFER_XACT_ERR);
1916 		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1917 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1918 	}
1919 }
1920 
1921 /*
1922  * Handles a host channel Channel Halted interrupt
1923  *
1924  * In slave mode, this handler is called only when the driver specifically
1925  * requests a halt. This occurs during handling other host channel interrupts
1926  * (e.g. nak, xacterr, stall, nyet, etc.).
1927  *
1928  * In DMA mode, this is the interrupt that occurs when the core has finished
1929  * processing a transfer on a channel. Other host channel interrupts (except
1930  * ahberr) are disabled in DMA mode.
1931  */
1932 STATIC void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
1933 				struct dwc2_host_chan *chan, int chnum,
1934 				struct dwc2_qtd *qtd)
1935 {
1936 	if (dbg_hc(chan))
1937 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
1938 			 chnum);
1939 
1940 	if (hsotg->core_params->dma_enable > 0) {
1941 		dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
1942 	} else {
1943 		if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
1944 			return;
1945 		dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
1946 	}
1947 }
1948 
1949 /*
1950  * Check if the given qtd is still the top of the list (and thus valid).
1951  *
1952  * If dwc2_hcd_qtd_unlink_and_free() has been called since we grabbed
1953  * the qtd from the top of the list, this will return false (otherwise true).
1954  */
1955 STATIC bool dwc2_check_qtd_still_ok(struct dwc2_qtd *qtd, struct dwc2_qh *qh)
1956 {
1957 	if (!qh)
1958 		return false;
1959 
1960 	return (TAILQ_FIRST(&qh->qtd_list) == qtd);
1961 }
1962 
/*
 * Handles interrupt for a specific Host Channel
 *
 * Reads and acknowledges the channel's raw interrupt status, then dispatches
 * each unmasked condition to its dedicated handler. After every handler call
 * the qtd is re-validated, because a handler may have unlinked and freed it.
 */
STATIC void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan;
	u32 hcint, hcintmsk;

	chan = hsotg->hc_ptr_array[chnum];

	hcint = DWC2_READ_4(hsotg, HCINT(chnum));
	hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
	if (!chan) {
		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
		/* Ack the raw status anyway so the interrupt does not repeat */
		DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
		return;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
			 chnum);
		dev_vdbg(hsotg->dev,
			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			 hcint, hcintmsk, hcint & hcintmsk);
	}

	/*
	 * Ack all raw causes; keep the full raw set in chan->hcint for the
	 * handlers, but dispatch below only on the unmasked subset.
	 */
	DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
	chan->hcint = hcint;
	hcint &= hcintmsk;

	/*
	 * If the channel was halted due to a dequeue, the qtd list might
	 * be empty or at least the first entry will not be the active qtd.
	 * In this case, take a shortcut and just release the channel.
	 */
	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		/*
		 * If the channel was halted, this should be the only
		 * interrupt unmasked
		 */
		WARN_ON(hcint != HCINTMSK_CHHLTD);
		if (hsotg->core_params->dma_desc_enable > 0)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			dwc2_release_channel(hsotg, chan, NULL,
					     chan->halt_status);
		return;
	}

	if (TAILQ_EMPTY(&chan->qh->qtd_list)) {
		/*
		 * TODO: Will this ever happen with the
		 * DWC2_HC_XFER_URB_DEQUEUE handling above?
		 */
		dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
			chnum);
		dev_dbg(hsotg->dev,
			"  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			chan->hcint, hcintmsk, hcint);
		chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
		disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
		chan->hcint = 0;
		return;
	}

	qtd = TAILQ_FIRST(&chan->qh->qtd_list);

	if (hsotg->core_params->dma_enable <= 0) {
		/*
		 * Slave mode: when Channel Halted arrives together with other
		 * causes, service those causes and ignore the halt itself.
		 */
		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
			hcint &= ~HCINTMSK_CHHLTD;
	}

	if (hcint & HCINTMSK_XFERCOMPL) {
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
		/*
		 * If NYET occurred at same time as Xfer Complete, the NYET is
		 * handled by the Xfer Complete interrupt handler. Don't want
		 * to call the NYET interrupt handler in this case.
		 */
		hcint &= ~HCINTMSK_NYET;
	}

	/*
	 * Each handler below may unlink and free the qtd; stop dispatching as
	 * soon as the qtd is no longer the head of the qh's list.
	 */
	if (hcint & HCINTMSK_CHHLTD) {
		dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_AHBERR) {
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_STALL) {
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_NAK) {
		dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_ACK) {
		dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_NYET) {
		dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_XACTERR) {
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_BBLERR) {
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_FRMOVRUN) {
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_DATATGLERR) {
		dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}

exit:
	chan->hcint = 0;
}
2099 
2100 /*
2101  * This interrupt indicates that one or more host channels has a pending
2102  * interrupt. There are multiple conditions that can cause each host channel
2103  * interrupt. This function determines which conditions have occurred for each
2104  * host channel interrupt and handles them appropriately.
2105  */
2106 STATIC void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2107 {
2108 	u32 haint;
2109 	int i;
2110 
2111 	haint = DWC2_READ_4(hsotg, HAINT);
2112 	if (dbg_perio()) {
2113 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
2114 
2115 		dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2116 	}
2117 
2118 	for (i = 0; i < hsotg->core_params->host_channels; i++) {
2119 		if (haint & (1 << i))
2120 			dwc2_hc_n_intr(hsotg, i);
2121 	}
2122 }
2123 
/*
 * This function handles interrupts for the HCD
 *
 * Returns IRQ_HANDLED if a host-mode interrupt was pending and serviced,
 * IRQ_NONE otherwise. Must be called with hsotg->lock held
 * (MUTEX_ASSERT_LOCKED below enforces this).
 */
irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
{
	u32 gintsts, dbg_gintsts;
	irqreturn_t retval = IRQ_NONE;

	if (!dwc2_is_controller_alive(hsotg)) {
		dev_warn(hsotg->dev, "Controller is dead\n");
		return retval;
	}

	MUTEX_ASSERT_LOCKED(&hsotg->lock);

	/* Check if HOST Mode */
	if (dwc2_is_host_mode(hsotg)) {
		/* Pending, unmasked core interrupts; zero means nothing to do */
		gintsts = dwc2_read_core_intr(hsotg);
		if (!gintsts) {
			return retval;
		}

		retval = IRQ_HANDLED;

		/*
		 * dbg_gintsts masks out high-frequency sources (SOF, channel,
		 * FIFO level) for the debug printouts only; the real gintsts
		 * still drives the dispatch below.
		 */
		dbg_gintsts = gintsts;
#ifndef DEBUG_SOF
		dbg_gintsts &= ~GINTSTS_SOF;
#endif
		if (!dbg_perio())
			dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
					 GINTSTS_PTXFEMP);

		/* Only print if there are any non-suppressed interrupts left */
		if (dbg_gintsts)
			dev_vdbg(hsotg->dev,
				 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
				 gintsts);

		/* Dispatch each pending source to its handler */
		if (gintsts & GINTSTS_SOF)
			dwc2_sof_intr(hsotg);
		if (gintsts & GINTSTS_RXFLVL)
			dwc2_rx_fifo_level_intr(hsotg);
		if (gintsts & GINTSTS_NPTXFEMP)
			dwc2_np_tx_fifo_empty_intr(hsotg);
		if (gintsts & GINTSTS_PRTINT)
			dwc2_port_intr(hsotg);
		if (gintsts & GINTSTS_HCHINT)
			dwc2_hc_intr(hsotg);
		if (gintsts & GINTSTS_PTXFEMP)
			dwc2_perio_tx_fifo_empty_intr(hsotg);

		if (dbg_gintsts) {
			dev_vdbg(hsotg->dev,
				 "DWC OTG HCD Finished Servicing Interrupts\n");
			dev_vdbg(hsotg->dev,
				 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
				 DWC2_READ_4(hsotg, GINTSTS),
				 DWC2_READ_4(hsotg, GINTMSK));
		}
	}

	return retval;
}
2185