xref: /openbsd/sys/dev/usb/dwc2/dwc2_hcdddma.c (revision 73471bf0)
1 /*	$OpenBSD: dwc2_hcdddma.c,v 1.20 2021/11/28 09:25:02 mglocker Exp $	*/
2 /*	$NetBSD: dwc2_hcdddma.c,v 1.6 2014/04/03 06:34:58 skrll Exp $	*/
3 
4 /*
5  * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
6  *
7  * Copyright (C) 2004-2013 Synopsys, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions, and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The names of the above-listed copyright holders may not be used
19  *    to endorse or promote products derived from this software without
20  *    specific prior written permission.
21  *
22  * ALTERNATIVELY, this software may be distributed under the terms of the
23  * GNU General Public License ("GPL") as published by the Free Software
24  * Foundation; either version 2 of the License, or (at your option) any
25  * later version.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
28  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
29  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * This file contains the Descriptor DMA implementation for Host mode
42  */
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/malloc.h>
48 
49 #include <machine/bus.h>
50 
51 #include <dev/usb/usb.h>
52 #include <dev/usb/usbdi.h>
53 #include <dev/usb/usbdivar.h>
54 #include <dev/usb/usb_mem.h>
55 
56 #include <dev/usb/dwc2/dwc2.h>
57 #include <dev/usb/dwc2/dwc2var.h>
58 
59 #include <dev/usb/dwc2/dwc2_core.h>
60 #include <dev/usb/dwc2/dwc2_hcd.h>
61 
62 STATIC u16 dwc2_frame_list_idx(u16 frame)
63 {
64 	return frame & (FRLISTEN_64_SIZE - 1);
65 }
66 
67 STATIC u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
68 {
69 	return (idx + inc) &
70 		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
71 		  MAX_DMA_DESC_NUM_GENERIC) - 1);
72 }
73 
74 STATIC u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
75 {
76 	return (idx - inc) &
77 		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
78 		  MAX_DMA_DESC_NUM_GENERIC) - 1);
79 }
80 
81 STATIC u16 dwc2_max_desc_num(struct dwc2_qh *qh)
82 {
83 	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
84 		qh->dev_speed == USB_SPEED_HIGH) ?
85 		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
86 }
87 
88 STATIC u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
89 {
90 	return qh->dev_speed == USB_SPEED_HIGH ?
91 	       (qh->interval + 8 - 1) / 8 : qh->interval;
92 }
93 
94 STATIC int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
95 				gfp_t flags)
96 {
97 	int err;
98 
99 	qh->desc_list = NULL;
100 	qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
101 						dwc2_max_desc_num(qh);
102 
103 	err = usb_allocmem(&hsotg->hsotg_sc->sc_bus, qh->desc_list_sz, 0,
104 	    USB_DMA_COHERENT, &qh->desc_list_usbdma);
105 
106 	if (err)
107 		return -ENOMEM;
108 
109 	qh->desc_list = KERNADDR(&qh->desc_list_usbdma, 0);
110 	qh->desc_list_dma = DMAADDR(&qh->desc_list_usbdma, 0);
111 
112 	qh->n_bytes = malloc(sizeof(u32) * dwc2_max_desc_num(qh), M_USBHC,
113 	    M_ZERO | M_WAITOK);
114 
115 	if (!qh->n_bytes) {
116 		usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->desc_list_usbdma);
117 		qh->desc_list = NULL;
118 		return -ENOMEM;
119 	}
120 
121 	return 0;
122 }
123 
124 STATIC void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
125 {
126 
127 	if (qh->desc_list) {
128 		usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->desc_list_usbdma);
129 		qh->desc_list = NULL;
130 	}
131 
132 	free(qh->n_bytes, M_USBHC, sizeof(u32) * dwc2_max_desc_num(qh));
133 	qh->n_bytes = NULL;
134 }
135 
136 STATIC int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
137 {
138 	int err;
139 
140 	if (hsotg->frame_list)
141 		return 0;
142 
143 	/* XXXNH - pool_cache_t */
144 	hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
145 	hsotg->frame_list = NULL;
146 	err = usb_allocmem(&hsotg->hsotg_sc->sc_bus, hsotg->frame_list_sz,
147 	    0, USB_DMA_COHERENT, &hsotg->frame_list_usbdma);
148 
149 	if (!err) {
150 		hsotg->frame_list = KERNADDR(&hsotg->frame_list_usbdma, 0);
151 		hsotg->frame_list_dma = DMAADDR(&hsotg->frame_list_usbdma, 0);
152 	}
153 
154 	if (!hsotg->frame_list)
155 		return -ENOMEM;
156 
157 	return 0;
158 }
159 
160 STATIC void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
161 {
162 	struct usb_dma frame_list_usbdma;
163 	unsigned long flags;
164 
165 	spin_lock_irqsave(&hsotg->lock, flags);
166 
167 	if (!hsotg->frame_list) {
168 		spin_unlock_irqrestore(&hsotg->lock, flags);
169 		return;
170 	}
171 
172 	frame_list_usbdma = hsotg->frame_list_usbdma;
173 	hsotg->frame_list = NULL;
174 
175 	spin_unlock_irqrestore(&hsotg->lock, flags);
176 
177 	usb_freemem(&hsotg->hsotg_sc->sc_bus, &frame_list_usbdma);
178 }
179 
180 STATIC void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
181 {
182 	u32 hcfg;
183 	unsigned long flags;
184 
185 	spin_lock_irqsave(&hsotg->lock, flags);
186 
187 	hcfg = DWC2_READ_4(hsotg, HCFG);
188 	if (hcfg & HCFG_PERSCHEDENA) {
189 		/* already enabled */
190 		spin_unlock_irqrestore(&hsotg->lock, flags);
191 		return;
192 	}
193 
194 	DWC2_WRITE_4(hsotg, HFLBADDR, hsotg->frame_list_dma);
195 
196 	hcfg &= ~HCFG_FRLISTEN_MASK;
197 	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
198 	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
199 	DWC2_WRITE_4(hsotg, HCFG, hcfg);
200 
201 	spin_unlock_irqrestore(&hsotg->lock, flags);
202 }
203 
204 STATIC void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
205 {
206 	u32 hcfg;
207 	unsigned long flags;
208 
209 	spin_lock_irqsave(&hsotg->lock, flags);
210 
211 	hcfg = DWC2_READ_4(hsotg, HCFG);
212 	if (!(hcfg & HCFG_PERSCHEDENA)) {
213 		/* already disabled */
214 		spin_unlock_irqrestore(&hsotg->lock, flags);
215 		return;
216 	}
217 
218 	hcfg &= ~HCFG_PERSCHEDENA;
219 	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
220 	DWC2_WRITE_4(hsotg, HCFG, hcfg);
221 
222 	spin_unlock_irqrestore(&hsotg->lock, flags);
223 }
224 
225 /*
226  * Activates/Deactivates FrameList entries for the channel based on endpoint
227  * servicing period
228  */
229 STATIC void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
230 				   int enable)
231 {
232 	struct dwc2_host_chan *chan;
233 	u16 i, j, inc;
234 
235 	if (!hsotg) {
236 		printf("hsotg = %p\n", hsotg);
237 		return;
238 	}
239 
240 	if (!qh->channel) {
241 		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
242 		return;
243 	}
244 
245 	if (!hsotg->frame_list) {
246 		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
247 			hsotg->frame_list);
248 		return;
249 	}
250 
251 	chan = qh->channel;
252 	inc = dwc2_frame_incr_val(qh);
253 	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
254 		i = dwc2_frame_list_idx(qh->sched_frame);
255 	else
256 		i = 0;
257 
258 	j = i;
259 	do {
260 		if (enable)
261 			hsotg->frame_list[j] |= 1 << chan->hc_num;
262 		else
263 			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
264 		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
265 	} while (j != i);
266 
267 	/*
268 	 * Sync frame list since controller will access it if periodic
269 	 * channel is currently enabled.
270 	 */
271 	usb_syncmem(&hsotg->frame_list_usbdma, 0, hsotg->frame_list_sz,
272 	    BUS_DMASYNC_PREWRITE);
273 
274 	if (!enable)
275 		return;
276 
277 	chan->schinfo = 0;
278 	if (chan->speed == USB_SPEED_HIGH && qh->interval) {
279 		j = 1;
280 		/* TODO - check this */
281 		inc = (8 + qh->interval - 1) / qh->interval;
282 		for (i = 0; i < inc; i++) {
283 			chan->schinfo |= j;
284 			j = j << qh->interval;
285 		}
286 	} else {
287 		chan->schinfo = 0xff;
288 	}
289 }
290 
291 STATIC void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
292 				      struct dwc2_qh *qh)
293 {
294 	struct dwc2_host_chan *chan = qh->channel;
295 
296 	if (dwc2_qh_is_non_per(qh)) {
297 		if (hsotg->core_params->uframe_sched > 0)
298 			hsotg->available_host_channels++;
299 		else
300 			hsotg->non_periodic_channels--;
301 	} else {
302 		dwc2_update_frame_list(hsotg, qh, 0);
303 		hsotg->available_host_channels++;
304 	}
305 
306 	/*
307 	 * The condition is added to prevent double cleanup try in case of
308 	 * device disconnect. See channel cleanup in dwc2_hcd_disconnect().
309 	 */
310 	if (chan->qh) {
311 		if (!list_empty(&chan->hc_list_entry))
312 			list_del(&chan->hc_list_entry);
313 		dwc2_hc_cleanup(hsotg, chan);
314 		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
315 		chan->qh = NULL;
316 	}
317 
318 	qh->channel = NULL;
319 	qh->ntd = 0;
320 
321 	if (qh->desc_list)
322 		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
323 		       dwc2_max_desc_num(qh));
324 }
325 
326 /**
327  * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
328  * related members
329  *
330  * @hsotg: The HCD state structure for the DWC OTG controller
331  * @qh:    The QH to init
332  *
333  * Return: 0 if successful, negative error code otherwise
334  *
335  * Allocates memory for the descriptor list. For the first periodic QH,
336  * allocates memory for the FrameList and enables periodic scheduling.
337  */
338 int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
339 			  gfp_t mem_flags)
340 {
341 	int retval;
342 
343 	if (qh->do_split) {
344 		dev_err(hsotg->dev,
345 			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
346 		retval = -EINVAL;
347 		goto err0;
348 	}
349 
350 	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
351 	if (retval)
352 		goto err0;
353 
354 	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
355 	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
356 		if (!hsotg->frame_list) {
357 			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
358 			if (retval)
359 				goto err1;
360 			/* Enable periodic schedule on first periodic QH */
361 			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
362 		}
363 	}
364 
365 	qh->ntd = 0;
366 	return 0;
367 
368 err1:
369 	dwc2_desc_list_free(hsotg, qh);
370 err0:
371 	return retval;
372 }
373 
374 /**
375  * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
376  * members
377  *
378  * @hsotg: The HCD state structure for the DWC OTG controller
379  * @qh:    The QH to free
380  *
381  * Frees descriptor list memory associated with the QH. If QH is periodic and
382  * the last, frees FrameList memory and disables periodic scheduling.
383  */
384 void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
385 {
386 	unsigned long flags;
387 
388 	dwc2_desc_list_free(hsotg, qh);
389 
390 	/*
391 	 * Channel still assigned due to some reasons.
392 	 * Seen on Isoc URB dequeue. Channel halted but no subsequent
393 	 * ChHalted interrupt to release the channel. Afterwards
394 	 * when it comes here from endpoint disable routine
395 	 * channel remains assigned.
396 	 */
397 	spin_lock_irqsave(&hsotg->lock, flags);
398 	if (qh->channel)
399 		dwc2_release_channel_ddma(hsotg, qh);
400 	spin_unlock_irqrestore(&hsotg->lock, flags);
401 
402 	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
403 	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
404 	    (hsotg->core_params->uframe_sched > 0 ||
405 	     !hsotg->periodic_channels) && hsotg->frame_list) {
406 		dwc2_per_sched_disable(hsotg);
407 		dwc2_frame_list_free(hsotg);
408 	}
409 }
410 
411 STATIC u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
412 {
413 	if (qh->dev_speed == USB_SPEED_HIGH)
414 		/* Descriptor set (8 descriptors) index which is 8-aligned */
415 		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
416 	else
417 		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
418 }
419 
420 /*
421  * Determine starting frame for Isochronous transfer.
422  * Few frames skipped to prevent race condition with HC.
423  */
424 STATIC u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
425 				    struct dwc2_qh *qh, u16 *skip_frames)
426 {
427 	u16 frame;
428 
429 	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
430 
431 	/* sched_frame is always frame number (not uFrame) both in FS and HS! */
432 
433 	/*
434 	 * skip_frames is used to limit activated descriptors number
435 	 * to avoid the situation when HC services the last activated
436 	 * descriptor firstly.
437 	 * Example for FS:
438 	 * Current frame is 1, scheduled frame is 3. Since HC always fetches
439 	 * the descriptor corresponding to curr_frame+1, the descriptor
440 	 * corresponding to frame 2 will be fetched. If the number of
441 	 * descriptors is max=64 (or greather) the list will be fully programmed
442 	 * with Active descriptors and it is possible case (rare) that the
443 	 * latest descriptor(considering rollback) corresponding to frame 2 will
444 	 * be serviced first. HS case is more probable because, in fact, up to
445 	 * 11 uframes (16 in the code) may be skipped.
446 	 */
447 	if (qh->dev_speed == USB_SPEED_HIGH) {
448 		/*
449 		 * Consider uframe counter also, to start xfer asap. If half of
450 		 * the frame elapsed skip 2 frames otherwise just 1 frame.
451 		 * Starting descriptor index must be 8-aligned, so if the
452 		 * current frame is near to complete the next one is skipped as
453 		 * well.
454 		 */
455 		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
456 			*skip_frames = 2 * 8;
457 			frame = dwc2_frame_num_inc(hsotg->frame_number,
458 						   *skip_frames);
459 		} else {
460 			*skip_frames = 1 * 8;
461 			frame = dwc2_frame_num_inc(hsotg->frame_number,
462 						   *skip_frames);
463 		}
464 
465 		frame = dwc2_full_frame_num(frame);
466 	} else {
467 		/*
468 		 * Two frames are skipped for FS - the current and the next.
469 		 * But for descriptor programming, 1 frame (descriptor) is
470 		 * enough, see example above.
471 		 */
472 		*skip_frames = 1;
473 		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
474 	}
475 
476 	return frame;
477 }
478 
479 /*
480  * Calculate initial descriptor index for isochronous transfer based on
481  * scheduled frame
482  */
483 STATIC u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
484 					struct dwc2_qh *qh)
485 {
486 	u16 frame, fr_idx, fr_idx_tmp, skip_frames;
487 
488 	/*
489 	 * With current ISOC processing algorithm the channel is being released
490 	 * when no more QTDs in the list (qh->ntd == 0). Thus this function is
491 	 * called only when qh->ntd == 0 and qh->channel == 0.
492 	 *
493 	 * So qh->channel != NULL branch is not used and just not removed from
494 	 * the source file. It is required for another possible approach which
495 	 * is, do not disable and release the channel when ISOC session
496 	 * completed, just move QH to inactive schedule until new QTD arrives.
497 	 * On new QTD, the QH moved back to 'ready' schedule, starting frame and
498 	 * therefore starting desc_index are recalculated. In this case channel
499 	 * is released only on ep_disable.
500 	 */
501 
502 	/*
503 	 * Calculate starting descriptor index. For INTERRUPT endpoint it is
504 	 * always 0.
505 	 */
506 	if (qh->channel) {
507 		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
508 		/*
509 		 * Calculate initial descriptor index based on FrameList current
510 		 * bitmap and servicing period
511 		 */
512 		fr_idx_tmp = dwc2_frame_list_idx(frame);
513 		fr_idx = (FRLISTEN_64_SIZE +
514 			  dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
515 			 % dwc2_frame_incr_val(qh);
516 		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
517 	} else {
518 		qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
519 							   &skip_frames);
520 		fr_idx = dwc2_frame_list_idx(qh->sched_frame);
521 	}
522 
523 	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);
524 
525 	return skip_frames;
526 }
527 
528 #define ISOC_URB_GIVEBACK_ASAP
529 
530 #define MAX_ISOC_XFER_SIZE_FS	1023
531 #define MAX_ISOC_XFER_SIZE_HS	3072
532 #define DESCNUM_THRESHOLD	4
533 
534 STATIC void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
535 					 struct dwc2_qtd *qtd,
536 					 struct dwc2_qh *qh, u32 max_xfer_size,
537 					 u16 idx)
538 {
539 	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
540 	struct dwc2_hcd_iso_packet_desc *frame_desc;
541 
542 	memset(dma_desc, 0, sizeof(*dma_desc));
543 	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
544 
545 	if (frame_desc->length > max_xfer_size)
546 		qh->n_bytes[idx] = max_xfer_size;
547 	else
548 		qh->n_bytes[idx] = frame_desc->length;
549 
550 	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
551 	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
552 			   HOST_DMA_ISOC_NBYTES_MASK;
553 
554 	/* Set active bit */
555 	dma_desc->status |= HOST_DMA_A;
556 
557 	qh->ntd++;
558 	qtd->isoc_frame_index_last++;
559 
560 #ifdef ISOC_URB_GIVEBACK_ASAP
561 	/* Set IOC for each descriptor corresponding to last frame of URB */
562 	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
563 		dma_desc->status |= HOST_DMA_IOC;
564 #endif
565 
566 	usb_syncmem(&qh->desc_list_usbdma,
567 	    (idx * sizeof(struct dwc2_hcd_dma_desc)),
568 	    sizeof(struct dwc2_hcd_dma_desc),
569 	    BUS_DMASYNC_PREWRITE);
570 }
571 
572 STATIC void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
573 				    struct dwc2_qh *qh, u16 skip_frames)
574 {
575 	struct dwc2_qtd *qtd;
576 	u32 max_xfer_size;
577 	u16 idx, inc, n_desc = 0, ntd_max = 0;
578 	u16 cur_idx;
579 	u16 next_idx;
580 
581 	idx = qh->td_last;
582 	inc = qh->interval;
583 	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
584 	cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
585 	next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
586 
587 	/*
588 	 * Ensure current frame number didn't overstep last scheduled
589 	 * descriptor. If it happens, the only way to recover is to move
590 	 * qh->td_last to current frame number + 1.
591 	 * So that next isoc descriptor will be scheduled on frame number + 1
592 	 * and not on a past frame.
593 	 */
594 	if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
595 		if (inc < 32) {
596 			dev_vdbg(hsotg->dev,
597 				 "current frame number overstep last descriptor\n");
598 			qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
599 							    qh->dev_speed);
600 			idx = qh->td_last;
601 		}
602 	}
603 
604 	if (qh->interval) {
605 		ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
606 				qh->interval;
607 		if (skip_frames && !qh->channel)
608 			ntd_max -= skip_frames / qh->interval;
609 	}
610 
611 	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
612 			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;
613 
614 	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
615 		if (qtd->in_process &&
616 		    qtd->isoc_frame_index_last ==
617 		    qtd->urb->packet_count)
618 			continue;
619 
620 		qtd->isoc_td_first = idx;
621 		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
622 						qtd->urb->packet_count) {
623 			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
624 						     max_xfer_size, idx);
625 			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
626 			n_desc++;
627 		}
628 		qtd->isoc_td_last = idx;
629 		qtd->in_process = 1;
630 	}
631 
632 	qh->td_last = idx;
633 
634 #ifdef ISOC_URB_GIVEBACK_ASAP
635 	/* Set IOC for last descriptor if descriptor list is full */
636 	if (qh->ntd == ntd_max) {
637 		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
638 		qh->desc_list[idx].status |= HOST_DMA_IOC;
639 
640 		usb_syncmem(&qh->desc_list_usbdma,
641 		    (idx * sizeof(struct dwc2_hcd_dma_desc)),
642 		    sizeof(struct dwc2_hcd_dma_desc),
643 		    BUS_DMASYNC_PREWRITE);
644 	}
645 #else
646 	/*
647 	 * Set IOC bit only for one descriptor. Always try to be ahead of HW
648 	 * processing, i.e. on IOC generation driver activates next descriptor
649 	 * but core continues to process descriptors following the one with IOC
650 	 * set.
651 	 */
652 
653 	if (n_desc > DESCNUM_THRESHOLD)
654 		/*
655 		 * Move IOC "up". Required even if there is only one QTD
656 		 * in the list, because QTDs might continue to be queued,
657 		 * but during the activation it was only one queued.
658 		 * Actually more than one QTD might be in the list if this
659 		 * function called from XferCompletion - QTDs was queued during
660 		 * HW processing of the previous descriptor chunk.
661 		 */
662 		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
663 					    qh->dev_speed);
664 	else
665 		/*
666 		 * Set the IOC for the latest descriptor if either number of
667 		 * descriptors is not greater than threshold or no more new
668 		 * descriptors activated
669 		 */
670 		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
671 
672 	qh->desc_list[idx].status |= HOST_DMA_IOC;
673 	usb_syncmem(&qh->desc_list_usbdma,
674 	    (idx * sizeof(struct dwc2_hcd_dma_desc)),
675 	    sizeof(struct dwc2_hcd_dma_desc),
676 	    BUS_DMASYNC_PREWRITE);
677 #endif
678 }
679 
680 STATIC void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
681 				    struct dwc2_host_chan *chan,
682 				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
683 				    int n_desc)
684 {
685 	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
686 	int len = chan->xfer_len;
687 
688 	if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
689 		len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);
690 
691 	if (chan->ep_is_in) {
692 		int num_packets;
693 
694 		if (len > 0 && chan->max_packet)
695 			num_packets = (len + chan->max_packet - 1)
696 					/ chan->max_packet;
697 		else
698 			/* Need 1 packet for transfer length of 0 */
699 			num_packets = 1;
700 
701 		/* Always program an integral # of packets for IN transfers */
702 		len = num_packets * chan->max_packet;
703 	}
704 
705 	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
706 	qh->n_bytes[n_desc] = len;
707 
708 	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
709 	    qtd->control_phase == DWC2_CONTROL_SETUP)
710 		dma_desc->status |= HOST_DMA_SUP;
711 
712 	dma_desc->buf = (u32)chan->xfer_dma;
713 
714 	usb_syncmem(&qh->desc_list_usbdma,
715 	    (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
716 	    sizeof(struct dwc2_hcd_dma_desc),
717 	    BUS_DMASYNC_PREWRITE);
718 
719 	/*
720 	 * Last (or only) descriptor of IN transfer with actual size less
721 	 * than MaxPacket
722 	 */
723 	if (len > chan->xfer_len) {
724 		chan->xfer_len = 0;
725 	} else {
726 		chan->xfer_dma += len;		/* XXXNH safe */
727 		chan->xfer_len -= len;
728 	}
729 }
730 
731 STATIC void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
732 					struct dwc2_qh *qh)
733 {
734 	struct dwc2_qtd *qtd;
735 	struct dwc2_host_chan *chan = qh->channel;
736 	int n_desc = 0;
737 
738 	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
739 		 (unsigned long)chan->xfer_dma, chan->xfer_len);
740 
741 	/*
742 	 * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
743 	 * if SG transfer consists of multiple URBs, this pointer is re-assigned
744 	 * to the buffer of the currently processed QTD. For non-SG request
745 	 * there is always one QTD active.
746 	 */
747 
748 	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
749 		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);
750 
751 		if (n_desc) {
752 			/* SG request - more than 1 QTD */
753 			chan->xfer_dma = DMAADDR(qtd->urb->usbdma,
754 					qtd->urb->actual_length);
755 			chan->xfer_len = qtd->urb->length -
756 					qtd->urb->actual_length;
757 			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
758 				 (unsigned long)chan->xfer_dma, chan->xfer_len);
759 		}
760 
761 		qtd->n_desc = 0;
762 		do {
763 			if (n_desc > 1) {
764 				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
765 				dev_vdbg(hsotg->dev,
766 					 "set A bit in desc %d (%p)\n",
767 					 n_desc - 1,
768 					 &qh->desc_list[n_desc - 1]);
769 				usb_syncmem(&qh->desc_list_usbdma,
770 				    ((n_desc - 1) *
771 				    sizeof(struct dwc2_hcd_dma_desc)),
772 				    sizeof(struct dwc2_hcd_dma_desc),
773 				    BUS_DMASYNC_PREWRITE);
774 			}
775 			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
776 			dev_vdbg(hsotg->dev,
777 				 "desc %d (%p) buf=%08x status=%08x\n",
778 				 n_desc, &qh->desc_list[n_desc],
779 				 qh->desc_list[n_desc].buf,
780 				 qh->desc_list[n_desc].status);
781 			qtd->n_desc++;
782 			n_desc++;
783 		} while (chan->xfer_len > 0 &&
784 			 n_desc != MAX_DMA_DESC_NUM_GENERIC);
785 
786 		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
787 		qtd->in_process = 1;
788 		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
789 			break;
790 		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
791 			break;
792 	}
793 
794 	if (n_desc) {
795 		qh->desc_list[n_desc - 1].status |=
796 				HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
797 		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
798 			 n_desc - 1, &qh->desc_list[n_desc - 1]);
799 		usb_syncmem(&qh->desc_list_usbdma,
800 		    ((n_desc - 1) * sizeof(struct dwc2_hcd_dma_desc)),
801 		    sizeof(struct dwc2_hcd_dma_desc),
802 		    BUS_DMASYNC_PREWRITE);
803 		if (n_desc > 1) {
804 			qh->desc_list[0].status |= HOST_DMA_A;
805 			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
806 				 &qh->desc_list[0]);
807 			usb_syncmem(&qh->desc_list_usbdma, 0,
808 			    sizeof(struct dwc2_hcd_dma_desc),
809 			    BUS_DMASYNC_PREWRITE);
810 		}
811 		chan->ntd = n_desc;
812 	}
813 }
814 
815 /**
816  * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
817  *
818  * @hsotg: The HCD state structure for the DWC OTG controller
819  * @qh:    The QH to init
820  *
821  * Return: 0 if successful, negative error code otherwise
822  *
823  * For Control and Bulk endpoints, initializes descriptor list and starts the
824  * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
825  * list then updates FrameList, marking appropriate entries as active.
826  *
827  * For Isochronous endpoints the starting descriptor index is calculated based
828  * on the scheduled frame, but only on the first transfer descriptor within a
829  * session. Then the transfer is started via enabling the channel.
830  *
831  * For Isochronous endpoints the channel is not halted on XferComplete
832  * interrupt so remains assigned to the endpoint(QH) until session is done.
833  */
834 void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
835 {
836 	/* Channel is already assigned */
837 	struct dwc2_host_chan *chan = qh->channel;
838 	u16 skip_frames = 0;
839 
840 	switch (chan->ep_type) {
841 	case USB_ENDPOINT_XFER_CONTROL:
842 	case USB_ENDPOINT_XFER_BULK:
843 		dwc2_init_non_isoc_dma_desc(hsotg, qh);
844 		dwc2_hc_start_transfer_ddma(hsotg, chan);
845 		break;
846 	case USB_ENDPOINT_XFER_INT:
847 		dwc2_init_non_isoc_dma_desc(hsotg, qh);
848 		dwc2_update_frame_list(hsotg, qh, 1);
849 		dwc2_hc_start_transfer_ddma(hsotg, chan);
850 		break;
851 	case USB_ENDPOINT_XFER_ISOC:
852 		if (!qh->ntd)
853 			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
854 		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);
855 
856 		if (!chan->xfer_started) {
857 			dwc2_update_frame_list(hsotg, qh, 1);
858 
859 			/*
860 			 * Always set to max, instead of actual size. Otherwise
861 			 * ntd will be changed with channel being enabled. Not
862 			 * recommended.
863 			 */
864 			chan->ntd = dwc2_max_desc_num(qh);
865 
866 			/* Enable channel only once for ISOC */
867 			dwc2_hc_start_transfer_ddma(hsotg, chan);
868 		}
869 
870 		break;
871 	default:
872 		break;
873 	}
874 }
875 
876 #define DWC2_CMPL_DONE		1
877 #define DWC2_CMPL_STOP		2
878 
879 STATIC int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
880 					struct dwc2_host_chan *chan,
881 					struct dwc2_qtd *qtd,
882 					struct dwc2_qh *qh, u16 idx)
883 {
884 	struct dwc2_hcd_dma_desc *dma_desc;
885 	struct dwc2_hcd_iso_packet_desc *frame_desc;
886 	u16 remain = 0;
887 	int rc = 0;
888 
889 	if (!qtd->urb)
890 		return -EINVAL;
891 
892 	usb_syncmem(&qh->desc_list_usbdma,
893 	    (idx * sizeof(struct dwc2_hcd_dma_desc)),
894 	    sizeof(struct dwc2_hcd_dma_desc),
895 	    BUS_DMASYNC_POSTREAD);
896 
897 	dma_desc = &qh->desc_list[idx];
898 
899 	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
900 	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
901 	if (chan->ep_is_in)
902 		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
903 			 HOST_DMA_ISOC_NBYTES_SHIFT;
904 
905 	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
906 		/*
907 		 * XactError, or unable to complete all the transactions
908 		 * in the scheduled micro-frame/frame, both indicated by
909 		 * HOST_DMA_STS_PKTERR
910 		 */
911 		qtd->urb->error_count++;
912 		frame_desc->actual_length = qh->n_bytes[idx] - remain;
913 		frame_desc->status = -EPROTO;
914 	} else {
915 		/* Success */
916 		frame_desc->actual_length = qh->n_bytes[idx] - remain;
917 		frame_desc->status = 0;
918 	}
919 
920 	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
921 		/*
922 		 * urb->status is not used for isoc transfers here. The
923 		 * individual frame_desc status are used instead.
924 		 */
925 		dwc2_host_complete(hsotg, qtd, 0);
926 		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
927 
928 		/*
929 		 * This check is necessary because urb_dequeue can be called
930 		 * from urb complete callback (sound driver for example). All
931 		 * pending URBs are dequeued there, so no need for further
932 		 * processing.
933 		 */
934 		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
935 			return -1;
936 		rc = DWC2_CMPL_DONE;
937 	}
938 
939 	qh->ntd--;
940 
941 	/* Stop if IOC requested descriptor reached */
942 	if (dma_desc->status & HOST_DMA_IOC)
943 		rc = DWC2_CMPL_STOP;
944 
945 	return rc;
946 }
947 
948 STATIC void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
949 					 struct dwc2_host_chan *chan,
950 					 enum dwc2_halt_status halt_status)
951 {
952 	struct dwc2_hcd_iso_packet_desc *frame_desc;
953 	struct dwc2_qtd *qtd, *qtd_tmp;
954 	struct dwc2_qh *qh;
955 	u16 idx;
956 	int rc;
957 
958 	qh = chan->qh;
959 	idx = qh->td_first;
960 
961 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
962 		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
963 			qtd->in_process = 0;
964 		return;
965 	}
966 
967 	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
968 	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
969 		/*
970 		 * Channel is halted in these error cases, considered as serious
971 		 * issues.
972 		 * Complete all URBs marking all frames as failed, irrespective
973 		 * whether some of the descriptors (frames) succeeded or not.
974 		 * Pass error code to completion routine as well, to update
975 		 * urb->status, some of class drivers might use it to stop
976 		 * queing transfer requests.
977 		 */
978 		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
979 			  -EIO : -EOVERFLOW;
980 
981 		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
982 					 qtd_list_entry) {
983 			if (qtd->urb) {
984 				for (idx = 0; idx < qtd->urb->packet_count;
985 				     idx++) {
986 					frame_desc = &qtd->urb->iso_descs[idx];
987 					frame_desc->status = err;
988 				}
989 
990 				dwc2_host_complete(hsotg, qtd, err);
991 			}
992 
993 			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
994 		}
995 
996 		return;
997 	}
998 
999 	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
1000 		if (!qtd->in_process)
1001 			break;
1002 
1003 		/*
1004 		 * Ensure idx corresponds to descriptor where first urb of this
1005 		 * qtd was added. In fact, during isoc desc init, dwc2 may skip
1006 		 * an index if current frame number is already over this index.
1007 		 */
1008 		if (idx != qtd->isoc_td_first) {
1009 			dev_vdbg(hsotg->dev,
1010 				 "try to complete %d instead of %d\n",
1011 				 idx, qtd->isoc_td_first);
1012 			idx = qtd->isoc_td_first;
1013 		}
1014 
1015 		do {
1016 			struct dwc2_qtd *qtd_next;
1017 			u16 cur_idx;
1018 
1019 			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
1020 							  idx);
1021 			if (rc < 0)
1022 				return;
1023 			idx = dwc2_desclist_idx_inc(idx, qh->interval,
1024 						    chan->speed);
1025 			if (!rc)
1026 				continue;
1027 
1028 			if (rc == DWC2_CMPL_DONE)
1029 				break;
1030 
1031 			/* rc == DWC2_CMPL_STOP */
1032 
1033 			if (qh->interval >= 32)
1034 				goto stop_scan;
1035 
1036 			qh->td_first = idx;
1037 			cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
1038 			qtd_next = list_first_entry(&qh->qtd_list,
1039 						    struct dwc2_qtd,
1040 						    qtd_list_entry);
1041 			if (dwc2_frame_idx_num_gt(cur_idx,
1042 						  qtd_next->isoc_td_last))
1043 				break;
1044 
1045 			goto stop_scan;
1046 
1047 		} while (idx != qh->td_first);
1048 	}
1049 
1050 stop_scan:
1051 	qh->td_first = idx;
1052 }
1053 
1054 STATIC int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
1055 					struct dwc2_host_chan *chan,
1056 					struct dwc2_qtd *qtd,
1057 					struct dwc2_hcd_dma_desc *dma_desc,
1058 					enum dwc2_halt_status halt_status,
1059 					u32 n_bytes, int *xfer_done)
1060 {
1061 	struct dwc2_hcd_urb *urb = qtd->urb;
1062 	u16 remain = 0;
1063 
1064 	if (chan->ep_is_in)
1065 		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
1066 			 HOST_DMA_NBYTES_SHIFT;
1067 
1068 	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);
1069 
1070 	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
1071 		dev_err(hsotg->dev, "EIO\n");
1072 		urb->status = -EIO;
1073 		return 1;
1074 	}
1075 
1076 	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
1077 		switch (halt_status) {
1078 		case DWC2_HC_XFER_STALL:
1079 			dev_vdbg(hsotg->dev, "Stall\n");
1080 			urb->status = -EPIPE;
1081 			break;
1082 		case DWC2_HC_XFER_BABBLE_ERR:
1083 			dev_err(hsotg->dev, "Babble\n");
1084 			urb->status = -EOVERFLOW;
1085 			break;
1086 		case DWC2_HC_XFER_XACT_ERR:
1087 			dev_err(hsotg->dev, "XactErr\n");
1088 			urb->status = -EPROTO;
1089 			break;
1090 		default:
1091 			dev_err(hsotg->dev,
1092 				"%s: Unhandled descriptor error status (%d)\n",
1093 				__func__, halt_status);
1094 			break;
1095 		}
1096 		return 1;
1097 	}
1098 
1099 	if (dma_desc->status & HOST_DMA_A) {
1100 		dev_vdbg(hsotg->dev,
1101 			 "Active descriptor encountered on channel %d\n",
1102 			 chan->hc_num);
1103 		return 0;
1104 	}
1105 
1106 	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
1107 		if (qtd->control_phase == DWC2_CONTROL_DATA) {
1108 			urb->actual_length += n_bytes - remain;
1109 			if (remain || urb->actual_length >= urb->length) {
1110 				/*
1111 				 * For Control Data stage do not set urb->status
1112 				 * to 0, to prevent URB callback. Set it when
1113 				 * Status phase is done. See below.
1114 				 */
1115 				*xfer_done = 1;
1116 			}
1117 		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
1118 			urb->status = 0;
1119 			*xfer_done = 1;
1120 		}
1121 		/* No handling for SETUP stage */
1122 	} else {
1123 		/* BULK and INTR */
1124 		urb->actual_length += n_bytes - remain;
1125 		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
1126 			 urb->actual_length);
1127 		if (remain || urb->actual_length >= urb->length) {
1128 			urb->status = 0;
1129 			*xfer_done = 1;
1130 		}
1131 	}
1132 
1133 	return 0;
1134 }
1135 
1136 STATIC int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
1137 				      struct dwc2_host_chan *chan,
1138 				      int chnum, struct dwc2_qtd *qtd,
1139 				      int desc_num,
1140 				      enum dwc2_halt_status halt_status,
1141 				      int *xfer_done)
1142 {
1143 	struct dwc2_qh *qh = chan->qh;
1144 	struct dwc2_hcd_urb *urb = qtd->urb;
1145 	struct dwc2_hcd_dma_desc *dma_desc;
1146 	u32 n_bytes;
1147 	int failed;
1148 
1149 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
1150 
1151 	if (!urb)
1152 		return -EINVAL;
1153 
1154 	usb_syncmem(&qh->desc_list_usbdma,
1155 	    (desc_num * sizeof(struct dwc2_hcd_dma_desc)),
1156 	    sizeof(struct dwc2_hcd_dma_desc),
1157 	    BUS_DMASYNC_POSTREAD);
1158 
1159 	dma_desc = &qh->desc_list[desc_num];
1160 	n_bytes = qh->n_bytes[desc_num];
1161 	dev_vdbg(hsotg->dev,
1162 		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
1163 		 qtd, urb, desc_num, dma_desc, n_bytes);
1164 	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
1165 						     halt_status, n_bytes,
1166 						     xfer_done);
1167 	if (*xfer_done && urb->status != -EINPROGRESS)
1168 		failed = 1;
1169 
1170 	if (failed) {
1171 		dwc2_host_complete(hsotg, qtd, urb->status);
1172 		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1173 		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
1174 			 failed, *xfer_done, urb->status);
1175 		return failed;
1176 	}
1177 
1178 	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
1179 		switch (qtd->control_phase) {
1180 		case DWC2_CONTROL_SETUP:
1181 			if (urb->length > 0)
1182 				qtd->control_phase = DWC2_CONTROL_DATA;
1183 			else
1184 				qtd->control_phase = DWC2_CONTROL_STATUS;
1185 			dev_vdbg(hsotg->dev,
1186 				 "  Control setup transaction done\n");
1187 			break;
1188 		case DWC2_CONTROL_DATA:
1189 			if (*xfer_done) {
1190 				qtd->control_phase = DWC2_CONTROL_STATUS;
1191 				dev_vdbg(hsotg->dev,
1192 					 "  Control data transfer done\n");
1193 			} else if (desc_num + 1 == qtd->n_desc) {
1194 				/*
1195 				 * Last descriptor for Control data stage which
1196 				 * is not completed yet
1197 				 */
1198 				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1199 							  qtd);
1200 			}
1201 			break;
1202 		default:
1203 			break;
1204 		}
1205 	}
1206 
1207 	return 0;
1208 }
1209 
1210 STATIC void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1211 					     struct dwc2_host_chan *chan,
1212 					     int chnum,
1213 					     enum dwc2_halt_status halt_status)
1214 {
1215 	struct list_head *qtd_item, *qtd_tmp;
1216 	struct dwc2_qh *qh = chan->qh;
1217 	struct dwc2_qtd *qtd = NULL;
1218 	int xfer_done;
1219 	int desc_num = 0;
1220 
1221 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
1222 		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
1223 			qtd->in_process = 0;
1224 		return;
1225 	}
1226 
1227 	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
1228 		int i;
1229 
1230 		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
1231 		xfer_done = 0;
1232 
1233 		for (i = 0; i < qtd->n_desc; i++) {
1234 			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
1235 						       desc_num, halt_status,
1236 						       &xfer_done)) {
1237 				qtd = NULL;
1238 				break;
1239 			}
1240 			desc_num++;
1241 		}
1242 	}
1243 
1244 	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
1245 		/*
1246 		 * Resetting the data toggle for bulk and interrupt endpoints
1247 		 * in case of stall. See handle_hc_stall_intr().
1248 		 */
1249 		if (halt_status == DWC2_HC_XFER_STALL)
1250 			qh->data_toggle = DWC2_HC_PID_DATA0;
1251 		else if (qtd)
1252 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1253 	}
1254 
1255 	if (halt_status == DWC2_HC_XFER_COMPLETE) {
1256 		if (chan->hcint & HCINTMSK_NYET) {
1257 			/*
1258 			 * Got a NYET on the last transaction of the transfer.
1259 			 * It means that the endpoint should be in the PING
1260 			 * state at the beginning of the next transfer.
1261 			 */
1262 			qh->ping_state = 1;
1263 		}
1264 	}
1265 }
1266 
1267 /**
1268  * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
1269  * status and calls completion routine for the URB if it's done. Called from
1270  * interrupt handlers.
1271  *
1272  * @hsotg:       The HCD state structure for the DWC OTG controller
1273  * @chan:        Host channel the transfer is completed on
1274  * @chnum:       Index of Host channel registers
1275  * @halt_status: Reason the channel is being halted or just XferComplete
1276  *               for isochronous transfers
1277  *
1278  * Releases the channel to be used by other transfers.
1279  * In case of Isochronous endpoint the channel is not halted until the end of
1280  * the session, i.e. QTD list is empty.
1281  * If periodic channel released the FrameList is updated accordingly.
1282  * Calls transaction selection routines to activate pending transfers.
1283  */
1284 void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
1285 				 struct dwc2_host_chan *chan, int chnum,
1286 				 enum dwc2_halt_status halt_status)
1287 {
1288 	struct dwc2_qh *qh = chan->qh;
1289 	int continue_isoc_xfer = 0;
1290 	enum dwc2_transaction_type tr_type;
1291 
1292 	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1293 		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);
1294 
1295 		/* Release the channel if halted or session completed */
1296 		if (halt_status != DWC2_HC_XFER_COMPLETE ||
1297 		    list_empty(&qh->qtd_list)) {
1298 			struct dwc2_qtd *qtd, *qtd_tmp;
1299 
1300 			/*
1301 			 * Kill all remainings QTDs since channel has been
1302 			 * halted.
1303 			 */
1304 			list_for_each_entry_safe(qtd, qtd_tmp,
1305 						 &qh->qtd_list,
1306 						 qtd_list_entry) {
1307 				dwc2_host_complete(hsotg, qtd,
1308 						   -ECONNRESET);
1309 				dwc2_hcd_qtd_unlink_and_free(hsotg,
1310 							     qtd, qh);
1311 			}
1312 
1313 			/* Halt the channel if session completed */
1314 			if (halt_status == DWC2_HC_XFER_COMPLETE)
1315 				dwc2_hc_halt(hsotg, chan, halt_status);
1316 			dwc2_release_channel_ddma(hsotg, qh);
1317 			dwc2_hcd_qh_unlink(hsotg, qh);
1318 		} else {
1319 			/* Keep in assigned schedule to continue transfer */
1320 			list_move(&qh->qh_list_entry,
1321 				       &hsotg->periodic_sched_assigned);
1322 			/*
1323 			 * If channel has been halted during giveback of urb
1324 			 * then prevent any new scheduling.
1325 			 */
1326 			if (!chan->halt_status)
1327 				continue_isoc_xfer = 1;
1328 		}
1329 		/*
1330 		 * Todo: Consider the case when period exceeds FrameList size.
1331 		 * Frame Rollover interrupt should be used.
1332 		 */
1333 	} else {
1334 		/*
1335 		 * Scan descriptor list to complete the URB(s), then release
1336 		 * the channel
1337 		 */
1338 		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
1339 						 halt_status);
1340 		dwc2_release_channel_ddma(hsotg, qh);
1341 		dwc2_hcd_qh_unlink(hsotg, qh);
1342 
1343 		if (!list_empty(&qh->qtd_list)) {
1344 			/*
1345 			 * Add back to inactive non-periodic schedule on normal
1346 			 * completion
1347 			 */
1348 			dwc2_hcd_qh_add(hsotg, qh);
1349 		}
1350 	}
1351 
1352 	tr_type = dwc2_hcd_select_transactions(hsotg);
1353 	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
1354 		if (continue_isoc_xfer) {
1355 			if (tr_type == DWC2_TRANSACTION_NONE)
1356 				tr_type = DWC2_TRANSACTION_PERIODIC;
1357 			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
1358 				tr_type = DWC2_TRANSACTION_ALL;
1359 		}
1360 		dwc2_hcd_queue_transactions(hsotg, tr_type);
1361 	}
1362 }
1363