/*	$OpenBSD: dwc2_hcdddma.c,v 1.16 2020/03/21 12:08:31 patrick Exp $	*/
/*	$NetBSD: dwc2_hcdddma.c,v 1.6 2014/04/03 06:34:58 skrll Exp $	*/

/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#if 0
#include <sys/cpu.h>
#endif

#include <machine/bus.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <dev/usb/dwc2/dwc2.h>
#include <dev/usb/dwc2/dwc2var.h>

#include <dev/usb/dwc2/dwc2_core.h>
#include <dev/usb/dwc2/dwc2_hcd.h>

STATIC u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}

STATIC u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

STATIC u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

STATIC u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

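/*
 * For high-speed devices qh->interval is kept in microframes, while the
 * FrameList is walked one full frame at a time, so convert the interval
 * to frames, rounding up.
 */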
STATIC u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
	       (qh->interval + 8 - 1) / 8 : qh->interval;
}

STATIC int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	int err;

	//KASSERT(!cpu_intr_p() && !cpu_softintr_p());

	qh->desc_list = NULL;
	err = usb_allocmem(&hsotg->hsotg_sc->sc_bus,
	    sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh), 0,
	    USB_DMA_COHERENT, &qh->desc_list_usbdma);

	if (!err) {
		qh->desc_list = KERNADDR(&qh->desc_list_usbdma, 0);
		qh->desc_list_dma = DMAADDR(&qh->desc_list_usbdma, 0);
	}

	if (!qh->desc_list)
		return -ENOMEM;

	memset(qh->desc_list, 0,
	       sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh));

	qh->n_bytes = mallocarray(dwc2_max_desc_num(qh), sizeof(u32), M_DEVBUF,
	    M_ZERO | M_WAITOK);

	return 0;
}

STATIC void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	if (qh->desc_list) {
		usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->desc_list_usbdma);
		qh->desc_list = NULL;
	}

	free(qh->n_bytes, M_DEVBUF, sizeof(u32) * dwc2_max_desc_num(qh));
	qh->n_bytes = NULL;
}

STATIC int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	int err;

	if (hsotg->frame_list)
		return 0;

	/* XXXNH - struct pool */
	hsotg->frame_list = NULL;
	err = usb_allocmem(&hsotg->hsotg_sc->sc_bus, 4 * FRLISTEN_64_SIZE,
	    0, USB_DMA_COHERENT, &hsotg->frame_list_usbdma);

	if (!err) {
		hsotg->frame_list = KERNADDR(&hsotg->frame_list_usbdma, 0);
		hsotg->frame_list_dma = DMAADDR(&hsotg->frame_list_usbdma, 0);
	}

	if (!hsotg->frame_list)
		return -ENOMEM;

	memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE);
	return 0;
}

STATIC void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	struct usb_dma frame_list_usbdma;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	frame_list_usbdma = hsotg->frame_list_usbdma;
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	usb_freemem(&hsotg->hsotg_sc->sc_bus, &frame_list_usbdma);
}

STATIC void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = DWC2_READ_4(hsotg, HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	DWC2_WRITE_4(hsotg, HFLBADDR, hsotg->frame_list_dma);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	DWC2_WRITE_4(hsotg, HCFG, hcfg);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

STATIC void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = DWC2_READ_4(hsotg, HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	DWC2_WRITE_4(hsotg, HCFG, hcfg);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
STATIC void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		printf("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	if (!enable)
		return;

	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}

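/*
 * Hands the channel back: a periodic QH first has its FrameList bits cleared
 * so the core stops scheduling the channel, the channel is returned to the
 * free list, and the QH's descriptor list is zeroed so a reused QH never
 * presents stale descriptors to the controller.
 */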
STATIC void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->core_params->uframe_sched > 0)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
	}

	/*
	 * The condition is added to prevent a double cleanup attempt in case
	 * of device disconnect. See channel cleanup in dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (chan->in_freelist != 0)
			LIST_REMOVE(chan, hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		LIST_INSERT_HEAD(&hsotg->free_hc_list, chan, hc_list_entry);
		chan->qh = NULL;
		chan->in_freelist = 1;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
		       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to free
 *
 * Frees descriptor list memory associated with the QH. If QH is periodic and
 * the last, frees FrameList memory and disables periodic scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	dwc2_desc_list_free(hsotg, qh);

	/*
	 * Channel still assigned for some reason.
	 * Seen on Isoc URB dequeue. Channel halted but no subsequent
	 * ChHalted interrupt to release the channel. Afterwards
	 * when it comes here from the endpoint disable routine the
	 * channel remains assigned.
	 */
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->core_params->uframe_sched > 0 ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}

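/*
 * High-speed isochronous QHs use a set of 8 descriptors per frame (one per
 * microframe), so the descriptor index is the frame index, masked to the
 * number of 8-descriptor sets, times 8; other endpoints map frames directly
 * onto descriptor list slots.
 */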
STATIC u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}

/*
 * Determine starting frame for Isochronous transfer.
 * Few frames skipped to prevent race condition with HC.
 */
STATIC u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/* sched_frame is always frame number (not uFrame) both in FS and HS! */

	/*
	 * skip_frames is used to limit the number of activated descriptors
	 * to avoid the situation when the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since the HC always
	 * fetches the descriptor corresponding to curr_frame+1, the
	 * descriptor corresponding to frame 2 will be fetched. If the number
	 * of descriptors is max=64 (or greater) the list will be fully
	 * programmed with Active descriptors and it is possible (though rare)
	 * that the latest descriptor (considering rollback) corresponding to
	 * frame 2 will be serviced first. The HS case is more probable
	 * because, in fact, up to 11 uframes (16 in the code) may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider the uframe counter also, to start the xfer asap.
		 * If half of the frame has elapsed skip 2 frames, otherwise
		 * just 1 frame. The starting descriptor index must be
		 * 8-aligned, so if the current frame is near completion the
		 * next one is skipped as well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}

/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
STATIC u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With the current ISOC processing algorithm the channel is released
	 * when no more QTDs are in the list (qh->ntd == 0). Thus this
	 * function is called only when qh->ntd == 0 and qh->channel == 0.
	 *
	 * So the qh->channel != NULL branch is not used and just not removed
	 * from the source file. It is required for another possible approach
	 * which is, do not disable and release the channel when the ISOC
	 * session completed, just move the QH to the inactive schedule until
	 * a new QTD arrives. On a new QTD, the QH is moved back to the
	 * 'ready' schedule, and the starting frame and therefore starting
	 * desc_index are recalculated. In this case the channel is released
	 * only on ep_disable.
	 */

	/*
	 * Calculate starting descriptor index. For INTERRUPT endpoint it is
	 * always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList
		 * current bitmap and servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
			 % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
							   &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4

STATIC void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for each descriptor corresponding to last frame of URB */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif

	qh->ntd++;
	qtd->isoc_frame_index_last++;
}

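/*
 * Builds one descriptor per ISOC packet, placing consecutive packets
 * qh->interval slots apart in the circular descriptor list. ntd_max caps how
 * many descriptors may be outstanding so newly programmed entries cannot
 * wrap onto descriptors the controller has not serviced yet.
 */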
STATIC void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc, ntd_max = 0;

	idx = qh->td_last;
	inc = qh->interval;
	n_desc = 0;

	if (qh->interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
				qh->interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->interval;
	}

	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	TAILQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			if (n_desc > 1)
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;
	}
#else
	/*
	 * Set IOC bit only for one descriptor. Always try to be ahead of HW
	 * processing, i.e. on IOC generation the driver activates the next
	 * descriptor, but the core continues to process descriptors following
	 * the one with IOC set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * while only one was queued at activation time. Actually
		 * more than one QTD might be in the list if this function
		 * is called from XferCompletion - QTDs were queued during
		 * HW processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either the number
		 * of descriptors is not greater than the threshold or no more
		 * new descriptors were activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
#endif

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
		if (n_desc > 1)
			qh->desc_list[0].status |= HOST_DMA_A;
	}
}

STATIC void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

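	/*
	 * Clamp the length so that, even after an IN transfer is rounded up
	 * below to a whole number of max-packet-size packets, it still fits
	 * in the descriptor's byte count field (MAX_DMA_DESC_SIZE).
	 */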
	if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
		len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
	 */
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;	/* XXXNH safe */
		chan->xfer_len -= len;
	}
}

STATIC void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
	 * if an SG transfer consists of multiple URBs, this pointer is
	 * re-assigned to the buffer of the currently processed QTD. For a
	 * non-SG request there is always one QTD active.
	 */

	TAILQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = DMAADDR(qtd->urb->usbdma,
					qtd->urb->actual_length);
			chan->xfer_len = qtd->urb->length -
					qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma,
				 chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;
		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;
		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
			HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		if (n_desc > 1) {
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
		}
		chan->ntd = n_desc;
	}
}

/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 *
 * For Control and Bulk endpoints, initializes descriptor list and starts the
 * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
 * list then updates FrameList, marking appropriate entries as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session is
 * done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;
	u16 skip_frames = 0;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_update_frame_list(hsotg, qh, 1);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set to max, instead of actual size. Otherwise
			 * ntd will be changed with channel being enabled. Not
			 * recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}

		break;
	default:
		break;
	}
}

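/*
 * Return codes of dwc2_cmpl_host_isoc_dma_desc(): DWC2_CMPL_DONE means the
 * QTD's URB has been completed and given back, DWC2_CMPL_STOP means a
 * descriptor with the IOC bit set was reached and scanning should stop.
 */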
#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2

STATIC int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from the urb complete callback (the sound driver, for
		 * example). All pending URBs are dequeued there, so no need
		 * for further processing.
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}

STATIC void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		TAILQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * The channel is halted in these error cases, which are
		 * considered serious issues.
		 * Complete all URBs marking all frames as failed, irrespective
		 * of whether some of the descriptors (frames) succeeded or
		 * not. Pass the error code to the completion routine as well,
		 * to update urb->status; some class drivers might use it to
		 * stop queuing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		TAILQ_FOREACH_SAFE(qtd, &qh->qtd_list, qtd_list_entry, qtd_tmp) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}

				dwc2_host_complete(hsotg, qtd, err);
			}

			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}

		return;
	}

	TAILQ_FOREACH_SAFE(qtd, &qh->qtd_list, qtd_list_entry, qtd_tmp) {
		if (!qtd->in_process)
			break;
		do {
			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->interval,
						    chan->speed);
			if (rc == DWC2_CMPL_STOP)
				goto stop_scan;
			if (rc == DWC2_CMPL_DONE)
				break;
		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}

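/*
 * Updates urb->status and urb->actual_length from a completed non-ISOC
 * descriptor. Returns nonzero if the transfer must be treated as failed
 * (AHB error or a packet error reported in the descriptor status); returns 0
 * otherwise and sets *xfer_done when the URB has finished, i.e. all bytes
 * were transferred or a short packet was received.
 */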
STATIC int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					       struct dwc2_host_chan *chan,
					       struct dwc2_qtd *qtd,
					       struct dwc2_hcd_dma_desc *dma_desc,
					       enum dwc2_halt_status halt_status,
					       u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For Control Data stage do not set urb->status
				 * to 0, to prevent URB callback. Set it when
				 * Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}

STATIC int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];
	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);
	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
			 failed, *xfer_done, urb->status);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 " Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 " Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

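/*
 * Walks every QTD queued on the halted channel and processes its descriptors
 * in order, completing URBs as they finish; a failed descriptor ends the
 * scan of the QTD that owned it.
 */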
STATIC void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL, *qtd_tmp;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		TAILQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	TAILQ_FOREACH_SAFE(qtd, &qh->qtd_list, qtd_list_entry, qtd_tmp) {
		int i;

		xfer_done = 0;

		for (i = 0; i < qtd->n_desc; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done)) {
				qtd = NULL;
				break;
			}
			desc_num++;
		}
	}

	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else if (qtd)
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
 * status and calls completion routine for the URB if it's done. Called from
 * interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In case of an Isochronous endpoint the channel is not halted until the end
 * of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	int continue_isoc_xfer = 0;
	enum dwc2_transaction_type tr_type;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC2_HC_XFER_COMPLETE ||
		    TAILQ_EMPTY(&qh->qtd_list)) {
			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			TAILQ_REMOVE(&hsotg->periodic_sched_queued, qh, qh_list_entry);
			TAILQ_INSERT_TAIL(&hsotg->periodic_sched_assigned, qh, qh_list_entry);
			continue_isoc_xfer = 1;
		}
		/*
		 * Todo: Consider the case when period exceeds FrameList size.
		 * Frame Rollover interrupt should be used.
		 */
	} else {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		if (!TAILQ_EMPTY(&qh->qtd_list)) {
			/*
			 * Add back to inactive non-periodic schedule on normal
			 * completion
			 */
			dwc2_hcd_qh_add(hsotg, qh);
		}
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}