1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * EHCI Host Controller Driver (EHCI) 30 * 31 * The EHCI driver is a software driver which interfaces to the Universal 32 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to 33 * the Host Controller is defined by the EHCI Host Controller Interface. 34 * 35 * This module contains the main EHCI driver code which handles all USB 36 * transfers, bandwidth allocations and other general functionalities. 
37 */ 38 39 #include <sys/usb/hcd/ehci/ehcid.h> 40 #include <sys/usb/hcd/ehci/ehci_intr.h> 41 #include <sys/usb/hcd/ehci/ehci_util.h> 42 #include <sys/usb/hcd/ehci/ehci_isoch.h> 43 44 /* Adjustable variables for the size of the pools */ 45 extern int ehci_qh_pool_size; 46 extern int ehci_qtd_pool_size; 47 48 49 /* Endpoint Descriptor (QH) related functions */ 50 ehci_qh_t *ehci_alloc_qh( 51 ehci_state_t *ehcip, 52 usba_pipe_handle_data_t *ph, 53 uint_t flag); 54 static void ehci_unpack_endpoint( 55 ehci_state_t *ehcip, 56 usba_pipe_handle_data_t *ph, 57 ehci_qh_t *qh); 58 void ehci_insert_qh( 59 ehci_state_t *ehcip, 60 usba_pipe_handle_data_t *ph); 61 static void ehci_insert_async_qh( 62 ehci_state_t *ehcip, 63 ehci_pipe_private_t *pp); 64 static void ehci_insert_intr_qh( 65 ehci_state_t *ehcip, 66 ehci_pipe_private_t *pp); 67 static void ehci_modify_qh_status_bit( 68 ehci_state_t *ehcip, 69 ehci_pipe_private_t *pp, 70 halt_bit_t action); 71 static void ehci_halt_hs_qh( 72 ehci_state_t *ehcip, 73 ehci_pipe_private_t *pp, 74 ehci_qh_t *qh); 75 static void ehci_halt_fls_ctrl_and_bulk_qh( 76 ehci_state_t *ehcip, 77 ehci_pipe_private_t *pp, 78 ehci_qh_t *qh); 79 static void ehci_clear_tt_buffer( 80 ehci_state_t *ehcip, 81 usba_pipe_handle_data_t *ph, 82 ehci_qh_t *qh); 83 static void ehci_halt_fls_intr_qh( 84 ehci_state_t *ehcip, 85 ehci_qh_t *qh); 86 void ehci_remove_qh( 87 ehci_state_t *ehcip, 88 ehci_pipe_private_t *pp, 89 boolean_t reclaim); 90 static void ehci_remove_async_qh( 91 ehci_state_t *ehcip, 92 ehci_pipe_private_t *pp, 93 boolean_t reclaim); 94 static void ehci_remove_intr_qh( 95 ehci_state_t *ehcip, 96 ehci_pipe_private_t *pp, 97 boolean_t reclaim); 98 static void ehci_insert_qh_on_reclaim_list( 99 ehci_state_t *ehcip, 100 ehci_pipe_private_t *pp); 101 void ehci_deallocate_qh( 102 ehci_state_t *ehcip, 103 ehci_qh_t *old_qh); 104 uint32_t ehci_qh_cpu_to_iommu( 105 ehci_state_t *ehcip, 106 ehci_qh_t *addr); 107 ehci_qh_t *ehci_qh_iommu_to_cpu( 108 
ehci_state_t *ehcip, 109 uintptr_t addr); 110 111 /* Transfer Descriptor (QTD) related functions */ 112 static int ehci_initialize_dummy( 113 ehci_state_t *ehcip, 114 ehci_qh_t *qh); 115 ehci_trans_wrapper_t *ehci_allocate_ctrl_resources( 116 ehci_state_t *ehcip, 117 ehci_pipe_private_t *pp, 118 usb_ctrl_req_t *ctrl_reqp, 119 usb_flags_t usb_flags); 120 void ehci_insert_ctrl_req( 121 ehci_state_t *ehcip, 122 usba_pipe_handle_data_t *ph, 123 usb_ctrl_req_t *ctrl_reqp, 124 ehci_trans_wrapper_t *tw, 125 usb_flags_t usb_flags); 126 ehci_trans_wrapper_t *ehci_allocate_bulk_resources( 127 ehci_state_t *ehcip, 128 ehci_pipe_private_t *pp, 129 usb_bulk_req_t *bulk_reqp, 130 usb_flags_t usb_flags); 131 void ehci_insert_bulk_req( 132 ehci_state_t *ehcip, 133 usba_pipe_handle_data_t *ph, 134 usb_bulk_req_t *bulk_reqp, 135 ehci_trans_wrapper_t *tw, 136 usb_flags_t flags); 137 int ehci_start_periodic_pipe_polling( 138 ehci_state_t *ehcip, 139 usba_pipe_handle_data_t *ph, 140 usb_opaque_t periodic_in_reqp, 141 usb_flags_t flags); 142 static int ehci_start_pipe_polling( 143 ehci_state_t *ehcip, 144 usba_pipe_handle_data_t *ph, 145 usb_flags_t flags); 146 static int ehci_start_intr_polling( 147 ehci_state_t *ehcip, 148 usba_pipe_handle_data_t *ph, 149 usb_flags_t flags); 150 static void ehci_set_periodic_pipe_polling( 151 ehci_state_t *ehcip, 152 usba_pipe_handle_data_t *ph); 153 ehci_trans_wrapper_t *ehci_allocate_intr_resources( 154 ehci_state_t *ehcip, 155 usba_pipe_handle_data_t *ph, 156 usb_intr_req_t *intr_reqp, 157 usb_flags_t usb_flags); 158 void ehci_insert_intr_req( 159 ehci_state_t *ehcip, 160 ehci_pipe_private_t *pp, 161 ehci_trans_wrapper_t *tw, 162 usb_flags_t flags); 163 int ehci_stop_periodic_pipe_polling( 164 ehci_state_t *ehcip, 165 usba_pipe_handle_data_t *ph, 166 usb_flags_t flags); 167 int ehci_insert_qtd( 168 ehci_state_t *ehcip, 169 uint32_t qtd_ctrl, 170 size_t qtd_dma_offs, 171 size_t qtd_length, 172 uint32_t qtd_ctrl_phase, 173 ehci_pipe_private_t *pp, 
174 ehci_trans_wrapper_t *tw); 175 static ehci_qtd_t *ehci_allocate_qtd_from_pool( 176 ehci_state_t *ehcip); 177 static void ehci_fill_in_qtd( 178 ehci_state_t *ehcip, 179 ehci_qtd_t *qtd, 180 uint32_t qtd_ctrl, 181 size_t qtd_dma_offs, 182 size_t qtd_length, 183 uint32_t qtd_ctrl_phase, 184 ehci_pipe_private_t *pp, 185 ehci_trans_wrapper_t *tw); 186 static void ehci_insert_qtd_on_tw( 187 ehci_state_t *ehcip, 188 ehci_trans_wrapper_t *tw, 189 ehci_qtd_t *qtd); 190 static void ehci_insert_qtd_into_active_qtd_list( 191 ehci_state_t *ehcip, 192 ehci_qtd_t *curr_qtd); 193 void ehci_remove_qtd_from_active_qtd_list( 194 ehci_state_t *ehcip, 195 ehci_qtd_t *curr_qtd); 196 static void ehci_traverse_qtds( 197 ehci_state_t *ehcip, 198 usba_pipe_handle_data_t *ph); 199 void ehci_deallocate_qtd( 200 ehci_state_t *ehcip, 201 ehci_qtd_t *old_qtd); 202 uint32_t ehci_qtd_cpu_to_iommu( 203 ehci_state_t *ehcip, 204 ehci_qtd_t *addr); 205 ehci_qtd_t *ehci_qtd_iommu_to_cpu( 206 ehci_state_t *ehcip, 207 uintptr_t addr); 208 209 /* Transfer Wrapper (TW) functions */ 210 static ehci_trans_wrapper_t *ehci_create_transfer_wrapper( 211 ehci_state_t *ehcip, 212 ehci_pipe_private_t *pp, 213 size_t length, 214 uint_t usb_flags); 215 int ehci_allocate_tds_for_tw( 216 ehci_state_t *ehcip, 217 ehci_pipe_private_t *pp, 218 ehci_trans_wrapper_t *tw, 219 size_t qtd_count); 220 static ehci_trans_wrapper_t *ehci_allocate_tw_resources( 221 ehci_state_t *ehcip, 222 ehci_pipe_private_t *pp, 223 size_t length, 224 usb_flags_t usb_flags, 225 size_t td_count); 226 static void ehci_free_tw_td_resources( 227 ehci_state_t *ehcip, 228 ehci_trans_wrapper_t *tw); 229 static void ehci_start_xfer_timer( 230 ehci_state_t *ehcip, 231 ehci_pipe_private_t *pp, 232 ehci_trans_wrapper_t *tw); 233 void ehci_stop_xfer_timer( 234 ehci_state_t *ehcip, 235 ehci_trans_wrapper_t *tw, 236 uint_t flag); 237 static void ehci_xfer_timeout_handler(void *arg); 238 static void ehci_remove_tw_from_timeout_list( 239 ehci_state_t *ehcip, 
240 ehci_trans_wrapper_t *tw); 241 static void ehci_start_timer(ehci_state_t *ehcip, 242 ehci_pipe_private_t *pp); 243 void ehci_deallocate_tw( 244 ehci_state_t *ehcip, 245 ehci_pipe_private_t *pp, 246 ehci_trans_wrapper_t *tw); 247 void ehci_free_dma_resources( 248 ehci_state_t *ehcip, 249 usba_pipe_handle_data_t *ph); 250 static void ehci_free_tw( 251 ehci_state_t *ehcip, 252 ehci_pipe_private_t *pp, 253 ehci_trans_wrapper_t *tw); 254 255 /* Miscellaneous functions */ 256 int ehci_allocate_intr_in_resource( 257 ehci_state_t *ehcip, 258 ehci_pipe_private_t *pp, 259 ehci_trans_wrapper_t *tw, 260 usb_flags_t flags); 261 void ehci_pipe_cleanup( 262 ehci_state_t *ehcip, 263 usba_pipe_handle_data_t *ph); 264 static void ehci_wait_for_transfers_completion( 265 ehci_state_t *ehcip, 266 ehci_pipe_private_t *pp); 267 void ehci_check_for_transfers_completion( 268 ehci_state_t *ehcip, 269 ehci_pipe_private_t *pp); 270 static void ehci_save_data_toggle( 271 ehci_state_t *ehcip, 272 usba_pipe_handle_data_t *ph); 273 void ehci_restore_data_toggle( 274 ehci_state_t *ehcip, 275 usba_pipe_handle_data_t *ph); 276 void ehci_handle_outstanding_requests( 277 ehci_state_t *ehcip, 278 ehci_pipe_private_t *pp); 279 void ehci_deallocate_intr_in_resource( 280 ehci_state_t *ehcip, 281 ehci_pipe_private_t *pp, 282 ehci_trans_wrapper_t *tw); 283 void ehci_do_client_periodic_in_req_callback( 284 ehci_state_t *ehcip, 285 ehci_pipe_private_t *pp, 286 usb_cr_t completion_reason); 287 void ehci_hcdi_callback( 288 usba_pipe_handle_data_t *ph, 289 ehci_trans_wrapper_t *tw, 290 usb_cr_t completion_reason); 291 292 293 /* 294 * Endpoint Descriptor (QH) manipulations functions 295 */ 296 297 /* 298 * ehci_alloc_qh: 299 * 300 * Allocate an endpoint descriptor (QH) 301 * 302 * NOTE: This function is also called from POLLED MODE. 
 */
ehci_qh_t *
ehci_alloc_qh(
    ehci_state_t *ehcip,
    usba_pipe_handle_data_t *ph,
    uint_t flag)
{
    int i, state;
    ehci_qh_t *qh;

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
        "ehci_alloc_qh: ph = 0x%p flag = 0x%x", (void *)ph, flag);

    /* Caller must hold the interrupt mutex (true in POLLED MODE as well). */
    ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

    /*
     * Isochronous endpoints never get a QH: isoch transfers use ITDs
     * placed directly onto the Periodic Frame List, so return NULL.
     * ph may be NULL when a static (lattice/polled) QH is requested.
     */
    if (ph) {
        if (EHCI_ISOC_ENDPOINT((&ph->p_ep))) {

            return (NULL);
        }
    }

    /*
     * The first EHCI_NUM_STATIC_NODES entries in the QH buffer pool are
     * reserved for building the interrupt lattice tree.  Scan the rest
     * of the pool for a descriptor marked EHCI_QH_FREE.
     */
    for (i = EHCI_NUM_STATIC_NODES; i < ehci_qh_pool_size; i ++) {
        state = Get_QH(ehcip->ehci_qh_pool_addr[i].qh_state);

        if (state == EHCI_QH_FREE) {
            break;
        }
    }

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
        "ehci_alloc_qh: Allocated %d", i);

    if (i == ehci_qh_pool_size) {
        /* Pool exhausted: no free QH was found. */
        USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
            "ehci_alloc_qh: QH exhausted");

        return (NULL);
    } else {
        qh = &ehcip->ehci_qh_pool_addr[i];

        USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
            "ehci_alloc_qh: Allocated address 0x%p", (void *)qh);

        /*
         * In polled mode, pre-mark the QH link as terminated and the
         * endpoint as inactivated before handing it out.
         */
        if (flag == EHCI_POLLED_MODE_FLAG) {
            Set_QH(qh->qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
            Set_QH(qh->qh_ctrl, EHCI_QH_CTRL_ED_INACTIVATE);
        }

        if (ph) {
            /*
             * A normal pipe QH needs a dummy QTD hung off it;
             * if that allocation fails, scrub the QH and put it
             * back in the free pool.
             */
            if ((ehci_initialize_dummy(ehcip,
                qh)) == USB_NO_RESOURCES) {

                bzero((void *)qh, sizeof (ehci_qh_t));
                Set_QH(qh->qh_state, EHCI_QH_FREE);

                return (NULL);
            }

            /* Fill in the QH control fields from the pipe handle */
            ehci_unpack_endpoint(ehcip, ph, qh);

            Set_QH(qh->qh_curr_qtd, NULL);
            Set_QH(qh->qh_alt_next_qtd,
                EHCI_QH_ALT_NEXT_QTD_PTR_VALID);

            /* Change QH's state to Active */
            Set_QH(qh->qh_state, EHCI_QH_ACTIVE);
        } else {
            /* Static (lattice) QHs are kept permanently halted */
            Set_QH(qh->qh_status, EHCI_QH_STS_HALTED);

            /* Change QH's state to Static */
            Set_QH(qh->qh_state, EHCI_QH_STATIC);
        }

        ehci_print_qh(ehcip, qh);

        return (qh);
    }
}


/*
 * ehci_unpack_endpoint:
 *
 * Unpack the information in the pipe handle and build the control,
 * split-control and status words of the Host Controller's (HC)
 * Endpoint Descriptor (QH).
 */
static void
ehci_unpack_endpoint(
    ehci_state_t *ehcip,
    usba_pipe_handle_data_t *ph,
    ehci_qh_t *qh)
{
    usb_ep_descr_t *endpoint = &ph->p_ep;
    uint_t maxpacketsize, addr, xactions;
    uint_t ctrl = 0, status = 0, split_ctrl = 0;
    usb_port_status_t usb_port_status;
    usba_device_t *usba_device = ph->p_usba_device;
    ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
        "ehci_unpack_endpoint:");

    /* Snapshot device address and port speed under the device mutex */
    mutex_enter(&usba_device->usb_mutex);
    ctrl = usba_device->usb_addr;
    usb_port_status = usba_device->usb_port_status;
    mutex_exit(&usba_device->usb_mutex);

    addr = endpoint->bEndpointAddress;

    /* Assign the endpoint's number within the QH control word */
    ctrl |= ((addr & USB_EP_NUM_MASK) << EHCI_QH_CTRL_ED_NUMBER_SHIFT);

    /* Assign the endpoint speed */
    switch (usb_port_status) {
    case USBA_LOW_SPEED_DEV:
        ctrl |= EHCI_QH_CTRL_ED_LOW_SPEED;
        break;
    case USBA_FULL_SPEED_DEV:
        ctrl |= EHCI_QH_CTRL_ED_FULL_SPEED;
        break;
    case USBA_HIGH_SPEED_DEV:
        ctrl |= EHCI_QH_CTRL_ED_HIGH_SPEED;
        break;
    }

    switch (endpoint->bmAttributes & USB_EP_ATTR_MASK) {
    case USB_EP_ATTR_CONTROL:
        /* Control endpoints take their data toggle from the QTD */
        ctrl |= EHCI_QH_CTRL_DATA_TOGGLE;

        /* Non-high-speed control endpoints need the control-ep flag */
        if (usb_port_status != USBA_HIGH_SPEED_DEV) {
            ctrl |= EHCI_QH_CTRL_CONTROL_ED_FLAG;
        }
        /* FALLTHRU */
    case USB_EP_ATTR_BULK:
        /* Maximum NAK counter for control/bulk */
        ctrl |=
            EHCI_QH_CTRL_MAX_NC;

        if (usb_port_status == USBA_HIGH_SPEED_DEV) {
            /*
             * Perform ping before executing control
             * and bulk transactions.
             */
            status = EHCI_QH_STS_DO_PING;
        }
        break;
    case USB_EP_ATTR_INTR:
        /* Start-split mask chosen when the pipe was opened */
        split_ctrl = (pp->pp_smask & EHCI_QH_SPLIT_CTRL_INTR_MASK);

        /*
         * Complete-split mask applies only to low/full speed
         * devices behind a high-speed hub's TT.
         */
        if (usb_port_status != USBA_HIGH_SPEED_DEV) {
            split_ctrl |= ((pp->pp_cmask <<
                EHCI_QH_SPLIT_CTRL_COMP_SHIFT) &
                EHCI_QH_SPLIT_CTRL_COMP_MASK);
        }
        break;
    }

    /* Max transactions per microframe, from wMaxPacketSize bits 12:11 */
    xactions = (endpoint->wMaxPacketSize &
        USB_EP_MAX_XACTS_MASK) >> USB_EP_MAX_XACTS_SHIFT;

    switch (xactions) {
    case 0:
        split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
        break;
    case 1:
        split_ctrl |= EHCI_QH_SPLIT_CTRL_2_XACTS;
        break;
    case 2:
        split_ctrl |= EHCI_QH_SPLIT_CTRL_3_XACTS;
        break;
    default:
        /* Reserved encoding: fall back to one transaction */
        split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
        break;
    }

    /*
     * For low/full speed devices, program the high speed hub
     * address and port number so the HC can issue split transactions.
     */
    if (usb_port_status != USBA_HIGH_SPEED_DEV) {
        mutex_enter(&usba_device->usb_mutex);
        split_ctrl |= ((usba_device->usb_hs_hub_addr
            << EHCI_QH_SPLIT_CTRL_HUB_ADDR_SHIFT) &
            EHCI_QH_SPLIT_CTRL_HUB_ADDR);

        split_ctrl |= ((usba_device->usb_hs_hub_port
            << EHCI_QH_SPLIT_CTRL_HUB_PORT_SHIFT) &
            EHCI_QH_SPLIT_CTRL_HUB_PORT);

        mutex_exit(&usba_device->usb_mutex);

        /* Begin in the start-split transaction state */
        status = EHCI_QH_STS_DO_START_SPLIT;
    }

    /* Assign the endpoint's max packet size */
    maxpacketsize = endpoint->wMaxPacketSize & USB_EP_MAX_PKTSZ_MASK;
    maxpacketsize = maxpacketsize << EHCI_QH_CTRL_MAXPKTSZ_SHIFT;
    ctrl |= (maxpacketsize & EHCI_QH_CTRL_MAXPKTSZ);

    /* Commit the assembled words to the DMA-visible QH */
    Set_QH(qh->qh_ctrl, ctrl);
    Set_QH(qh->qh_split_ctrl, split_ctrl);
    Set_QH(qh->qh_status, status);
}


/*
 * ehci_insert_qh:
 *
 * Add the Endpoint Descriptor (QH) into the Host Controller's (HC)
 * appropriate endpoint list and bump the matching open-pipe count.
 * Caller must hold ehci_int_mutex.
 */
void
ehci_insert_qh(
    ehci_state_t *ehcip,
    usba_pipe_handle_data_t *ph)
{
    ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
        "ehci_insert_qh: qh=0x%p", pp->pp_qh);

    ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

    switch (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK) {
    case USB_EP_ATTR_CONTROL:
    case USB_EP_ATTR_BULK:
        ehci_insert_async_qh(ehcip, pp);
        ehcip->ehci_open_async_count++;
        break;
    case USB_EP_ATTR_INTR:
        ehci_insert_intr_qh(ehcip, pp);
        ehcip->ehci_open_periodic_count++;
        break;
    case USB_EP_ATTR_ISOCH:
        /* ISOCH does not use QH, don't do anything but update count */
        ehcip->ehci_open_periodic_count++;
        break;
    }
    /* Enable/disable the async and periodic schedules as needed */
    ehci_toggle_scheduler(ehcip);
}


/*
 * ehci_insert_async_qh:
 *
 * Insert a control/bulk endpoint into the Host Controller's (HC)
 * Asynchronous schedule endpoint list.
 */
static void
ehci_insert_async_qh(
    ehci_state_t *ehcip,
    ehci_pipe_private_t *pp)
{
    ehci_qh_t *qh = pp->pp_qh;
    ehci_qh_t *async_head_qh;
    ehci_qh_t *next_qh;
    uintptr_t qh_addr;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
        "ehci_insert_async_qh:");

    ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

    /* Make sure this QH is not already in the list */
    ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);

    qh_addr = ehci_qh_cpu_to_iommu(ehcip, qh);

    /* Obtain a ptr to the head of the Async schedule list */
    async_head_qh = ehcip->ehci_head_of_async_sched_list;

    if (async_head_qh == NULL) {
        /*
         * Empty schedule: this QH becomes the "head" of the
         * circular list (reclaim-head "H" bit set), linked to
         * itself in both directions.
         */
        Set_QH(qh->qh_ctrl,
            (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_RECLAIM_HEAD));

        /* Set new QH's link and previous pointer to itself */
        Set_QH(qh->qh_link_ptr, qh_addr | EHCI_QH_LINK_REF_QH);
        Set_QH(qh->qh_prev, qh_addr);

        ehcip->ehci_head_of_async_sched_list = qh;

        /* Point the controller's ASYNCLISTADDR at the new head */
        Set_OpReg(ehci_async_list_addr, qh_addr);

        /*
         * For some reason this register might get nulled out by
         * the ULi M1575 South Bridge.  To work around the hardware
         * problem, check the value after the write and retry if
         * the last write failed.
         *
         * If the ASYNCLISTADDR remains "stuck" after
         * EHCI_MAX_RETRY retries, then the M1575 is broken,
         * stuck in an inconsistent state, and about to crash
         * the machine with a trn_oor panic when it does a DMA
         * read from 0x0.  It is better to panic now with a clean
         * signature that indicts the M1575 chip than to wait for
         * the mysterious, hard-to-diagnose trn_oor panic.
         */
        if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
            (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
            (qh_addr != Get_OpReg(ehci_async_list_addr))) {
            int retry = 0;

            Set_OpRegRetry(ehci_async_list_addr, qh_addr, retry);
            if (retry >= EHCI_MAX_RETRY)
                cmn_err(CE_PANIC, "ehci_insert_async_qh:"
                    " ASYNCLISTADDR write failed.");

            USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "ehci_insert_async_qh: ASYNCLISTADDR "
                "write failed, retry=%d", retry);
        }
    } else {
        /* The existing head must carry the reclaim-head bit */
        ASSERT(Get_QH(async_head_qh->qh_ctrl) &
            EHCI_QH_CTRL_RECLAIM_HEAD);

        /* Ensure this QH's "H" bit is not set */
        Set_QH(qh->qh_ctrl,
            (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_RECLAIM_HEAD));

        next_qh = ehci_qh_iommu_to_cpu(ehcip,
            Get_QH(async_head_qh->qh_link_ptr) & EHCI_QH_LINK_PTR);

        /*
         * Splice the new QH in right after the head: forward link
         * and prev pointer are set on the new QH first, then the
         * neighbors are updated, and the head's link pointer is
         * written last so the HC never sees a half-linked QH.
         */
        Set_QH(qh->qh_link_ptr,
            Get_QH(async_head_qh->qh_link_ptr) | EHCI_QH_LINK_REF_QH);
        Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, async_head_qh));

        /* Set next QH's prev pointer */
        Set_QH(next_qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, qh));

        /* Set QH Head's link pointer to point to the new QH */
        Set_QH(async_head_qh->qh_link_ptr,
            qh_addr | EHCI_QH_LINK_REF_QH);
    }
}


/*
 * ehci_insert_intr_qh:
 *
 * Insert a interrupt endpoint into the Host Controller's (HC) interrupt
 * lattice tree.
 */
static void
ehci_insert_intr_qh(
    ehci_state_t *ehcip,
    ehci_pipe_private_t *pp)
{
    ehci_qh_t *qh = pp->pp_qh;
    ehci_qh_t *next_lattice_qh, *lattice_qh;
    uint_t hnode;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
        "ehci_insert_intr_qh:");

    ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

    /* Make sure this QH is not already in the list */
    ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);

    /*
     * The appropriate high speed lattice node was chosen
     * during the opening of the pipe (pp_pnode).
     */
    hnode = pp->pp_pnode;

    /* Find the lattice endpoint to hang this QH off of */
    lattice_qh = &ehcip->ehci_qh_pool_addr[hnode];

    /* Find the endpoint currently following the lattice node */
    next_lattice_qh = ehci_qh_iommu_to_cpu(
        ehcip, (Get_QH(lattice_qh->qh_link_ptr) & EHCI_QH_LINK_PTR));

    /* Point the new QH's prev at the lattice node */
    Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, lattice_qh));

    if (next_lattice_qh) {
        /* Link the new QH to the next one in the lattice */
        Set_QH(qh->qh_link_ptr, Get_QH(lattice_qh->qh_link_ptr));

        /*
         * Update the next QH's prev pointer; static lattice nodes
         * keep their prev untouched.
         */
        if (Get_QH(next_lattice_qh->qh_state) != EHCI_QH_STATIC) {
            Set_QH(next_lattice_qh->qh_prev,
                ehci_qh_cpu_to_iommu(ehcip, qh));
        }
    } else {
        /* No successor: terminate the periodic list at this QH */
        Set_QH(qh->qh_link_ptr,
            (Get_QH(lattice_qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
    }

    /*
     * Insert this endpoint into the lattice last, so the HC only
     * sees the QH once it is fully linked.
     */
    Set_QH(lattice_qh->qh_link_ptr,
        (ehci_qh_cpu_to_iommu(ehcip, qh) | EHCI_QH_LINK_REF_QH));
}


/*
 * ehci_modify_qh_status_bit:
 *
 * Modify the halt bit on the Host Controller (HC) Endpoint Descriptor (QH).
 *
 * If several threads try to halt the same pipe, they will need to wait on
 * a condition variable. Only one thread is allowed to halt or unhalt the
 * pipe at a time.
 *
 * Usually after a halt pipe, an unhalt pipe will follow soon after. There
 * is an assumption that an Unhalt pipe will never occur without a halt pipe.
 */
static void
ehci_modify_qh_status_bit(
    ehci_state_t *ehcip,
    ehci_pipe_private_t *pp,
    halt_bit_t action)
{
    ehci_qh_t *qh = pp->pp_qh;
    uint_t smask, eps, split_intr_qh;
    uint_t status;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
        "ehci_modify_qh_status_bit: action=0x%x qh=0x%p",
        action, qh);

    ehci_print_qh(ehcip, qh);

    ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

    /*
     * If this pipe is in the middle of halting, don't allow another
     * thread to come in and modify the same pipe.  cv_wait drops and
     * reacquires ehci_int_mutex.
     */
    while (pp->pp_halt_state & EHCI_HALT_STATE_HALTING) {

        cv_wait(&pp->pp_halt_cmpl_cv,
            &ehcip->ehci_int_mutex);
    }

    /* Sync the QH QTD pool to get up to date information */
    Sync_QH_QTD_Pool(ehcip);


    if (action == CLEAR_HALT) {
        /*
         * If the halt bit is to be cleared, just clear it;
         * there shouldn't be any race condition problems.
         * If the host controller reads the bit before the
         * driver has a chance to set the bit, the bit will
         * be reread on the next frame.
         */
        Set_QH(qh->qh_ctrl,
            (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_ED_INACTIVATE));
        Set_QH(qh->qh_status,
            Get_QH(qh->qh_status) & ~(EHCI_QH_STS_XACT_STATUS));

        goto success;
    }

    /* Halt the QH, but first check to see if it is already halted */
    status = Get_QH(qh->qh_status);
    if (!(status & EHCI_QH_STS_HALTED)) {
        /* Indicate that this pipe is in the middle of halting. */
        pp->pp_halt_state |= EHCI_HALT_STATE_HALTING;

        /*
         * Find out if this is a full/low speed interrupt endpoint:
         * a non-zero start-split mask indicates an interrupt QH,
         * and the endpoint speed field tells us FULL/LOW vs HIGH.
         */
        smask = Get_QH(qh->qh_split_ctrl) &
            EHCI_QH_SPLIT_CTRL_INTR_MASK;
        eps = Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_SPEED;
        split_intr_qh = ((smask != 0) &&
            (eps != EHCI_QH_CTRL_ED_HIGH_SPEED));

        /* Dispatch to the speed/type-specific halt routine */
        if (eps == EHCI_QH_CTRL_ED_HIGH_SPEED) {
            ehci_halt_hs_qh(ehcip, pp, qh);
        } else {
            if (split_intr_qh) {
                ehci_halt_fls_intr_qh(ehcip, qh);
            } else {
                ehci_halt_fls_ctrl_and_bulk_qh(ehcip, pp, qh);
            }
        }

        /* Indicate that this pipe is not in the middle of halting. */
        pp->pp_halt_state &= ~EHCI_HALT_STATE_HALTING;
    }

    /* Sync the QH QTD pool again to get the most up to date information */
    Sync_QH_QTD_Pool(ehcip);

    ehci_print_qh(ehcip, qh);

    /* Verify the halt took; if not, mark the controller errored */
    status = Get_QH(qh->qh_status);
    if (!(status & EHCI_QH_STS_HALTED)) {
        USB_DPRINTF_L1(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
            "ehci_modify_qh_status_bit: Failed to halt qh=0x%p", qh);

        ehci_print_qh(ehcip, qh);

        /* Set host controller soft state to error */
        ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;

        ASSERT(status & EHCI_QH_STS_HALTED);
    }

success:
    /* Wake up threads waiting for this pipe to be halted. */
    cv_signal(&pp->pp_halt_cmpl_cv);
}


/*
 * ehci_halt_hs_qh:
 *
 * Halts all types of HIGH SPEED QHs.
 */
static void
ehci_halt_hs_qh(
    ehci_state_t *ehcip,
    ehci_pipe_private_t *pp,
    ehci_qh_t *qh)
{
    usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
        "ehci_halt_hs_qh:");

    /* Remove this qh from the HCD's view, but do not reclaim it */
    ehci_remove_qh(ehcip, pp, B_FALSE);

    /*
     * Wait for at least one SOF, just in case the HC is in the
     * middle of accessing this QH.
     */
    (void) ehci_wait_for_sof(ehcip);

    /* Sync the QH QTD pool to get up to date information */
    Sync_QH_QTD_Pool(ehcip);

    /* Modify the status bit and halt this QH.
 */
    Set_QH(qh->qh_status,
        ((Get_QH(qh->qh_status) &
        ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));

    /* Insert this QH back into the HCD's view */
    ehci_insert_qh(ehcip, ph);
}


/*
 * ehci_halt_fls_ctrl_and_bulk_qh:
 *
 * Halts FULL/LOW speed Ctrl and Bulk QHs only.
 */
static void
ehci_halt_fls_ctrl_and_bulk_qh(
    ehci_state_t *ehcip,
    ehci_pipe_private_t *pp,
    ehci_qh_t *qh)
{
    usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
    uint_t status, split_status, bytes_left;


    USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
        "ehci_halt_fls_ctrl_and_bulk_qh:");

    /* Remove this qh from the HCD's view, but do not reclaim it */
    ehci_remove_qh(ehcip, pp, B_FALSE);

    /*
     * Wait for at least one SOF, just in case the HC is in the
     * middle of accessing this QH.
     */
    (void) ehci_wait_for_sof(ehcip);

    /* Sync the QH QTD pool to get up to date information */
    Sync_QH_QTD_Pool(ehcip);

    /* Modify the status bit and halt this QH. */
    Set_QH(qh->qh_status,
        ((Get_QH(qh->qh_status) &
        ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));

    /*
     * Check to see if the QH was in the middle of a split
     * transaction: a pending complete-split with bytes still to
     * transfer means the hub's Transaction Translator buffer must
     * be flushed.
     */
    status = Get_QH(qh->qh_status);
    split_status = status & EHCI_QH_STS_SPLIT_XSTATE;
    bytes_left = status & EHCI_QH_STS_BYTES_TO_XFER;
    if ((split_status == EHCI_QH_STS_DO_COMPLETE_SPLIT) &&
        (bytes_left != 0)) {
        /* send ClearTTBuffer to this device's parent 2.0 hub */
        ehci_clear_tt_buffer(ehcip, ph, qh);
    }

    /* Insert this QH back into the HCD's view */
    ehci_insert_qh(ehcip, ph);
}


/*
 * ehci_clear_tt_buffer
 *
 * This function will send a Clear_TT_Buffer request to the pipe's
 * parent 2.0 hub.
 */
static void
ehci_clear_tt_buffer(
    ehci_state_t *ehcip,
    usba_pipe_handle_data_t *ph,
    ehci_qh_t *qh)
{
    usba_device_t *usba_device;
    usba_device_t *hub_usba_device;
    usb_pipe_handle_t hub_def_ph;
    usb_ep_descr_t *eptd;
    uchar_t attributes;
    uint16_t wValue;
    usb_ctrl_setup_t setup;
    usb_cr_t completion_reason;
    usb_cb_flags_t cb_flags;
    int retry;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
        "ehci_clear_tt_buffer: ");

    /* Get some information about the current pipe */
    usba_device = ph->p_usba_device;
    eptd = &ph->p_ep;
    attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;

    /*
     * Build the wValue for the Clear_TT_Buffer request
     * (usb spec 11.24.2.3):
     *    3..0   Endpoint Number
     *   10..4   Device Address
     *   12..11  Endpoint Type
     *   14..13  Reserved (must be 0)
     *   15      Direction, 1 = IN, 0 = OUT
     */
    wValue = 0;
    if ((eptd->bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
        wValue |= 0x8000;
    }
    wValue |= attributes << 11;
    wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_DEVICE_ADDRESS) << 4;
    /*
     * NOTE(review): the endpoint number (bits 3..0) is extracted here
     * with the EHCI_QH_CTRL_ED_HIGH_SPEED mask; confirm this macro
     * actually covers the QH endpoint-number field rather than the
     * endpoint-speed bits, otherwise the wrong TT buffer is cleared.
     */
    wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_HIGH_SPEED) >>
        EHCI_QH_CTRL_ED_NUMBER_SHIFT;

    /*
     * Drop the interrupt mutex across the blocking control transfer
     * below; it is reacquired before returning.
     */
    mutex_exit(&ehcip->ehci_int_mutex);

    /*
     * Manually fill in the request.  wIndex is the TT port number
     * (1 here; single-TT assumption -- see usb spec 11.24.2.3).
     */
    setup.bmRequestType = EHCI_CLEAR_TT_BUFFER_REQTYPE;
    setup.bRequest = EHCI_CLEAR_TT_BUFFER_BREQ;
    setup.wValue = wValue;
    setup.wIndex = 1;
    setup.wLength = 0;
    setup.attrs = USB_ATTRS_NONE;

    /* Get the usba_device of the parent 2.0 hub.
 */
    mutex_enter(&usba_device->usb_mutex);
    hub_usba_device = usba_device->usb_hs_hub_usba_dev;
    mutex_exit(&usba_device->usb_mutex);

    /* Get the default ctrl pipe for the parent 2.0 hub */
    mutex_enter(&hub_usba_device->usb_mutex);
    hub_def_ph = (usb_pipe_handle_t)&hub_usba_device->usb_ph_list[0];
    mutex_exit(&hub_usba_device->usb_mutex);

    /* Try the request up to 3 times before giving up */
    for (retry = 0; retry < 3; retry++) {

        /* sync send the request to the default pipe */
        if (usb_pipe_ctrl_xfer_wait(
            hub_def_ph,
            &setup,
            NULL,
            &completion_reason, &cb_flags, 0) == USB_SUCCESS) {

            break;
        }

        USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
            "ehci_clear_tt_buffer: Failed to clear tt buffer,"
            "retry = %d, cr = %d, cb_flags = 0x%x\n",
            retry, completion_reason, cb_flags);
    }

    if (retry >= 3) {
        char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
        dev_info_t *dip = hub_usba_device->usb_dip;

        /*
         * Ask the user to hotplug the 2.0 hub, to make sure that
         * all the buffers are back in sync, since this command
         * has failed.
         */
        USB_DPRINTF_L0(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
            "Error recovery failure: Please hotplug the 2.0 hub at"
            "%s", ddi_pathname(dip, path));

        kmem_free(path, MAXPATHLEN);
    }

    mutex_enter(&ehcip->ehci_int_mutex);
}

/*
 * ehci_halt_fls_intr_qh:
 *
 * Halts FULL/LOW speed Intr QHs.
 */
static void
ehci_halt_fls_intr_qh(
    ehci_state_t *ehcip,
    ehci_qh_t *qh)
{
    usb_frame_number_t starting_frame;
    usb_frame_number_t frames_past;
    uint_t status, i;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
        "ehci_halt_fls_intr_qh:");

    /*
     * Ask the HC to deactivate the QH in a
     * full/low periodic QH.
     */
    Set_QH(qh->qh_ctrl,
        (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_ED_INACTIVATE));

    starting_frame = ehci_get_current_frame_number(ehcip);

    /*
     * Wait at least EHCI_NUM_INTR_QH_LISTS+2 frames, or until
     * the QH's active-transaction bit clears.
     */
    Sync_QH_QTD_Pool(ehcip);
    frames_past = 0;
    status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;

    while ((frames_past <= (EHCI_NUM_INTR_QH_LISTS + 2)) &&
        (status != 0)) {

        (void) ehci_wait_for_sof(ehcip);

        Sync_QH_QTD_Pool(ehcip);
        status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
        frames_past = ehci_get_current_frame_number(ehcip) -
            starting_frame;
    }

    /* Modify the status bit and halt this QH. */
    Sync_QH_QTD_Pool(ehcip);

    status = Get_QH(qh->qh_status);

    /*
     * Write the halted status and re-check after each SOF; the HC may
     * overwrite the status word, so retry up to EHCI_NUM_INTR_QH_LISTS
     * times until the halt sticks.
     */
    for (i = 0; i < EHCI_NUM_INTR_QH_LISTS; i++) {
        Set_QH(qh->qh_status,
            ((Get_QH(qh->qh_status) &
            ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));

        Sync_QH_QTD_Pool(ehcip);

        (void) ehci_wait_for_sof(ehcip);
        Sync_QH_QTD_Pool(ehcip);

        if (Get_QH(qh->qh_status) & EHCI_QH_STS_HALTED) {

            break;
        }
    }

    Sync_QH_QTD_Pool(ehcip);

    USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
        "ehci_halt_fls_intr_qh: qh=0x%p frames past=%d, status=0x%x, 0x%x",
        qh, ehci_get_current_frame_number(ehcip) - starting_frame,
        status, Get_QH(qh->qh_status));
}


/*
 * ehci_remove_qh:
 *
 * Remove the Endpoint Descriptor (QH) from the Host Controller's appropriate
 * endpoint list.
 */
void
ehci_remove_qh(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	boolean_t		reclaim)
{
	uchar_t			attributes;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_remove_qh: qh=0x%p", pp->pp_qh);

	/* Dispatch on the endpoint's transfer type */
	attributes = pp->pp_pipe_handle->p_ep.bmAttributes & USB_EP_ATTR_MASK;

	switch (attributes) {
	case USB_EP_ATTR_CONTROL:
	case USB_EP_ATTR_BULK:
		ehci_remove_async_qh(ehcip, pp, reclaim);
		ehcip->ehci_open_async_count--;
		break;
	case USB_EP_ATTR_INTR:
		ehci_remove_intr_qh(ehcip, pp, reclaim);
		ehcip->ehci_open_periodic_count--;
		break;
	case USB_EP_ATTR_ISOCH:
		/* ISOCH does not use QH, don't do anything but update count */
		ehcip->ehci_open_periodic_count--;
		break;
	}
	/* Turn the async/periodic schedules on or off per the new counts */
	ehci_toggle_scheduler(ehcip);
}


/*
 * ehci_remove_async_qh:
 *
 * Remove a control/bulk endpoint from the Host Controller's (HC)
 * Asynchronous schedule endpoint list.
 */
static void
ehci_remove_async_qh(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	boolean_t		reclaim)
{
	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
	ehci_qh_t		*prev_qh, *next_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_remove_async_qh:");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Locate this QH's neighbors in the circular async list */
	prev_qh = ehci_qh_iommu_to_cpu(ehcip,
	    Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR);
	next_qh = ehci_qh_iommu_to_cpu(ehcip,
	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);

	/* Make sure this QH is in the list */
	ASSERT(prev_qh != NULL);

	/*
	 * If next QH and current QH are the same, then this is the last
	 * QH on the Asynchronous Schedule list.
	 */
	if (qh == next_qh) {
		ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
		/*
		 * Null our pointer to the async sched list, but do not
		 * touch the host controller's list_addr.
		 */
		ehcip->ehci_head_of_async_sched_list = NULL;
		ASSERT(ehcip->ehci_open_async_count == 1);
	} else {
		/* If this QH is the HEAD then find another one to replace it */
		if (ehcip->ehci_head_of_async_sched_list == qh) {

			ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
			ehcip->ehci_head_of_async_sched_list = next_qh;
			/* Transfer the H-bit (reclaim head) to the new head */
			Set_QH(next_qh->qh_ctrl,
			    Get_QH(next_qh->qh_ctrl) |
			    EHCI_QH_CTRL_RECLAIM_HEAD);
		}
		/* Unlink this QH from both directions of the list */
		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
		Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
	}

	/* qh_prev to indicate it is no longer in the circular list */
	Set_QH(qh->qh_prev, NULL);

	if (reclaim) {
		ehci_insert_qh_on_reclaim_list(ehcip, pp);
	}
}


/*
 * ehci_remove_intr_qh:
 *
 * Set up an interrupt endpoint to be removed from the Host Controller's (HC)
 * interrupt lattice tree. The Endpoint Descriptor (QH) will be freed in the
 * interrupt handler.
 */
static void
ehci_remove_intr_qh(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	boolean_t		reclaim)
{
	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
	ehci_qh_t		*prev_qh, *next_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_remove_intr_qh:");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Locate this QH's neighbors in the interrupt lattice */
	prev_qh = ehci_qh_iommu_to_cpu(ehcip, Get_QH(qh->qh_prev));
	next_qh = ehci_qh_iommu_to_cpu(ehcip,
	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);

	/* Make sure this QH is in the list */
	ASSERT(prev_qh != NULL);

	if (next_qh) {
		/* Update previous qh's link pointer */
		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));

		if (Get_QH(next_qh->qh_state) != EHCI_QH_STATIC) {
			/* Set the previous pointer of the next one */
			Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
		}
	} else {
		/*
		 * No successor: update previous qh's link pointer and mark
		 * it as the end of the list (terminate bit set).
		 */
		Set_QH(prev_qh->qh_link_ptr,
		    (Get_QH(qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
	}

	/* qh_prev to indicate it is no longer in the circular list */
	Set_QH(qh->qh_prev, NULL);

	if (reclaim) {
		ehci_insert_qh_on_reclaim_list(ehcip, pp);
	}
}


/*
 * ehci_insert_qh_on_reclaim_list:
 *
 * Insert Endpoint onto the reclaim list
 */
static void
ehci_insert_qh_on_reclaim_list(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp)
{
	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
	ehci_qh_t		*next_qh, *prev_qh;
	usb_frame_number_t	frame_number;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * Read current usb frame number and add appropriate number of
	 * usb frames needs to wait before reclaiming current endpoint.
	 */
	frame_number =
	    ehci_get_current_frame_number(ehcip) + MAX_SOF_WAIT_COUNT;

	/* Store 32-bit ID */
	Set_QH(qh->qh_reclaim_frame,
	    ((uint32_t)(EHCI_GET_ID((void *)(uintptr_t)frame_number))));

	/* Insert the endpoint onto the reclamation list */
	if (ehcip->ehci_reclaim_list) {
		/* Walk to the tail of the singly-linked reclaim list */
		next_qh = ehcip->ehci_reclaim_list;

		while (next_qh) {
			prev_qh = next_qh;
			next_qh = ehci_qh_iommu_to_cpu(ehcip,
			    Get_QH(next_qh->qh_reclaim_next));
		}

		Set_QH(prev_qh->qh_reclaim_next,
		    ehci_qh_cpu_to_iommu(ehcip, qh));
	} else {
		/* Empty list: this QH becomes the head */
		ehcip->ehci_reclaim_list = qh;
	}

	ASSERT(Get_QH(qh->qh_reclaim_next) == NULL);
}


/*
 * ehci_deallocate_qh:
 *
 * Deallocate a Host Controller's (HC) Endpoint Descriptor (QH).
 *
 * NOTE: This function is also called from POLLED MODE.
 */
void
ehci_deallocate_qh(
	ehci_state_t	*ehcip,
	ehci_qh_t	*old_qh)
{
	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_deallocate_qh:");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * Free the (up to two) dummy QTDs still hanging off this QH;
	 * see ehci_initialize_dummy for how they were set up.
	 */
	first_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
	    (Get_QH(old_qh->qh_next_qtd) & EHCI_QH_NEXT_QTD_PTR));

	if (first_dummy_qtd) {
		ASSERT(Get_QTD(first_dummy_qtd->qtd_state) == EHCI_QTD_DUMMY);

		second_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
		    Get_QTD(first_dummy_qtd->qtd_next_qtd));

		if (second_dummy_qtd) {
			ASSERT(Get_QTD(second_dummy_qtd->qtd_state) ==
			    EHCI_QTD_DUMMY);

			ehci_deallocate_qtd(ehcip, second_dummy_qtd);
		}

		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
	}

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_deallocate_qh: Deallocated 0x%p", (void *)old_qh);

	/* Scrub the QH and return it to the free pool */
	bzero((void *)old_qh, sizeof (ehci_qh_t));
	Set_QH(old_qh->qh_state, EHCI_QH_FREE);
}


/*
1356 * ehci_qh_cpu_to_iommu: 1357 * 1358 * This function converts for the given Endpoint Descriptor (QH) CPU address 1359 * to IO address. 1360 * 1361 * NOTE: This function is also called from POLLED MODE. 1362 */ 1363 uint32_t 1364 ehci_qh_cpu_to_iommu( 1365 ehci_state_t *ehcip, 1366 ehci_qh_t *addr) 1367 { 1368 uint32_t qh; 1369 1370 qh = (uint32_t)ehcip->ehci_qh_pool_cookie.dmac_address + 1371 (uint32_t)((uintptr_t)addr - (uintptr_t)(ehcip->ehci_qh_pool_addr)); 1372 1373 ASSERT(qh >= ehcip->ehci_qh_pool_cookie.dmac_address); 1374 ASSERT(qh <= ehcip->ehci_qh_pool_cookie.dmac_address + 1375 sizeof (ehci_qh_t) * ehci_qh_pool_size); 1376 1377 return (qh); 1378 } 1379 1380 1381 /* 1382 * ehci_qh_iommu_to_cpu: 1383 * 1384 * This function converts for the given Endpoint Descriptor (QH) IO address 1385 * to CPU address. 1386 */ 1387 ehci_qh_t * 1388 ehci_qh_iommu_to_cpu( 1389 ehci_state_t *ehcip, 1390 uintptr_t addr) 1391 { 1392 ehci_qh_t *qh; 1393 1394 if (addr == NULL) { 1395 1396 return (NULL); 1397 } 1398 1399 qh = (ehci_qh_t *)((uintptr_t) 1400 (addr - ehcip->ehci_qh_pool_cookie.dmac_address) + 1401 (uintptr_t)ehcip->ehci_qh_pool_addr); 1402 1403 ASSERT(qh >= ehcip->ehci_qh_pool_addr); 1404 ASSERT((uintptr_t)qh <= (uintptr_t)ehcip->ehci_qh_pool_addr + 1405 (uintptr_t)(sizeof (ehci_qh_t) * ehci_qh_pool_size)); 1406 1407 return (qh); 1408 } 1409 1410 1411 /* 1412 * Transfer Descriptor manipulations functions 1413 */ 1414 1415 /* 1416 * ehci_initialize_dummy: 1417 * 1418 * An Endpoint Descriptor (QH) has a dummy Transfer Descriptor (QTD) on the 1419 * end of its QTD list. Initially, both the head and tail pointers of the QH 1420 * point to the dummy QTD. 
 */
static int
ehci_initialize_dummy(
	ehci_state_t	*ehcip,
	ehci_qh_t	*qh)
{
	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;

	/* Allocate first dummy QTD */
	first_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);

	if (first_dummy_qtd == NULL) {
		return (USB_NO_RESOURCES);
	}

	/* Allocate second dummy QTD */
	second_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);

	if (second_dummy_qtd == NULL) {
		/* Deallocate first dummy QTD */
		ehci_deallocate_qtd(ehcip, first_dummy_qtd);

		return (USB_NO_RESOURCES);
	}

	/* Next QTD pointer of an QH point to this new dummy QTD */
	Set_QH(qh->qh_next_qtd, ehci_qtd_cpu_to_iommu(ehcip,
	    first_dummy_qtd) & EHCI_QH_NEXT_QTD_PTR);

	/* Set qh's dummy qtd field */
	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, first_dummy_qtd));

	/* Set first_dummy's next qtd pointer */
	Set_QTD(first_dummy_qtd->qtd_next_qtd,
	    ehci_qtd_cpu_to_iommu(ehcip, second_dummy_qtd));

	return (USB_SUCCESS);
}

/*
 * ehci_allocate_ctrl_resources:
 *
 * Calculates the number of tds necessary for a ctrl transfer, and allocates
 * all the resources necessary.
 *
 * Returns NULL if there is insufficient resources otherwise TW.
 */
ehci_trans_wrapper_t *
ehci_allocate_ctrl_resources(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	usb_ctrl_req_t		*ctrl_reqp,
	usb_flags_t		usb_flags)
{
	size_t			qtd_count = 2;	/* setup + status phases */
	size_t			ctrl_buf_size;
	ehci_trans_wrapper_t	*tw;

	/* Add one more td for data phase */
	if (ctrl_reqp->ctrl_wLength) {
		qtd_count += 1;
	}

	/*
	 * If we have a control data phase, the data buffer starts
	 * on the next 4K page boundary. So the TW buffer is allocated
	 * to be larger than required. The buffer in the range of
	 * [SETUP_SIZE, EHCI_MAX_QTD_BUF_SIZE) is just for padding
	 * and not to be transferred.
	 */
	if (ctrl_reqp->ctrl_wLength) {
		ctrl_buf_size = EHCI_MAX_QTD_BUF_SIZE +
		    ctrl_reqp->ctrl_wLength;
	} else {
		ctrl_buf_size = SETUP_SIZE;
	}

	tw = ehci_allocate_tw_resources(ehcip, pp, ctrl_buf_size,
	    usb_flags, qtd_count);

	return (tw);
}

/*
 * ehci_insert_ctrl_req:
 *
 * Create a Transfer Descriptor (QTD) and a data buffer for a control endpoint.
 */
/* ARGSUSED */
void
ehci_insert_ctrl_req(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	usb_ctrl_req_t		*ctrl_reqp,
	ehci_trans_wrapper_t	*tw,
	usb_flags_t		usb_flags)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	uchar_t			bmRequestType = ctrl_reqp->ctrl_bmRequestType;
	uchar_t			bRequest = ctrl_reqp->ctrl_bRequest;
	uint16_t		wValue = ctrl_reqp->ctrl_wValue;
	uint16_t		wIndex = ctrl_reqp->ctrl_wIndex;
	uint16_t		wLength = ctrl_reqp->ctrl_wLength;
	mblk_t			*data = ctrl_reqp->ctrl_data;
	uint32_t		ctrl = 0;
	uint8_t			setup_packet[8];

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_insert_ctrl_req:");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * Save current control request pointer and timeout values
	 * in transfer wrapper.
	 */
	tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
	tw->tw_timeout = ctrl_reqp->ctrl_timeout ?
	    ctrl_reqp->ctrl_timeout : EHCI_DEFAULT_XFER_TIMEOUT;

	/*
	 * Initialize the callback and any callback data for when
	 * the qtd completes.
	 */
	tw->tw_handle_qtd = ehci_handle_ctrl_qtd;
	tw->tw_handle_callback_value = NULL;

	/*
	 * swap the setup bytes where necessary since we specified
	 * NEVERSWAP
	 */
	setup_packet[0] = bmRequestType;
	setup_packet[1] = bRequest;
	setup_packet[2] = wValue;		/* wValue low byte */
	setup_packet[3] = wValue >> 8;		/* wValue high byte */
	setup_packet[4] = wIndex;		/* wIndex low byte */
	setup_packet[5] = wIndex >> 8;		/* wIndex high byte */
	setup_packet[6] = wLength;		/* wLength low byte */
	setup_packet[7] = wLength >> 8;		/* wLength high byte */

	bcopy(setup_packet, tw->tw_buf, SETUP_SIZE);

	Sync_IO_Buffer_for_device(tw->tw_dmahandle, SETUP_SIZE);

	/* Setup phase: always DATA0 toggle with a SETUP PID */
	ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_0 | EHCI_QTD_CTRL_SETUP_PID);

	/*
	 * The QTD's are placed on the QH one at a time.
	 * Once this QTD is placed on the done list, the
	 * data or status phase QTD will be enqueued.
	 */
	(void) ehci_insert_qtd(ehcip, ctrl, 0, SETUP_SIZE,
	    EHCI_CTRL_SETUP_PHASE, pp, tw);

	USB_DPRINTF_L3(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_insert_ctrl_req: pp 0x%p", (void *)pp);

	/*
	 * If this control transfer has a data phase, record the
	 * direction. If the data phase is an OUT transaction,
	 * copy the data into the buffer of the transfer wrapper.
	 */
	if (wLength != 0) {
		/* There is a data stage.  Find the direction */
		if (bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
			tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
		} else {
			tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;

			/* Copy the data into the message */
			bcopy(data->b_rptr, tw->tw_buf + EHCI_MAX_QTD_BUF_SIZE,
			    wLength);

			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
			    wLength + EHCI_MAX_QTD_BUF_SIZE);
		}

		/* Data phase always starts with a DATA1 toggle */
		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 | tw->tw_direction);

		/*
		 * Create the QTD.  If this is an OUT transaction,
		 * the data is already in the buffer of the TW.
		 * The transfer should start from EHCI_MAX_QTD_BUF_SIZE
		 * which is 4K aligned, though the ctrl phase only
		 * transfers a length of SETUP_SIZE. The padding data
		 * in the TW buffer are discarded.
		 */
		(void) ehci_insert_qtd(ehcip, ctrl, EHCI_MAX_QTD_BUF_SIZE,
		    tw->tw_length - EHCI_MAX_QTD_BUF_SIZE,
		    EHCI_CTRL_DATA_PHASE, pp, tw);

		/*
		 * The direction of the STATUS QTD depends on
		 * the direction of the transfer.
		 */
		if (tw->tw_direction == EHCI_QTD_CTRL_IN_PID) {
			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
			    EHCI_QTD_CTRL_OUT_PID |
			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
		} else {
			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
			    EHCI_QTD_CTRL_IN_PID |
			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
		}
	} else {
		/*
		 * There is no data stage, then initiate
		 * status phase from the host.
		 */
		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 |
		    EHCI_QTD_CTRL_IN_PID |
		    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
	}


	(void) ehci_insert_qtd(ehcip, ctrl, 0, 0,
	    EHCI_CTRL_STATUS_PHASE, pp, tw);

	/* Start the timer for this control transfer */
	ehci_start_xfer_timer(ehcip, pp, tw);
}


/*
 * ehci_allocate_bulk_resources:
 *
 * Calculates the number of tds necessary for a bulk transfer, and allocates
 * all the resources necessary.
 *
 * Returns NULL if there is insufficient resources otherwise TW.
1651 */ 1652 ehci_trans_wrapper_t * 1653 ehci_allocate_bulk_resources( 1654 ehci_state_t *ehcip, 1655 ehci_pipe_private_t *pp, 1656 usb_bulk_req_t *bulk_reqp, 1657 usb_flags_t usb_flags) 1658 { 1659 size_t qtd_count = 0; 1660 ehci_trans_wrapper_t *tw; 1661 1662 /* Check the size of bulk request */ 1663 if (bulk_reqp->bulk_len > EHCI_MAX_BULK_XFER_SIZE) { 1664 1665 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 1666 "ehci_allocate_bulk_resources: Bulk request size 0x%x is " 1667 "more than 0x%x", bulk_reqp->bulk_len, 1668 EHCI_MAX_BULK_XFER_SIZE); 1669 1670 return (NULL); 1671 } 1672 1673 /* Get the required bulk packet size */ 1674 qtd_count = bulk_reqp->bulk_len / EHCI_MAX_QTD_XFER_SIZE; 1675 if (bulk_reqp->bulk_len % EHCI_MAX_QTD_XFER_SIZE) { 1676 qtd_count += 1; 1677 } 1678 1679 tw = ehci_allocate_tw_resources(ehcip, pp, bulk_reqp->bulk_len, 1680 usb_flags, qtd_count); 1681 1682 return (tw); 1683 } 1684 1685 /* 1686 * ehci_insert_bulk_req: 1687 * 1688 * Create a Transfer Descriptor (QTD) and a data buffer for a bulk 1689 * endpoint. 
 */
/* ARGSUSED */
void
ehci_insert_bulk_req(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	usb_bulk_req_t		*bulk_reqp,
	ehci_trans_wrapper_t	*tw,
	usb_flags_t		flags)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	uint_t			bulk_pkt_size, count;
	size_t			residue = 0, len = 0;
	uint32_t		ctrl = 0;
	int			pipe_dir;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_insert_bulk_req: bulk_reqp = 0x%p flags = 0x%x",
	    bulk_reqp, flags);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get the bulk pipe direction */
	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;

	/* Get the required bulk packet size */
	bulk_pkt_size = min(bulk_reqp->bulk_len, EHCI_MAX_QTD_XFER_SIZE);

	/*
	 * Bytes left over after the last full-sized QTD.
	 * NOTE(review): assumes bulk_len > 0 here, otherwise
	 * bulk_pkt_size would be 0 and this modulo would divide
	 * by zero — confirm callers guarantee a non-zero length.
	 */
	residue = tw->tw_length % bulk_pkt_size;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_insert_bulk_req: bulk_pkt_size = %d", bulk_pkt_size);

	/*
	 * Save current bulk request pointer and timeout values
	 * in transfer wrapper.
	 */
	tw->tw_curr_xfer_reqp = (usb_opaque_t)bulk_reqp;
	tw->tw_timeout = bulk_reqp->bulk_timeout;

	/*
	 * Initialize the callback and any callback
	 * data required when the qtd completes.
	 */
	tw->tw_handle_qtd = ehci_handle_bulk_qtd;
	tw->tw_handle_callback_value = NULL;

	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
	    EHCI_QTD_CTRL_OUT_PID : EHCI_QTD_CTRL_IN_PID;

	if (tw->tw_direction == EHCI_QTD_CTRL_OUT_PID) {

		ASSERT(bulk_reqp->bulk_data != NULL);

		/* OUT transfer: stage the client's data in the TW buffer */
		bcopy(bulk_reqp->bulk_data->b_rptr, tw->tw_buf,
		    bulk_reqp->bulk_len);

		Sync_IO_Buffer_for_device(tw->tw_dmahandle,
		    bulk_reqp->bulk_len);
	}

	ctrl = tw->tw_direction;

	/* Insert all the bulk QTDs */
	for (count = 0; count < tw->tw_num_qtds; count++) {

		/* Check for last qtd */
		if (count == (tw->tw_num_qtds - 1)) {

			/* Interrupt only on the final QTD */
			ctrl |= EHCI_QTD_CTRL_INTR_ON_COMPLETE;

			/* Check for inserting residue data */
			if (residue) {
				bulk_pkt_size = residue;
			}
		}

		/* Insert the QTD onto the endpoint */
		(void) ehci_insert_qtd(ehcip, ctrl, len, bulk_pkt_size,
		    0, pp, tw);

		len = len + bulk_pkt_size;
	}

	/* Start the timer for this bulk transfer */
	ehci_start_xfer_timer(ehcip, pp, tw);
}


/*
 * ehci_start_periodic_pipe_polling:
 *
 * NOTE: This function is also called from POLLED MODE.
 */
int
ehci_start_periodic_pipe_polling(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	usb_opaque_t		periodic_in_reqp,
	usb_flags_t		flags)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	usb_ep_descr_t		*eptd = &ph->p_ep;
	int			error = USB_SUCCESS;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
	    "ehci_start_periodic_pipe_polling: ep%d",
	    ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * Check and handle start polling on root hub interrupt pipe.
	 */
	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
	    USB_EP_ATTR_INTR)) {

		error = ehci_handle_root_hub_pipe_start_intr_polling(ph,
		    (usb_intr_req_t *)periodic_in_reqp, flags);

		return (error);
	}

	/* Only an IDLE pipe may start polling */
	switch (pp->pp_state) {
	case EHCI_PIPE_STATE_IDLE:
		/* Save the Original client's Periodic IN request */
		pp->pp_client_periodic_in_reqp = periodic_in_reqp;

		/*
		 * This pipe is uninitialized or if a valid QTD is
		 * not found then insert a QTD on the interrupt IN
		 * endpoint.
		 */
		error = ehci_start_pipe_polling(ehcip, ph, flags);

		if (error != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_INTR,
			    ehcip->ehci_log_hdl,
			    "ehci_start_periodic_pipe_polling: "
			    "Start polling failed");

			pp->pp_client_periodic_in_reqp = NULL;

			return (error);
		}

		USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
		    "ehci_start_periodic_pipe_polling: PP = 0x%p", pp);

#ifdef DEBUG
		switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
		case USB_EP_ATTR_INTR:
			ASSERT((pp->pp_tw_head != NULL) &&
			    (pp->pp_tw_tail != NULL));
			break;
		case USB_EP_ATTR_ISOCH:
			ASSERT((pp->pp_itw_head != NULL) &&
			    (pp->pp_itw_tail != NULL));
			break;
		}
#endif

		break;
	case EHCI_PIPE_STATE_ACTIVE:
		USB_DPRINTF_L2(PRINT_MASK_INTR,
		    ehcip->ehci_log_hdl,
		    "ehci_start_periodic_pipe_polling: "
		    "Polling is already in progress");

		error = USB_FAILURE;
		break;
	case EHCI_PIPE_STATE_ERROR:
		USB_DPRINTF_L2(PRINT_MASK_INTR,
		    ehcip->ehci_log_hdl,
		    "ehci_start_periodic_pipe_polling: "
		    "Pipe is halted and perform reset"
		    "before restart polling");

		error = USB_FAILURE;
		break;
	default:
		USB_DPRINTF_L2(PRINT_MASK_INTR,
		    ehcip->ehci_log_hdl,
		    "ehci_start_periodic_pipe_polling: "
		    "Undefined state");

		error = USB_FAILURE;
		break;
	}

	return (error);
}


/*
 * ehci_start_pipe_polling:
 *
 * Insert the number of periodic requests corresponding to polling
 * interval as calculated during pipe open.
 */
static int
ehci_start_pipe_polling(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	usb_flags_t		flags)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	usb_ep_descr_t		*eptd = &ph->p_ep;
	int			error = USB_FAILURE;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_start_pipe_polling:");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * For the start polling, pp_max_periodic_req_cnt will be zero
	 * and for the restart polling request, it will be non zero.
	 *
	 * In case of start polling request, find out number of requests
	 * required for the Interrupt IN endpoints corresponding to the
	 * endpoint polling interval. For Isochronous IN endpoints, it is
	 * always fixed since its polling interval will be one ms.
	 */
	if (pp->pp_max_periodic_req_cnt == 0) {

		ehci_set_periodic_pipe_polling(ehcip, ph);
	}

	ASSERT(pp->pp_max_periodic_req_cnt != 0);

	/* Dispatch on endpoint type: only INTR and ISOCH poll */
	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
	case USB_EP_ATTR_INTR:
		error = ehci_start_intr_polling(ehcip, ph, flags);
		break;
	case USB_EP_ATTR_ISOCH:
		error = ehci_start_isoc_polling(ehcip, ph, flags);
		break;
	}

	return (error);
}

/*
 * ehci_start_intr_polling:
 *
 * Allocate a transfer wrapper per outstanding periodic request and then
 * queue them all onto the interrupt endpoint.
 */
static int
ehci_start_intr_polling(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	usb_flags_t		flags)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	ehci_trans_wrapper_t	*tw_list, *tw;
	int			i, total_tws;
	int			error = USB_SUCCESS;

	/* Allocate all the necessary resources for the IN transfer */
	tw_list = NULL;
	total_tws = pp->pp_max_periodic_req_cnt - pp->pp_cur_periodic_req_cnt;
	for (i = 0; i < total_tws; i += 1) {
		tw = ehci_allocate_intr_resources(ehcip, ph, NULL, flags);
		if (tw == NULL) {
			error = USB_NO_RESOURCES;
			/* There are not enough resources, deallocate the TWs */
			/*
			 * NOTE(review): this walk relies on the allocated
			 * TWs being chained through tw_next by the allocation
			 * path — confirm in ehci_allocate_tw_resources.
			 */
			tw = tw_list;
			while (tw != NULL) {
				tw_list = tw->tw_next;
				ehci_deallocate_intr_in_resource(
				    ehcip, pp, tw);
				ehci_deallocate_tw(ehcip, pp, tw);
				tw = tw_list;
			}

			return (error);
		} else {
			/* Remember the head of the freshly allocated chain */
			if (tw_list == NULL) {
				tw_list = tw;
			}
		}
	}

	while (pp->pp_cur_periodic_req_cnt < pp->pp_max_periodic_req_cnt) {

		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_start_pipe_polling: max = %d curr = %d tw = %p:",
		    pp->pp_max_periodic_req_cnt, pp->pp_cur_periodic_req_cnt,
		    tw_list);

		tw = tw_list;
		tw_list = tw->tw_next;

		ehci_insert_intr_req(ehcip, pp, tw, flags);

		pp->pp_cur_periodic_req_cnt++;
	}

	return (error);
}


/*
 * ehci_set_periodic_pipe_polling:
 *
1992 * Calculate the number of periodic requests needed corresponding to the 1993 * interrupt IN endpoints polling interval. Table below gives the number 1994 * of periodic requests needed for the interrupt IN endpoints according 1995 * to endpoint polling interval. 1996 * 1997 * Polling interval Number of periodic requests 1998 * 1999 * 1ms 4 2000 * 2ms 2 2001 * 4ms to 32ms 1 2002 */ 2003 static void 2004 ehci_set_periodic_pipe_polling( 2005 ehci_state_t *ehcip, 2006 usba_pipe_handle_data_t *ph) 2007 { 2008 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private; 2009 usb_ep_descr_t *endpoint = &ph->p_ep; 2010 uchar_t ep_attr = endpoint->bmAttributes; 2011 uint_t interval; 2012 2013 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 2014 "ehci_set_periodic_pipe_polling:"); 2015 2016 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 2017 2018 pp->pp_cur_periodic_req_cnt = 0; 2019 2020 /* 2021 * Check usb flag whether USB_FLAGS_ONE_TIME_POLL flag is 2022 * set and if so, set pp->pp_max_periodic_req_cnt to one. 2023 */ 2024 if (((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) && 2025 (pp->pp_client_periodic_in_reqp)) { 2026 usb_intr_req_t *intr_reqp = (usb_intr_req_t *) 2027 pp->pp_client_periodic_in_reqp; 2028 2029 if (intr_reqp->intr_attributes & 2030 USB_ATTRS_ONE_XFER) { 2031 2032 pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS; 2033 2034 return; 2035 } 2036 } 2037 2038 mutex_enter(&ph->p_usba_device->usb_mutex); 2039 2040 /* 2041 * The ehci_adjust_polling_interval function will not fail 2042 * at this instance since bandwidth allocation is already 2043 * done. Here we are getting only the periodic interval. 
2044 */ 2045 interval = ehci_adjust_polling_interval(ehcip, endpoint, 2046 ph->p_usba_device->usb_port_status); 2047 2048 mutex_exit(&ph->p_usba_device->usb_mutex); 2049 2050 switch (interval) { 2051 case EHCI_INTR_1MS_POLL: 2052 pp->pp_max_periodic_req_cnt = EHCI_INTR_1MS_REQS; 2053 break; 2054 case EHCI_INTR_2MS_POLL: 2055 pp->pp_max_periodic_req_cnt = EHCI_INTR_2MS_REQS; 2056 break; 2057 default: 2058 pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS; 2059 break; 2060 } 2061 2062 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 2063 "ehci_set_periodic_pipe_polling: Max periodic requests = %d", 2064 pp->pp_max_periodic_req_cnt); 2065 } 2066 2067 /* 2068 * ehci_allocate_intr_resources: 2069 * 2070 * Calculates the number of tds necessary for a intr transfer, and allocates 2071 * all the necessary resources. 2072 * 2073 * Returns NULL if there is insufficient resources otherwise TW. 2074 */ 2075 ehci_trans_wrapper_t * 2076 ehci_allocate_intr_resources( 2077 ehci_state_t *ehcip, 2078 usba_pipe_handle_data_t *ph, 2079 usb_intr_req_t *intr_reqp, 2080 usb_flags_t flags) 2081 { 2082 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private; 2083 int pipe_dir; 2084 size_t qtd_count = 1; 2085 size_t tw_length; 2086 ehci_trans_wrapper_t *tw; 2087 2088 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 2089 "ehci_allocate_intr_resources:"); 2090 2091 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 2092 2093 pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK; 2094 2095 /* Get the length of interrupt transfer & alloc data */ 2096 if (intr_reqp) { 2097 tw_length = intr_reqp->intr_len; 2098 } else { 2099 ASSERT(pipe_dir == USB_EP_DIR_IN); 2100 tw_length = (pp->pp_client_periodic_in_reqp) ? 
2101 (((usb_intr_req_t *)pp-> 2102 pp_client_periodic_in_reqp)->intr_len) : 2103 ph->p_ep.wMaxPacketSize; 2104 } 2105 2106 /* Check the size of interrupt request */ 2107 if (tw_length > EHCI_MAX_QTD_XFER_SIZE) { 2108 2109 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 2110 "ehci_allocate_intr_resources: Intr request size 0x%lx is " 2111 "more than 0x%x", tw_length, EHCI_MAX_QTD_XFER_SIZE); 2112 2113 return (NULL); 2114 } 2115 2116 if ((tw = ehci_allocate_tw_resources(ehcip, pp, tw_length, flags, 2117 qtd_count)) == NULL) { 2118 2119 return (NULL); 2120 } 2121 2122 if (pipe_dir == USB_EP_DIR_IN) { 2123 if (ehci_allocate_intr_in_resource(ehcip, pp, tw, flags) != 2124 USB_SUCCESS) { 2125 ehci_deallocate_tw(ehcip, pp, tw); 2126 } 2127 tw->tw_direction = EHCI_QTD_CTRL_IN_PID; 2128 } else { 2129 ASSERT(intr_reqp->intr_data != NULL); 2130 2131 /* Copy the data into the buffer */ 2132 bcopy(intr_reqp->intr_data->b_rptr, tw->tw_buf, 2133 intr_reqp->intr_len); 2134 2135 Sync_IO_Buffer_for_device(tw->tw_dmahandle, 2136 intr_reqp->intr_len); 2137 2138 tw->tw_curr_xfer_reqp = (usb_opaque_t)intr_reqp; 2139 tw->tw_direction = EHCI_QTD_CTRL_OUT_PID; 2140 } 2141 2142 if (intr_reqp) { 2143 tw->tw_timeout = intr_reqp->intr_timeout; 2144 } 2145 2146 /* 2147 * Initialize the callback and any callback 2148 * data required when the qtd completes. 2149 */ 2150 tw->tw_handle_qtd = ehci_handle_intr_qtd; 2151 tw->tw_handle_callback_value = NULL; 2152 2153 return (tw); 2154 } 2155 2156 2157 /* 2158 * ehci_insert_intr_req: 2159 * 2160 * Insert an Interrupt request into the Host Controller's periodic list. 
2161 */ 2162 /* ARGSUSED */ 2163 void 2164 ehci_insert_intr_req( 2165 ehci_state_t *ehcip, 2166 ehci_pipe_private_t *pp, 2167 ehci_trans_wrapper_t *tw, 2168 usb_flags_t flags) 2169 { 2170 uint_t ctrl = 0; 2171 2172 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 2173 2174 ASSERT(tw->tw_curr_xfer_reqp != NULL); 2175 2176 ctrl = (tw->tw_direction | EHCI_QTD_CTRL_INTR_ON_COMPLETE); 2177 2178 /* Insert another interrupt QTD */ 2179 (void) ehci_insert_qtd(ehcip, ctrl, 0, tw->tw_length, 0, pp, tw); 2180 2181 /* Start the timer for this Interrupt transfer */ 2182 ehci_start_xfer_timer(ehcip, pp, tw); 2183 } 2184 2185 2186 /* 2187 * ehci_stop_periodic_pipe_polling: 2188 */ 2189 /* ARGSUSED */ 2190 int 2191 ehci_stop_periodic_pipe_polling( 2192 ehci_state_t *ehcip, 2193 usba_pipe_handle_data_t *ph, 2194 usb_flags_t flags) 2195 { 2196 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private; 2197 usb_ep_descr_t *eptd = &ph->p_ep; 2198 2199 USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl, 2200 "ehci_stop_periodic_pipe_polling: Flags = 0x%x", flags); 2201 2202 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 2203 2204 /* 2205 * Check and handle stop polling on root hub interrupt pipe. 2206 */ 2207 if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) && 2208 ((eptd->bmAttributes & USB_EP_ATTR_MASK) == 2209 USB_EP_ATTR_INTR)) { 2210 2211 ehci_handle_root_hub_pipe_stop_intr_polling(ph, flags); 2212 2213 return (USB_SUCCESS); 2214 } 2215 2216 if (pp->pp_state != EHCI_PIPE_STATE_ACTIVE) { 2217 2218 USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl, 2219 "ehci_stop_periodic_pipe_polling: " 2220 "Polling already stopped"); 2221 2222 return (USB_SUCCESS); 2223 } 2224 2225 /* Set pipe state to pipe stop polling */ 2226 pp->pp_state = EHCI_PIPE_STATE_STOP_POLLING; 2227 2228 ehci_pipe_cleanup(ehcip, ph); 2229 2230 return (USB_SUCCESS); 2231 } 2232 2233 2234 /* 2235 * ehci_insert_qtd: 2236 * 2237 * Insert a Transfer Descriptor (QTD) on an Endpoint Descriptor (QH). 
 * Always returns USB_SUCCESS for now. Once Isoch has been implemented,
 * it may return USB_FAILURE.
 *
 * Uses the classic EHCI "dummy QTD" scheme: the QH's current dummy is
 * filled in with the real transfer and a fresh dummy (taken from the
 * TW's pre-allocated free list) is appended, so the QH is never left
 * without a terminating descriptor while the HC may be walking it.
 */
int
ehci_insert_qtd(
	ehci_state_t		*ehcip,
	uint32_t		qtd_ctrl,
	size_t			qtd_dma_offs,
	size_t			qtd_length,
	uint32_t		qtd_ctrl_phase,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	ehci_qtd_t		*curr_dummy_qtd, *next_dummy_qtd;
	ehci_qtd_t		*new_dummy_qtd;
	ehci_qh_t		*qh = pp->pp_qh;
	int			error = USB_SUCCESS;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Allocate new dummy QTD from the TW's pre-allocated free list */
	new_dummy_qtd = tw->tw_qtd_free_list;

	ASSERT(new_dummy_qtd != NULL);
	tw->tw_qtd_free_list = ehci_qtd_iommu_to_cpu(ehcip,
	    Get_QTD(new_dummy_qtd->qtd_tw_next_qtd));
	Set_QTD(new_dummy_qtd->qtd_tw_next_qtd, NULL);

	/* Get the current and next dummy QTDs */
	curr_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
	    Get_QH(qh->qh_dummy_qtd));
	next_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
	    Get_QTD(curr_dummy_qtd->qtd_next_qtd));

	/* Update QH's dummy qtd field */
	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, next_dummy_qtd));

	/* Update next dummy's next qtd pointer */
	Set_QTD(next_dummy_qtd->qtd_next_qtd,
	    ehci_qtd_cpu_to_iommu(ehcip, new_dummy_qtd));

	/*
	 * Fill in the current dummy qtd and
	 * add the new dummy to the end.
	 */
	ehci_fill_in_qtd(ehcip, curr_dummy_qtd, qtd_ctrl,
	    qtd_dma_offs, qtd_length, qtd_ctrl_phase, pp, tw);

	/* Insert this qtd onto the tw */
	ehci_insert_qtd_on_tw(ehcip, tw, curr_dummy_qtd);

	/*
	 * Insert this qtd onto active qtd list.
	 * Don't insert polled mode qtd here.
	 */
	if (pp->pp_flag != EHCI_POLLED_MODE_FLAG) {
		/* Insert this qtd onto active qtd list */
		ehci_insert_qtd_into_active_qtd_list(ehcip, curr_dummy_qtd);
	}

	/* Print qh and qtd */
	ehci_print_qh(ehcip, qh);
	ehci_print_qtd(ehcip, curr_dummy_qtd);

	return (error);
}


/*
 * ehci_allocate_qtd_from_pool:
 *
 * Allocate a Transfer Descriptor (QTD) from the QTD buffer pool.
 * Returns NULL when every QTD in the pool is in use.
 */
static ehci_qtd_t *
ehci_allocate_qtd_from_pool(ehci_state_t	*ehcip)
{
	int		i, ctrl;
	ehci_qtd_t	*qtd;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * Search for a blank Transfer Descriptor (QTD)
	 * in the QTD buffer pool.
	 */
	for (i = 0; i < ehci_qtd_pool_size; i ++) {
		ctrl = Get_QTD(ehcip->ehci_qtd_pool_addr[i].qtd_state);
		if (ctrl == EHCI_QTD_FREE) {
			break;
		}
	}

	if (i >= ehci_qtd_pool_size) {
		USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
		    "ehci_allocate_qtd_from_pool: QTD exhausted");

		return (NULL);
	}

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_allocate_qtd_from_pool: Allocated %d", i);

	/* Create a new dummy for the end of the QTD list */
	qtd = &ehcip->ehci_qtd_pool_addr[i];

	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_allocate_qtd_from_pool: qtd 0x%p", (void *)qtd);

	/* Mark the newly allocated QTD as a dummy */
	Set_QTD(qtd->qtd_state, EHCI_QTD_DUMMY);

	/* Mark the status of this new QTD to halted state */
	Set_QTD(qtd->qtd_ctrl, EHCI_QTD_CTRL_HALTED_XACT);

	/* Disable dummy QTD's next and alternate next pointers */
	Set_QTD(qtd->qtd_next_qtd, EHCI_QTD_NEXT_QTD_PTR_VALID);
	Set_QTD(qtd->qtd_alt_next_qtd, EHCI_QTD_ALT_NEXT_QTD_PTR_VALID);

	return (qtd);
}


/*
 * ehci_fill_in_qtd:
 *
 * Fill in the fields of a Transfer Descriptor
 * (QTD). The "Buffer Pointer" fields of a QTD are retrieved from the TW
 * it is associated with.
 *
 * Note:
 * qtd_dma_offs - the starting offset into the TW buffer, where the QTD
 *		  should transfer from. It should be 4K aligned. And when
 *		  a TW has more than one QTDs, the QTDs must be filled in
 *		  increasing order.
 * qtd_length - the total bytes to transfer.
 */
/*ARGSUSED*/
static void
ehci_fill_in_qtd(
	ehci_state_t		*ehcip,
	ehci_qtd_t		*qtd,
	uint32_t		qtd_ctrl,
	size_t			qtd_dma_offs,
	size_t			qtd_length,
	uint32_t		qtd_ctrl_phase,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	uint32_t	buf_addr;
	size_t		buf_len = qtd_length;
	uint32_t	ctrl = qtd_ctrl;
	uint_t		i = 0;
	int		rem_len;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_fill_in_qtd: qtd 0x%p ctrl 0x%x bufoffs 0x%lx "
	    "len 0x%lx", qtd, qtd_ctrl, qtd_dma_offs, qtd_length);

	/* Assert that the qtd to be filled in is a dummy */
	ASSERT(Get_QTD(qtd->qtd_state) == EHCI_QTD_DUMMY);

	/* Change QTD's state Active */
	Set_QTD(qtd->qtd_state, EHCI_QTD_ACTIVE);

	/* Set the total length data transfer */
	ctrl |= (((qtd_length << EHCI_QTD_CTRL_BYTES_TO_XFER_SHIFT)
	    & EHCI_QTD_CTRL_BYTES_TO_XFER) | EHCI_QTD_CTRL_MAX_ERR_COUNTS);

	/*
	 * QTDs must be filled in increasing DMA offset order.
	 * tw_dma_offs is initialized to be 0 at TW creation and
	 * is only increased in this function.
	 */
	ASSERT(buf_len == 0 || qtd_dma_offs >= tw->tw_dma_offs);

	/*
	 * Save the starting dma buffer offset used and
	 * length of data that will be transfered in
	 * the current QTD.
	 */
	Set_QTD(qtd->qtd_xfer_offs, qtd_dma_offs);
	Set_QTD(qtd->qtd_xfer_len, buf_len);

	/* Fill up to five 4K buffer pointers from the TW's DMA cookies */
	while (buf_len) {
		/*
		 * Advance to the next DMA cookie until finding the cookie
		 * that qtd_dma_offs falls in.
		 * It is very likely this loop will never repeat more than
		 * once. It is here just to accommodate the case qtd_dma_offs
		 * is increased by multiple cookies during two consecutive
		 * calls into this function. In that case, the interim DMA
		 * buffer is allowed to be skipped.
		 */
		while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
		    qtd_dma_offs) {
			/*
			 * tw_dma_offs always points to the starting offset
			 * of a cookie
			 */
			tw->tw_dma_offs += tw->tw_cookie.dmac_size;
			ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
			tw->tw_cookie_idx++;
			ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
		}

		/*
		 * Counting the remained buffer length to be filled in
		 * the QTD for current DMA cookie
		 */
		rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
		    qtd_dma_offs;

		/* Update the beginning of the buffer */
		buf_addr = (qtd_dma_offs - tw->tw_dma_offs) +
		    tw->tw_cookie.dmac_address;
		ASSERT((buf_addr % EHCI_4K_ALIGN) == 0);
		Set_QTD(qtd->qtd_buf[i], buf_addr);

		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_fill_in_qtd: dmac_addr 0x%p dmac_size "
		    "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
		    tw->tw_cookie_idx);

		if (buf_len <= EHCI_MAX_QTD_BUF_SIZE) {
			ASSERT(buf_len <= rem_len);
			break;
		} else {
			ASSERT(rem_len >= EHCI_MAX_QTD_BUF_SIZE);
			buf_len -= EHCI_MAX_QTD_BUF_SIZE;
			qtd_dma_offs += EHCI_MAX_QTD_BUF_SIZE;
		}

		i++;
	}

	/*
	 * Setup the alternate next qTD pointer if appropriate. The alternate
	 * qtd is currently pointing to a QTD that is not yet linked, but will
	 * be in the very near future. If a short_xfer occurs in this
	 * situation, the HC will automatically skip this QH. Eventually
	 * everything will be placed and the alternate_qtd will be valid QTD.
	 * For more information on alternate qtds look at section 3.5.2 in the
	 * EHCI spec.
	 */
	if (tw->tw_alt_qtd != NULL) {
		Set_QTD(qtd->qtd_alt_next_qtd,
		    (ehci_qtd_cpu_to_iommu(ehcip, tw->tw_alt_qtd) &
		    EHCI_QTD_ALT_NEXT_QTD_PTR));
	}

	/*
	 * For control, bulk and interrupt QTD, now
	 * enable current QTD by setting active bit.
	 */
	Set_QTD(qtd->qtd_ctrl, (ctrl | EHCI_QTD_CTRL_ACTIVE_XACT));

	/*
	 * For Control Xfer, qtd_ctrl_phase is a valid field.
	 */
	if (qtd_ctrl_phase) {
		Set_QTD(qtd->qtd_ctrl_phase, qtd_ctrl_phase);
	}

	/* Set the transfer wrapper */
	ASSERT(tw != NULL);
	ASSERT(tw->tw_id != NULL);

	Set_QTD(qtd->qtd_trans_wrapper, (uint32_t)tw->tw_id);
}


/*
 * ehci_insert_qtd_on_tw:
 *
 * The transfer wrapper keeps a list of all Transfer Descriptors (QTD) that
 * are allocated for this transfer. Insert a QTD onto this list. The list
 * of QTD's does not include the dummy QTD that is at the end of the list of
 * QTD's for the endpoint.
 */
static void
ehci_insert_qtd_on_tw(
	ehci_state_t		*ehcip,
	ehci_trans_wrapper_t	*tw,
	ehci_qtd_t		*qtd)
{
	/*
	 * Set the next pointer to NULL because
	 * this is the last QTD on list.
	 */
	Set_QTD(qtd->qtd_tw_next_qtd, NULL);

	if (tw->tw_qtd_head == NULL) {
		/* Empty list: new QTD becomes both head and tail */
		ASSERT(tw->tw_qtd_tail == NULL);
		tw->tw_qtd_head = qtd;
		tw->tw_qtd_tail = qtd;
	} else {
		ehci_qtd_t *dummy = (ehci_qtd_t *)tw->tw_qtd_tail;

		ASSERT(dummy != NULL);
		ASSERT(dummy != qtd);
		ASSERT(Get_QTD(qtd->qtd_state) != EHCI_QTD_DUMMY);

		/* Add the qtd to the end of the list */
		Set_QTD(dummy->qtd_tw_next_qtd,
		    ehci_qtd_cpu_to_iommu(ehcip, qtd));

		tw->tw_qtd_tail = qtd;

		ASSERT(Get_QTD(qtd->qtd_tw_next_qtd) == NULL);
	}
}


/*
 * ehci_insert_qtd_into_active_qtd_list:
 *
 * Insert current QTD into active QTD list (appended at the tail).
 */
static void
ehci_insert_qtd_into_active_qtd_list(
	ehci_state_t		*ehcip,
	ehci_qtd_t		*qtd)
{
	ehci_qtd_t		*curr_qtd, *next_qtd;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	curr_qtd = ehcip->ehci_active_qtd_list;

	/* Insert this QTD into QTD Active List */
	if (curr_qtd) {
		/* Walk to the tail of the list */
		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
		    Get_QTD(curr_qtd->qtd_active_qtd_next));

		while (next_qtd) {
			curr_qtd = next_qtd;
			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
			    Get_QTD(curr_qtd->qtd_active_qtd_next));
		}

		Set_QTD(qtd->qtd_active_qtd_prev,
		    ehci_qtd_cpu_to_iommu(ehcip, curr_qtd));

		Set_QTD(curr_qtd->qtd_active_qtd_next,
		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
	} else {
		/* List was empty: this QTD becomes the whole list */
		ehcip->ehci_active_qtd_list = qtd;
		Set_QTD(qtd->qtd_active_qtd_next, NULL);
		Set_QTD(qtd->qtd_active_qtd_prev, NULL);
	}
}


/*
 * ehci_remove_qtd_from_active_qtd_list:
 *
 * Remove current QTD from the active QTD list.
 *
 * NOTE: This function is also called from POLLED MODE.
 */
void
ehci_remove_qtd_from_active_qtd_list(
	ehci_state_t		*ehcip,
	ehci_qtd_t		*qtd)
{
	ehci_qtd_t		*curr_qtd, *prev_qtd, *next_qtd;

	ASSERT(qtd != NULL);

	curr_qtd = ehcip->ehci_active_qtd_list;

	/* Linear search for the QTD to be removed */
	while ((curr_qtd) && (curr_qtd != qtd)) {
		curr_qtd = ehci_qtd_iommu_to_cpu(ehcip,
		    Get_QTD(curr_qtd->qtd_active_qtd_next));
	}

	if ((curr_qtd) && (curr_qtd == qtd)) {
		prev_qtd = ehci_qtd_iommu_to_cpu(ehcip,
		    Get_QTD(curr_qtd->qtd_active_qtd_prev));
		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
		    Get_QTD(curr_qtd->qtd_active_qtd_next));

		/* Unlink: fix the predecessor (or the list head) */
		if (prev_qtd) {
			Set_QTD(prev_qtd->qtd_active_qtd_next,
			    Get_QTD(curr_qtd->qtd_active_qtd_next));
		} else {
			ehcip->ehci_active_qtd_list = next_qtd;
		}

		/* Unlink: fix the successor's back pointer, if any */
		if (next_qtd) {
			Set_QTD(next_qtd->qtd_active_qtd_prev,
			    Get_QTD(curr_qtd->qtd_active_qtd_prev));
		}
	} else {
		/* Not fatal: the QTD may already have been removed */
		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_remove_qtd_from_active_qtd_list: "
		    "Unable to find QTD in active_qtd_list");
	}
}


/*
 * ehci_traverse_qtds:
 *
 * Traverse the list of QTDs for given pipe using transfer wrapper. Since
 * the endpoint is marked as Halted, the Host Controller (HC) is no longer
 * accessing these QTDs. Remove all the QTDs that are attached to endpoint.
 */
static void
ehci_traverse_qtds(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	ehci_trans_wrapper_t	*next_tw;
	ehci_qtd_t		*qtd;
	ehci_qtd_t		*next_qtd;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_traverse_qtds:");

	/* Process the transfer wrappers for this pipe */
	next_tw = pp->pp_tw_head;

	while (next_tw) {
		/* Stop the transfer timer */
		ehci_stop_xfer_timer(ehcip, next_tw, EHCI_REMOVE_XFER_ALWAYS);

		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;

		/* Walk through each QTD for this transfer wrapper */
		while (qtd) {
			/* Remove this QTD from active QTD list */
			ehci_remove_qtd_from_active_qtd_list(ehcip, qtd);

			/* Save the successor before freeing the QTD */
			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
			    Get_QTD(qtd->qtd_tw_next_qtd));

			/* Deallocate this QTD */
			ehci_deallocate_qtd(ehcip, qtd);

			qtd = next_qtd;
		}

		next_tw = next_tw->tw_next;
	}

	/* Clear current qtd pointer */
	Set_QH(pp->pp_qh->qh_curr_qtd, (uint32_t)0x00000000);

	/* Update the next qtd pointer in the QH */
	Set_QH(pp->pp_qh->qh_next_qtd, Get_QH(pp->pp_qh->qh_dummy_qtd));
}


/*
 * ehci_deallocate_qtd:
 *
 * Deallocate a Host Controller's (HC) Transfer Descriptor (QTD): unlink
 * it from its transfer wrapper's QTD list (if it has one) and return it
 * to the pool by marking it EHCI_QTD_FREE.
 *
 * NOTE: This function is also called from POLLED MODE.
 */
void
ehci_deallocate_qtd(
	ehci_state_t		*ehcip,
	ehci_qtd_t		*old_qtd)
{
	ehci_trans_wrapper_t	*tw = NULL;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_deallocate_qtd: old_qtd = 0x%p", (void *)old_qtd);

	/*
	 * Obtain the transaction wrapper and tw will be
	 * NULL for the dummy QTDs.
	 */
	if (Get_QTD(old_qtd->qtd_state) != EHCI_QTD_DUMMY) {
		tw = (ehci_trans_wrapper_t *)
		    EHCI_LOOKUP_ID((uint32_t)
		    Get_QTD(old_qtd->qtd_trans_wrapper));

		ASSERT(tw != NULL);
	}

	/*
	 * If QTD's transfer wrapper is NULL, don't access its TW.
	 * Just free the QTD.
	 */
	if (tw) {
		ehci_qtd_t *qtd, *next_qtd;

		qtd = tw->tw_qtd_head;

		if (old_qtd != qtd) {
			/* Find old_qtd's predecessor on the TW list */
			next_qtd = ehci_qtd_iommu_to_cpu(
			    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));

			while (next_qtd != old_qtd) {
				qtd = next_qtd;
				next_qtd = ehci_qtd_iommu_to_cpu(
				    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
			}

			/*
			 * NOTE(review): qtd_tw_next_qtd is passed to
			 * Set_QTD() and then read back without Get_QTD()
			 * below, unlike every other access in this file —
			 * confirm this raw access is intentional.
			 */
			Set_QTD(qtd->qtd_tw_next_qtd, old_qtd->qtd_tw_next_qtd);

			if (qtd->qtd_tw_next_qtd == NULL) {
				tw->tw_qtd_tail = qtd;
			}
		} else {
			/* old_qtd is the head: advance the head pointer */
			tw->tw_qtd_head = ehci_qtd_iommu_to_cpu(
			    ehcip, Get_QTD(old_qtd->qtd_tw_next_qtd));

			if (tw->tw_qtd_head == NULL) {
				tw->tw_qtd_tail = NULL;
			}
		}
	}

	/* Scrub the descriptor and return it to the pool */
	bzero((void *)old_qtd, sizeof (ehci_qtd_t));
	Set_QTD(old_qtd->qtd_state, EHCI_QTD_FREE);

	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "Dealloc_qtd: qtd 0x%p", (void *)old_qtd);
}


/*
 * ehci_qtd_cpu_to_iommu:
 *
 * This function converts for the given Transfer Descriptor (QTD) CPU address
 * to IO address.
 *
 * NOTE: This function is also called from POLLED MODE.
2773 */ 2774 uint32_t 2775 ehci_qtd_cpu_to_iommu( 2776 ehci_state_t *ehcip, 2777 ehci_qtd_t *addr) 2778 { 2779 uint32_t td; 2780 2781 td = (uint32_t)ehcip->ehci_qtd_pool_cookie.dmac_address + 2782 (uint32_t)((uintptr_t)addr - 2783 (uintptr_t)(ehcip->ehci_qtd_pool_addr)); 2784 2785 ASSERT((ehcip->ehci_qtd_pool_cookie.dmac_address + 2786 (uint32_t) (sizeof (ehci_qtd_t) * 2787 (addr - ehcip->ehci_qtd_pool_addr))) == 2788 (ehcip->ehci_qtd_pool_cookie.dmac_address + 2789 (uint32_t)((uintptr_t)addr - (uintptr_t) 2790 (ehcip->ehci_qtd_pool_addr)))); 2791 2792 ASSERT(td >= ehcip->ehci_qtd_pool_cookie.dmac_address); 2793 ASSERT(td <= ehcip->ehci_qtd_pool_cookie.dmac_address + 2794 sizeof (ehci_qtd_t) * ehci_qtd_pool_size); 2795 2796 return (td); 2797 } 2798 2799 2800 /* 2801 * ehci_qtd_iommu_to_cpu: 2802 * 2803 * This function converts for the given Transfer Descriptor (QTD) IO address 2804 * to CPU address. 2805 * 2806 * NOTE: This function is also called from POLLED MODE. 2807 */ 2808 ehci_qtd_t * 2809 ehci_qtd_iommu_to_cpu( 2810 ehci_state_t *ehcip, 2811 uintptr_t addr) 2812 { 2813 ehci_qtd_t *qtd; 2814 2815 if (addr == NULL) { 2816 2817 return (NULL); 2818 } 2819 2820 qtd = (ehci_qtd_t *)((uintptr_t) 2821 (addr - ehcip->ehci_qtd_pool_cookie.dmac_address) + 2822 (uintptr_t)ehcip->ehci_qtd_pool_addr); 2823 2824 ASSERT(qtd >= ehcip->ehci_qtd_pool_addr); 2825 ASSERT((uintptr_t)qtd <= (uintptr_t)ehcip->ehci_qtd_pool_addr + 2826 (uintptr_t)(sizeof (ehci_qtd_t) * ehci_qtd_pool_size)); 2827 2828 return (qtd); 2829 } 2830 2831 /* 2832 * ehci_allocate_tds_for_tw_resources: 2833 * 2834 * Allocate n Transfer Descriptors (TD) from the TD buffer pool and places it 2835 * into the TW. Also chooses the correct alternate qtd when required. It is 2836 * used for hardware short transfer support. For more information on 2837 * alternate qtds look at section 3.5.2 in the EHCI spec. 2838 * Here is how each alternate qtd's are used: 2839 * 2840 * Bulk: used fully. 
2841 * Intr: xfers only require 1 QTD, so alternate qtds are never used. 2842 * Ctrl: Should not use alternate QTD 2843 * Isoch: Doesn't support short_xfer nor does it use QTD 2844 * 2845 * Returns USB_NO_RESOURCES if it was not able to allocate all the requested TD 2846 * otherwise USB_SUCCESS. 2847 */ 2848 int 2849 ehci_allocate_tds_for_tw( 2850 ehci_state_t *ehcip, 2851 ehci_pipe_private_t *pp, 2852 ehci_trans_wrapper_t *tw, 2853 size_t qtd_count) 2854 { 2855 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep; 2856 uchar_t attributes; 2857 ehci_qtd_t *qtd; 2858 uint32_t qtd_addr; 2859 int i; 2860 int error = USB_SUCCESS; 2861 2862 attributes = eptd->bmAttributes & USB_EP_ATTR_MASK; 2863 2864 for (i = 0; i < qtd_count; i += 1) { 2865 qtd = ehci_allocate_qtd_from_pool(ehcip); 2866 if (qtd == NULL) { 2867 error = USB_NO_RESOURCES; 2868 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 2869 "ehci_allocate_qtds_for_tw: " 2870 "Unable to allocate %lu QTDs", 2871 qtd_count); 2872 break; 2873 } 2874 if (i > 0) { 2875 qtd_addr = ehci_qtd_cpu_to_iommu(ehcip, 2876 tw->tw_qtd_free_list); 2877 Set_QTD(qtd->qtd_tw_next_qtd, qtd_addr); 2878 } 2879 tw->tw_qtd_free_list = qtd; 2880 2881 /* 2882 * Save the second one as a pointer to the new dummy 1. 2883 * It is used later for the alt_qtd_ptr. Xfers with only 2884 * one qtd do not need alt_qtd_ptr. 2885 * The tds's are allocated and put into a stack, that is 2886 * why the second qtd allocated will turn out to be the 2887 * new dummy 1. 2888 */ 2889 if ((i == 1) && (attributes == USB_EP_ATTR_BULK)) { 2890 tw->tw_alt_qtd = qtd; 2891 } 2892 } 2893 2894 return (error); 2895 } 2896 2897 /* 2898 * ehci_allocate_tw_resources: 2899 * 2900 * Allocate a Transaction Wrapper (TW) and n Transfer Descriptors (QTD) 2901 * from the QTD buffer pool and places it into the TW. It does an all 2902 * or nothing transaction. 2903 * 2904 * Returns NULL if there is insufficient resources otherwise TW. 
 */
static ehci_trans_wrapper_t *
ehci_allocate_tw_resources(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	size_t			tw_length,
	usb_flags_t		usb_flags,
	size_t			qtd_count)
{
	ehci_trans_wrapper_t	*tw;

	tw = ehci_create_transfer_wrapper(ehcip, pp, tw_length, usb_flags);

	if (tw == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_allocate_tw_resources: Unable to allocate TW");
	} else {
		/* All-or-nothing: on QTD shortage, free the TW too */
		if (ehci_allocate_tds_for_tw(ehcip, pp, tw, qtd_count) ==
		    USB_SUCCESS) {
			tw->tw_num_qtds = qtd_count;
		} else {
			ehci_deallocate_tw(ehcip, pp, tw);
			tw = NULL;
		}
	}

	return (tw);
}


/*
 * ehci_free_tw_td_resources:
 *
 * Free all allocated resources for Transaction Wrapper (TW).
 * Does not free the TW itself.
 */
static void
ehci_free_tw_td_resources(
	ehci_state_t		*ehcip,
	ehci_trans_wrapper_t	*tw)
{
	ehci_qtd_t		*qtd = NULL;
	ehci_qtd_t		*temp_qtd = NULL;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_free_tw_td_resources: tw = 0x%p", tw);

	/* Return every unused QTD on the TW's free list to the pool */
	qtd = tw->tw_qtd_free_list;
	while (qtd != NULL) {
		/* Save the pointer to the next qtd before destroying it */
		temp_qtd = ehci_qtd_iommu_to_cpu(ehcip,
		    Get_QTD(qtd->qtd_tw_next_qtd));
		ehci_deallocate_qtd(ehcip, qtd);
		qtd = temp_qtd;
	}
	tw->tw_qtd_free_list = NULL;
}

/*
 * Transfer Wrapper functions
 *
 * ehci_create_transfer_wrapper:
 *
 * Create a Transaction Wrapper (TW) and this involves the allocating of DMA
 * resources.
 */
static ehci_trans_wrapper_t *
ehci_create_transfer_wrapper(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	size_t			length,
	uint_t			usb_flags)
{
	ddi_device_acc_attr_t	dev_attr;
	ddi_dma_attr_t		dma_attr;
	int			result;
	size_t			real_length;
	ehci_trans_wrapper_t	*tw;
	int			kmem_flag;
	int			(*dmamem_wait)(caddr_t);

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
	    length, usb_flags);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* SLEEP flag should not be used in interrupt context */
	if (servicing_interrupt()) {
		kmem_flag = KM_NOSLEEP;
		dmamem_wait = DDI_DMA_DONTWAIT;
	} else {
		kmem_flag = KM_SLEEP;
		dmamem_wait = DDI_DMA_SLEEP;
	}

	/* Allocate space for the transfer wrapper */
	tw = kmem_zalloc(sizeof (ehci_trans_wrapper_t), kmem_flag);

	if (tw == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_create_transfer_wrapper: kmem_zalloc failed");

		return (NULL);
	}

	/* allow sg lists for transfer wrapper dma memory */
	bcopy(&ehcip->ehci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
	dma_attr.dma_attr_sgllen = EHCI_DMA_ATTR_TW_SGLLEN;
	dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	/* Allocate the DMA handle */
	result = ddi_dma_alloc_handle(ehcip->ehci_dip,
	    &dma_attr, dmamem_wait, 0, &tw->tw_dmahandle);

	if (result != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_create_transfer_wrapper: Alloc handle failed");

		kmem_free(tw, sizeof (ehci_trans_wrapper_t));

		return (NULL);
	}

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;

	/* no need for swapping the raw data */
	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Allocate the memory */
	result = ddi_dma_mem_alloc(tw->tw_dmahandle, length,
	    &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait, NULL,
	    (caddr_t *)&tw->tw_buf, &real_length, &tw->tw_accesshandle);

	if (result != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_create_transfer_wrapper: dma_mem_alloc fail");

		/* Unwind: free handle, then the wrapper */
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (ehci_trans_wrapper_t));

		return (NULL);
	}

	ASSERT(real_length >= length);

	/* Bind the handle */
	result = ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
	    dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies);

	if (result != DDI_MAPPED) {
		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);

		/* Unwind: free memory, handle, then the wrapper */
		ddi_dma_mem_free(&tw->tw_accesshandle);
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (ehci_trans_wrapper_t));

		return (NULL);
	}

	tw->tw_cookie_idx = 0;
	tw->tw_dma_offs = 0;

	/*
	 * Only allow one wrapper to be added at a time. Insert the
	 * new transaction wrapper into the list for this pipe.
	 */
	if (pp->pp_tw_head == NULL) {
		pp->pp_tw_head = tw;
		pp->pp_tw_tail = tw;
	} else {
		pp->pp_tw_tail->tw_next = tw;
		pp->pp_tw_tail = tw;
	}

	/* Store the transfer length */
	tw->tw_length = length;

	/* Store a back pointer to the pipe private structure */
	tw->tw_pipe_private = pp;

	/* Store the transfer type - synchronous or asynchronous */
	tw->tw_flags = usb_flags;

	/* Get and Store 32bit ID */
	tw->tw_id = EHCI_GET_ID((void *)tw);

	ASSERT(tw->tw_id != NULL);

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
	    tw, tw->tw_ncookies);

	return (tw);
}


/*
 * ehci_start_xfer_timer:
 *
 * Start the timer for the control, bulk and for one time interrupt
 * transfers.
 */
/* ARGSUSED */
static void
ehci_start_xfer_timer(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_start_xfer_timer: tw = 0x%p", tw);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * The timeout handling is done only for control, bulk and for
	 * one time Interrupt transfers.
	 *
	 * NOTE: If timeout is zero; Assume infinite timeout and don't
	 * insert this transfer on the timeout list.
	 */
	if (tw->tw_timeout) {
		/*
		 * Add this transfer wrapper to the head of the pipe's
		 * tw timeout list.
		 */
		if (pp->pp_timeout_list) {
			tw->tw_timeout_next = pp->pp_timeout_list;
		}

		pp->pp_timeout_list = tw;
		ehci_start_timer(ehcip, pp);
	}
}


/*
 * ehci_stop_xfer_timer:
 *
 * Stop the timer for the control, bulk and for one time interrupt
 * transfers.
 */
void
ehci_stop_xfer_timer(
	ehci_state_t		*ehcip,
	ehci_trans_wrapper_t	*tw,
	uint_t			flag)
{
	ehci_pipe_private_t	*pp;
	timeout_id_t		timer_id;

	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_stop_xfer_timer: tw = 0x%p", tw);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Obtain the pipe private structure */
	pp = tw->tw_pipe_private;

	/* check if the timeout tw list is empty */
	if (pp->pp_timeout_list == NULL) {

		return;
	}

	switch (flag) {
	case EHCI_REMOVE_XFER_IFLAST:
		/* Only remove when this is the TW's last outstanding QTD */
		if (tw->tw_qtd_head != tw->tw_qtd_tail) {
			break;
		}

		/* FALLTHRU */
	case EHCI_REMOVE_XFER_ALWAYS:
		ehci_remove_tw_from_timeout_list(ehcip, tw);

		/* Last timed transfer gone: cancel the pipe's timer */
		if ((pp->pp_timeout_list == NULL) &&
		    (pp->pp_timer_id)) {

			timer_id = pp->pp_timer_id;

			/* Reset the timer id to zero */
			pp->pp_timer_id = 0;

			/*
			 * untimeout() may block waiting for a running
			 * handler, which takes ehci_int_mutex; drop the
			 * mutex around the call to avoid deadlock.
			 */
			mutex_exit(&ehcip->ehci_int_mutex);

			(void) untimeout(timer_id);

			mutex_enter(&ehcip->ehci_int_mutex);
		}
		break;
	default:
		break;
	}
}


/*
 * ehci_xfer_timeout_handler:
 *
 * Control or bulk transfer timeout handler.  Runs once a second per pipe
 * (see ehci_start_timer); decrements each timed transfer's countdown and
 * expires those that reach zero.
 */
static void
ehci_xfer_timeout_handler(void *arg)
{
	usba_pipe_handle_data_t	*ph = (usba_pipe_handle_data_t *)arg;
	ehci_state_t		*ehcip = ehci_obtain_state(
	    ph->p_usba_device->usb_root_hub_dip);
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	ehci_trans_wrapper_t	*tw, *next;
	ehci_trans_wrapper_t	*expire_xfer_list = NULL;
	ehci_qtd_t		*qtd;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_xfer_timeout_handler: ehcip = 0x%p, ph = 0x%p", ehcip, ph);

	mutex_enter(&ehcip->ehci_int_mutex);

	/*
	 * Check whether still timeout handler is valid.
	 */
	if (pp->pp_timer_id != 0) {

		/* Reset the timer id to zero */
		pp->pp_timer_id = 0;
	} else {
		/* Timer was cancelled while this callout was pending */
		mutex_exit(&ehcip->ehci_int_mutex);

		return;
	}

	/* Get the transfer timeout list head */
	tw = pp->pp_timeout_list;

	while (tw) {

		/* Get the transfer on the timeout list */
		next = tw->tw_timeout_next;

		tw->tw_timeout--;

		if (tw->tw_timeout <= 0) {

			/* remove the tw from the timeout list */
			ehci_remove_tw_from_timeout_list(ehcip, tw);

			/* remove QTDs from active QTD list */
			qtd = tw->tw_qtd_head;
			while (qtd) {
				ehci_remove_qtd_from_active_qtd_list(
				    ehcip, qtd);

				/* Get the next QTD from the wrapper */
				qtd = ehci_qtd_iommu_to_cpu(ehcip,
				    Get_QTD(qtd->qtd_tw_next_qtd));
			}

			/*
			 * Preserve the order to the requests
			 * started time sequence.
			 */
			tw->tw_timeout_next = expire_xfer_list;
			expire_xfer_list = tw;
		}

		tw = next;
	}

	/*
	 * The timer should be started before the callbacks.
	 * There is always a chance that ehci interrupts come
	 * in when we release the mutex while calling the tw back.
	 * To keep an accurate timeout it should be restarted
	 * as soon as possible.
	 */
	ehci_start_timer(ehcip, pp);

	/* Get the expired transfer timeout list head */
	tw = expire_xfer_list;

	while (tw) {

		/* Get the next tw on the expired transfer timeout list */
		next = tw->tw_timeout_next;

		/*
		 * The error handle routine will release the mutex when
		 * calling back to USBA. But this will not cause any race.
		 * We do the callback and are relying on ehci_pipe_cleanup()
		 * to halt the queue head and clean up since we should not
		 * block in timeout context.
		 */
		ehci_handle_error(ehcip, tw->tw_qtd_head, USB_CR_TIMEOUT);

		tw = next;
	}
	mutex_exit(&ehcip->ehci_int_mutex);
}


/*
 * ehci_remove_tw_from_timeout_list:
 *
 * Remove Control or bulk transfer from the timeout list.
 */
static void
ehci_remove_tw_from_timeout_list(
	ehci_state_t		*ehcip,
	ehci_trans_wrapper_t	*tw)
{
	ehci_pipe_private_t	*pp;
	ehci_trans_wrapper_t	*prev, *next;

	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_remove_tw_from_timeout_list: tw = 0x%p", tw);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Obtain the pipe private structure */
	pp = tw->tw_pipe_private;

	if (pp->pp_timeout_list) {
		if (pp->pp_timeout_list == tw) {
			/* TW is the list head */
			pp->pp_timeout_list = tw->tw_timeout_next;

			tw->tw_timeout_next = NULL;
		} else {
			/* Search the singly-linked list for the TW */
			prev = pp->pp_timeout_list;
			next = prev->tw_timeout_next;

			while (next && (next != tw)) {
				prev = next;
				next = next->tw_timeout_next;
			}

			if (next == tw) {
				prev->tw_timeout_next =
				    next->tw_timeout_next;
				tw->tw_timeout_next = NULL;
			}
		}
	}
}


/*
 * ehci_start_timer:
 *
 * Start the pipe's timer
 */
static void
ehci_start_timer(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp)
{
	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_start_timer: ehcip = 0x%p, pp = 0x%p", ehcip, pp);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * Start the pipe's timer only if currently timer is not
	 * running and if there are any transfers on the timeout
	 * list. This timer will be per pipe.
	 */
	if ((!pp->pp_timer_id) && (pp->pp_timeout_list)) {
		/* One-second periodic callout; rearmed by the handler */
		pp->pp_timer_id = timeout(ehci_xfer_timeout_handler,
		    (void *)(pp->pp_pipe_handle), drv_usectohz(1000000));
	}
}

/*
 * ehci_deallocate_tw:
 *
 * Deallocate of a Transaction Wrapper (TW) and this involves the freeing of
 * of DMA resources.  A TW that still has QTDs outstanding is left alone.
 */
void
ehci_deallocate_tw(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	ehci_trans_wrapper_t	*prev, *next;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_deallocate_tw: tw = 0x%p", tw);

	/*
	 * If the transfer wrapper has no Host Controller (HC)
	 * Transfer Descriptors (QTD) associated with it, then
	 * remove the transfer wrapper.
	 */
	if (tw->tw_qtd_head) {
		ASSERT(tw->tw_qtd_tail != NULL);

		return;
	}

	ASSERT(tw->tw_qtd_tail == NULL);

	/* Make sure we return all the unused qtd's to the pool as well */
	ehci_free_tw_td_resources(ehcip, tw);

	/*
	 * If pp->pp_tw_head and pp->pp_tw_tail are pointing to
	 * given TW then set the head and tail equal to NULL.
	 * Otherwise search for this TW in the linked TW's list
	 * and then remove this TW from the list.
	 */
	if (pp->pp_tw_head == tw) {
		if (pp->pp_tw_tail == tw) {
			pp->pp_tw_head = NULL;
			pp->pp_tw_tail = NULL;
		} else {
			pp->pp_tw_head = tw->tw_next;
		}
	} else {
		prev = pp->pp_tw_head;
		next = prev->tw_next;

		while (next && (next != tw)) {
			prev = next;
			next = next->tw_next;
		}

		if (next == tw) {
			prev->tw_next = next->tw_next;

			if (pp->pp_tw_tail == tw) {
				pp->pp_tw_tail = prev;
			}
		}
	}

	/*
	 * Make sure that, this TW has been removed
	 * from the timeout list.
	 */
	ehci_remove_tw_from_timeout_list(ehcip, tw);

	/* Deallocate this TW */
	ehci_free_tw(ehcip, pp, tw);
}


/*
 * ehci_free_dma_resources:
 *
 * Free dma resources of a Transfer Wrapper (TW) and also free the TW.
 *
 * NOTE: This function is also called from POLLED MODE.
 */
void
ehci_free_dma_resources(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	ehci_trans_wrapper_t	*head_tw = pp->pp_tw_head;
	ehci_trans_wrapper_t	*next_tw, *tw;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_free_dma_resources: ph = 0x%p", (void *)ph);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Process the Transfer Wrappers */
	next_tw = head_tw;
	while (next_tw) {
		tw = next_tw;
		next_tw = tw->tw_next;

		USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_free_dma_resources: Free TW = 0x%p", (void *)tw);

		ehci_free_tw(ehcip, pp, tw);
	}

	/* Adjust the head and tail pointers */
	pp->pp_tw_head = NULL;
	pp->pp_tw_tail = NULL;
}


/*
 * ehci_free_tw:
 *
 * Free the Transfer Wrapper (TW).
 */
/*ARGSUSED*/
static void
ehci_free_tw(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	int	rval;		/* only consumed by the DEBUG ASSERT below */

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_free_tw: tw = 0x%p", tw);

	ASSERT(tw != NULL);
	ASSERT(tw->tw_id != NULL);

	/* Free 32bit ID */
	EHCI_FREE_ID((uint32_t)tw->tw_id);

	/* Tear down DMA state in reverse order of setup: unbind, then free */
	rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
	ASSERT(rval == DDI_SUCCESS);

	ddi_dma_mem_free(&tw->tw_accesshandle);
	ddi_dma_free_handle(&tw->tw_dmahandle);

	/* Free transfer wrapper */
	kmem_free(tw, sizeof (ehci_trans_wrapper_t));
}


/*
 * Miscellaneous functions
 */

/*
 * ehci_allocate_intr_in_resource
 *
 * Allocate an interrupt request structure for an interrupt IN transfer.
 * Returns USB_SUCCESS, or USB_NO_RESOURCES if the request structure
 * cannot be allocated.  On success the request is attached to the TW and
 * the pipe is marked active.
 */
/*ARGSUSED*/
int
ehci_allocate_intr_in_resource(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw,
	usb_flags_t		flags)
{
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
	usb_intr_req_t		*curr_intr_reqp;
	usb_opaque_t		client_periodic_in_reqp;
	size_t			length = 0;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_allocate_intr_in_resource:"
	    "pp = 0x%p tw = 0x%p flags = 0x%x", pp, tw, flags);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
	ASSERT(tw->tw_curr_xfer_reqp == NULL);

	/* Get the client periodic in request pointer */
	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;

	/*
	 * If it is a periodic IN request and the periodic request is
	 * not NULL, allocate a corresponding usb periodic IN request
	 * for the current periodic polling request and copy the
	 * information from the saved periodic request structure.
	 */
	if (client_periodic_in_reqp) {

		/* Get the interrupt transfer length */
		length = ((usb_intr_req_t *)
		    client_periodic_in_reqp)->intr_len;

		curr_intr_reqp = usba_hcdi_dup_intr_req(ph->p_dip,
		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
	} else {
		curr_intr_reqp = usb_alloc_intr_req(ph->p_dip, length, flags);
	}

	if (curr_intr_reqp == NULL) {

		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_allocate_intr_in_resource: Interrupt"
		    "request structure allocation failed");

		return (USB_NO_RESOURCES);
	}

	/* For polled mode */
	if (client_periodic_in_reqp == NULL) {
		curr_intr_reqp->intr_attributes = USB_ATTRS_SHORT_XFER_OK;
		curr_intr_reqp->intr_len = ph->p_ep.wMaxPacketSize;
	} else {
		/*
		 * Check and save the timeout value; only a one-shot
		 * (USB_ATTRS_ONE_XFER) transfer carries a timeout.
		 */
		tw->tw_timeout = (curr_intr_reqp->intr_attributes &
		    USB_ATTRS_ONE_XFER) ? curr_intr_reqp->intr_timeout: 0;
	}

	tw->tw_curr_xfer_reqp = (usb_opaque_t)curr_intr_reqp;
	tw->tw_length = curr_intr_reqp->intr_len;

	mutex_enter(&ph->p_mutex);
	ph->p_req_count++;
	mutex_exit(&ph->p_mutex);

	pp->pp_state = EHCI_PIPE_STATE_ACTIVE;

	return (USB_SUCCESS);
}

/*
 * ehci_pipe_cleanup
 *
 * Cleanup ehci pipe.
 */
void
ehci_pipe_cleanup(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	uint_t			pipe_state = pp->pp_state;
	usb_cr_t		completion_reason;
	usb_ep_descr_t		*eptd = &ph->p_ep;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_pipe_cleanup: ph = 0x%p", ph);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Isochronous pipes have their own cleanup path */
	if (EHCI_ISOC_ENDPOINT(eptd)) {
		ehci_isoc_pipe_cleanup(ehcip, ph);

		return;
	}

	ASSERT(!servicing_interrupt());

	/*
	 * Set the QH's status to Halt condition.
	 * If another thread is halting this function will automatically
	 * wait. If a pipe close happens at this time
	 * we will be in lots of trouble.
	 * If we are in an interrupt thread, don't halt, because it may
	 * do a wait_for_sof.
	 */
	ehci_modify_qh_status_bit(ehcip, pp, SET_HALT);

	/*
	 * Wait for processing all completed transfers and
	 * to send results to upstream.
	 */
	ehci_wait_for_transfers_completion(ehcip, pp);

	/* Save the data toggle information */
	ehci_save_data_toggle(ehcip, ph);

	/*
	 * Traverse the list of QTDs for this pipe using transfer
	 * wrapper. Process these QTDs depending on their status.
	 * And stop the timer of this pipe.
	 */
	ehci_traverse_qtds(ehcip, ph);

	/* Make sure the timer is not running */
	ASSERT(pp->pp_timer_id == 0);

	/* Do callbacks for all unfinished requests */
	ehci_handle_outstanding_requests(ehcip, pp);

	/* Free DMA resources */
	ehci_free_dma_resources(ehcip, ph);

	/*
	 * NOTE(review): completion_reason is set only for the CLOSE,
	 * RESET and STOP_POLLING states below, yet may be read by the
	 * periodic-IN callback at the bottom of this function.  This
	 * appears to assume pipe_state can hold no other value here --
	 * confirm, or add a default case.
	 */
	switch (pipe_state) {
	case EHCI_PIPE_STATE_CLOSE:
		completion_reason = USB_CR_PIPE_CLOSING;
		break;
	case EHCI_PIPE_STATE_RESET:
	case EHCI_PIPE_STATE_STOP_POLLING:
		/* Set completion reason */
		completion_reason = (pipe_state ==
		    EHCI_PIPE_STATE_RESET) ?
		    USB_CR_PIPE_RESET: USB_CR_STOPPED_POLLING;

		/* Restore the data toggle information */
		ehci_restore_data_toggle(ehcip, ph);

		/*
		 * Clear the halt bit to restart all the
		 * transactions on this pipe.
		 */
		ehci_modify_qh_status_bit(ehcip, pp, CLEAR_HALT);

		/* Set pipe state to idle */
		pp->pp_state = EHCI_PIPE_STATE_IDLE;

		break;
	}

	/*
	 * Do the callback for the original client
	 * periodic IN request.
	 */
	if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
	    ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) ==
	    USB_EP_DIR_IN)) {

		ehci_do_client_periodic_in_req_callback(
		    ehcip, pp, completion_reason);
	}
}


/*
 * ehci_wait_for_transfers_completion:
 *
 * Wait for processing all completed transfers and to send results
 * to upstream.
 */
static void
ehci_wait_for_transfers_completion(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp)
{
	ehci_trans_wrapper_t	*next_tw = pp->pp_tw_head;
	clock_t			xfer_cmpl_time_wait;
	ehci_qtd_t		*qtd;

	USB_DPRINTF_L4(PRINT_MASK_LISTS,
	    ehcip->ehci_log_hdl,
	    "ehci_wait_for_transfers_completion: pp = 0x%p", pp);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Nothing to wait for unless the controller is operational */
	if ((ehci_state_is_operational(ehcip)) != USB_SUCCESS) {

		return;
	}

	pp->pp_count_done_qtds = 0;

	/* Process the transfer wrappers for this pipe */
	while (next_tw) {
		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;

		/*
		 * Walk through each QTD for this transfer wrapper.
		 * If a QTD still exists, then it is either on done
		 * list or on the QH's list.  A QTD whose ACTIVE bit
		 * is clear has completed but not yet been processed,
		 * so count it.
		 */
		while (qtd) {
			if (!(Get_QTD(qtd->qtd_ctrl) &
			    EHCI_QTD_CTRL_ACTIVE_XACT)) {
				pp->pp_count_done_qtds++;
			}

			qtd = ehci_qtd_iommu_to_cpu(ehcip,
			    Get_QTD(qtd->qtd_tw_next_qtd));
		}

		next_tw = next_tw->tw_next;
	}

	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_wait_for_transfers_completion: count_done_qtds = 0x%x",
	    pp->pp_count_done_qtds);

	/* No completed-but-unprocessed QTDs: no need to wait */
	if (!pp->pp_count_done_qtds) {

		return;
	}

	/* Get the number of clock ticks to wait */
	xfer_cmpl_time_wait = drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000);

	/*
	 * Wait (bounded) for ehci_check_for_transfers_completion() to
	 * signal that all the counted QTDs have been processed.
	 */
	(void) cv_timedwait(&pp->pp_xfer_cmpl_cv,
	    &ehcip->ehci_int_mutex,
	    ddi_get_lbolt() + xfer_cmpl_time_wait);

	if (pp->pp_count_done_qtds) {

		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_wait_for_transfers_completion:"
		    "No transfers completion confirmation received");
	}
}

/*
 * ehci_check_for_transfers_completion:
 *
 * Check whether anybody is waiting for transfers completion event. If so,
 * send this event and also stop initiating any new transfers on this pipe.
 */
void
ehci_check_for_transfers_completion(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp)
{
	USB_DPRINTF_L4(PRINT_MASK_LISTS,
	    ehcip->ehci_log_hdl,
	    "ehci_check_for_transfers_completion: pp = 0x%p", pp);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * A polling stop that previously failed with NO_RESOURCES and
	 * has no outstanding periodic requests is completed here with
	 * the deferred callback.
	 */
	if ((pp->pp_state == EHCI_PIPE_STATE_STOP_POLLING) &&
	    (pp->pp_error == USB_CR_NO_RESOURCES) &&
	    (pp->pp_cur_periodic_req_cnt == 0)) {

		/* Reset pipe error to zero */
		pp->pp_error = 0;

		/* Do callback for original request */
		ehci_do_client_periodic_in_req_callback(
		    ehcip, pp, USB_CR_NO_RESOURCES);
	}

	if (pp->pp_count_done_qtds) {

		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_check_for_transfers_completion:"
		    "count_done_qtds = 0x%x", pp->pp_count_done_qtds);

		/* Decrement the done qtd count */
		pp->pp_count_done_qtds--;

		if (!pp->pp_count_done_qtds) {

			USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
			    "ehci_check_for_transfers_completion:"
			    "Sent transfers completion event pp = 0x%p", pp);

			/* Send the transfer completion signal */
			cv_signal(&pp->pp_xfer_cmpl_cv);
		}
	}
}


/*
 * ehci_save_data_toggle:
 *
 * Save the data toggle information.
 */
static void
ehci_save_data_toggle(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	usb_ep_descr_t		*eptd = &ph->p_ep;
	uint_t			data_toggle;
	usb_cr_t		error = pp->pp_error;
	ehci_qh_t		*qh = pp->pp_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS,
	    ehcip->ehci_log_hdl,
	    "ehci_save_data_toggle: ph = 0x%p", ph);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Reset the pipe error value */
	pp->pp_error = USB_CR_OK;

	/* Return immediately if it is a control pipe */
	if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
	    USB_EP_ATTR_CONTROL) {

		return;
	}

	/* Get the data toggle information from the endpoint (QH) */
	data_toggle = (Get_QH(qh->qh_status) &
	    EHCI_QH_STS_DATA_TOGGLE)? DATA1:DATA0;

	/*
	 * If error is STALL, then, set
	 * data toggle to zero.
	 */
	if (error == USB_CR_STALL) {
		data_toggle = DATA0;
	}

	/*
	 * Save the data toggle information
	 * in the usb device structure.
	 */
	mutex_enter(&ph->p_mutex);
	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
	    data_toggle);
	mutex_exit(&ph->p_mutex);
}


/*
 * ehci_restore_data_toggle:
 *
 * Restore the data toggle information saved by ehci_save_data_toggle()
 * into the endpoint's QH, clearing the saved copy.
 */
void
ehci_restore_data_toggle(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	usb_ep_descr_t		*eptd = &ph->p_ep;
	uint_t			data_toggle = 0;

	USB_DPRINTF_L4(PRINT_MASK_LISTS,
	    ehcip->ehci_log_hdl,
	    "ehci_restore_data_toggle: ph = 0x%p", ph);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Return immediately if it is a control pipe */
	if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
	    USB_EP_ATTR_CONTROL) {

		return;
	}

	mutex_enter(&ph->p_mutex);

	/* Fetch the saved toggle, then clear it in the device structure */
	data_toggle = usba_hcdi_get_data_toggle(ph->p_usba_device,
	    ph->p_ep.bEndpointAddress);
	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
	    0);

	mutex_exit(&ph->p_mutex);

	/*
	 * Restore the data toggle bit depending on the
	 * previous data toggle information.
	 */
	if (data_toggle) {
		Set_QH(pp->pp_qh->qh_status,
		    Get_QH(pp->pp_qh->qh_status) | EHCI_QH_STS_DATA_TOGGLE);
	} else {
		Set_QH(pp->pp_qh->qh_status,
		    Get_QH(pp->pp_qh->qh_status) & (~EHCI_QH_STS_DATA_TOGGLE));
	}
}


/*
 * ehci_handle_outstanding_requests
 *
 * Deallocate interrupt request structure for the interrupt IN transfer.
 * Do the callbacks for all unfinished requests.
 *
 * NOTE: This function is also called from POLLED MODE.
 */
void
ehci_handle_outstanding_requests(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp)
{
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
	usb_ep_descr_t		*eptd = &ph->p_ep;
	ehci_trans_wrapper_t	*curr_tw;
	ehci_trans_wrapper_t	*next_tw;
	usb_opaque_t		curr_xfer_reqp;

	USB_DPRINTF_L4(PRINT_MASK_LISTS,
	    ehcip->ehci_log_hdl,
	    "ehci_handle_outstanding_requests: pp = 0x%p", pp);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Deallocate all pre-allocated interrupt requests */
	next_tw = pp->pp_tw_head;

	while (next_tw) {
		/* Save the next pointer; the callback path may touch the TW */
		curr_tw = next_tw;
		next_tw = curr_tw->tw_next;

		curr_xfer_reqp = curr_tw->tw_curr_xfer_reqp;

		/* Deallocate current interrupt request */
		if (curr_xfer_reqp) {

			if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
			    (curr_tw->tw_direction == EHCI_QTD_CTRL_IN_PID)) {

				/* Decrement periodic in request count */
				pp->pp_cur_periodic_req_cnt--;

				ehci_deallocate_intr_in_resource(
				    ehcip, pp, curr_tw);
			} else {
				/* Non-periodic: flush the request upstream */
				ehci_hcdi_callback(ph, curr_tw, USB_CR_FLUSHED);
			}
		}
	}
}


/*
 * ehci_deallocate_intr_in_resource
 *
 * Deallocate the interrupt request structure attached to the given TW
 * for an interrupt IN transfer, dropping the pipe's request count and
 * marking the pipe idle.  No-op if the TW carries no current request.
 */
void
ehci_deallocate_intr_in_resource(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
	uchar_t			ep_attr = ph->p_ep.bmAttributes;
	usb_opaque_t		curr_xfer_reqp;

	USB_DPRINTF_L4(PRINT_MASK_LISTS,
	    ehcip->ehci_log_hdl,
	    "ehci_deallocate_intr_in_resource: "
	    "pp = 0x%p tw = 0x%p", pp, tw);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
	ASSERT((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR);

	curr_xfer_reqp = tw->tw_curr_xfer_reqp;

	/* Check the current periodic in request pointer */
	if (curr_xfer_reqp) {

		tw->tw_curr_xfer_reqp = NULL;

		mutex_enter(&ph->p_mutex);
		ph->p_req_count--;
		mutex_exit(&ph->p_mutex);

		/* Free pre-allocated interrupt requests */
		usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);

		/* Set periodic in pipe state to idle */
		pp->pp_state = EHCI_PIPE_STATE_IDLE;
	}
}


/*
 * ehci_do_client_periodic_in_req_callback
 *
 * Do callback for the original client periodic IN request.
 */
void
ehci_do_client_periodic_in_req_callback(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	usb_cr_t		completion_reason)
{
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
	usb_ep_descr_t		*eptd = &ph->p_ep;

	USB_DPRINTF_L4(PRINT_MASK_LISTS,
	    ehcip->ehci_log_hdl,
	    "ehci_do_client_periodic_in_req_callback: "
	    "pp = 0x%p cc = 0x%x", pp, completion_reason);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * Check for Interrupt/Isochronous IN, whether we need to do
	 * callback for the original client's periodic IN request.
	 */
	if (pp->pp_client_periodic_in_reqp) {
		ASSERT(pp->pp_cur_periodic_req_cnt == 0);
		if (EHCI_ISOC_ENDPOINT(eptd)) {
			ehci_hcdi_isoc_callback(ph, NULL, completion_reason);
		} else {
			ehci_hcdi_callback(ph, NULL, completion_reason);
		}
	}
}


/*
 * ehci_hcdi_callback()
 *
 * Convenience wrapper around usba_hcdi_cb() other than root hub.
 * Derives the new pipe state from the completion reason, detaches the
 * request (from the TW, or the saved client periodic IN request when no
 * TW request exists), and delivers it upstream.
 */
void
ehci_hcdi_callback(
	usba_pipe_handle_data_t	*ph,
	ehci_trans_wrapper_t	*tw,
	usb_cr_t		completion_reason)
{
	ehci_state_t	*ehcip = ehci_obtain_state(
	    ph->p_usba_device->usb_root_hub_dip);
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	usb_opaque_t		curr_xfer_reqp;
	uint_t			pipe_state = 0;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
	    "ehci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x",
	    ph, tw, completion_reason);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Set the pipe state as per completion reason */
	switch (completion_reason) {
	case USB_CR_OK:
		pipe_state = pp->pp_state;
		break;
	case USB_CR_NO_RESOURCES:
	case USB_CR_NOT_SUPPORTED:
	case USB_CR_PIPE_RESET:
	case USB_CR_STOPPED_POLLING:
		pipe_state = EHCI_PIPE_STATE_IDLE;
		break;
	case USB_CR_PIPE_CLOSING:
		/* pipe_state deliberately stays 0; the pipe is going away */
		break;
	default:
		/* Set the pipe state to error */
		pipe_state = EHCI_PIPE_STATE_ERROR;
		pp->pp_error = completion_reason;
		break;

	}

	pp->pp_state = pipe_state;

	/*
	 * Detach the request to hand upstream: prefer the TW's current
	 * request; otherwise consume the saved client periodic IN request.
	 */
	if (tw && tw->tw_curr_xfer_reqp) {
		curr_xfer_reqp = tw->tw_curr_xfer_reqp;
		tw->tw_curr_xfer_reqp = NULL;
	} else {
		ASSERT(pp->pp_client_periodic_in_reqp != NULL);

		curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
		pp->pp_client_periodic_in_reqp = NULL;
	}

	ASSERT(curr_xfer_reqp != NULL);

	/*
	 * Drop the interrupt mutex across the USBA callback -- the
	 * client may re-enter this driver from its callback.
	 */
	mutex_exit(&ehcip->ehci_int_mutex);

	usba_hcdi_cb(ph, curr_xfer_reqp, completion_reason);

	mutex_enter(&ehcip->ehci_int_mutex);
}