1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 27 /* 28 * EHCI Host Controller Driver (EHCI) 29 * 30 * The EHCI driver is a software driver which interfaces to the Universal 31 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to 32 * the Host Controller is defined by the EHCI Host Controller Interface. 33 * 34 * This module contains the main EHCI driver code which handles all USB 35 * transfers, bandwidth allocations and other general functionalities. 36 */ 37 38 #include <sys/usb/hcd/ehci/ehcid.h> 39 #include <sys/usb/hcd/ehci/ehci_isoch.h> 40 #include <sys/usb/hcd/ehci/ehci_xfer.h> 41 42 /* 43 * EHCI MSI tunable: 44 * 45 * By default MSI is enabled on all supported platforms except for the 46 * EHCI controller of ULI1575 South bridge. 47 */ 48 boolean_t ehci_enable_msi = B_TRUE; 49 50 /* Pointer to the state structure */ 51 extern void *ehci_statep; 52 53 extern void ehci_handle_endpoint_reclaimation(ehci_state_t *); 54 55 extern uint_t ehci_vt62x2_workaround; 56 extern int force_ehci_off; 57 58 /* Adjustable variables for the size of the pools */ 59 int ehci_qh_pool_size = EHCI_QH_POOL_SIZE; 60 int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE; 61 62 /* 63 * Initialize the values which the order of 32ms intr qh are executed 64 * by the host controller in the lattice tree. 65 */ 66 static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] = 67 {0x00, 0x10, 0x08, 0x18, 68 0x04, 0x14, 0x0c, 0x1c, 69 0x02, 0x12, 0x0a, 0x1a, 70 0x06, 0x16, 0x0e, 0x1e, 71 0x01, 0x11, 0x09, 0x19, 72 0x05, 0x15, 0x0d, 0x1d, 73 0x03, 0x13, 0x0b, 0x1b, 74 0x07, 0x17, 0x0f, 0x1f}; 75 76 /* 77 * Initialize the values which are used to calculate start split mask 78 * for the low/full/high speed interrupt and isochronous endpoints. 79 */ 80 static uint_t ehci_start_split_mask[15] = { 81 /* 82 * For high/full/low speed usb devices. For high speed 83 * device with polling interval greater than or equal 84 * to 8us (125us). 85 */ 86 0x01, /* 00000001 */ 87 0x02, /* 00000010 */ 88 0x04, /* 00000100 */ 89 0x08, /* 00001000 */ 90 0x10, /* 00010000 */ 91 0x20, /* 00100000 */ 92 0x40, /* 01000000 */ 93 0x80, /* 10000000 */ 94 95 /* Only for high speed devices with polling interval 4us */ 96 0x11, /* 00010001 */ 97 0x22, /* 00100010 */ 98 0x44, /* 01000100 */ 99 0x88, /* 10001000 */ 100 101 /* Only for high speed devices with polling interval 2us */ 102 0x55, /* 01010101 */ 103 0xaa, /* 10101010 */ 104 105 /* Only for high speed devices with polling interval 1us */ 106 0xff /* 11111111 */ 107 }; 108 109 /* 110 * Initialize the values which are used to calculate complete split mask 111 * for the low/full speed interrupt and isochronous endpoints. 112 */ 113 static uint_t ehci_intr_complete_split_mask[7] = { 114 /* Only full/low speed devices */ 115 0x1c, /* 00011100 */ 116 0x38, /* 00111000 */ 117 0x70, /* 01110000 */ 118 0xe0, /* 11100000 */ 119 0x00, /* Need FSTN feature */ 120 0x00, /* Need FSTN feature */ 121 0x00 /* Need FSTN feature */ 122 }; 123 124 125 /* 126 * EHCI Internal Function Prototypes 127 */ 128 129 /* Host Controller Driver (HCD) initialization functions */ 130 void ehci_set_dma_attributes(ehci_state_t *ehcip); 131 int ehci_allocate_pools(ehci_state_t *ehcip); 132 void ehci_decode_ddi_dma_addr_bind_handle_result( 133 ehci_state_t *ehcip, 134 int result); 135 int ehci_map_regs(ehci_state_t *ehcip); 136 int ehci_register_intrs_and_init_mutex( 137 ehci_state_t *ehcip); 138 static int ehci_add_intrs(ehci_state_t *ehcip, 139 int intr_type); 140 int ehci_init_ctlr(ehci_state_t *ehcip, 141 int init_type); 142 static int ehci_take_control(ehci_state_t *ehcip); 143 static int ehci_init_periodic_frame_lst_table( 144 ehci_state_t *ehcip); 145 static void ehci_build_interrupt_lattice( 146 ehci_state_t *ehcip); 147 usba_hcdi_ops_t *ehci_alloc_hcdi_ops(ehci_state_t *ehcip); 148 149 /* Host Controller Driver (HCD) deinitialization functions */ 150 int ehci_cleanup(ehci_state_t *ehcip); 151 static void ehci_rem_intrs(ehci_state_t *ehcip); 152 int ehci_cpr_suspend(ehci_state_t *ehcip); 153 int ehci_cpr_resume(ehci_state_t *ehcip); 154 155 /* Bandwidth Allocation functions */ 156 int ehci_allocate_bandwidth(ehci_state_t *ehcip, 157 usba_pipe_handle_data_t *ph, 158 uint_t *pnode, 159 uchar_t *smask, 160 uchar_t *cmask); 161 static int ehci_allocate_high_speed_bandwidth( 162 ehci_state_t *ehcip, 163 usba_pipe_handle_data_t *ph, 164 uint_t *hnode, 165 uchar_t *smask, 166 uchar_t *cmask); 167 static int ehci_allocate_classic_tt_bandwidth( 168 ehci_state_t *ehcip, 169 usba_pipe_handle_data_t *ph, 170 uint_t pnode); 171 void ehci_deallocate_bandwidth(ehci_state_t *ehcip, 172 usba_pipe_handle_data_t *ph, 173 uint_t pnode, 174 uchar_t smask, 175 uchar_t cmask); 176 static void ehci_deallocate_high_speed_bandwidth( 177 ehci_state_t *ehcip, 178 usba_pipe_handle_data_t *ph, 179 uint_t hnode, 180 uchar_t smask, 181 uchar_t cmask); 182 static void ehci_deallocate_classic_tt_bandwidth( 183 ehci_state_t *ehcip, 184 usba_pipe_handle_data_t *ph, 185 uint_t pnode); 186 static int ehci_compute_high_speed_bandwidth( 187 ehci_state_t *ehcip, 188 usb_ep_descr_t *endpoint, 189 usb_port_status_t port_status, 190 uint_t *sbandwidth, 191 uint_t *cbandwidth); 192 static int ehci_compute_classic_bandwidth( 193 usb_ep_descr_t *endpoint, 194 usb_port_status_t port_status, 195 uint_t *bandwidth); 196 int ehci_adjust_polling_interval( 197 ehci_state_t *ehcip, 198 usb_ep_descr_t *endpoint, 199 usb_port_status_t port_status); 200 static int ehci_adjust_high_speed_polling_interval( 201 ehci_state_t *ehcip, 202 usb_ep_descr_t *endpoint); 203 static uint_t ehci_lattice_height(uint_t interval); 204 static uint_t ehci_lattice_parent(uint_t node); 205 static uint_t ehci_find_periodic_node( 206 uint_t leaf, 207 int interval); 208 static uint_t ehci_leftmost_leaf(uint_t node, 209 uint_t height); 210 static uint_t ehci_pow_2(uint_t x); 211 static uint_t ehci_log_2(uint_t x); 212 static int ehci_find_bestfit_hs_mask( 213 ehci_state_t *ehcip, 214 uchar_t *smask, 215 uint_t *pnode, 216 usb_ep_descr_t *endpoint, 217 uint_t bandwidth, 218 int interval); 219 static int ehci_find_bestfit_ls_intr_mask( 220 ehci_state_t *ehcip, 221 uchar_t *smask, 222 uchar_t *cmask, 223 uint_t *pnode, 224 uint_t sbandwidth, 225 uint_t cbandwidth, 226 int interval); 227 static int ehci_find_bestfit_sitd_in_mask( 228 ehci_state_t *ehcip, 229 uchar_t *smask, 230 uchar_t *cmask, 231 uint_t *pnode, 232 uint_t sbandwidth, 233 uint_t cbandwidth, 234 int interval); 235 static int ehci_find_bestfit_sitd_out_mask( 236 ehci_state_t *ehcip, 237 uchar_t *smask, 238 uint_t *pnode, 239 uint_t sbandwidth, 240 int interval); 241 static uint_t ehci_calculate_bw_availability_mask( 242 ehci_state_t *ehcip, 243 uint_t bandwidth, 244 int leaf, 245 int leaf_count, 246 uchar_t *bw_mask); 247 static void ehci_update_bw_availability( 248 ehci_state_t *ehcip, 249 int bandwidth, 250 int leftmost_leaf, 251 int leaf_count, 252 uchar_t mask); 253 254 /* Miscellaneous functions */ 255 ehci_state_t *ehci_obtain_state( 256 dev_info_t *dip); 257 int ehci_state_is_operational( 258 ehci_state_t *ehcip); 259 int ehci_do_soft_reset( 260 ehci_state_t *ehcip); 261 usb_req_attrs_t ehci_get_xfer_attrs(ehci_state_t *ehcip, 262 ehci_pipe_private_t *pp, 263 ehci_trans_wrapper_t *tw); 264 usb_frame_number_t ehci_get_current_frame_number( 265 ehci_state_t *ehcip); 266 static void ehci_cpr_cleanup( 267 ehci_state_t *ehcip); 268 int ehci_wait_for_sof( 269 ehci_state_t *ehcip); 270 void ehci_toggle_scheduler( 271 ehci_state_t *ehcip); 272 void ehci_print_caps(ehci_state_t *ehcip); 273 void ehci_print_regs(ehci_state_t *ehcip); 274 void ehci_print_qh(ehci_state_t *ehcip, 275 ehci_qh_t *qh); 276 void ehci_print_qtd(ehci_state_t *ehcip, 277 ehci_qtd_t *qtd); 278 void ehci_create_stats(ehci_state_t *ehcip); 279 void ehci_destroy_stats(ehci_state_t *ehcip); 280 void ehci_do_intrs_stats(ehci_state_t *ehcip, 281 int val); 282 void ehci_do_byte_stats(ehci_state_t *ehcip, 283 size_t len, 284 uint8_t attr, 285 uint8_t addr); 286 287 /* 288 * check if this ehci controller can support PM 289 */ 290 int 291 ehci_hcdi_pm_support(dev_info_t *dip) 292 { 293 ehci_state_t *ehcip = ddi_get_soft_state(ehci_statep, 294 ddi_get_instance(dip)); 295 296 if (((ehcip->ehci_vendor_id == PCI_VENDOR_NEC_COMBO) && 297 (ehcip->ehci_device_id == PCI_DEVICE_NEC_COMBO)) || 298 299 ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) && 300 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) || 301 302 (ehcip->ehci_vendor_id == PCI_VENDOR_VIA)) { 303 304 return (USB_SUCCESS); 305 } 306 307 return (USB_FAILURE); 308 } 309 310 void 311 ehci_dma_attr_workaround(ehci_state_t *ehcip) 312 { 313 /* 314 * Some Nvidia chips can not handle qh dma address above 2G. 315 * The bit 31 of the dma address might be omitted and it will 316 * cause system crash or other unpredicable result. So force 317 * the dma address allocated below 2G to make ehci work. 318 */ 319 if (PCI_VENDOR_NVIDIA == ehcip->ehci_vendor_id) { 320 switch (ehcip->ehci_device_id) { 321 case PCI_DEVICE_NVIDIA_CK804: 322 case PCI_DEVICE_NVIDIA_MCP04: 323 USB_DPRINTF_L2(PRINT_MASK_ATTA, 324 ehcip->ehci_log_hdl, 325 "ehci_dma_attr_workaround: NVIDIA dma " 326 "workaround enabled, force dma address " 327 "to be allocated below 2G"); 328 ehcip->ehci_dma_attr.dma_attr_addr_hi = 329 0x7fffffffull; 330 break; 331 default: 332 break; 333 334 } 335 } 336 } 337 338 /* 339 * Host Controller Driver (HCD) initialization functions 340 */ 341 342 /* 343 * ehci_set_dma_attributes: 344 * 345 * Set the limits in the DMA attributes structure. Most of the values used 346 * in the DMA limit structures are the default values as specified by the 347 * Writing PCI device drivers document. 348 */ 349 void 350 ehci_set_dma_attributes(ehci_state_t *ehcip) 351 { 352 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 353 "ehci_set_dma_attributes:"); 354 355 /* Initialize the DMA attributes */ 356 ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0; 357 ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull; 358 ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull; 359 360 /* 32 bit addressing */ 361 ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX; 362 363 /* Byte alignment */ 364 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 365 366 /* 367 * Since PCI specification is byte alignment, the 368 * burst size field should be set to 1 for PCI devices. 369 */ 370 ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1; 371 372 ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1; 373 ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER; 374 ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull; 375 ehcip->ehci_dma_attr.dma_attr_sgllen = 1; 376 ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR; 377 ehcip->ehci_dma_attr.dma_attr_flags = 0; 378 ehci_dma_attr_workaround(ehcip); 379 } 380 381 382 /* 383 * ehci_allocate_pools: 384 * 385 * Allocate the system memory for the Endpoint Descriptor (QH) and for the 386 * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned 387 * to a 16 byte boundary. 388 */ 389 int 390 ehci_allocate_pools(ehci_state_t *ehcip) 391 { 392 ddi_device_acc_attr_t dev_attr; 393 size_t real_length; 394 int result; 395 uint_t ccount; 396 int i; 397 398 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 399 "ehci_allocate_pools:"); 400 401 /* The host controller will be little endian */ 402 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 403 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 404 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 405 406 /* Byte alignment */ 407 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT; 408 409 /* Allocate the QTD pool DMA handle */ 410 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr, 411 DDI_DMA_SLEEP, 0, 412 &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) { 413 414 goto failure; 415 } 416 417 /* Allocate the memory for the QTD pool */ 418 if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle, 419 ehci_qtd_pool_size * sizeof (ehci_qtd_t), 420 &dev_attr, 421 DDI_DMA_CONSISTENT, 422 DDI_DMA_SLEEP, 423 0, 424 (caddr_t *)&ehcip->ehci_qtd_pool_addr, 425 &real_length, 426 &ehcip->ehci_qtd_pool_mem_handle)) { 427 428 goto failure; 429 } 430 431 /* Map the QTD pool into the I/O address space */ 432 result = ddi_dma_addr_bind_handle( 433 ehcip->ehci_qtd_pool_dma_handle, 434 NULL, 435 (caddr_t)ehcip->ehci_qtd_pool_addr, 436 real_length, 437 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 438 DDI_DMA_SLEEP, 439 NULL, 440 &ehcip->ehci_qtd_pool_cookie, 441 &ccount); 442 443 bzero((void *)ehcip->ehci_qtd_pool_addr, 444 ehci_qtd_pool_size * sizeof (ehci_qtd_t)); 445 446 /* Process the result */ 447 if (result == DDI_DMA_MAPPED) { 448 /* The cookie count should be 1 */ 449 if (ccount != 1) { 450 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 451 "ehci_allocate_pools: More than 1 cookie"); 452 453 goto failure; 454 } 455 } else { 456 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 457 "ehci_allocate_pools: Result = %d", result); 458 459 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result); 460 461 goto failure; 462 } 463 464 /* 465 * DMA addresses for QTD pools are bound 466 */ 467 ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND; 468 469 /* Initialize the QTD pool */ 470 for (i = 0; i < ehci_qtd_pool_size; i ++) { 471 Set_QTD(ehcip->ehci_qtd_pool_addr[i]. 472 qtd_state, EHCI_QTD_FREE); 473 } 474 475 /* Allocate the QTD pool DMA handle */ 476 if (ddi_dma_alloc_handle(ehcip->ehci_dip, 477 &ehcip->ehci_dma_attr, 478 DDI_DMA_SLEEP, 479 0, 480 &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) { 481 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 482 "ehci_allocate_pools: ddi_dma_alloc_handle failed"); 483 484 goto failure; 485 } 486 487 /* Allocate the memory for the QH pool */ 488 if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle, 489 ehci_qh_pool_size * sizeof (ehci_qh_t), 490 &dev_attr, 491 DDI_DMA_CONSISTENT, 492 DDI_DMA_SLEEP, 493 0, 494 (caddr_t *)&ehcip->ehci_qh_pool_addr, 495 &real_length, 496 &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) { 497 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 498 "ehci_allocate_pools: ddi_dma_mem_alloc failed"); 499 500 goto failure; 501 } 502 503 result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle, 504 NULL, 505 (caddr_t)ehcip->ehci_qh_pool_addr, 506 real_length, 507 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 508 DDI_DMA_SLEEP, 509 NULL, 510 &ehcip->ehci_qh_pool_cookie, 511 &ccount); 512 513 bzero((void *)ehcip->ehci_qh_pool_addr, 514 ehci_qh_pool_size * sizeof (ehci_qh_t)); 515 516 /* Process the result */ 517 if (result == DDI_DMA_MAPPED) { 518 /* The cookie count should be 1 */ 519 if (ccount != 1) { 520 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 521 "ehci_allocate_pools: More than 1 cookie"); 522 523 goto failure; 524 } 525 } else { 526 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result); 527 528 goto failure; 529 } 530 531 /* 532 * DMA addresses for QH pools are bound 533 */ 534 ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND; 535 536 /* Initialize the QH pool */ 537 for (i = 0; i < ehci_qh_pool_size; i ++) { 538 Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE); 539 } 540 541 /* Byte alignment */ 542 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 543 544 return (DDI_SUCCESS); 545 546 failure: 547 /* Byte alignment */ 548 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 549 550 return (DDI_FAILURE); 551 } 552 553 554 /* 555 * ehci_decode_ddi_dma_addr_bind_handle_result: 556 * 557 * Process the return values of ddi_dma_addr_bind_handle() 558 */ 559 void 560 ehci_decode_ddi_dma_addr_bind_handle_result( 561 ehci_state_t *ehcip, 562 int result) 563 { 564 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl, 565 "ehci_decode_ddi_dma_addr_bind_handle_result:"); 566 567 switch (result) { 568 case DDI_DMA_PARTIAL_MAP: 569 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 570 "Partial transfers not allowed"); 571 break; 572 case DDI_DMA_INUSE: 573 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 574 "Handle is in use"); 575 break; 576 case DDI_DMA_NORESOURCES: 577 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 578 "No resources"); 579 break; 580 case DDI_DMA_NOMAPPING: 581 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 582 "No mapping"); 583 break; 584 case DDI_DMA_TOOBIG: 585 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 586 "Object is too big"); 587 break; 588 default: 589 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 590 "Unknown dma error"); 591 } 592 } 593 594 595 /* 596 * ehci_map_regs: 597 * 598 * The Host Controller (HC) contains a set of on-chip operational registers 599 * and which should be mapped into a non-cacheable portion of the system 600 * addressable space. 601 */ 602 int 603 ehci_map_regs(ehci_state_t *ehcip) 604 { 605 ddi_device_acc_attr_t attr; 606 uint16_t cmd_reg; 607 uint_t length; 608 609 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:"); 610 611 /* Check to make sure we have memory access */ 612 if (pci_config_setup(ehcip->ehci_dip, 613 &ehcip->ehci_config_handle) != DDI_SUCCESS) { 614 615 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 616 "ehci_map_regs: Config error"); 617 618 return (DDI_FAILURE); 619 } 620 621 /* Make sure Memory Access Enable is set */ 622 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM); 623 624 if (!(cmd_reg & PCI_COMM_MAE)) { 625 626 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 627 "ehci_map_regs: Memory base address access disabled"); 628 629 return (DDI_FAILURE); 630 } 631 632 /* The host controller will be little endian */ 633 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 634 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 635 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 636 637 /* Map in EHCI Capability registers */ 638 if (ddi_regs_map_setup(ehcip->ehci_dip, 1, 639 (caddr_t *)&ehcip->ehci_capsp, 0, 640 sizeof (ehci_caps_t), &attr, 641 &ehcip->ehci_caps_handle) != DDI_SUCCESS) { 642 643 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 644 "ehci_map_regs: Map setup error"); 645 646 return (DDI_FAILURE); 647 } 648 649 length = ddi_get8(ehcip->ehci_caps_handle, 650 (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length); 651 652 /* Free the original mapping */ 653 ddi_regs_map_free(&ehcip->ehci_caps_handle); 654 655 /* Re-map in EHCI Capability and Operational registers */ 656 if (ddi_regs_map_setup(ehcip->ehci_dip, 1, 657 (caddr_t *)&ehcip->ehci_capsp, 0, 658 length + sizeof (ehci_regs_t), &attr, 659 &ehcip->ehci_caps_handle) != DDI_SUCCESS) { 660 661 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 662 "ehci_map_regs: Map setup error"); 663 664 return (DDI_FAILURE); 665 } 666 667 /* Get the pointer to EHCI Operational Register */ 668 ehcip->ehci_regsp = (ehci_regs_t *) 669 ((uintptr_t)ehcip->ehci_capsp + length); 670 671 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 672 "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n", 673 (void *)ehcip->ehci_capsp, (void *)ehcip->ehci_regsp); 674 675 return (DDI_SUCCESS); 676 } 677 678 /* 679 * The following simulated polling is for debugging purposes only. 680 * It is activated on x86 by setting usb-polling=true in GRUB or ehci.conf. 681 */ 682 static int 683 ehci_is_polled(dev_info_t *dip) 684 { 685 int ret; 686 char *propval; 687 688 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 689 "usb-polling", &propval) != DDI_SUCCESS) 690 691 return (0); 692 693 ret = (strcmp(propval, "true") == 0); 694 ddi_prop_free(propval); 695 696 return (ret); 697 } 698 699 static void 700 ehci_poll_intr(void *arg) 701 { 702 /* poll every msec */ 703 for (;;) { 704 (void) ehci_intr(arg, NULL); 705 delay(drv_usectohz(1000)); 706 } 707 } 708 709 /* 710 * ehci_register_intrs_and_init_mutex: 711 * 712 * Register interrupts and initialize each mutex and condition variables 713 */ 714 int 715 ehci_register_intrs_and_init_mutex(ehci_state_t *ehcip) 716 { 717 int intr_types; 718 719 #if defined(__x86) 720 uint8_t iline; 721 #endif 722 723 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 724 "ehci_register_intrs_and_init_mutex:"); 725 726 /* 727 * There is a known MSI hardware bug with the EHCI controller 728 * of ULI1575 southbridge. Hence MSI is disabled for this chip. 729 */ 730 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) && 731 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) { 732 ehcip->ehci_msi_enabled = B_FALSE; 733 } else { 734 /* Set the MSI enable flag from the global EHCI MSI tunable */ 735 ehcip->ehci_msi_enabled = ehci_enable_msi; 736 } 737 738 /* launch polling thread instead of enabling pci interrupt */ 739 if (ehci_is_polled(ehcip->ehci_dip)) { 740 extern pri_t maxclsyspri; 741 742 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 743 "ehci_register_intrs_and_init_mutex: " 744 "running in simulated polled mode"); 745 746 (void) thread_create(NULL, 0, ehci_poll_intr, ehcip, 0, &p0, 747 TS_RUN, maxclsyspri); 748 749 goto skip_intr; 750 } 751 752 #if defined(__x86) 753 /* 754 * Make sure that the interrupt pin is connected to the 755 * interrupt controller on x86. Interrupt line 255 means 756 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43). 757 * If we would return failure when interrupt line equals 255, then 758 * high speed devices will be routed to companion host controllers. 759 * However, it is not necessary to return failure here, and 760 * o/uhci codes don't check the interrupt line either. 761 * But it's good to log a message here for debug purposes. 762 */ 763 iline = pci_config_get8(ehcip->ehci_config_handle, 764 PCI_CONF_ILINE); 765 766 if (iline == 255) { 767 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 768 "ehci_register_intrs_and_init_mutex: " 769 "interrupt line value out of range (%d)", 770 iline); 771 } 772 #endif /* __x86 */ 773 774 /* Get supported interrupt types */ 775 if (ddi_intr_get_supported_types(ehcip->ehci_dip, 776 &intr_types) != DDI_SUCCESS) { 777 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 778 "ehci_register_intrs_and_init_mutex: " 779 "ddi_intr_get_supported_types failed"); 780 781 return (DDI_FAILURE); 782 } 783 784 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 785 "ehci_register_intrs_and_init_mutex: " 786 "supported interrupt types 0x%x", intr_types); 787 788 if ((intr_types & DDI_INTR_TYPE_MSI) && ehcip->ehci_msi_enabled) { 789 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_MSI) 790 != DDI_SUCCESS) { 791 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 792 "ehci_register_intrs_and_init_mutex: MSI " 793 "registration failed, trying FIXED interrupt \n"); 794 } else { 795 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 796 "ehci_register_intrs_and_init_mutex: " 797 "Using MSI interrupt type\n"); 798 799 ehcip->ehci_intr_type = DDI_INTR_TYPE_MSI; 800 ehcip->ehci_flags |= EHCI_INTR; 801 } 802 } 803 804 if ((!(ehcip->ehci_flags & EHCI_INTR)) && 805 (intr_types & DDI_INTR_TYPE_FIXED)) { 806 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_FIXED) 807 != DDI_SUCCESS) { 808 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 809 "ehci_register_intrs_and_init_mutex: " 810 "FIXED interrupt registration failed\n"); 811 812 return (DDI_FAILURE); 813 } 814 815 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 816 "ehci_register_intrs_and_init_mutex: " 817 "Using FIXED interrupt type\n"); 818 819 ehcip->ehci_intr_type = DDI_INTR_TYPE_FIXED; 820 ehcip->ehci_flags |= EHCI_INTR; 821 } 822 823 skip_intr: 824 /* Create prototype for advance on async schedule */ 825 cv_init(&ehcip->ehci_async_schedule_advance_cv, 826 NULL, CV_DRIVER, NULL); 827 828 return (DDI_SUCCESS); 829 } 830 831 832 /* 833 * ehci_add_intrs: 834 * 835 * Register FIXED or MSI interrupts. 836 */ 837 static int 838 ehci_add_intrs(ehci_state_t *ehcip, 839 int intr_type) 840 { 841 int actual, avail, intr_size, count = 0; 842 int i, flag, ret; 843 844 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 845 "ehci_add_intrs: interrupt type 0x%x", intr_type); 846 847 /* Get number of interrupts */ 848 ret = ddi_intr_get_nintrs(ehcip->ehci_dip, intr_type, &count); 849 if ((ret != DDI_SUCCESS) || (count == 0)) { 850 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 851 "ehci_add_intrs: ddi_intr_get_nintrs() failure, " 852 "ret: %d, count: %d", ret, count); 853 854 return (DDI_FAILURE); 855 } 856 857 /* Get number of available interrupts */ 858 ret = ddi_intr_get_navail(ehcip->ehci_dip, intr_type, &avail); 859 if ((ret != DDI_SUCCESS) || (avail == 0)) { 860 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 861 "ehci_add_intrs: ddi_intr_get_navail() failure, " 862 "ret: %d, count: %d", ret, count); 863 864 return (DDI_FAILURE); 865 } 866 867 if (avail < count) { 868 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 869 "ehci_add_intrs: ehci_add_intrs: nintrs () " 870 "returned %d, navail returned %d\n", count, avail); 871 } 872 873 /* Allocate an array of interrupt handles */ 874 intr_size = count * sizeof (ddi_intr_handle_t); 875 ehcip->ehci_htable = kmem_zalloc(intr_size, KM_SLEEP); 876 877 flag = (intr_type == DDI_INTR_TYPE_MSI) ? 878 DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL; 879 880 /* call ddi_intr_alloc() */ 881 ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable, 882 intr_type, 0, count, &actual, flag); 883 884 if ((ret != DDI_SUCCESS) || (actual == 0)) { 885 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 886 "ehci_add_intrs: ddi_intr_alloc() failed %d", ret); 887 888 kmem_free(ehcip->ehci_htable, intr_size); 889 890 return (DDI_FAILURE); 891 } 892 893 if (actual < count) { 894 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 895 "ehci_add_intrs: Requested: %d, Received: %d\n", 896 count, actual); 897 898 for (i = 0; i < actual; i++) 899 (void) ddi_intr_free(ehcip->ehci_htable[i]); 900 901 kmem_free(ehcip->ehci_htable, intr_size); 902 903 return (DDI_FAILURE); 904 } 905 906 ehcip->ehci_intr_cnt = actual; 907 908 if ((ret = ddi_intr_get_pri(ehcip->ehci_htable[0], 909 &ehcip->ehci_intr_pri)) != DDI_SUCCESS) { 910 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 911 "ehci_add_intrs: ddi_intr_get_pri() failed %d", ret); 912 913 for (i = 0; i < actual; i++) 914 (void) ddi_intr_free(ehcip->ehci_htable[i]); 915 916 kmem_free(ehcip->ehci_htable, intr_size); 917 918 return (DDI_FAILURE); 919 } 920 921 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 922 "ehci_add_intrs: Supported Interrupt priority 0x%x", 923 ehcip->ehci_intr_pri); 924 925 /* Test for high level mutex */ 926 if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) { 927 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 928 "ehci_add_intrs: Hi level interrupt not supported"); 929 930 for (i = 0; i < actual; i++) 931 (void) ddi_intr_free(ehcip->ehci_htable[i]); 932 933 kmem_free(ehcip->ehci_htable, intr_size); 934 935 return (DDI_FAILURE); 936 } 937 938 /* Initialize the mutex */ 939 mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER, 940 DDI_INTR_PRI(ehcip->ehci_intr_pri)); 941 942 /* Call ddi_intr_add_handler() */ 943 for (i = 0; i < actual; i++) { 944 if ((ret = ddi_intr_add_handler(ehcip->ehci_htable[i], 945 ehci_intr, (caddr_t)ehcip, 946 (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 947 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 948 "ehci_add_intrs:ddi_intr_add_handler() " 949 "failed %d", ret); 950 951 for (i = 0; i < actual; i++) 952 (void) ddi_intr_free(ehcip->ehci_htable[i]); 953 954 mutex_destroy(&ehcip->ehci_int_mutex); 955 kmem_free(ehcip->ehci_htable, intr_size); 956 957 return (DDI_FAILURE); 958 } 959 } 960 961 if ((ret = ddi_intr_get_cap(ehcip->ehci_htable[0], 962 &ehcip->ehci_intr_cap)) != DDI_SUCCESS) { 963 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 964 "ehci_add_intrs: ddi_intr_get_cap() failed %d", ret); 965 966 for (i = 0; i < actual; i++) { 967 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]); 968 (void) ddi_intr_free(ehcip->ehci_htable[i]); 969 } 970 971 mutex_destroy(&ehcip->ehci_int_mutex); 972 kmem_free(ehcip->ehci_htable, intr_size); 973 974 return (DDI_FAILURE); 975 } 976 977 /* Enable all interrupts */ 978 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) { 979 /* Call ddi_intr_block_enable() for MSI interrupts */ 980 (void) ddi_intr_block_enable(ehcip->ehci_htable, 981 ehcip->ehci_intr_cnt); 982 } else { 983 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 984 for (i = 0; i < ehcip->ehci_intr_cnt; i++) 985 (void) ddi_intr_enable(ehcip->ehci_htable[i]); 986 } 987 988 return (DDI_SUCCESS); 989 } 990 991 992 /* 993 * ehci_init_hardware 994 * 995 * take control from BIOS, reset EHCI host controller, and check version, etc. 996 */ 997 int 998 ehci_init_hardware(ehci_state_t *ehcip) 999 { 1000 int revision; 1001 uint16_t cmd_reg; 1002 int abort_on_BIOS_take_over_failure; 1003 1004 /* Take control from the BIOS */ 1005 if (ehci_take_control(ehcip) != USB_SUCCESS) { 1006 1007 /* read .conf file properties */ 1008 abort_on_BIOS_take_over_failure = 1009 ddi_prop_get_int(DDI_DEV_T_ANY, 1010 ehcip->ehci_dip, DDI_PROP_DONTPASS, 1011 "abort-on-BIOS-take-over-failure", 0); 1012 1013 if (abort_on_BIOS_take_over_failure) { 1014 1015 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1016 "Unable to take control from BIOS."); 1017 1018 return (DDI_FAILURE); 1019 } 1020 1021 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1022 "Unable to take control from BIOS. Failure is ignored."); 1023 } 1024 1025 /* set Memory Master Enable */ 1026 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM); 1027 cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME); 1028 pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg); 1029 1030 /* Reset the EHCI host controller */ 1031 Set_OpReg(ehci_command, 1032 Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET); 1033 1034 /* Wait 10ms for reset to complete */ 1035 drv_usecwait(EHCI_RESET_TIMEWAIT); 1036 1037 ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED); 1038 1039 /* Verify the version number */ 1040 revision = Get_16Cap(ehci_version); 1041 1042 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1043 "ehci_init_hardware: Revision 0x%x", revision); 1044 1045 /* 1046 * EHCI driver supports EHCI host controllers compliant to 1047 * 0.95 and higher revisions of EHCI specifications. 1048 */ 1049 if (revision < EHCI_REVISION_0_95) { 1050 1051 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1052 "Revision 0x%x is not supported", revision); 1053 1054 return (DDI_FAILURE); 1055 } 1056 1057 if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) { 1058 1059 /* Initialize the Frame list base address area */ 1060 if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) { 1061 1062 return (DDI_FAILURE); 1063 } 1064 1065 /* 1066 * For performance reasons, do not insert anything into the 1067 * asynchronous list or activate the asynch list schedule until 1068 * there is a valid QH. 1069 */ 1070 ehcip->ehci_head_of_async_sched_list = NULL; 1071 1072 if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) && 1073 (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) { 1074 /* 1075 * The driver is unable to reliably stop the asynch 1076 * list schedule on VIA VT6202 controllers, so we 1077 * always keep a dummy QH on the list. 1078 */ 1079 ehci_qh_t *dummy_async_qh = 1080 ehci_alloc_qh(ehcip, NULL, NULL); 1081 1082 Set_QH(dummy_async_qh->qh_link_ptr, 1083 ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) & 1084 EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH)); 1085 1086 /* Set this QH to be the "head" of the circular list */ 1087 Set_QH(dummy_async_qh->qh_ctrl, 1088 Get_QH(dummy_async_qh->qh_ctrl) | 1089 EHCI_QH_CTRL_RECLAIM_HEAD); 1090 1091 Set_QH(dummy_async_qh->qh_next_qtd, 1092 EHCI_QH_NEXT_QTD_PTR_VALID); 1093 Set_QH(dummy_async_qh->qh_alt_next_qtd, 1094 EHCI_QH_ALT_NEXT_QTD_PTR_VALID); 1095 1096 ehcip->ehci_head_of_async_sched_list = dummy_async_qh; 1097 ehcip->ehci_open_async_count++; 1098 } 1099 } 1100 1101 return (DDI_SUCCESS); 1102 } 1103 1104 1105 /* 1106 * ehci_init_workaround 1107 * 1108 * some workarounds during initializing ehci 1109 */ 1110 int 1111 ehci_init_workaround(ehci_state_t *ehcip) 1112 { 1113 /* 1114 * Acer Labs Inc. M5273 EHCI controller does not send 1115 * interrupts unless the Root hub ports are routed to the EHCI 1116 * host controller; so route the ports now, before we test for 1117 * the presence of SOFs interrupts. 1118 */ 1119 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) { 1120 /* Route all Root hub ports to EHCI host controller */ 1121 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI); 1122 } 1123 1124 /* 1125 * VIA chips have some issues and may not work reliably. 1126 * Revisions >= 0x80 are part of a southbridge and appear 1127 * to be reliable with the workaround. 1128 * For revisions < 0x80, if we were bound using class 1129 * complain, else proceed. This will allow the user to 1130 * bind ehci specifically to this chip and not have the 1131 * warnings 1132 */ 1133 if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) { 1134 1135 if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) { 1136 1137 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1138 "ehci_init_workaround: Applying VIA workarounds " 1139 "for the 6212 chip."); 1140 1141 } else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name, 1142 "pciclass,0c0320") == 0) { 1143 1144 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1145 "Due to recently discovered incompatibilities"); 1146 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1147 "with this USB controller, USB2.x transfer"); 1148 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1149 "support has been disabled. This device will"); 1150 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1151 "continue to function as a USB1.x controller."); 1152 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1153 "If you are interested in enabling USB2.x"); 1154 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1155 "support please, refer to the ehci(7D) man page."); 1156 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1157 "Please also refer to www.sun.com/io for"); 1158 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1159 "Solaris Ready products and to"); 1160 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1161 "www.sun.com/bigadmin/hcl for additional"); 1162 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1163 "compatible USB products."); 1164 1165 return (DDI_FAILURE); 1166 1167 } else if (ehci_vt62x2_workaround) { 1168 1169 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1170 "Applying VIA workarounds"); 1171 } 1172 } 1173 1174 return (DDI_SUCCESS); 1175 } 1176 1177 1178 /* 1179 * ehci_init_check_status 1180 * 1181 * Check if EHCI host controller is running 1182 */ 1183 int 1184 ehci_init_check_status(ehci_state_t *ehcip) 1185 { 1186 clock_t sof_time_wait; 1187 1188 /* 1189 * Get the number of clock ticks to wait. 1190 * This is based on the maximum time it takes for a frame list rollover 1191 * and maximum time wait for SOFs to begin. 1192 */ 1193 sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) + 1194 EHCI_SOF_TIMEWAIT); 1195 1196 /* Tell the ISR to broadcast ehci_async_schedule_advance_cv */ 1197 ehcip->ehci_flags |= EHCI_CV_INTR; 1198 1199 /* We need to add a delay to allow the chip time to start running */ 1200 (void) cv_timedwait(&ehcip->ehci_async_schedule_advance_cv, 1201 &ehcip->ehci_int_mutex, ddi_get_lbolt() + sof_time_wait); 1202 1203 /* 1204 * Check EHCI host controller is running, otherwise return failure. 1205 */ 1206 if ((ehcip->ehci_flags & EHCI_CV_INTR) || 1207 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) { 1208 1209 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1210 "No SOF interrupts have been received, this USB EHCI host" 1211 "controller is unusable"); 1212 1213 /* 1214 * Route all Root hub ports to Classic host 1215 * controller, in case this is an unusable ALI M5273 1216 * EHCI controller. 1217 */ 1218 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) { 1219 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC); 1220 } 1221 1222 return (DDI_FAILURE); 1223 } 1224 1225 return (DDI_SUCCESS); 1226 } 1227 1228 1229 /* 1230 * ehci_init_ctlr: 1231 * 1232 * Initialize the Host Controller (HC). 1233 */ 1234 int 1235 ehci_init_ctlr(ehci_state_t *ehcip, 1236 int init_type) 1237 { 1238 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:"); 1239 1240 if (init_type == EHCI_NORMAL_INITIALIZATION) { 1241 1242 if (ehci_init_hardware(ehcip) != DDI_SUCCESS) { 1243 1244 return (DDI_FAILURE); 1245 } 1246 } 1247 1248 /* 1249 * Check for Asynchronous schedule park capability feature. If this 1250 * feature is supported, then, program ehci command register with 1251 * appropriate values.. 1252 */ 1253 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) { 1254 1255 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1256 "ehci_init_ctlr: Async park mode is supported"); 1257 1258 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) | 1259 (EHCI_CMD_ASYNC_PARK_ENABLE | 1260 EHCI_CMD_ASYNC_PARK_COUNT_3))); 1261 } 1262 1263 /* 1264 * Check for programmable periodic frame list feature. If this 1265 * feature is supported, then, program ehci command register with 1266 * 1024 frame list value. 1267 */ 1268 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) { 1269 1270 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1271 "ehci_init_ctlr: Variable programmable periodic " 1272 "frame list is supported"); 1273 1274 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) | 1275 EHCI_CMD_FRAME_1024_SIZE)); 1276 } 1277 1278 /* 1279 * Currently EHCI driver doesn't support 64 bit addressing. 1280 * 1281 * If we are using 64 bit addressing capability, then, program 1282 * ehci_ctrl_segment register with 4 Gigabyte segment where all 1283 * of the interface data structures are allocated. 1284 */ 1285 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) { 1286 1287 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1288 "ehci_init_ctlr: EHCI driver doesn't support " 1289 "64 bit addressing"); 1290 } 1291 1292 /* 64 bit addressing is not support */ 1293 Set_OpReg(ehci_ctrl_segment, 0x00000000); 1294 1295 /* Turn on/off the schedulers */ 1296 ehci_toggle_scheduler(ehcip); 1297 1298 /* Set host controller soft state to operational */ 1299 ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE; 1300 1301 /* 1302 * Set the Periodic Frame List Base Address register with the 1303 * starting physical address of the Periodic Frame List. 1304 */ 1305 Set_OpReg(ehci_periodic_list_base, 1306 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address & 1307 EHCI_PERIODIC_LIST_BASE)); 1308 1309 /* 1310 * Set ehci_interrupt to enable all interrupts except Root 1311 * Hub Status change interrupt. 1312 */ 1313 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR | 1314 EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR | 1315 EHCI_INTR_USB); 1316 1317 /* 1318 * Set the desired interrupt threshold and turn on EHCI host controller. 1319 */ 1320 Set_OpReg(ehci_command, 1321 ((Get_OpReg(ehci_command) & ~EHCI_CMD_INTR_THRESHOLD) | 1322 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN))); 1323 1324 ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN); 1325 1326 if (init_type == EHCI_NORMAL_INITIALIZATION) { 1327 1328 if (ehci_init_workaround(ehcip) != DDI_SUCCESS) { 1329 1330 /* Set host controller soft state to error */ 1331 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE; 1332 1333 return (DDI_FAILURE); 1334 } 1335 1336 if (ehci_init_check_status(ehcip) != DDI_SUCCESS) { 1337 1338 /* Set host controller soft state to error */ 1339 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE; 1340 1341 return (DDI_FAILURE); 1342 } 1343 1344 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1345 "ehci_init_ctlr: SOF's have started"); 1346 } 1347 1348 /* Route all Root hub ports to EHCI host controller */ 1349 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI); 1350 1351 return (DDI_SUCCESS); 1352 } 1353 1354 /* 1355 * ehci_take_control: 1356 * 1357 * Handshake to take EHCI control from BIOS if necessary. Its only valid for 1358 * x86 machines, because sparc doesn't have a BIOS. 1359 * On x86 machine, the take control process includes 1360 * o get the base address of the extended capability list 1361 * o find out the capability for handoff synchronization in the list. 1362 * o check if BIOS has owned the host controller. 1363 * o set the OS Owned semaphore bit, ask the BIOS to release the ownership. 1364 * o wait for a constant time and check if BIOS has relinquished control. 1365 */ 1366 /* ARGSUSED */ 1367 static int 1368 ehci_take_control(ehci_state_t *ehcip) 1369 { 1370 #if defined(__x86) 1371 uint32_t extended_cap; 1372 uint32_t extended_cap_offset; 1373 uint32_t extended_cap_id; 1374 uint_t retry; 1375 1376 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1377 "ehci_take_control:"); 1378 1379 /* 1380 * According EHCI Spec 2.2.4, get EECP base address from HCCPARAMS 1381 * register. 1382 */ 1383 extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >> 1384 EHCI_HCC_EECP_SHIFT; 1385 1386 /* 1387 * According EHCI Spec 2.2.4, if the extended capability offset is 1388 * less than 40h then its not valid. This means we don't need to 1389 * worry about BIOS handoff. 1390 */ 1391 if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) { 1392 1393 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1394 "ehci_take_control: Hardware doesn't support legacy."); 1395 1396 goto success; 1397 } 1398 1399 /* 1400 * According EHCI Spec 2.1.7, A zero offset indicates the 1401 * end of the extended capability list. 1402 */ 1403 while (extended_cap_offset) { 1404 1405 /* Get the extended capability value. */ 1406 extended_cap = pci_config_get32(ehcip->ehci_config_handle, 1407 extended_cap_offset); 1408 1409 /* Get the capability ID */ 1410 extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >> 1411 EHCI_EX_CAP_ID_SHIFT; 1412 1413 /* Check if the card support legacy */ 1414 if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) { 1415 break; 1416 } 1417 1418 /* Get the offset of the next capability */ 1419 extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >> 1420 EHCI_EX_CAP_NEXT_PTR_SHIFT; 1421 } 1422 1423 /* 1424 * Unable to find legacy support in hardware's extended capability list. 1425 * This means we don't need to worry about BIOS handoff. 1426 */ 1427 if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) { 1428 1429 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1430 "ehci_take_control: Hardware doesn't support legacy"); 1431 1432 goto success; 1433 } 1434 1435 /* Check if BIOS has owned it. */ 1436 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) { 1437 1438 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1439 "ehci_take_control: BIOS does not own EHCI"); 1440 1441 goto success; 1442 } 1443 1444 /* 1445 * According EHCI Spec 5.1, The OS driver initiates an ownership 1446 * request by setting the OS Owned semaphore to a one. The OS 1447 * waits for the BIOS Owned bit to go to a zero before attempting 1448 * to use the EHCI controller. The time that OS must wait for BIOS 1449 * to respond to the request for ownership is beyond the scope of 1450 * this specification. 1451 * It waits up to EHCI_TAKEOVER_WAIT_COUNT*EHCI_TAKEOVER_DELAY ms 1452 * for BIOS to release the ownership. 1453 */ 1454 extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM; 1455 pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset, 1456 extended_cap); 1457 1458 for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) { 1459 1460 /* wait a special interval */ 1461 #ifndef __lock_lint 1462 delay(drv_usectohz(EHCI_TAKEOVER_DELAY)); 1463 #endif 1464 /* Check to see if the BIOS has released the ownership */ 1465 extended_cap = pci_config_get32( 1466 ehcip->ehci_config_handle, extended_cap_offset); 1467 1468 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) { 1469 1470 USB_DPRINTF_L3(PRINT_MASK_ATTA, 1471 ehcip->ehci_log_hdl, 1472 "ehci_take_control: BIOS has released " 1473 "the ownership. retry = %d", retry); 1474 1475 goto success; 1476 } 1477 1478 } 1479 1480 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1481 "ehci_take_control: take control from BIOS failed."); 1482 1483 return (USB_FAILURE); 1484 1485 success: 1486 1487 #endif /* __x86 */ 1488 return (USB_SUCCESS); 1489 } 1490 1491 1492 /* 1493 * ehci_init_periodic_frame_list_table : 1494 * 1495 * Allocate the system memory and initialize Host Controller 1496 * Periodic Frame List table area. The starting of the Periodic 1497 * Frame List Table area must be 4096 byte aligned. 1498 */ 1499 static int 1500 ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip) 1501 { 1502 ddi_device_acc_attr_t dev_attr; 1503 size_t real_length; 1504 uint_t ccount; 1505 int result; 1506 1507 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 1508 1509 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1510 "ehci_init_periodic_frame_lst_table:"); 1511 1512 /* The host controller will be little endian */ 1513 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 1514 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 1515 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1516 1517 /* Force the required 4K restrictive alignment */ 1518 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT; 1519 1520 /* Create space for the Periodic Frame List */ 1521 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr, 1522 DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) { 1523 1524 goto failure; 1525 } 1526 1527 if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle, 1528 sizeof (ehci_periodic_frame_list_t), 1529 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 1530 0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep, 1531 &real_length, &ehcip->ehci_pflt_mem_handle)) { 1532 1533 goto failure; 1534 } 1535 1536 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1537 "ehci_init_periodic_frame_lst_table: " 1538 "Real length %lu", real_length); 1539 1540 /* Map the whole Periodic Frame List into the I/O address space */ 1541 result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle, 1542 NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep, 1543 real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1544 DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount); 1545 1546 if (result == DDI_DMA_MAPPED) { 1547 /* The cookie count should be 1 */ 1548 if (ccount != 1) { 1549 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1550 "ehci_init_periodic_frame_lst_table: " 1551 "More than 1 cookie"); 1552 1553 goto failure; 1554 } 1555 } else { 1556 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result); 1557 1558 goto failure; 1559 } 1560 1561 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1562 "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x", 1563 (void *)ehcip->ehci_periodic_frame_list_tablep, 1564 ehcip->ehci_pflt_cookie.dmac_address); 1565 1566 /* 1567 * DMA addresses for Periodic Frame List are bound. 1568 */ 1569 ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND; 1570 1571 bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length); 1572 1573 /* Initialize the Periodic Frame List */ 1574 ehci_build_interrupt_lattice(ehcip); 1575 1576 /* Reset Byte Alignment to Default */ 1577 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 1578 1579 return (DDI_SUCCESS); 1580 failure: 1581 /* Byte alignment */ 1582 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 1583 1584 return (DDI_FAILURE); 1585 } 1586 1587 1588 /* 1589 * ehci_build_interrupt_lattice: 1590 * 1591 * Construct the interrupt lattice tree using static Endpoint Descriptors 1592 * (QH). This interrupt lattice tree will have total of 32 interrupt QH 1593 * lists and the Host Controller (HC) processes one interrupt QH list in 1594 * every frame. The Host Controller traverses the periodic schedule by 1595 * constructing an array offset reference from the Periodic List Base Address 1596 * register and bits 12 to 3 of Frame Index register. It fetches the element 1597 * and begins traversing the graph of linked schedule data structures. 1598 */ 1599 static void 1600 ehci_build_interrupt_lattice(ehci_state_t *ehcip) 1601 { 1602 ehci_qh_t *list_array = ehcip->ehci_qh_pool_addr; 1603 ushort_t ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS]; 1604 ehci_periodic_frame_list_t *periodic_frame_list = 1605 ehcip->ehci_periodic_frame_list_tablep; 1606 ushort_t *temp, num_of_nodes; 1607 uintptr_t addr; 1608 int i, j, k; 1609 1610 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1611 "ehci_build_interrupt_lattice:"); 1612 1613 /* 1614 * Reserve the first 63 Endpoint Descriptor (QH) structures 1615 * in the pool as static endpoints & these are required for 1616 * constructing interrupt lattice tree. 1617 */ 1618 for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) { 1619 Set_QH(list_array[i].qh_state, EHCI_QH_STATIC); 1620 Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED); 1621 Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID); 1622 Set_QH(list_array[i].qh_alt_next_qtd, 1623 EHCI_QH_ALT_NEXT_QTD_PTR_VALID); 1624 } 1625 1626 /* 1627 * Make sure that last Endpoint on the periodic frame list terminates 1628 * periodic schedule. 1629 */ 1630 Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID); 1631 1632 /* Build the interrupt lattice tree */ 1633 for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) { 1634 /* 1635 * The next pointer in the host controller endpoint 1636 * descriptor must contain an iommu address. Calculate 1637 * the offset into the cpu address and add this to the 1638 * starting iommu address. 1639 */ 1640 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]); 1641 1642 Set_QH(list_array[2*i + 1].qh_link_ptr, 1643 addr | EHCI_QH_LINK_REF_QH); 1644 Set_QH(list_array[2*i + 2].qh_link_ptr, 1645 addr | EHCI_QH_LINK_REF_QH); 1646 } 1647 1648 /* Build the tree bottom */ 1649 temp = (unsigned short *) 1650 kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP); 1651 1652 num_of_nodes = 1; 1653 1654 /* 1655 * Initialize the values which are used for setting up head pointers 1656 * for the 32ms scheduling lists which starts from the Periodic Frame 1657 * List. 1658 */ 1659 for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) { 1660 for (j = 0, k = 0; k < num_of_nodes; k++, j++) { 1661 ehci_index[j++] = temp[k]; 1662 ehci_index[j] = temp[k] + ehci_pow_2(i); 1663 } 1664 1665 num_of_nodes *= 2; 1666 for (k = 0; k < num_of_nodes; k++) 1667 temp[k] = ehci_index[k]; 1668 } 1669 1670 kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2)); 1671 1672 /* 1673 * Initialize the interrupt list in the Periodic Frame List Table 1674 * so that it points to the bottom of the tree. 1675 */ 1676 for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) { 1677 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *) 1678 (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1])); 1679 1680 ASSERT(addr); 1681 1682 for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) { 1683 Set_PFLT(periodic_frame_list-> 1684 ehci_periodic_frame_list_table[ehci_index[j++]], 1685 (uint32_t)(addr | EHCI_QH_LINK_REF_QH)); 1686 } 1687 } 1688 } 1689 1690 1691 /* 1692 * ehci_alloc_hcdi_ops: 1693 * 1694 * The HCDI interfaces or entry points are the software interfaces used by 1695 * the Universal Serial Bus Driver (USBA) to access the services of the 1696 * Host Controller Driver (HCD). During HCD initialization, inform USBA 1697 * about all available HCDI interfaces or entry points. 1698 */ 1699 usba_hcdi_ops_t * 1700 ehci_alloc_hcdi_ops(ehci_state_t *ehcip) 1701 { 1702 usba_hcdi_ops_t *usba_hcdi_ops; 1703 1704 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1705 "ehci_alloc_hcdi_ops:"); 1706 1707 usba_hcdi_ops = usba_alloc_hcdi_ops(); 1708 1709 usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION; 1710 1711 usba_hcdi_ops->usba_hcdi_pm_support = ehci_hcdi_pm_support; 1712 usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open; 1713 usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close; 1714 1715 usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset; 1716 usba_hcdi_ops->usba_hcdi_pipe_reset_data_toggle = 1717 ehci_hcdi_pipe_reset_data_toggle; 1718 1719 usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer; 1720 usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer; 1721 usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer; 1722 usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer; 1723 1724 usba_hcdi_ops->usba_hcdi_bulk_transfer_size = 1725 ehci_hcdi_bulk_transfer_size; 1726 1727 usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling = 1728 ehci_hcdi_pipe_stop_intr_polling; 1729 usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling = 1730 ehci_hcdi_pipe_stop_isoc_polling; 1731 1732 usba_hcdi_ops->usba_hcdi_get_current_frame_number = 1733 ehci_hcdi_get_current_frame_number; 1734 usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts = 1735 ehci_hcdi_get_max_isoc_pkts; 1736 1737 usba_hcdi_ops->usba_hcdi_console_input_init = 1738 ehci_hcdi_polled_input_init; 1739 usba_hcdi_ops->usba_hcdi_console_input_enter = 1740 ehci_hcdi_polled_input_enter; 1741 usba_hcdi_ops->usba_hcdi_console_read = 1742 ehci_hcdi_polled_read; 1743 usba_hcdi_ops->usba_hcdi_console_input_exit = 1744 ehci_hcdi_polled_input_exit; 1745 usba_hcdi_ops->usba_hcdi_console_input_fini = 1746 ehci_hcdi_polled_input_fini; 1747 1748 usba_hcdi_ops->usba_hcdi_console_output_init = 1749 ehci_hcdi_polled_output_init; 1750 usba_hcdi_ops->usba_hcdi_console_output_enter = 1751 ehci_hcdi_polled_output_enter; 1752 usba_hcdi_ops->usba_hcdi_console_write = 1753 ehci_hcdi_polled_write; 1754 usba_hcdi_ops->usba_hcdi_console_output_exit = 1755 ehci_hcdi_polled_output_exit; 1756 usba_hcdi_ops->usba_hcdi_console_output_fini = 1757 ehci_hcdi_polled_output_fini; 1758 return (usba_hcdi_ops); 1759 } 1760 1761 1762 /* 1763 * Host Controller Driver (HCD) deinitialization functions 1764 */ 1765 1766 /* 1767 * ehci_cleanup: 1768 * 1769 * Cleanup on attach failure or detach 1770 */ 1771 int 1772 ehci_cleanup(ehci_state_t *ehcip) 1773 { 1774 ehci_trans_wrapper_t *tw; 1775 ehci_pipe_private_t *pp; 1776 ehci_qtd_t *qtd; 1777 int i, ctrl, rval; 1778 int flags = ehcip->ehci_flags; 1779 1780 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:"); 1781 1782 if (flags & EHCI_RHREG) { 1783 /* Unload the root hub driver */ 1784 if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) { 1785 1786 return (DDI_FAILURE); 1787 } 1788 } 1789 1790 if (flags & EHCI_USBAREG) { 1791 /* Unregister this HCD instance with USBA */ 1792 usba_hcdi_unregister(ehcip->ehci_dip); 1793 } 1794 1795 if (flags & EHCI_INTR) { 1796 1797 mutex_enter(&ehcip->ehci_int_mutex); 1798 1799 /* Disable all EHCI QH list processing */ 1800 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) & 1801 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | 1802 EHCI_CMD_PERIODIC_SCHED_ENABLE))); 1803 1804 /* Disable all EHCI interrupts */ 1805 Set_OpReg(ehci_interrupt, 0); 1806 1807 /* wait for the next SOF */ 1808 (void) ehci_wait_for_sof(ehcip); 1809 1810 /* Route all Root hub ports to Classic host controller */ 1811 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC); 1812 1813 /* Stop the EHCI host controller */ 1814 Set_OpReg(ehci_command, 1815 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN); 1816 1817 mutex_exit(&ehcip->ehci_int_mutex); 1818 1819 /* Wait for sometime */ 1820 delay(drv_usectohz(EHCI_TIMEWAIT)); 1821 1822 ehci_rem_intrs(ehcip); 1823 } 1824 1825 /* Unmap the EHCI registers */ 1826 if (ehcip->ehci_caps_handle) { 1827 ddi_regs_map_free(&ehcip->ehci_caps_handle); 1828 } 1829 1830 if (ehcip->ehci_config_handle) { 1831 pci_config_teardown(&ehcip->ehci_config_handle); 1832 } 1833 1834 /* Free all the buffers */ 1835 if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) { 1836 for (i = 0; i < ehci_qtd_pool_size; i ++) { 1837 qtd = &ehcip->ehci_qtd_pool_addr[i]; 1838 ctrl = Get_QTD(ehcip-> 1839 ehci_qtd_pool_addr[i].qtd_state); 1840 1841 if ((ctrl != EHCI_QTD_FREE) && 1842 (ctrl != EHCI_QTD_DUMMY) && 1843 (qtd->qtd_trans_wrapper)) { 1844 1845 mutex_enter(&ehcip->ehci_int_mutex); 1846 1847 tw = (ehci_trans_wrapper_t *) 1848 EHCI_LOOKUP_ID((uint32_t) 1849 Get_QTD(qtd->qtd_trans_wrapper)); 1850 1851 /* Obtain the pipe private structure */ 1852 pp = tw->tw_pipe_private; 1853 1854 /* Stop the the transfer timer */ 1855 ehci_stop_xfer_timer(ehcip, tw, 1856 EHCI_REMOVE_XFER_ALWAYS); 1857 1858 ehci_deallocate_tw(ehcip, pp, tw); 1859 1860 mutex_exit(&ehcip->ehci_int_mutex); 1861 } 1862 } 1863 1864 /* 1865 * If EHCI_QTD_POOL_BOUND flag is set, then unbind 1866 * the handle for QTD pools. 1867 */ 1868 if ((ehcip->ehci_dma_addr_bind_flag & 1869 EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) { 1870 1871 rval = ddi_dma_unbind_handle( 1872 ehcip->ehci_qtd_pool_dma_handle); 1873 1874 ASSERT(rval == DDI_SUCCESS); 1875 } 1876 ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle); 1877 } 1878 1879 /* Free the QTD pool */ 1880 if (ehcip->ehci_qtd_pool_dma_handle) { 1881 ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle); 1882 } 1883 1884 if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) { 1885 /* 1886 * If EHCI_QH_POOL_BOUND flag is set, then unbind 1887 * the handle for QH pools. 1888 */ 1889 if ((ehcip->ehci_dma_addr_bind_flag & 1890 EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) { 1891 1892 rval = ddi_dma_unbind_handle( 1893 ehcip->ehci_qh_pool_dma_handle); 1894 1895 ASSERT(rval == DDI_SUCCESS); 1896 } 1897 1898 ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle); 1899 } 1900 1901 /* Free the QH pool */ 1902 if (ehcip->ehci_qh_pool_dma_handle) { 1903 ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle); 1904 } 1905 1906 /* Free the Periodic frame list table (PFLT) area */ 1907 if (ehcip->ehci_periodic_frame_list_tablep && 1908 ehcip->ehci_pflt_mem_handle) { 1909 /* 1910 * If EHCI_PFLT_DMA_BOUND flag is set, then unbind 1911 * the handle for PFLT. 1912 */ 1913 if ((ehcip->ehci_dma_addr_bind_flag & 1914 EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) { 1915 1916 rval = ddi_dma_unbind_handle( 1917 ehcip->ehci_pflt_dma_handle); 1918 1919 ASSERT(rval == DDI_SUCCESS); 1920 } 1921 1922 ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle); 1923 } 1924 1925 (void) ehci_isoc_cleanup(ehcip); 1926 1927 if (ehcip->ehci_pflt_dma_handle) { 1928 ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle); 1929 } 1930 1931 if (flags & EHCI_INTR) { 1932 /* Destroy the mutex */ 1933 mutex_destroy(&ehcip->ehci_int_mutex); 1934 1935 /* Destroy the async schedule advance condition variable */ 1936 cv_destroy(&ehcip->ehci_async_schedule_advance_cv); 1937 } 1938 1939 /* clean up kstat structs */ 1940 ehci_destroy_stats(ehcip); 1941 1942 /* Free ehci hcdi ops */ 1943 if (ehcip->ehci_hcdi_ops) { 1944 usba_free_hcdi_ops(ehcip->ehci_hcdi_ops); 1945 } 1946 1947 if (flags & EHCI_ZALLOC) { 1948 1949 usb_free_log_hdl(ehcip->ehci_log_hdl); 1950 1951 /* Remove all properties that might have been created */ 1952 ddi_prop_remove_all(ehcip->ehci_dip); 1953 1954 /* Free the soft state */ 1955 ddi_soft_state_free(ehci_statep, 1956 ddi_get_instance(ehcip->ehci_dip)); 1957 } 1958 1959 return (DDI_SUCCESS); 1960 } 1961 1962 1963 /* 1964 * ehci_rem_intrs: 1965 * 1966 * Unregister FIXED or MSI interrupts 1967 */ 1968 static void 1969 ehci_rem_intrs(ehci_state_t *ehcip) 1970 { 1971 int i; 1972 1973 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1974 "ehci_rem_intrs: interrupt type 0x%x", ehcip->ehci_intr_type); 1975 1976 /* Disable all interrupts */ 1977 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) { 1978 (void) ddi_intr_block_disable(ehcip->ehci_htable, 1979 ehcip->ehci_intr_cnt); 1980 } else { 1981 for (i = 0; i < ehcip->ehci_intr_cnt; i++) { 1982 (void) ddi_intr_disable(ehcip->ehci_htable[i]); 1983 } 1984 } 1985 1986 /* Call ddi_intr_remove_handler() */ 1987 for (i = 0; i < ehcip->ehci_intr_cnt; i++) { 1988 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]); 1989 (void) ddi_intr_free(ehcip->ehci_htable[i]); 1990 } 1991 1992 kmem_free(ehcip->ehci_htable, 1993 ehcip->ehci_intr_cnt * sizeof (ddi_intr_handle_t)); 1994 } 1995 1996 1997 /* 1998 * ehci_cpr_suspend 1999 */ 2000 int 2001 ehci_cpr_suspend(ehci_state_t *ehcip) 2002 { 2003 int i; 2004 2005 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 2006 "ehci_cpr_suspend:"); 2007 2008 /* Call into the root hub and suspend it */ 2009 if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) { 2010 2011 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 2012 "ehci_cpr_suspend: root hub fails to suspend"); 2013 2014 return (DDI_FAILURE); 2015 } 2016 2017 /* Only root hub's intr pipe should be open at this time */ 2018 mutex_enter(&ehcip->ehci_int_mutex); 2019 2020 ASSERT(ehcip->ehci_open_pipe_count == 0); 2021 2022 /* Just wait till all resources are reclaimed */ 2023 i = 0; 2024 while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) { 2025 ehci_handle_endpoint_reclaimation(ehcip); 2026 (void) ehci_wait_for_sof(ehcip); 2027 } 2028 ASSERT(ehcip->ehci_reclaim_list == NULL); 2029 2030 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 2031 "ehci_cpr_suspend: Disable HC QH list processing"); 2032 2033 /* Disable all EHCI QH list processing */ 2034 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) & 2035 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE))); 2036 2037 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 2038 "ehci_cpr_suspend: Disable HC interrupts"); 2039 2040 /* Disable all EHCI interrupts */ 2041 Set_OpReg(ehci_interrupt, 0); 2042 2043 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 2044 "ehci_cpr_suspend: Wait for the next SOF"); 2045 2046 /* Wait for the next SOF */ 2047 if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) { 2048 2049 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 2050 "ehci_cpr_suspend: ehci host controller suspend failed"); 2051 2052 mutex_exit(&ehcip->ehci_int_mutex); 2053 return (DDI_FAILURE); 2054 } 2055 2056 /* 2057 * Stop the ehci host controller 2058 * if usb keyboard is not connected. 2059 */ 2060 if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) { 2061 Set_OpReg(ehci_command, 2062 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN); 2063 } 2064 2065 /* Set host controller soft state to suspend */ 2066 ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE; 2067 2068 mutex_exit(&ehcip->ehci_int_mutex); 2069 2070 return (DDI_SUCCESS); 2071 } 2072 2073 2074 /* 2075 * ehci_cpr_resume 2076 */ 2077 int 2078 ehci_cpr_resume(ehci_state_t *ehcip) 2079 { 2080 mutex_enter(&ehcip->ehci_int_mutex); 2081 2082 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 2083 "ehci_cpr_resume: Restart the controller"); 2084 2085 /* Cleanup ehci specific information across cpr */ 2086 ehci_cpr_cleanup(ehcip); 2087 2088 /* Restart the controller */ 2089 if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) { 2090 2091 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 2092 "ehci_cpr_resume: ehci host controller resume failed "); 2093 2094 mutex_exit(&ehcip->ehci_int_mutex); 2095 2096 return (DDI_FAILURE); 2097 } 2098 2099 mutex_exit(&ehcip->ehci_int_mutex); 2100 2101 /* Now resume the root hub */ 2102 if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) { 2103 2104 return (DDI_FAILURE); 2105 } 2106 2107 return (DDI_SUCCESS); 2108 } 2109 2110 2111 /* 2112 * Bandwidth Allocation functions 2113 */ 2114 2115 /* 2116 * ehci_allocate_bandwidth: 2117 * 2118 * Figure out whether or not this interval may be supported. Return the index 2119 * into the lattice if it can be supported. Return allocation failure if it 2120 * can not be supported. 2121 */ 2122 int 2123 ehci_allocate_bandwidth( 2124 ehci_state_t *ehcip, 2125 usba_pipe_handle_data_t *ph, 2126 uint_t *pnode, 2127 uchar_t *smask, 2128 uchar_t *cmask) 2129 { 2130 int error = USB_SUCCESS; 2131 2132 /* This routine is protected by the ehci_int_mutex */ 2133 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 2134 2135 /* Reset the pnode to the last checked pnode */ 2136 *pnode = 0; 2137 2138 /* Allocate high speed bandwidth */ 2139 if ((error = ehci_allocate_high_speed_bandwidth(ehcip, 2140 ph, pnode, smask, cmask)) != USB_SUCCESS) { 2141 2142 return (error); 2143 } 2144 2145 /* 2146 * For low/full speed usb devices, allocate classic TT bandwidth 2147 * in additional to high speed bandwidth. 2148 */ 2149 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) { 2150 2151 /* Allocate classic TT bandwidth */ 2152 if ((error = ehci_allocate_classic_tt_bandwidth( 2153 ehcip, ph, *pnode)) != USB_SUCCESS) { 2154 2155 /* Deallocate high speed bandwidth */ 2156 ehci_deallocate_high_speed_bandwidth( 2157 ehcip, ph, *pnode, *smask, *cmask); 2158 } 2159 } 2160 2161 return (error); 2162 } 2163 2164 2165 /* 2166 * ehci_allocate_high_speed_bandwidth: 2167 * 2168 * Allocate high speed bandwidth for the low/full/high speed interrupt and 2169 * isochronous endpoints. 2170 */ 2171 static int 2172 ehci_allocate_high_speed_bandwidth( 2173 ehci_state_t *ehcip, 2174 usba_pipe_handle_data_t *ph, 2175 uint_t *pnode, 2176 uchar_t *smask, 2177 uchar_t *cmask) 2178 { 2179 uint_t sbandwidth, cbandwidth; 2180 int interval; 2181 usb_ep_descr_t *endpoint = &ph->p_ep; 2182 usba_device_t *child_ud; 2183 usb_port_status_t port_status; 2184 int error; 2185 2186 /* This routine is protected by the ehci_int_mutex */ 2187 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 2188 2189 /* Get child's usba device structure */ 2190 child_ud = ph->p_usba_device; 2191 2192 mutex_enter(&child_ud->usb_mutex); 2193 2194 /* Get the current usb device's port status */ 2195 port_status = ph->p_usba_device->usb_port_status; 2196 2197 mutex_exit(&child_ud->usb_mutex); 2198 2199 /* 2200 * Calculate the length in bytes of a transaction on this 2201 * periodic endpoint. Return failure if maximum packet is 2202 * zero. 2203 */ 2204 error = ehci_compute_high_speed_bandwidth(ehcip, endpoint, 2205 port_status, &sbandwidth, &cbandwidth); 2206 if (error != USB_SUCCESS) { 2207 2208 return (error); 2209 } 2210 2211 /* 2212 * Adjust polling interval to be a power of 2. 2213 * If this interval can't be supported, return 2214 * allocation failure. 2215 */ 2216 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status); 2217 if (interval == USB_FAILURE) { 2218 2219 return (USB_FAILURE); 2220 } 2221 2222 if (port_status == USBA_HIGH_SPEED_DEV) { 2223 /* Allocate bandwidth for high speed devices */ 2224 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) == 2225 USB_EP_ATTR_ISOCH) { 2226 error = USB_SUCCESS; 2227 } else { 2228 2229 error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode, 2230 endpoint, sbandwidth, interval); 2231 } 2232 2233 *cmask = 0x00; 2234 2235 } else { 2236 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) == 2237 USB_EP_ATTR_INTR) { 2238 2239 /* Allocate bandwidth for low speed interrupt */ 2240 error = ehci_find_bestfit_ls_intr_mask(ehcip, 2241 smask, cmask, pnode, sbandwidth, cbandwidth, 2242 interval); 2243 } else { 2244 if ((endpoint->bEndpointAddress & 2245 USB_EP_DIR_MASK) == USB_EP_DIR_IN) { 2246 2247 /* Allocate bandwidth for sitd in */ 2248 error = ehci_find_bestfit_sitd_in_mask(ehcip, 2249 smask, cmask, pnode, sbandwidth, cbandwidth, 2250 interval); 2251 } else { 2252 2253 /* Allocate bandwidth for sitd out */ 2254 error = ehci_find_bestfit_sitd_out_mask(ehcip, 2255 smask, pnode, sbandwidth, interval); 2256 *cmask = 0x00; 2257 } 2258 } 2259 } 2260 2261 if (error != USB_SUCCESS) { 2262 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2263 "ehci_allocate_high_speed_bandwidth: Reached maximum " 2264 "bandwidth value and cannot allocate bandwidth for a " 2265 "given high-speed periodic endpoint"); 2266 2267 return (USB_NO_BANDWIDTH); 2268 } 2269 2270 return (error); 2271 } 2272 2273 2274 /* 2275 * ehci_allocate_classic_tt_speed_bandwidth: 2276 * 2277 * Allocate classic TT bandwidth for the low/full speed interrupt and 2278 * isochronous endpoints. 2279 */ 2280 static int 2281 ehci_allocate_classic_tt_bandwidth( 2282 ehci_state_t *ehcip, 2283 usba_pipe_handle_data_t *ph, 2284 uint_t pnode) 2285 { 2286 uint_t bandwidth, min; 2287 uint_t height, leftmost, list; 2288 usb_ep_descr_t *endpoint = &ph->p_ep; 2289 usba_device_t *child_ud, *parent_ud; 2290 usb_port_status_t port_status; 2291 int i, interval; 2292 2293 /* This routine is protected by the ehci_int_mutex */ 2294 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 2295 2296 /* Get child's usba device structure */ 2297 child_ud = ph->p_usba_device; 2298 2299 mutex_enter(&child_ud->usb_mutex); 2300 2301 /* Get the current usb device's port status */ 2302 port_status = child_ud->usb_port_status; 2303 2304 /* Get the parent high speed hub's usba device structure */ 2305 parent_ud = child_ud->usb_hs_hub_usba_dev; 2306 2307 mutex_exit(&child_ud->usb_mutex); 2308 2309 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2310 "ehci_allocate_classic_tt_bandwidth: " 2311 "child_ud 0x%p parent_ud 0x%p", 2312 (void *)child_ud, (void *)parent_ud); 2313 2314 /* 2315 * Calculate the length in bytes of a transaction on this 2316 * periodic endpoint. Return failure if maximum packet is 2317 * zero. 2318 */ 2319 if (ehci_compute_classic_bandwidth(endpoint, 2320 port_status, &bandwidth) != USB_SUCCESS) { 2321 2322 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2323 "ehci_allocate_classic_tt_bandwidth: Periodic endpoint " 2324 "with zero endpoint maximum packet size is not supported"); 2325 2326 return (USB_NOT_SUPPORTED); 2327 } 2328 2329 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2330 "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth); 2331 2332 mutex_enter(&parent_ud->usb_mutex); 2333 2334 /* 2335 * If the length in bytes plus the allocated bandwidth exceeds 2336 * the maximum, return bandwidth allocation failure. 2337 */ 2338 if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) > 2339 FS_PERIODIC_BANDWIDTH) { 2340 2341 mutex_exit(&parent_ud->usb_mutex); 2342 2343 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2344 "ehci_allocate_classic_tt_bandwidth: Reached maximum " 2345 "bandwidth value and cannot allocate bandwidth for a " 2346 "given low/full speed periodic endpoint"); 2347 2348 return (USB_NO_BANDWIDTH); 2349 } 2350 2351 mutex_exit(&parent_ud->usb_mutex); 2352 2353 /* Adjust polling interval to be a power of 2 */ 2354 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status); 2355 2356 /* Find the height in the tree */ 2357 height = ehci_lattice_height(interval); 2358 2359 /* Find the leftmost leaf in the subtree specified by the node. */ 2360 leftmost = ehci_leftmost_leaf(pnode, height); 2361 2362 mutex_enter(&parent_ud->usb_mutex); 2363 2364 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) { 2365 list = ehci_index[leftmost + i]; 2366 2367 if ((parent_ud->usb_hs_hub_bandwidth[list] + 2368 bandwidth) > FS_PERIODIC_BANDWIDTH) { 2369 2370 mutex_exit(&parent_ud->usb_mutex); 2371 2372 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2373 "ehci_allocate_classic_tt_bandwidth: Reached " 2374 "maximum bandwidth value and cannot allocate " 2375 "bandwidth for low/full periodic endpoint"); 2376 2377 return (USB_NO_BANDWIDTH); 2378 } 2379 } 2380 2381 /* 2382 * All the leaves for this node must be updated with the bandwidth. 2383 */ 2384 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) { 2385 list = ehci_index[leftmost + i]; 2386 parent_ud->usb_hs_hub_bandwidth[list] += bandwidth; 2387 } 2388 2389 /* Find the leaf with the smallest allocated bandwidth */ 2390 min = parent_ud->usb_hs_hub_bandwidth[0]; 2391 2392 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) { 2393 if (parent_ud->usb_hs_hub_bandwidth[i] < min) { 2394 min = parent_ud->usb_hs_hub_bandwidth[i]; 2395 } 2396 } 2397 2398 /* Save the minimum for later use */ 2399 parent_ud->usb_hs_hub_min_bandwidth = min; 2400 2401 mutex_exit(&parent_ud->usb_mutex); 2402 2403 return (USB_SUCCESS); 2404 } 2405 2406 2407 /* 2408 * ehci_deallocate_bandwidth: 2409 * 2410 * Deallocate bandwidth for the given node in the lattice and the length 2411 * of transfer. 2412 */ 2413 void 2414 ehci_deallocate_bandwidth( 2415 ehci_state_t *ehcip, 2416 usba_pipe_handle_data_t *ph, 2417 uint_t pnode, 2418 uchar_t smask, 2419 uchar_t cmask) 2420 { 2421 /* This routine is protected by the ehci_int_mutex */ 2422 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 2423 2424 ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask); 2425 2426 /* 2427 * For low/full speed usb devices, deallocate classic TT bandwidth 2428 * in additional to high speed bandwidth. 2429 */ 2430 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) { 2431 2432 /* Deallocate classic TT bandwidth */ 2433 ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode); 2434 } 2435 } 2436 2437 2438 /* 2439 * ehci_deallocate_high_speed_bandwidth: 2440 * 2441 * Deallocate high speed bandwidth of a interrupt or isochronous endpoint. 2442 */ 2443 static void 2444 ehci_deallocate_high_speed_bandwidth( 2445 ehci_state_t *ehcip, 2446 usba_pipe_handle_data_t *ph, 2447 uint_t pnode, 2448 uchar_t smask, 2449 uchar_t cmask) 2450 { 2451 uint_t height, leftmost; 2452 uint_t list_count; 2453 uint_t sbandwidth, cbandwidth; 2454 int interval; 2455 usb_ep_descr_t *endpoint = &ph->p_ep; 2456 usba_device_t *child_ud; 2457 usb_port_status_t port_status; 2458 2459 /* This routine is protected by the ehci_int_mutex */ 2460 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 2461 2462 /* Get child's usba device structure */ 2463 child_ud = ph->p_usba_device; 2464 2465 mutex_enter(&child_ud->usb_mutex); 2466 2467 /* Get the current usb device's port status */ 2468 port_status = ph->p_usba_device->usb_port_status; 2469 2470 mutex_exit(&child_ud->usb_mutex); 2471 2472 (void) ehci_compute_high_speed_bandwidth(ehcip, endpoint, 2473 port_status, &sbandwidth, &cbandwidth); 2474 2475 /* Adjust polling interval to be a power of 2 */ 2476 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status); 2477 2478 /* Find the height in the tree */ 2479 height = ehci_lattice_height(interval); 2480 2481 /* 2482 * Find the leftmost leaf in the subtree specified by the node 2483 */ 2484 leftmost = ehci_leftmost_leaf(pnode, height); 2485 2486 list_count = EHCI_NUM_INTR_QH_LISTS/interval; 2487 2488 /* Delete the bandwidth from the appropriate lists */ 2489 if (port_status == USBA_HIGH_SPEED_DEV) { 2490 2491 ehci_update_bw_availability(ehcip, -sbandwidth, 2492 leftmost, list_count, smask); 2493 } else { 2494 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) == 2495 USB_EP_ATTR_INTR) { 2496 2497 ehci_update_bw_availability(ehcip, -sbandwidth, 2498 leftmost, list_count, smask); 2499 ehci_update_bw_availability(ehcip, -cbandwidth, 2500 leftmost, list_count, cmask); 2501 } else { 2502 if ((endpoint->bEndpointAddress & 2503 USB_EP_DIR_MASK) == USB_EP_DIR_IN) { 2504 2505 ehci_update_bw_availability(ehcip, -sbandwidth, 2506 leftmost, list_count, smask); 2507 ehci_update_bw_availability(ehcip, 2508 -MAX_UFRAME_SITD_XFER, leftmost, 2509 list_count, cmask); 2510 } else { 2511 2512 ehci_update_bw_availability(ehcip, 2513 -MAX_UFRAME_SITD_XFER, leftmost, 2514 list_count, smask); 2515 } 2516 } 2517 } 2518 } 2519 2520 /* 2521 * ehci_deallocate_classic_tt_bandwidth: 2522 * 2523 * Deallocate high speed bandwidth of a interrupt or isochronous endpoint. 2524 */ 2525 static void 2526 ehci_deallocate_classic_tt_bandwidth( 2527 ehci_state_t *ehcip, 2528 usba_pipe_handle_data_t *ph, 2529 uint_t pnode) 2530 { 2531 uint_t bandwidth, height, leftmost, list, min; 2532 int i, interval; 2533 usb_ep_descr_t *endpoint = &ph->p_ep; 2534 usba_device_t *child_ud, *parent_ud; 2535 usb_port_status_t port_status; 2536 2537 /* This routine is protected by the ehci_int_mutex */ 2538 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 2539 2540 /* Get child's usba device structure */ 2541 child_ud = ph->p_usba_device; 2542 2543 mutex_enter(&child_ud->usb_mutex); 2544 2545 /* Get the current usb device's port status */ 2546 port_status = child_ud->usb_port_status; 2547 2548 /* Get the parent high speed hub's usba device structure */ 2549 parent_ud = child_ud->usb_hs_hub_usba_dev; 2550 2551 mutex_exit(&child_ud->usb_mutex); 2552 2553 /* Obtain the bandwidth */ 2554 (void) ehci_compute_classic_bandwidth(endpoint, 2555 port_status, &bandwidth); 2556 2557 /* Adjust polling interval to be a power of 2 */ 2558 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status); 2559 2560 /* Find the height in the tree */ 2561 height = ehci_lattice_height(interval); 2562 2563 /* Find the leftmost leaf in the subtree specified by the node */ 2564 leftmost = ehci_leftmost_leaf(pnode, height); 2565 2566 mutex_enter(&parent_ud->usb_mutex); 2567 2568 /* Delete the bandwidth from the appropriate lists */ 2569 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) { 2570 list = ehci_index[leftmost + i]; 2571 parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth; 2572 } 2573 2574 /* Find the leaf with the smallest allocated bandwidth */ 2575 min = parent_ud->usb_hs_hub_bandwidth[0]; 2576 2577 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) { 2578 if (parent_ud->usb_hs_hub_bandwidth[i] < min) { 2579 min = parent_ud->usb_hs_hub_bandwidth[i]; 2580 } 2581 } 2582 2583 /* Save the minimum for later use */ 2584 parent_ud->usb_hs_hub_min_bandwidth = min; 2585 2586 mutex_exit(&parent_ud->usb_mutex); 2587 } 2588 2589 2590 /* 2591 * ehci_compute_high_speed_bandwidth: 2592 * 2593 * Given a periodic endpoint (interrupt or isochronous) determine the total 2594 * bandwidth for one transaction. The EHCI host controller traverses the 2595 * endpoint descriptor lists on a first-come-first-serve basis. When the HC 2596 * services an endpoint, only a single transaction attempt is made. The HC 2597 * moves to the next Endpoint Descriptor after the first transaction attempt 2598 * rather than finishing the entire Transfer Descriptor. Therefore, when a 2599 * Transfer Descriptor is inserted into the lattice, we will only count the 2600 * number of bytes for one transaction. 2601 * 2602 * The following are the formulas used for calculating bandwidth in terms 2603 * bytes and it is for the single USB high speed transaction. The protocol 2604 * overheads will be different for each of type of USB transfer & all these 2605 * formulas & protocol overheads are derived from the 5.11.3 section of the 2606 * USB 2.0 Specification. 2607 * 2608 * High-Speed: 2609 * Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay 2610 * 2611 * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub) 2612 * 2613 * Protocol overhead + Split transaction overhead + 2614 * ((MaxPktSz * 7)/6) + Host_Delay; 2615 */ 2616 /* ARGSUSED */ 2617 static int 2618 ehci_compute_high_speed_bandwidth( 2619 ehci_state_t *ehcip, 2620 usb_ep_descr_t *endpoint, 2621 usb_port_status_t port_status, 2622 uint_t *sbandwidth, 2623 uint_t *cbandwidth) 2624 { 2625 ushort_t maxpacketsize = endpoint->wMaxPacketSize; 2626 2627 /* Return failure if endpoint maximum packet is zero */ 2628 if (maxpacketsize == 0) { 2629 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2630 "ehci_allocate_high_speed_bandwidth: Periodic endpoint " 2631 "with zero endpoint maximum packet size is not supported"); 2632 2633 return (USB_NOT_SUPPORTED); 2634 } 2635 2636 /* Add bit-stuffing overhead */ 2637 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6); 2638 2639 /* Add Host Controller specific delay to required bandwidth */ 2640 *sbandwidth = EHCI_HOST_CONTROLLER_DELAY; 2641 2642 /* Add xfer specific protocol overheads */ 2643 if ((endpoint->bmAttributes & 2644 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) { 2645 /* High speed interrupt transaction */ 2646 *sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD; 2647 } else { 2648 /* Isochronous transaction */ 2649 *sbandwidth += HS_ISOC_PROTO_OVERHEAD; 2650 } 2651 2652 /* 2653 * For low/full speed devices, add split transaction specific 2654 * overheads. 2655 */ 2656 if (port_status != USBA_HIGH_SPEED_DEV) { 2657 /* 2658 * Add start and complete split transaction 2659 * tokens overheads. 2660 */ 2661 *cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD; 2662 *sbandwidth += START_SPLIT_OVERHEAD; 2663 2664 /* Add data overhead depending on data direction */ 2665 if ((endpoint->bEndpointAddress & 2666 USB_EP_DIR_MASK) == USB_EP_DIR_IN) { 2667 *cbandwidth += maxpacketsize; 2668 } else { 2669 if ((endpoint->bmAttributes & 2670 USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) { 2671 /* There is no compete splits for out */ 2672 *cbandwidth = 0; 2673 } 2674 *sbandwidth += maxpacketsize; 2675 } 2676 } else { 2677 uint_t xactions; 2678 2679 /* Get the max transactions per microframe */ 2680 xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >> 2681 USB_EP_MAX_XACTS_SHIFT) + 1; 2682 2683 /* High speed transaction */ 2684 *sbandwidth += maxpacketsize; 2685 2686 /* Calculate bandwidth per micro-frame */ 2687 *sbandwidth *= xactions; 2688 2689 *cbandwidth = 0; 2690 } 2691 2692 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2693 "ehci_allocate_high_speed_bandwidth: " 2694 "Start split bandwidth %d Complete split bandwidth %d", 2695 *sbandwidth, *cbandwidth); 2696 2697 return (USB_SUCCESS); 2698 } 2699 2700 2701 /* 2702 * ehci_compute_classic_bandwidth: 2703 * 2704 * Given a periodic endpoint (interrupt or isochronous) determine the total 2705 * bandwidth for one transaction. The EHCI host controller traverses the 2706 * endpoint descriptor lists on a first-come-first-serve basis. When the HC 2707 * services an endpoint, only a single transaction attempt is made. The HC 2708 * moves to the next Endpoint Descriptor after the first transaction attempt 2709 * rather than finishing the entire Transfer Descriptor. Therefore, when a 2710 * Transfer Descriptor is inserted into the lattice, we will only count the 2711 * number of bytes for one transaction. 2712 * 2713 * The following are the formulas used for calculating bandwidth in terms 2714 * bytes and it is for the single USB high speed transaction. The protocol 2715 * overheads will be different for each of type of USB transfer & all these 2716 * formulas & protocol overheads are derived from the 5.11.3 section of the 2717 * USB 2.0 Specification. 2718 * 2719 * Low-Speed: 2720 * Protocol overhead + Hub LS overhead + 2721 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay 2722 * 2723 * Full-Speed: 2724 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay 2725 */ 2726 /* ARGSUSED */ 2727 static int 2728 ehci_compute_classic_bandwidth( 2729 usb_ep_descr_t *endpoint, 2730 usb_port_status_t port_status, 2731 uint_t *bandwidth) 2732 { 2733 ushort_t maxpacketsize = endpoint->wMaxPacketSize; 2734 2735 /* 2736 * If endpoint maximum packet is zero, then return immediately. 2737 */ 2738 if (maxpacketsize == 0) { 2739 2740 return (USB_NOT_SUPPORTED); 2741 } 2742 2743 /* Add TT delay to required bandwidth */ 2744 *bandwidth = TT_DELAY; 2745 2746 /* Add bit-stuffing overhead */ 2747 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6); 2748 2749 switch (port_status) { 2750 case USBA_LOW_SPEED_DEV: 2751 /* Low speed interrupt transaction */ 2752 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD + 2753 HUB_LOW_SPEED_PROTO_OVERHEAD + 2754 (LOW_SPEED_CLOCK * maxpacketsize)); 2755 break; 2756 case USBA_FULL_SPEED_DEV: 2757 /* Full speed transaction */ 2758 *bandwidth += maxpacketsize; 2759 2760 /* Add xfer specific protocol overheads */ 2761 if ((endpoint->bmAttributes & 2762 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) { 2763 /* Full speed interrupt transaction */ 2764 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD; 2765 } else { 2766 /* Isochronous and input transaction */ 2767 if ((endpoint->bEndpointAddress & 2768 USB_EP_DIR_MASK) == USB_EP_DIR_IN) { 2769 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD; 2770 } else { 2771 /* Isochronous and output transaction */ 2772 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD; 2773 } 2774 } 2775 break; 2776 } 2777 2778 return (USB_SUCCESS); 2779 } 2780 2781 2782 /* 2783 * ehci_adjust_polling_interval: 2784 * 2785 * Adjust bandwidth according usb device speed. 2786 */ 2787 /* ARGSUSED */ 2788 int 2789 ehci_adjust_polling_interval( 2790 ehci_state_t *ehcip, 2791 usb_ep_descr_t *endpoint, 2792 usb_port_status_t port_status) 2793 { 2794 uint_t interval; 2795 int i = 0; 2796 2797 /* Get the polling interval */ 2798 interval = endpoint->bInterval; 2799 2800 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2801 "ehci_adjust_polling_interval: Polling interval 0x%x", interval); 2802 2803 /* 2804 * According USB 2.0 Specifications, a high-speed endpoint's 2805 * polling intervals are specified interms of 125us or micro 2806 * frame, where as full/low endpoint's polling intervals are 2807 * specified in milliseconds. 2808 * 2809 * A high speed interrupt/isochronous endpoints can specify 2810 * desired polling interval between 1 to 16 micro-frames, 2811 * where as full/low endpoints can specify between 1 to 255 2812 * milliseconds. 2813 */ 2814 switch (port_status) { 2815 case USBA_LOW_SPEED_DEV: 2816 /* 2817 * Low speed endpoints are limited to specifying 2818 * only 8ms to 255ms in this driver. If a device 2819 * reports a polling interval that is less than 8ms, 2820 * it will use 8 ms instead. 2821 */ 2822 if (interval < LS_MIN_POLL_INTERVAL) { 2823 2824 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2825 "Low speed endpoint's poll interval of %d ms " 2826 "is below threshold. Rounding up to %d ms", 2827 interval, LS_MIN_POLL_INTERVAL); 2828 2829 interval = LS_MIN_POLL_INTERVAL; 2830 } 2831 2832 /* 2833 * Return an error if the polling interval is greater 2834 * than 255ms. 2835 */ 2836 if (interval > LS_MAX_POLL_INTERVAL) { 2837 2838 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2839 "Low speed endpoint's poll interval is " 2840 "greater than %d ms", LS_MAX_POLL_INTERVAL); 2841 2842 return (USB_FAILURE); 2843 } 2844 break; 2845 2846 case USBA_FULL_SPEED_DEV: 2847 /* 2848 * Return an error if the polling interval is less 2849 * than 1ms and greater than 255ms. 2850 */ 2851 if ((interval < FS_MIN_POLL_INTERVAL) && 2852 (interval > FS_MAX_POLL_INTERVAL)) { 2853 2854 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2855 "Full speed endpoint's poll interval must " 2856 "be between %d and %d ms", FS_MIN_POLL_INTERVAL, 2857 FS_MAX_POLL_INTERVAL); 2858 2859 return (USB_FAILURE); 2860 } 2861 break; 2862 case USBA_HIGH_SPEED_DEV: 2863 /* 2864 * Return an error if the polling interval is less 1 2865 * and greater than 16. Convert this value to 125us 2866 * units using 2^(bInterval -1). refer usb 2.0 spec 2867 * page 51 for details. 2868 */ 2869 if ((interval < HS_MIN_POLL_INTERVAL) && 2870 (interval > HS_MAX_POLL_INTERVAL)) { 2871 2872 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2873 "High speed endpoint's poll interval " 2874 "must be between %d and %d units", 2875 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL); 2876 2877 return (USB_FAILURE); 2878 } 2879 2880 /* Adjust high speed device polling interval */ 2881 interval = 2882 ehci_adjust_high_speed_polling_interval(ehcip, endpoint); 2883 2884 break; 2885 } 2886 2887 /* 2888 * If polling interval is greater than 32ms, 2889 * adjust polling interval equal to 32ms. 2890 */ 2891 if (interval > EHCI_NUM_INTR_QH_LISTS) { 2892 interval = EHCI_NUM_INTR_QH_LISTS; 2893 } 2894 2895 /* 2896 * Find the nearest power of 2 that's less 2897 * than interval. 2898 */ 2899 while ((ehci_pow_2(i)) <= interval) { 2900 i++; 2901 } 2902 2903 return (ehci_pow_2((i - 1))); 2904 } 2905 2906 2907 /* 2908 * ehci_adjust_high_speed_polling_interval: 2909 */ 2910 /* ARGSUSED */ 2911 static int 2912 ehci_adjust_high_speed_polling_interval( 2913 ehci_state_t *ehcip, 2914 usb_ep_descr_t *endpoint) 2915 { 2916 uint_t interval; 2917 2918 /* Get the polling interval */ 2919 interval = ehci_pow_2(endpoint->bInterval - 1); 2920 2921 /* 2922 * Convert polling interval from micro seconds 2923 * to milli seconds. 2924 */ 2925 if (interval <= EHCI_MAX_UFRAMES) { 2926 interval = 1; 2927 } else { 2928 interval = interval/EHCI_MAX_UFRAMES; 2929 } 2930 2931 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2932 "ehci_adjust_high_speed_polling_interval: " 2933 "High speed adjusted interval 0x%x", interval); 2934 2935 return (interval); 2936 } 2937 2938 2939 /* 2940 * ehci_lattice_height: 2941 * 2942 * Given the requested bandwidth, find the height in the tree at which the 2943 * nodes for this bandwidth fall. The height is measured as the number of 2944 * nodes from the leaf to the level specified by bandwidth The root of the 2945 * tree is at height TREE_HEIGHT. 2946 */ 2947 static uint_t 2948 ehci_lattice_height(uint_t interval) 2949 { 2950 return (TREE_HEIGHT - (ehci_log_2(interval))); 2951 } 2952 2953 2954 /* 2955 * ehci_lattice_parent: 2956 * 2957 * Given a node in the lattice, find the index of the parent node 2958 */ 2959 static uint_t 2960 ehci_lattice_parent(uint_t node) 2961 { 2962 if ((node % 2) == 0) { 2963 2964 return ((node/2) - 1); 2965 } else { 2966 2967 return ((node + 1)/2 - 1); 2968 } 2969 } 2970 2971 2972 /* 2973 * ehci_find_periodic_node: 2974 * 2975 * Based on the "real" array leaf node and interval, get the periodic node. 2976 */ 2977 static uint_t 2978 ehci_find_periodic_node(uint_t leaf, int interval) { 2979 uint_t lattice_leaf; 2980 uint_t height = ehci_lattice_height(interval); 2981 uint_t pnode; 2982 int i; 2983 2984 /* Get the leaf number in the lattice */ 2985 lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1; 2986 2987 /* Get the node in the lattice based on the height and leaf */ 2988 pnode = lattice_leaf; 2989 for (i = 0; i < height; i++) { 2990 pnode = ehci_lattice_parent(pnode); 2991 } 2992 2993 return (pnode); 2994 } 2995 2996 2997 /* 2998 * ehci_leftmost_leaf: 2999 * 3000 * Find the leftmost leaf in the subtree specified by the node. Height refers 3001 * to number of nodes from the bottom of the tree to the node, including the 3002 * node. 3003 * 3004 * The formula for a zero based tree is: 3005 * 2^H * Node + 2^H - 1 3006 * The leaf of the tree is an array, convert the number for the array. 3007 * Subtract the size of nodes not in the array 3008 * 2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) = 3009 * 2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS = 3010 * 2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS 3011 * 0 3012 * 1 2 3013 * 0 1 2 3 3014 */ 3015 static uint_t 3016 ehci_leftmost_leaf( 3017 uint_t node, 3018 uint_t height) 3019 { 3020 return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS); 3021 } 3022 3023 3024 /* 3025 * ehci_pow_2: 3026 * 3027 * Compute 2 to the power 3028 */ 3029 static uint_t 3030 ehci_pow_2(uint_t x) 3031 { 3032 if (x == 0) { 3033 3034 return (1); 3035 } else { 3036 3037 return (2 << (x - 1)); 3038 } 3039 } 3040 3041 3042 /* 3043 * ehci_log_2: 3044 * 3045 * Compute log base 2 of x 3046 */ 3047 static uint_t 3048 ehci_log_2(uint_t x) 3049 { 3050 int i = 0; 3051 3052 while (x != 1) { 3053 x = x >> 1; 3054 i++; 3055 } 3056 3057 return (i); 3058 } 3059 3060 3061 /* 3062 * ehci_find_bestfit_hs_mask: 3063 * 3064 * Find the smask and cmask in the bandwidth allocation, and update the 3065 * bandwidth allocation. 3066 */ 3067 static int 3068 ehci_find_bestfit_hs_mask( 3069 ehci_state_t *ehcip, 3070 uchar_t *smask, 3071 uint_t *pnode, 3072 usb_ep_descr_t *endpoint, 3073 uint_t bandwidth, 3074 int interval) 3075 { 3076 int i; 3077 uint_t elements, index; 3078 int array_leaf, best_array_leaf; 3079 uint_t node_bandwidth, best_node_bandwidth; 3080 uint_t leaf_count; 3081 uchar_t bw_mask; 3082 uchar_t best_smask; 3083 3084 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3085 "ehci_find_bestfit_hs_mask: "); 3086 3087 /* Get all the valid smasks */ 3088 switch (ehci_pow_2(endpoint->bInterval - 1)) { 3089 case EHCI_INTR_1US_POLL: 3090 index = EHCI_1US_MASK_INDEX; 3091 elements = EHCI_INTR_1US_POLL; 3092 break; 3093 case EHCI_INTR_2US_POLL: 3094 index = EHCI_2US_MASK_INDEX; 3095 elements = EHCI_INTR_2US_POLL; 3096 break; 3097 case EHCI_INTR_4US_POLL: 3098 index = EHCI_4US_MASK_INDEX; 3099 elements = EHCI_INTR_4US_POLL; 3100 break; 3101 case EHCI_INTR_XUS_POLL: 3102 default: 3103 index = EHCI_XUS_MASK_INDEX; 3104 elements = EHCI_INTR_XUS_POLL; 3105 break; 3106 } 3107 3108 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 3109 3110 /* 3111 * Because of the way the leaves are setup, we will automatically 3112 * hit the leftmost leaf of every possible node with this interval. 3113 */ 3114 best_smask = 0x00; 3115 best_node_bandwidth = 0; 3116 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 3117 /* Find the bandwidth mask */ 3118 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip, 3119 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask); 3120 3121 /* 3122 * If this node cannot support our requirements skip to the 3123 * next leaf. 3124 */ 3125 if (bw_mask == 0x00) { 3126 continue; 3127 } 3128 3129 /* 3130 * Now make sure our bandwidth requirements can be 3131 * satisfied with one of smasks in this node. 3132 */ 3133 *smask = 0x00; 3134 for (i = index; i < (index + elements); i++) { 3135 /* Check the start split mask value */ 3136 if (ehci_start_split_mask[index] & bw_mask) { 3137 *smask = ehci_start_split_mask[index]; 3138 break; 3139 } 3140 } 3141 3142 /* 3143 * If an appropriate smask is found save the information if: 3144 * o best_smask has not been found yet. 3145 * - or - 3146 * o This is the node with the least amount of bandwidth 3147 */ 3148 if ((*smask != 0x00) && 3149 ((best_smask == 0x00) || 3150 (best_node_bandwidth > node_bandwidth))) { 3151 3152 best_node_bandwidth = node_bandwidth; 3153 best_array_leaf = array_leaf; 3154 best_smask = *smask; 3155 } 3156 } 3157 3158 /* 3159 * If we find node that can handle the bandwidth populate the 3160 * appropriate variables and return success. 3161 */ 3162 if (best_smask) { 3163 *smask = best_smask; 3164 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 3165 interval); 3166 ehci_update_bw_availability(ehcip, bandwidth, 3167 ehci_index[best_array_leaf], leaf_count, best_smask); 3168 3169 return (USB_SUCCESS); 3170 } 3171 3172 return (USB_FAILURE); 3173 } 3174 3175 3176 /* 3177 * ehci_find_bestfit_ls_intr_mask: 3178 * 3179 * Find the smask and cmask in the bandwidth allocation. 3180 */ 3181 static int 3182 ehci_find_bestfit_ls_intr_mask( 3183 ehci_state_t *ehcip, 3184 uchar_t *smask, 3185 uchar_t *cmask, 3186 uint_t *pnode, 3187 uint_t sbandwidth, 3188 uint_t cbandwidth, 3189 int interval) 3190 { 3191 int i; 3192 uint_t elements, index; 3193 int array_leaf, best_array_leaf; 3194 uint_t node_sbandwidth, node_cbandwidth; 3195 uint_t best_node_bandwidth; 3196 uint_t leaf_count; 3197 uchar_t bw_smask, bw_cmask; 3198 uchar_t best_smask, best_cmask; 3199 3200 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3201 "ehci_find_bestfit_ls_intr_mask: "); 3202 3203 /* For low and full speed devices */ 3204 index = EHCI_XUS_MASK_INDEX; 3205 elements = EHCI_INTR_4MS_POLL; 3206 3207 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 3208 3209 /* 3210 * Because of the way the leaves are setup, we will automatically 3211 * hit the leftmost leaf of every possible node with this interval. 3212 */ 3213 best_smask = 0x00; 3214 best_node_bandwidth = 0; 3215 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 3216 /* Find the bandwidth mask */ 3217 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip, 3218 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask); 3219 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip, 3220 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask); 3221 3222 /* 3223 * If this node cannot support our requirements skip to the 3224 * next leaf. 3225 */ 3226 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) { 3227 continue; 3228 } 3229 3230 /* 3231 * Now make sure our bandwidth requirements can be 3232 * satisfied with one of smasks in this node. 3233 */ 3234 *smask = 0x00; 3235 *cmask = 0x00; 3236 for (i = index; i < (index + elements); i++) { 3237 /* Check the start split mask value */ 3238 if ((ehci_start_split_mask[index] & bw_smask) && 3239 (ehci_intr_complete_split_mask[index] & bw_cmask)) { 3240 *smask = ehci_start_split_mask[index]; 3241 *cmask = ehci_intr_complete_split_mask[index]; 3242 break; 3243 } 3244 } 3245 3246 /* 3247 * If an appropriate smask is found save the information if: 3248 * o best_smask has not been found yet. 3249 * - or - 3250 * o This is the node with the least amount of bandwidth 3251 */ 3252 if ((*smask != 0x00) && 3253 ((best_smask == 0x00) || 3254 (best_node_bandwidth > 3255 (node_sbandwidth + node_cbandwidth)))) { 3256 best_node_bandwidth = node_sbandwidth + node_cbandwidth; 3257 best_array_leaf = array_leaf; 3258 best_smask = *smask; 3259 best_cmask = *cmask; 3260 } 3261 } 3262 3263 /* 3264 * If we find node that can handle the bandwidth populate the 3265 * appropriate variables and return success. 3266 */ 3267 if (best_smask) { 3268 *smask = best_smask; 3269 *cmask = best_cmask; 3270 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 3271 interval); 3272 ehci_update_bw_availability(ehcip, sbandwidth, 3273 ehci_index[best_array_leaf], leaf_count, best_smask); 3274 ehci_update_bw_availability(ehcip, cbandwidth, 3275 ehci_index[best_array_leaf], leaf_count, best_cmask); 3276 3277 return (USB_SUCCESS); 3278 } 3279 3280 return (USB_FAILURE); 3281 } 3282 3283 3284 /* 3285 * ehci_find_bestfit_sitd_in_mask: 3286 * 3287 * Find the smask and cmask in the bandwidth allocation. 3288 */ 3289 static int 3290 ehci_find_bestfit_sitd_in_mask( 3291 ehci_state_t *ehcip, 3292 uchar_t *smask, 3293 uchar_t *cmask, 3294 uint_t *pnode, 3295 uint_t sbandwidth, 3296 uint_t cbandwidth, 3297 int interval) 3298 { 3299 int i, uFrames, found; 3300 int array_leaf, best_array_leaf; 3301 uint_t node_sbandwidth, node_cbandwidth; 3302 uint_t best_node_bandwidth; 3303 uint_t leaf_count; 3304 uchar_t bw_smask, bw_cmask; 3305 uchar_t best_smask, best_cmask; 3306 3307 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3308 "ehci_find_bestfit_sitd_in_mask: "); 3309 3310 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 3311 3312 /* 3313 * Because of the way the leaves are setup, we will automatically 3314 * hit the leftmost leaf of every possible node with this interval. 3315 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame. 3316 */ 3317 /* 3318 * Need to add an additional 2 uFrames, if the "L"ast 3319 * complete split is before uFrame 6. See section 3320 * 11.8.4 in USB 2.0 Spec. Currently we do not support 3321 * the "Back Ptr" which means we support on IN of 3322 * ~4*MAX_UFRAME_SITD_XFER bandwidth/ 3323 */ 3324 uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2; 3325 if (cbandwidth % MAX_UFRAME_SITD_XFER) { 3326 uFrames++; 3327 } 3328 if (uFrames > 6) { 3329 3330 return (USB_FAILURE); 3331 } 3332 *smask = 0x1; 3333 *cmask = 0x00; 3334 for (i = 0; i < uFrames; i++) { 3335 *cmask = *cmask << 1; 3336 *cmask |= 0x1; 3337 } 3338 /* cmask must start 2 frames after the smask */ 3339 *cmask = *cmask << 2; 3340 3341 found = 0; 3342 best_smask = 0x00; 3343 best_node_bandwidth = 0; 3344 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 3345 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip, 3346 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask); 3347 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip, 3348 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count, 3349 &bw_cmask); 3350 3351 /* 3352 * If this node cannot support our requirements skip to the 3353 * next leaf. 3354 */ 3355 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) { 3356 continue; 3357 } 3358 3359 for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) { 3360 if ((*smask & bw_smask) && (*cmask & bw_cmask)) { 3361 found = 1; 3362 break; 3363 } 3364 *smask = *smask << 1; 3365 *cmask = *cmask << 1; 3366 } 3367 3368 /* 3369 * If an appropriate smask is found save the information if: 3370 * o best_smask has not been found yet. 3371 * - or - 3372 * o This is the node with the least amount of bandwidth 3373 */ 3374 if (found && 3375 ((best_smask == 0x00) || 3376 (best_node_bandwidth > 3377 (node_sbandwidth + node_cbandwidth)))) { 3378 best_node_bandwidth = node_sbandwidth + node_cbandwidth; 3379 best_array_leaf = array_leaf; 3380 best_smask = *smask; 3381 best_cmask = *cmask; 3382 } 3383 } 3384 3385 /* 3386 * If we find node that can handle the bandwidth populate the 3387 * appropriate variables and return success. 3388 */ 3389 if (best_smask) { 3390 *smask = best_smask; 3391 *cmask = best_cmask; 3392 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 3393 interval); 3394 ehci_update_bw_availability(ehcip, sbandwidth, 3395 ehci_index[best_array_leaf], leaf_count, best_smask); 3396 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER, 3397 ehci_index[best_array_leaf], leaf_count, best_cmask); 3398 3399 return (USB_SUCCESS); 3400 } 3401 3402 return (USB_FAILURE); 3403 } 3404 3405 3406 /* 3407 * ehci_find_bestfit_sitd_out_mask: 3408 * 3409 * Find the smask in the bandwidth allocation. 3410 */ 3411 static int 3412 ehci_find_bestfit_sitd_out_mask( 3413 ehci_state_t *ehcip, 3414 uchar_t *smask, 3415 uint_t *pnode, 3416 uint_t sbandwidth, 3417 int interval) 3418 { 3419 int i, uFrames, found; 3420 int array_leaf, best_array_leaf; 3421 uint_t node_sbandwidth; 3422 uint_t best_node_bandwidth; 3423 uint_t leaf_count; 3424 uchar_t bw_smask; 3425 uchar_t best_smask; 3426 3427 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3428 "ehci_find_bestfit_sitd_out_mask: "); 3429 3430 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 3431 3432 /* 3433 * Because of the way the leaves are setup, we will automatically 3434 * hit the leftmost leaf of every possible node with this interval. 3435 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame. 3436 */ 3437 *smask = 0x00; 3438 uFrames = sbandwidth / MAX_UFRAME_SITD_XFER; 3439 if (sbandwidth % MAX_UFRAME_SITD_XFER) { 3440 uFrames++; 3441 } 3442 for (i = 0; i < uFrames; i++) { 3443 *smask = *smask << 1; 3444 *smask |= 0x1; 3445 } 3446 3447 found = 0; 3448 best_smask = 0x00; 3449 best_node_bandwidth = 0; 3450 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 3451 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip, 3452 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count, 3453 &bw_smask); 3454 3455 /* 3456 * If this node cannot support our requirements skip to the 3457 * next leaf. 3458 */ 3459 if (bw_smask == 0x00) { 3460 continue; 3461 } 3462 3463 /* You cannot have a start split on the 8th uFrame */ 3464 for (i = 0; (*smask & 0x80) == 0; i++) { 3465 if (*smask & bw_smask) { 3466 found = 1; 3467 break; 3468 } 3469 *smask = *smask << 1; 3470 } 3471 3472 /* 3473 * If an appropriate smask is found save the information if: 3474 * o best_smask has not been found yet. 3475 * - or - 3476 * o This is the node with the least amount of bandwidth 3477 */ 3478 if (found && 3479 ((best_smask == 0x00) || 3480 (best_node_bandwidth > node_sbandwidth))) { 3481 best_node_bandwidth = node_sbandwidth; 3482 best_array_leaf = array_leaf; 3483 best_smask = *smask; 3484 } 3485 } 3486 3487 /* 3488 * If we find node that can handle the bandwidth populate the 3489 * appropriate variables and return success. 3490 */ 3491 if (best_smask) { 3492 *smask = best_smask; 3493 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 3494 interval); 3495 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER, 3496 ehci_index[best_array_leaf], leaf_count, best_smask); 3497 3498 return (USB_SUCCESS); 3499 } 3500 3501 return (USB_FAILURE); 3502 } 3503 3504 3505 /* 3506 * ehci_calculate_bw_availability_mask: 3507 * 3508 * Returns the "total bandwidth used" in this node. 3509 * Populates bw_mask with the uFrames that can support the bandwidth. 3510 * 3511 * If all the Frames cannot support this bandwidth, then bw_mask 3512 * will return 0x00 and the "total bandwidth used" will be invalid. 3513 */ 3514 static uint_t 3515 ehci_calculate_bw_availability_mask( 3516 ehci_state_t *ehcip, 3517 uint_t bandwidth, 3518 int leaf, 3519 int leaf_count, 3520 uchar_t *bw_mask) 3521 { 3522 int i, j; 3523 uchar_t bw_uframe; 3524 int uframe_total; 3525 ehci_frame_bandwidth_t *fbp; 3526 uint_t total_bandwidth = 0; 3527 3528 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3529 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d", 3530 leaf, leaf_count); 3531 3532 /* Start by saying all uFrames are available */ 3533 *bw_mask = 0xFF; 3534 3535 for (i = 0; (i < leaf_count) || (*bw_mask == 0x00); i++) { 3536 fbp = &ehcip->ehci_frame_bandwidth[leaf + i]; 3537 3538 total_bandwidth += fbp->ehci_allocated_frame_bandwidth; 3539 3540 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3541 /* 3542 * If the uFrame in bw_mask is available check to see if 3543 * it can support the additional bandwidth. 3544 */ 3545 bw_uframe = (*bw_mask & (0x1 << j)); 3546 uframe_total = 3547 fbp->ehci_micro_frame_bandwidth[j] + 3548 bandwidth; 3549 if ((bw_uframe) && 3550 (uframe_total > HS_PERIODIC_BANDWIDTH)) { 3551 *bw_mask = *bw_mask & ~bw_uframe; 3552 } 3553 } 3554 } 3555 3556 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3557 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x", 3558 *bw_mask); 3559 3560 return (total_bandwidth); 3561 } 3562 3563 3564 /* 3565 * ehci_update_bw_availability: 3566 * 3567 * The leftmost leaf needs to be in terms of array position and 3568 * not the actual lattice position. 3569 */ 3570 static void 3571 ehci_update_bw_availability( 3572 ehci_state_t *ehcip, 3573 int bandwidth, 3574 int leftmost_leaf, 3575 int leaf_count, 3576 uchar_t mask) 3577 { 3578 int i, j; 3579 ehci_frame_bandwidth_t *fbp; 3580 int uFrame_bandwidth[8]; 3581 3582 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3583 "ehci_update_bw_availability: " 3584 "leaf %d count %d bandwidth 0x%x mask 0x%x", 3585 leftmost_leaf, leaf_count, bandwidth, mask); 3586 3587 ASSERT(leftmost_leaf < 32); 3588 ASSERT(leftmost_leaf >= 0); 3589 3590 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3591 if (mask & 0x1) { 3592 uFrame_bandwidth[j] = bandwidth; 3593 } else { 3594 uFrame_bandwidth[j] = 0; 3595 } 3596 3597 mask = mask >> 1; 3598 } 3599 3600 /* Updated all the effected leafs with the bandwidth */ 3601 for (i = 0; i < leaf_count; i++) { 3602 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i]; 3603 3604 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3605 fbp->ehci_micro_frame_bandwidth[j] += 3606 uFrame_bandwidth[j]; 3607 fbp->ehci_allocated_frame_bandwidth += 3608 uFrame_bandwidth[j]; 3609 } 3610 } 3611 } 3612 3613 /* 3614 * Miscellaneous functions 3615 */ 3616 3617 /* 3618 * ehci_obtain_state: 3619 * 3620 * NOTE: This function is also called from POLLED MODE. 3621 */ 3622 ehci_state_t * 3623 ehci_obtain_state(dev_info_t *dip) 3624 { 3625 int instance = ddi_get_instance(dip); 3626 3627 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance); 3628 3629 ASSERT(state != NULL); 3630 3631 return (state); 3632 } 3633 3634 3635 /* 3636 * ehci_state_is_operational: 3637 * 3638 * Check the Host controller state and return proper values. 3639 */ 3640 int 3641 ehci_state_is_operational(ehci_state_t *ehcip) 3642 { 3643 int val; 3644 3645 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3646 3647 switch (ehcip->ehci_hc_soft_state) { 3648 case EHCI_CTLR_INIT_STATE: 3649 case EHCI_CTLR_SUSPEND_STATE: 3650 val = USB_FAILURE; 3651 break; 3652 case EHCI_CTLR_OPERATIONAL_STATE: 3653 val = USB_SUCCESS; 3654 break; 3655 case EHCI_CTLR_ERROR_STATE: 3656 val = USB_HC_HARDWARE_ERROR; 3657 break; 3658 default: 3659 val = USB_FAILURE; 3660 break; 3661 } 3662 3663 return (val); 3664 } 3665 3666 3667 /* 3668 * ehci_do_soft_reset 3669 * 3670 * Do soft reset of ehci host controller. 3671 */ 3672 int 3673 ehci_do_soft_reset(ehci_state_t *ehcip) 3674 { 3675 usb_frame_number_t before_frame_number, after_frame_number; 3676 ehci_regs_t *ehci_save_regs; 3677 3678 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3679 3680 /* Increment host controller error count */ 3681 ehcip->ehci_hc_error++; 3682 3683 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3684 "ehci_do_soft_reset:" 3685 "Reset ehci host controller 0x%x", ehcip->ehci_hc_error); 3686 3687 /* 3688 * Allocate space for saving current Host Controller 3689 * registers. Don't do any recovery if allocation 3690 * fails. 3691 */ 3692 ehci_save_regs = (ehci_regs_t *) 3693 kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP); 3694 3695 if (ehci_save_regs == NULL) { 3696 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3697 "ehci_do_soft_reset: kmem_zalloc failed"); 3698 3699 return (USB_FAILURE); 3700 } 3701 3702 /* Save current ehci registers */ 3703 ehci_save_regs->ehci_command = Get_OpReg(ehci_command); 3704 ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt); 3705 ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment); 3706 ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr); 3707 ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag); 3708 ehci_save_regs->ehci_periodic_list_base = 3709 Get_OpReg(ehci_periodic_list_base); 3710 3711 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3712 "ehci_do_soft_reset: Save reg = 0x%p", (void *)ehci_save_regs); 3713 3714 /* Disable all list processing and interrupts */ 3715 Set_OpReg(ehci_command, Get_OpReg(ehci_command) & 3716 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)); 3717 3718 /* Disable all EHCI interrupts */ 3719 Set_OpReg(ehci_interrupt, 0); 3720 3721 /* Wait for few milliseconds */ 3722 drv_usecwait(EHCI_SOF_TIMEWAIT); 3723 3724 /* Do light soft reset of ehci host controller */ 3725 Set_OpReg(ehci_command, 3726 Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET); 3727 3728 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3729 "ehci_do_soft_reset: Reset in progress"); 3730 3731 /* Wait for reset to complete */ 3732 drv_usecwait(EHCI_RESET_TIMEWAIT); 3733 3734 /* 3735 * Restore previous saved EHCI register value 3736 * into the current EHCI registers. 3737 */ 3738 Set_OpReg(ehci_ctrl_segment, (uint32_t) 3739 ehci_save_regs->ehci_ctrl_segment); 3740 3741 Set_OpReg(ehci_periodic_list_base, (uint32_t) 3742 ehci_save_regs->ehci_periodic_list_base); 3743 3744 Set_OpReg(ehci_async_list_addr, (uint32_t) 3745 ehci_save_regs->ehci_async_list_addr); 3746 3747 /* 3748 * For some reason this register might get nulled out by 3749 * the Uli M1575 South Bridge. To workaround the hardware 3750 * problem, check the value after write and retry if the 3751 * last write fails. 3752 */ 3753 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) && 3754 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) && 3755 (ehci_save_regs->ehci_async_list_addr != 3756 Get_OpReg(ehci_async_list_addr))) { 3757 int retry = 0; 3758 3759 Set_OpRegRetry(ehci_async_list_addr, (uint32_t) 3760 ehci_save_regs->ehci_async_list_addr, retry); 3761 if (retry >= EHCI_MAX_RETRY) { 3762 USB_DPRINTF_L2(PRINT_MASK_ATTA, 3763 ehcip->ehci_log_hdl, "ehci_do_soft_reset:" 3764 " ASYNCLISTADDR write failed."); 3765 3766 return (USB_FAILURE); 3767 } 3768 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3769 "ehci_do_soft_reset: ASYNCLISTADDR " 3770 "write failed, retry=%d", retry); 3771 } 3772 3773 Set_OpReg(ehci_config_flag, (uint32_t) 3774 ehci_save_regs->ehci_config_flag); 3775 3776 /* Enable both Asynchronous and Periodic Schedule if necessary */ 3777 ehci_toggle_scheduler(ehcip); 3778 3779 /* 3780 * Set ehci_interrupt to enable all interrupts except Root 3781 * Hub Status change and frame list rollover interrupts. 3782 */ 3783 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR | 3784 EHCI_INTR_FRAME_LIST_ROLLOVER | 3785 EHCI_INTR_USB_ERROR | 3786 EHCI_INTR_USB); 3787 3788 /* 3789 * Deallocate the space that allocated for saving 3790 * HC registers. 3791 */ 3792 kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t)); 3793 3794 /* 3795 * Set the desired interrupt threshold, frame list size (if 3796 * applicable) and turn EHCI host controller. 3797 */ 3798 Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) & 3799 ~EHCI_CMD_INTR_THRESHOLD) | 3800 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN))); 3801 3802 /* Wait 10ms for EHCI to start sending SOF */ 3803 drv_usecwait(EHCI_RESET_TIMEWAIT); 3804 3805 /* 3806 * Get the current usb frame number before waiting for 3807 * few milliseconds. 3808 */ 3809 before_frame_number = ehci_get_current_frame_number(ehcip); 3810 3811 /* Wait for few milliseconds */ 3812 drv_usecwait(EHCI_SOF_TIMEWAIT); 3813 3814 /* 3815 * Get the current usb frame number after waiting for 3816 * few milliseconds. 3817 */ 3818 after_frame_number = ehci_get_current_frame_number(ehcip); 3819 3820 USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3821 "ehci_do_soft_reset: Before Frame Number 0x%llx " 3822 "After Frame Number 0x%llx", 3823 (unsigned long long)before_frame_number, 3824 (unsigned long long)after_frame_number); 3825 3826 if ((after_frame_number <= before_frame_number) && 3827 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) { 3828 3829 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3830 "ehci_do_soft_reset: Soft reset failed"); 3831 3832 return (USB_FAILURE); 3833 } 3834 3835 return (USB_SUCCESS); 3836 } 3837 3838 3839 /* 3840 * ehci_get_xfer_attrs: 3841 * 3842 * Get the attributes of a particular xfer. 3843 * 3844 * NOTE: This function is also called from POLLED MODE. 3845 */ 3846 usb_req_attrs_t 3847 ehci_get_xfer_attrs( 3848 ehci_state_t *ehcip, 3849 ehci_pipe_private_t *pp, 3850 ehci_trans_wrapper_t *tw) 3851 { 3852 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep; 3853 usb_req_attrs_t attrs = USB_ATTRS_NONE; 3854 3855 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3856 "ehci_get_xfer_attrs:"); 3857 3858 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) { 3859 case USB_EP_ATTR_CONTROL: 3860 attrs = ((usb_ctrl_req_t *) 3861 tw->tw_curr_xfer_reqp)->ctrl_attributes; 3862 break; 3863 case USB_EP_ATTR_BULK: 3864 attrs = ((usb_bulk_req_t *) 3865 tw->tw_curr_xfer_reqp)->bulk_attributes; 3866 break; 3867 case USB_EP_ATTR_INTR: 3868 attrs = ((usb_intr_req_t *) 3869 tw->tw_curr_xfer_reqp)->intr_attributes; 3870 break; 3871 } 3872 3873 return (attrs); 3874 } 3875 3876 3877 /* 3878 * ehci_get_current_frame_number: 3879 * 3880 * Get the current software based usb frame number. 3881 */ 3882 usb_frame_number_t 3883 ehci_get_current_frame_number(ehci_state_t *ehcip) 3884 { 3885 usb_frame_number_t usb_frame_number; 3886 usb_frame_number_t ehci_fno, micro_frame_number; 3887 3888 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3889 3890 ehci_fno = ehcip->ehci_fno; 3891 micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF; 3892 3893 /* 3894 * Calculate current software based usb frame number. 3895 * 3896 * This code accounts for the fact that frame number is 3897 * updated by the Host Controller before the ehci driver 3898 * gets an FrameListRollover interrupt that will adjust 3899 * Frame higher part. 3900 * 3901 * Refer ehci specification 1.0, section 2.3.2, page 21. 3902 */ 3903 micro_frame_number = ((micro_frame_number & 0x1FFF) | 3904 ehci_fno) + (((micro_frame_number & 0x3FFF) ^ 3905 ehci_fno) & 0x2000); 3906 3907 /* 3908 * Micro Frame number is equivalent to 125 usec. Eight 3909 * Micro Frame numbers are equivalent to one millsecond 3910 * or one usb frame number. 3911 */ 3912 usb_frame_number = micro_frame_number >> 3913 EHCI_uFRAMES_PER_USB_FRAME_SHIFT; 3914 3915 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3916 "ehci_get_current_frame_number: " 3917 "Current usb uframe number = 0x%llx " 3918 "Current usb frame number = 0x%llx", 3919 (unsigned long long)micro_frame_number, 3920 (unsigned long long)usb_frame_number); 3921 3922 return (usb_frame_number); 3923 } 3924 3925 3926 /* 3927 * ehci_cpr_cleanup: 3928 * 3929 * Cleanup ehci state and other ehci specific informations across 3930 * Check Point Resume (CPR). 3931 */ 3932 static void 3933 ehci_cpr_cleanup(ehci_state_t *ehcip) 3934 { 3935 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3936 3937 /* Reset software part of usb frame number */ 3938 ehcip->ehci_fno = 0; 3939 } 3940 3941 3942 /* 3943 * ehci_wait_for_sof: 3944 * 3945 * Wait for couple of SOF interrupts 3946 */ 3947 int 3948 ehci_wait_for_sof(ehci_state_t *ehcip) 3949 { 3950 usb_frame_number_t before_frame_number, after_frame_number; 3951 int error = USB_SUCCESS; 3952 3953 USB_DPRINTF_L4(PRINT_MASK_LISTS, 3954 ehcip->ehci_log_hdl, "ehci_wait_for_sof"); 3955 3956 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3957 3958 error = ehci_state_is_operational(ehcip); 3959 3960 if (error != USB_SUCCESS) { 3961 3962 return (error); 3963 } 3964 3965 /* Get the current usb frame number before waiting for two SOFs */ 3966 before_frame_number = ehci_get_current_frame_number(ehcip); 3967 3968 mutex_exit(&ehcip->ehci_int_mutex); 3969 3970 /* Wait for few milliseconds */ 3971 delay(drv_usectohz(EHCI_SOF_TIMEWAIT)); 3972 3973 mutex_enter(&ehcip->ehci_int_mutex); 3974 3975 /* Get the current usb frame number after woken up */ 3976 after_frame_number = ehci_get_current_frame_number(ehcip); 3977 3978 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3979 "ehci_wait_for_sof: framenumber: before 0x%llx " 3980 "after 0x%llx", 3981 (unsigned long long)before_frame_number, 3982 (unsigned long long)after_frame_number); 3983 3984 /* Return failure, if usb frame number has not been changed */ 3985 if (after_frame_number <= before_frame_number) { 3986 3987 if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) { 3988 3989 USB_DPRINTF_L0(PRINT_MASK_LISTS, 3990 ehcip->ehci_log_hdl, "No SOF interrupts"); 3991 3992 /* Set host controller soft state to error */ 3993 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE; 3994 3995 return (USB_FAILURE); 3996 } 3997 3998 } 3999 4000 return (USB_SUCCESS); 4001 } 4002 4003 4004 /* 4005 * ehci_toggle_scheduler: 4006 * 4007 * Turn scheduler based on pipe open count. 4008 */ 4009 void 4010 ehci_toggle_scheduler(ehci_state_t *ehcip) { 4011 uint_t temp_reg, cmd_reg; 4012 4013 cmd_reg = Get_OpReg(ehci_command); 4014 temp_reg = cmd_reg; 4015 4016 /* 4017 * Enable/Disable asynchronous scheduler, and 4018 * turn on/off async list door bell 4019 */ 4020 if (ehcip->ehci_open_async_count) { 4021 if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) { 4022 /* 4023 * For some reason this address might get nulled out by 4024 * the ehci chip. Set it here just in case it is null. 4025 */ 4026 Set_OpReg(ehci_async_list_addr, 4027 ehci_qh_cpu_to_iommu(ehcip, 4028 ehcip->ehci_head_of_async_sched_list)); 4029 4030 /* 4031 * For some reason this register might get nulled out by 4032 * the Uli M1575 Southbridge. To workaround the HW 4033 * problem, check the value after write and retry if the 4034 * last write fails. 4035 * 4036 * If the ASYNCLISTADDR remains "stuck" after 4037 * EHCI_MAX_RETRY retries, then the M1575 is broken 4038 * and is stuck in an inconsistent state and is about 4039 * to crash the machine with a trn_oor panic when it 4040 * does a DMA read from 0x0. It is better to panic 4041 * now rather than wait for the trn_oor crash; this 4042 * way Customer Service will have a clean signature 4043 * that indicts the M1575 chip rather than a 4044 * mysterious and hard-to-diagnose trn_oor panic. 4045 */ 4046 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) && 4047 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) && 4048 (ehci_qh_cpu_to_iommu(ehcip, 4049 ehcip->ehci_head_of_async_sched_list) != 4050 Get_OpReg(ehci_async_list_addr))) { 4051 int retry = 0; 4052 4053 Set_OpRegRetry(ehci_async_list_addr, 4054 ehci_qh_cpu_to_iommu(ehcip, 4055 ehcip->ehci_head_of_async_sched_list), 4056 retry); 4057 if (retry >= EHCI_MAX_RETRY) 4058 cmn_err(CE_PANIC, 4059 "ehci_toggle_scheduler: " 4060 "ASYNCLISTADDR write failed."); 4061 4062 USB_DPRINTF_L2(PRINT_MASK_ATTA, 4063 ehcip->ehci_log_hdl, 4064 "ehci_toggle_scheduler: ASYNCLISTADDR " 4065 "write failed, retry=%d", retry); 4066 } 4067 } 4068 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE; 4069 } else { 4070 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE; 4071 } 4072 4073 if (ehcip->ehci_open_periodic_count) { 4074 if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) { 4075 /* 4076 * For some reason this address get's nulled out by 4077 * the ehci chip. Set it here just in case it is null. 4078 */ 4079 Set_OpReg(ehci_periodic_list_base, 4080 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address & 4081 0xFFFFF000)); 4082 } 4083 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE; 4084 } else { 4085 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE; 4086 } 4087 4088 /* Just an optimization */ 4089 if (temp_reg != cmd_reg) { 4090 Set_OpReg(ehci_command, cmd_reg); 4091 } 4092 } 4093 4094 /* 4095 * ehci print functions 4096 */ 4097 4098 /* 4099 * ehci_print_caps: 4100 */ 4101 void 4102 ehci_print_caps(ehci_state_t *ehcip) 4103 { 4104 uint_t i; 4105 4106 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4107 "\n\tUSB 2.0 Host Controller Characteristics\n"); 4108 4109 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4110 "Caps Length: 0x%x Version: 0x%x\n", 4111 Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version)); 4112 4113 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4114 "Structural Parameters\n"); 4115 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4116 "Port indicators: %s", (Get_Cap(ehci_hcs_params) & 4117 EHCI_HCS_PORT_INDICATOR) ? "Yes" : "No"); 4118 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4119 "No of Classic host controllers: 0x%x", 4120 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS) 4121 >> EHCI_HCS_NUM_COMP_CTRL_SHIFT); 4122 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4123 "No of ports per Classic host controller: 0x%x", 4124 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC) 4125 >> EHCI_HCS_NUM_PORTS_CC_SHIFT); 4126 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4127 "Port routing rules: %s", (Get_Cap(ehci_hcs_params) & 4128 EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No"); 4129 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4130 "Port power control: %s", (Get_Cap(ehci_hcs_params) & 4131 EHCI_HCS_PORT_POWER_CONTROL) ? "Yes" : "No"); 4132 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4133 "No of root hub ports: 0x%x\n", 4134 Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); 4135 4136 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4137 "Capability Parameters\n"); 4138 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4139 "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) & 4140 EHCI_HCC_EECP) ? "Yes" : "No"); 4141 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4142 "Isoch schedule threshold: 0x%x", 4143 Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD); 4144 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4145 "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) & 4146 EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No"); 4147 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4148 "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) & 4149 EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024"); 4150 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4151 "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) & 4152 EHCI_HCC_64BIT_ADDR_CAP) ? "Yes" : "No"); 4153 4154 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4155 "Classic Port Route Description"); 4156 4157 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) { 4158 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4159 "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i])); 4160 } 4161 } 4162 4163 4164 /* 4165 * ehci_print_regs: 4166 */ 4167 void 4168 ehci_print_regs(ehci_state_t *ehcip) 4169 { 4170 uint_t i; 4171 4172 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4173 "\n\tEHCI%d Operational Registers\n", 4174 ddi_get_instance(ehcip->ehci_dip)); 4175 4176 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4177 "Command: 0x%x Status: 0x%x", 4178 Get_OpReg(ehci_command), Get_OpReg(ehci_status)); 4179 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4180 "Interrupt: 0x%x Frame Index: 0x%x", 4181 Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index)); 4182 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4183 "Control Segment: 0x%x Periodic List Base: 0x%x", 4184 Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base)); 4185 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4186 "Async List Addr: 0x%x Config Flag: 0x%x", 4187 Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag)); 4188 4189 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4190 "Root Hub Port Status"); 4191 4192 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) { 4193 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4194 "\tPort Status 0x%x: 0x%x ", i, 4195 Get_OpReg(ehci_rh_port_status[i])); 4196 } 4197 } 4198 4199 4200 /* 4201 * ehci_print_qh: 4202 */ 4203 void 4204 ehci_print_qh( 4205 ehci_state_t *ehcip, 4206 ehci_qh_t *qh) 4207 { 4208 uint_t i; 4209 4210 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4211 "ehci_print_qh: qh = 0x%p", (void *)qh); 4212 4213 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4214 "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr)); 4215 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4216 "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl)); 4217 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4218 "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl)); 4219 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4220 "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd)); 4221 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4222 "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd)); 4223 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4224 "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd)); 4225 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4226 "\tqh_status: 0x%x ", Get_QH(qh->qh_status)); 4227 4228 for (i = 0; i < 5; i++) { 4229 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4230 "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i])); 4231 } 4232 4233 for (i = 0; i < 5; i++) { 4234 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4235 "\tqh_buf_high[%d]: 0x%x ", 4236 i, Get_QH(qh->qh_buf_high[i])); 4237 } 4238 4239 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4240 "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd)); 4241 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4242 "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev)); 4243 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4244 "\tqh_state: 0x%x ", Get_QH(qh->qh_state)); 4245 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4246 "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next)); 4247 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4248 "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame)); 4249 } 4250 4251 4252 /* 4253 * ehci_print_qtd: 4254 */ 4255 void 4256 ehci_print_qtd( 4257 ehci_state_t *ehcip, 4258 ehci_qtd_t *qtd) 4259 { 4260 uint_t i; 4261 4262 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4263 "ehci_print_qtd: qtd = 0x%p", (void *)qtd); 4264 4265 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4266 "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd)); 4267 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4268 "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd)); 4269 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4270 "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl)); 4271 4272 for (i = 0; i < 5; i++) { 4273 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4274 "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i])); 4275 } 4276 4277 for (i = 0; i < 5; i++) { 4278 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4279 "\tqtd_buf_high[%d]: 0x%x ", 4280 i, Get_QTD(qtd->qtd_buf_high[i])); 4281 } 4282 4283 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4284 "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper)); 4285 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4286 "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd)); 4287 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4288 "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next)); 4289 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4290 "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev)); 4291 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4292 "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state)); 4293 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4294 "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase)); 4295 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4296 "\tqtd_xfer_offs: 0x%x ", Get_QTD(qtd->qtd_xfer_offs)); 4297 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4298 "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len)); 4299 } 4300 4301 /* 4302 * ehci kstat functions 4303 */ 4304 4305 /* 4306 * ehci_create_stats: 4307 * 4308 * Allocate and initialize the ehci kstat structures 4309 */ 4310 void 4311 ehci_create_stats(ehci_state_t *ehcip) 4312 { 4313 char kstatname[KSTAT_STRLEN]; 4314 const char *dname = ddi_driver_name(ehcip->ehci_dip); 4315 char *usbtypes[USB_N_COUNT_KSTATS] = 4316 {"ctrl", "isoch", "bulk", "intr"}; 4317 uint_t instance = ehcip->ehci_instance; 4318 ehci_intrs_stats_t *isp; 4319 int i; 4320 4321 if (EHCI_INTRS_STATS(ehcip) == NULL) { 4322 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs", 4323 dname, instance); 4324 EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance, 4325 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED, 4326 sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t), 4327 KSTAT_FLAG_PERSISTENT); 4328 4329 if (EHCI_INTRS_STATS(ehcip)) { 4330 isp = EHCI_INTRS_STATS_DATA(ehcip); 4331 kstat_named_init(&isp->ehci_sts_total, 4332 "Interrupts Total", KSTAT_DATA_UINT64); 4333 kstat_named_init(&isp->ehci_sts_not_claimed, 4334 "Not Claimed", KSTAT_DATA_UINT64); 4335 kstat_named_init(&isp->ehci_sts_async_sched_status, 4336 "Async schedule status", KSTAT_DATA_UINT64); 4337 kstat_named_init(&isp->ehci_sts_periodic_sched_status, 4338 "Periodic sched status", KSTAT_DATA_UINT64); 4339 kstat_named_init(&isp->ehci_sts_empty_async_schedule, 4340 "Empty async schedule", KSTAT_DATA_UINT64); 4341 kstat_named_init(&isp->ehci_sts_host_ctrl_halted, 4342 "Host controller Halted", KSTAT_DATA_UINT64); 4343 kstat_named_init(&isp->ehci_sts_async_advance_intr, 4344 "Intr on async advance", KSTAT_DATA_UINT64); 4345 kstat_named_init(&isp->ehci_sts_host_system_error_intr, 4346 "Host system error", KSTAT_DATA_UINT64); 4347 kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr, 4348 "Frame list rollover", KSTAT_DATA_UINT64); 4349 kstat_named_init(&isp->ehci_sts_rh_port_change_intr, 4350 "Port change detect", KSTAT_DATA_UINT64); 4351 kstat_named_init(&isp->ehci_sts_usb_error_intr, 4352 "USB error interrupt", KSTAT_DATA_UINT64); 4353 kstat_named_init(&isp->ehci_sts_usb_intr, 4354 "USB interrupt", KSTAT_DATA_UINT64); 4355 4356 EHCI_INTRS_STATS(ehcip)->ks_private = ehcip; 4357 EHCI_INTRS_STATS(ehcip)->ks_update = nulldev; 4358 kstat_install(EHCI_INTRS_STATS(ehcip)); 4359 } 4360 } 4361 4362 if (EHCI_TOTAL_STATS(ehcip) == NULL) { 4363 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total", 4364 dname, instance); 4365 EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance, 4366 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1, 4367 KSTAT_FLAG_PERSISTENT); 4368 4369 if (EHCI_TOTAL_STATS(ehcip)) { 4370 kstat_install(EHCI_TOTAL_STATS(ehcip)); 4371 } 4372 } 4373 4374 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 4375 if (ehcip->ehci_count_stats[i] == NULL) { 4376 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s", 4377 dname, instance, usbtypes[i]); 4378 ehcip->ehci_count_stats[i] = kstat_create("usba", 4379 instance, kstatname, "usb_byte_count", 4380 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 4381 4382 if (ehcip->ehci_count_stats[i]) { 4383 kstat_install(ehcip->ehci_count_stats[i]); 4384 } 4385 } 4386 } 4387 } 4388 4389 4390 /* 4391 * ehci_destroy_stats: 4392 * 4393 * Clean up ehci kstat structures 4394 */ 4395 void 4396 ehci_destroy_stats(ehci_state_t *ehcip) 4397 { 4398 int i; 4399 4400 if (EHCI_INTRS_STATS(ehcip)) { 4401 kstat_delete(EHCI_INTRS_STATS(ehcip)); 4402 EHCI_INTRS_STATS(ehcip) = NULL; 4403 } 4404 4405 if (EHCI_TOTAL_STATS(ehcip)) { 4406 kstat_delete(EHCI_TOTAL_STATS(ehcip)); 4407 EHCI_TOTAL_STATS(ehcip) = NULL; 4408 } 4409 4410 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 4411 if (ehcip->ehci_count_stats[i]) { 4412 kstat_delete(ehcip->ehci_count_stats[i]); 4413 ehcip->ehci_count_stats[i] = NULL; 4414 } 4415 } 4416 } 4417 4418 4419 /* 4420 * ehci_do_intrs_stats: 4421 * 4422 * ehci status information 4423 */ 4424 void 4425 ehci_do_intrs_stats( 4426 ehci_state_t *ehcip, 4427 int val) 4428 { 4429 if (EHCI_INTRS_STATS(ehcip)) { 4430 EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++; 4431 switch (val) { 4432 case EHCI_STS_ASYNC_SCHED_STATUS: 4433 EHCI_INTRS_STATS_DATA(ehcip)-> 4434 ehci_sts_async_sched_status.value.ui64++; 4435 break; 4436 case EHCI_STS_PERIODIC_SCHED_STATUS: 4437 EHCI_INTRS_STATS_DATA(ehcip)-> 4438 ehci_sts_periodic_sched_status.value.ui64++; 4439 break; 4440 case EHCI_STS_EMPTY_ASYNC_SCHEDULE: 4441 EHCI_INTRS_STATS_DATA(ehcip)-> 4442 ehci_sts_empty_async_schedule.value.ui64++; 4443 break; 4444 case EHCI_STS_HOST_CTRL_HALTED: 4445 EHCI_INTRS_STATS_DATA(ehcip)-> 4446 ehci_sts_host_ctrl_halted.value.ui64++; 4447 break; 4448 case EHCI_STS_ASYNC_ADVANCE_INTR: 4449 EHCI_INTRS_STATS_DATA(ehcip)-> 4450 ehci_sts_async_advance_intr.value.ui64++; 4451 break; 4452 case EHCI_STS_HOST_SYSTEM_ERROR_INTR: 4453 EHCI_INTRS_STATS_DATA(ehcip)-> 4454 ehci_sts_host_system_error_intr.value.ui64++; 4455 break; 4456 case EHCI_STS_FRM_LIST_ROLLOVER_INTR: 4457 EHCI_INTRS_STATS_DATA(ehcip)-> 4458 ehci_sts_frm_list_rollover_intr.value.ui64++; 4459 break; 4460 case EHCI_STS_RH_PORT_CHANGE_INTR: 4461 EHCI_INTRS_STATS_DATA(ehcip)-> 4462 ehci_sts_rh_port_change_intr.value.ui64++; 4463 break; 4464 case EHCI_STS_USB_ERROR_INTR: 4465 EHCI_INTRS_STATS_DATA(ehcip)-> 4466 ehci_sts_usb_error_intr.value.ui64++; 4467 break; 4468 case EHCI_STS_USB_INTR: 4469 EHCI_INTRS_STATS_DATA(ehcip)-> 4470 ehci_sts_usb_intr.value.ui64++; 4471 break; 4472 default: 4473 EHCI_INTRS_STATS_DATA(ehcip)-> 4474 ehci_sts_not_claimed.value.ui64++; 4475 break; 4476 } 4477 } 4478 } 4479 4480 4481 /* 4482 * ehci_do_byte_stats: 4483 * 4484 * ehci data xfer information 4485 */ 4486 void 4487 ehci_do_byte_stats( 4488 ehci_state_t *ehcip, 4489 size_t len, 4490 uint8_t attr, 4491 uint8_t addr) 4492 { 4493 uint8_t type = attr & USB_EP_ATTR_MASK; 4494 uint8_t dir = addr & USB_EP_DIR_MASK; 4495 4496 if (dir == USB_EP_DIR_IN) { 4497 EHCI_TOTAL_STATS_DATA(ehcip)->reads++; 4498 EHCI_TOTAL_STATS_DATA(ehcip)->nread += len; 4499 switch (type) { 4500 case USB_EP_ATTR_CONTROL: 4501 EHCI_CTRL_STATS(ehcip)->reads++; 4502 EHCI_CTRL_STATS(ehcip)->nread += len; 4503 break; 4504 case USB_EP_ATTR_BULK: 4505 EHCI_BULK_STATS(ehcip)->reads++; 4506 EHCI_BULK_STATS(ehcip)->nread += len; 4507 break; 4508 case USB_EP_ATTR_INTR: 4509 EHCI_INTR_STATS(ehcip)->reads++; 4510 EHCI_INTR_STATS(ehcip)->nread += len; 4511 break; 4512 case USB_EP_ATTR_ISOCH: 4513 EHCI_ISOC_STATS(ehcip)->reads++; 4514 EHCI_ISOC_STATS(ehcip)->nread += len; 4515 break; 4516 } 4517 } else if (dir == USB_EP_DIR_OUT) { 4518 EHCI_TOTAL_STATS_DATA(ehcip)->writes++; 4519 EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len; 4520 switch (type) { 4521 case USB_EP_ATTR_CONTROL: 4522 EHCI_CTRL_STATS(ehcip)->writes++; 4523 EHCI_CTRL_STATS(ehcip)->nwritten += len; 4524 break; 4525 case USB_EP_ATTR_BULK: 4526 EHCI_BULK_STATS(ehcip)->writes++; 4527 EHCI_BULK_STATS(ehcip)->nwritten += len; 4528 break; 4529 case USB_EP_ATTR_INTR: 4530 EHCI_INTR_STATS(ehcip)->writes++; 4531 EHCI_INTR_STATS(ehcip)->nwritten += len; 4532 break; 4533 case USB_EP_ATTR_ISOCH: 4534 EHCI_ISOC_STATS(ehcip)->writes++; 4535 EHCI_ISOC_STATS(ehcip)->nwritten += len; 4536 break; 4537 } 4538 } 4539 } 4540