1 /* 2 * mr_sas.c: source for mr_sas driver 3 * 4 * MegaRAID device driver for SAS2.0 controllers 5 * Copyright (c) 2008-2010, LSI Logic Corporation. 6 * All rights reserved. 7 * 8 * Version: 9 * Author: 10 * Arun Chandrashekhar 11 * Manju R 12 * Rajesh Prabhakaran 13 * Seokmann Ju 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions are met: 17 * 18 * 1. Redistributions of source code must retain the above copyright notice, 19 * this list of conditions and the following disclaimer. 20 * 21 * 2. Redistributions in binary form must reproduce the above copyright notice, 22 * this list of conditions and the following disclaimer in the documentation 23 * and/or other materials provided with the distribution. 24 * 25 * 3. Neither the name of the author nor the names of its contributors may be 26 * used to endorse or promote products derived from this software without 27 * specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 32 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 33 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 35 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 36 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 37 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 38 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 39 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 40 * DAMAGE. 41 */ 42 43 /* 44 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. 
45 */ 46 47 #include <sys/types.h> 48 #include <sys/param.h> 49 #include <sys/file.h> 50 #include <sys/errno.h> 51 #include <sys/open.h> 52 #include <sys/cred.h> 53 #include <sys/modctl.h> 54 #include <sys/conf.h> 55 #include <sys/devops.h> 56 #include <sys/cmn_err.h> 57 #include <sys/kmem.h> 58 #include <sys/stat.h> 59 #include <sys/mkdev.h> 60 #include <sys/pci.h> 61 #include <sys/scsi/scsi.h> 62 #include <sys/ddi.h> 63 #include <sys/sunddi.h> 64 #include <sys/atomic.h> 65 #include <sys/signal.h> 66 #include <sys/byteorder.h> 67 #include <sys/sdt.h> 68 #include <sys/fs/dv_node.h> /* devfs_clean */ 69 70 #include "mr_sas.h" 71 72 /* 73 * FMA header files 74 */ 75 #include <sys/ddifm.h> 76 #include <sys/fm/protocol.h> 77 #include <sys/fm/util.h> 78 #include <sys/fm/io/ddi.h> 79 80 /* 81 * Local static data 82 */ 83 static void *mrsas_state = NULL; 84 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE; 85 static volatile int debug_level_g = CL_NONE; 86 static volatile int msi_enable = 1; 87 static volatile int ctio_enable = 1; 88 89 /* Default Timeout value to issue online controller reset */ 90 static volatile int debug_timeout_g = 0x12C; 91 /* Simulate consecutive firmware fault */ 92 static volatile int debug_fw_faults_after_ocr_g = 0; 93 94 #ifdef OCRDEBUG 95 /* Simulate three consecutive timeout for an IO */ 96 static volatile int debug_consecutive_timeout_after_ocr_g = 0; 97 #endif 98 99 #pragma weak scsi_hba_open 100 #pragma weak scsi_hba_close 101 #pragma weak scsi_hba_ioctl 102 103 static ddi_dma_attr_t mrsas_generic_dma_attr = { 104 DMA_ATTR_V0, /* dma_attr_version */ 105 0, /* low DMA address range */ 106 0xFFFFFFFFU, /* high DMA address range */ 107 0xFFFFFFFFU, /* DMA counter register */ 108 8, /* DMA address alignment */ 109 0x07, /* DMA burstsizes */ 110 1, /* min DMA size */ 111 0xFFFFFFFFU, /* max DMA size */ 112 0xFFFFFFFFU, /* segment boundary */ 113 MRSAS_MAX_SGE_CNT, /* dma_attr_sglen */ 114 512, /* granularity of device */ 115 0 /* bus 
specific DMA flags */ 116 }; 117 118 int32_t mrsas_max_cap_maxxfer = 0x1000000; 119 120 /* 121 * cb_ops contains base level routines 122 */ 123 static struct cb_ops mrsas_cb_ops = { 124 mrsas_open, /* open */ 125 mrsas_close, /* close */ 126 nodev, /* strategy */ 127 nodev, /* print */ 128 nodev, /* dump */ 129 nodev, /* read */ 130 nodev, /* write */ 131 mrsas_ioctl, /* ioctl */ 132 nodev, /* devmap */ 133 nodev, /* mmap */ 134 nodev, /* segmap */ 135 nochpoll, /* poll */ 136 nodev, /* cb_prop_op */ 137 0, /* streamtab */ 138 D_NEW | D_HOTPLUG, /* cb_flag */ 139 CB_REV, /* cb_rev */ 140 nodev, /* cb_aread */ 141 nodev /* cb_awrite */ 142 }; 143 144 /* 145 * dev_ops contains configuration routines 146 */ 147 static struct dev_ops mrsas_ops = { 148 DEVO_REV, /* rev, */ 149 0, /* refcnt */ 150 mrsas_getinfo, /* getinfo */ 151 nulldev, /* identify */ 152 nulldev, /* probe */ 153 mrsas_attach, /* attach */ 154 mrsas_detach, /* detach */ 155 #ifdef __sparc 156 mrsas_reset, /* reset */ 157 #else /* __sparc */ 158 nodev, 159 #endif /* __sparc */ 160 &mrsas_cb_ops, /* char/block ops */ 161 NULL, /* bus ops */ 162 NULL, /* power */ 163 #ifdef __sparc 164 ddi_quiesce_not_needed 165 #else /* __sparc */ 166 mrsas_quiesce /* quiesce */ 167 #endif /* __sparc */ 168 }; 169 170 char _depends_on[] = "misc/scsi"; 171 172 static struct modldrv modldrv = { 173 &mod_driverops, /* module type - driver */ 174 MRSAS_VERSION, 175 &mrsas_ops, /* driver ops */ 176 }; 177 178 static struct modlinkage modlinkage = { 179 MODREV_1, /* ml_rev - must be MODREV_1 */ 180 &modldrv, /* ml_linkage */ 181 NULL /* end of driver linkage */ 182 }; 183 184 static struct ddi_device_acc_attr endian_attr = { 185 DDI_DEVICE_ATTR_V1, 186 DDI_STRUCTURE_LE_ACC, 187 DDI_STRICTORDER_ACC, 188 DDI_DEFAULT_ACC 189 }; 190 191 192 /* 193 * ************************************************************************** * 194 * * 195 * common entry points - for loadable kernel modules * 196 * * 197 * 
************************************************************************** * 198 */ 199 200 int 201 _init(void) 202 { 203 int ret; 204 205 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 206 207 ret = ddi_soft_state_init(&mrsas_state, 208 sizeof (struct mrsas_instance), 0); 209 210 if (ret != DDI_SUCCESS) { 211 con_log(CL_ANN, (CE_WARN, "mr_sas: could not init state")); 212 return (ret); 213 } 214 215 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) { 216 con_log(CL_ANN, (CE_WARN, "mr_sas: could not init scsi hba")); 217 ddi_soft_state_fini(&mrsas_state); 218 return (ret); 219 } 220 221 ret = mod_install(&modlinkage); 222 223 if (ret != DDI_SUCCESS) { 224 con_log(CL_ANN, (CE_WARN, "mr_sas: mod_install failed")); 225 scsi_hba_fini(&modlinkage); 226 ddi_soft_state_fini(&mrsas_state); 227 } 228 229 return (ret); 230 } 231 232 int 233 _info(struct modinfo *modinfop) 234 { 235 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 236 237 return (mod_info(&modlinkage, modinfop)); 238 } 239 240 int 241 _fini(void) 242 { 243 int ret; 244 245 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 246 247 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) 248 return (ret); 249 250 scsi_hba_fini(&modlinkage); 251 252 ddi_soft_state_fini(&mrsas_state); 253 254 return (ret); 255 } 256 257 258 /* 259 * ************************************************************************** * 260 * * 261 * common entry points - for autoconfiguration * 262 * * 263 * ************************************************************************** * 264 */ 265 266 static int 267 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 268 { 269 int instance_no; 270 int nregs; 271 uint8_t added_isr_f = 0; 272 uint8_t added_soft_isr_f = 0; 273 uint8_t create_devctl_node_f = 0; 274 uint8_t create_scsi_node_f = 0; 275 uint8_t create_ioc_node_f = 0; 276 uint8_t tran_alloc_f = 0; 277 uint8_t irq; 278 uint16_t vendor_id; 279 uint16_t device_id; 280 uint16_t subsysvid; 
281 uint16_t subsysid; 282 uint16_t command; 283 off_t reglength = 0; 284 int intr_types = 0; 285 char *data; 286 287 scsi_hba_tran_t *tran; 288 ddi_dma_attr_t tran_dma_attr; 289 struct mrsas_instance *instance; 290 291 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 292 293 /* CONSTCOND */ 294 ASSERT(NO_COMPETING_THREADS); 295 296 instance_no = ddi_get_instance(dip); 297 298 /* 299 * check to see whether this device is in a DMA-capable slot. 300 */ 301 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 302 con_log(CL_ANN, (CE_WARN, 303 "mr_sas%d: Device in slave-only slot, unused", 304 instance_no)); 305 return (DDI_FAILURE); 306 } 307 308 switch (cmd) { 309 case DDI_ATTACH: 310 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: DDI_ATTACH")); 311 /* allocate the soft state for the instance */ 312 if (ddi_soft_state_zalloc(mrsas_state, instance_no) 313 != DDI_SUCCESS) { 314 con_log(CL_ANN, (CE_WARN, 315 "mr_sas%d: Failed to allocate soft state", 316 instance_no)); 317 318 return (DDI_FAILURE); 319 } 320 321 instance = (struct mrsas_instance *)ddi_get_soft_state 322 (mrsas_state, instance_no); 323 324 if (instance == NULL) { 325 con_log(CL_ANN, (CE_WARN, 326 "mr_sas%d: Bad soft state", instance_no)); 327 328 ddi_soft_state_free(mrsas_state, instance_no); 329 330 return (DDI_FAILURE); 331 } 332 333 bzero((caddr_t)instance, 334 sizeof (struct mrsas_instance)); 335 336 instance->func_ptr = kmem_zalloc( 337 sizeof (struct mrsas_func_ptr), KM_SLEEP); 338 ASSERT(instance->func_ptr); 339 340 /* Setup the PCI configuration space handles */ 341 if (pci_config_setup(dip, &instance->pci_handle) != 342 DDI_SUCCESS) { 343 con_log(CL_ANN, (CE_WARN, 344 "mr_sas%d: pci config setup failed ", 345 instance_no)); 346 347 kmem_free(instance->func_ptr, 348 sizeof (struct mrsas_func_ptr)); 349 ddi_soft_state_free(mrsas_state, instance_no); 350 351 return (DDI_FAILURE); 352 } 353 354 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) { 355 con_log(CL_ANN, (CE_WARN, 356 "mr_sas: failed to get 
registers.")); 357 358 pci_config_teardown(&instance->pci_handle); 359 kmem_free(instance->func_ptr, 360 sizeof (struct mrsas_func_ptr)); 361 ddi_soft_state_free(mrsas_state, instance_no); 362 363 return (DDI_FAILURE); 364 } 365 366 vendor_id = pci_config_get16(instance->pci_handle, 367 PCI_CONF_VENID); 368 device_id = pci_config_get16(instance->pci_handle, 369 PCI_CONF_DEVID); 370 371 subsysvid = pci_config_get16(instance->pci_handle, 372 PCI_CONF_SUBVENID); 373 subsysid = pci_config_get16(instance->pci_handle, 374 PCI_CONF_SUBSYSID); 375 376 pci_config_put16(instance->pci_handle, PCI_CONF_COMM, 377 (pci_config_get16(instance->pci_handle, 378 PCI_CONF_COMM) | PCI_COMM_ME)); 379 irq = pci_config_get8(instance->pci_handle, 380 PCI_CONF_ILINE); 381 382 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 383 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s", 384 instance_no, vendor_id, device_id, subsysvid, 385 subsysid, irq, MRSAS_VERSION)); 386 387 /* enable bus-mastering */ 388 command = pci_config_get16(instance->pci_handle, 389 PCI_CONF_COMM); 390 391 if (!(command & PCI_COMM_ME)) { 392 command |= PCI_COMM_ME; 393 394 pci_config_put16(instance->pci_handle, 395 PCI_CONF_COMM, command); 396 397 con_log(CL_ANN, (CE_CONT, "mr_sas%d: " 398 "enable bus-mastering", instance_no)); 399 } else { 400 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 401 "bus-mastering already set", instance_no)); 402 } 403 404 /* initialize function pointers */ 405 if ((device_id == PCI_DEVICE_ID_LSI_2108VDE) || 406 (device_id == PCI_DEVICE_ID_LSI_2108V)) { 407 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 408 "2108V/DE detected", instance_no)); 409 instance->func_ptr->read_fw_status_reg = 410 read_fw_status_reg_ppc; 411 instance->func_ptr->issue_cmd = issue_cmd_ppc; 412 instance->func_ptr->issue_cmd_in_sync_mode = 413 issue_cmd_in_sync_mode_ppc; 414 instance->func_ptr->issue_cmd_in_poll_mode = 415 issue_cmd_in_poll_mode_ppc; 416 instance->func_ptr->enable_intr = 417 enable_intr_ppc; 418 
instance->func_ptr->disable_intr = 419 disable_intr_ppc; 420 instance->func_ptr->intr_ack = intr_ack_ppc; 421 } else { 422 con_log(CL_ANN, (CE_WARN, 423 "mr_sas: Invalid device detected")); 424 425 pci_config_teardown(&instance->pci_handle); 426 kmem_free(instance->func_ptr, 427 sizeof (struct mrsas_func_ptr)); 428 ddi_soft_state_free(mrsas_state, instance_no); 429 430 return (DDI_FAILURE); 431 } 432 433 instance->baseaddress = pci_config_get32( 434 instance->pci_handle, PCI_CONF_BASE0); 435 instance->baseaddress &= 0x0fffc; 436 437 instance->dip = dip; 438 instance->vendor_id = vendor_id; 439 instance->device_id = device_id; 440 instance->subsysvid = subsysvid; 441 instance->subsysid = subsysid; 442 instance->instance = instance_no; 443 444 /* Initialize FMA */ 445 instance->fm_capabilities = ddi_prop_get_int( 446 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS, 447 "fm-capable", DDI_FM_EREPORT_CAPABLE | 448 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE 449 | DDI_FM_ERRCB_CAPABLE); 450 451 mrsas_fm_init(instance); 452 453 /* Initialize Interrupts */ 454 if ((ddi_dev_regsize(instance->dip, 455 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) || 456 reglength < MINIMUM_MFI_MEM_SZ) { 457 return (DDI_FAILURE); 458 } 459 if (reglength > DEFAULT_MFI_MEM_SZ) { 460 reglength = DEFAULT_MFI_MEM_SZ; 461 con_log(CL_DLEVEL1, (CE_NOTE, 462 "mr_sas: register length to map is " 463 "0x%lx bytes", reglength)); 464 } 465 if (ddi_regs_map_setup(instance->dip, 466 REGISTER_SET_IO_2108, &instance->regmap, 0, 467 reglength, &endian_attr, &instance->regmap_handle) 468 != DDI_SUCCESS) { 469 con_log(CL_ANN, (CE_NOTE, 470 "mr_sas: couldn't map control registers")); 471 goto fail_attach; 472 } 473 474 /* 475 * Disable Interrupt Now. 
476 * Setup Software interrupt 477 */ 478 instance->func_ptr->disable_intr(instance); 479 480 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 481 "mrsas-enable-msi", &data) == DDI_SUCCESS) { 482 if (strncmp(data, "no", 3) == 0) { 483 msi_enable = 0; 484 con_log(CL_ANN1, (CE_WARN, 485 "msi_enable = %d disabled", 486 msi_enable)); 487 } 488 ddi_prop_free(data); 489 } 490 491 con_log(CL_DLEVEL1, (CE_WARN, "msi_enable = %d", 492 msi_enable)); 493 494 /* Check for all supported interrupt types */ 495 if (ddi_intr_get_supported_types( 496 dip, &intr_types) != DDI_SUCCESS) { 497 con_log(CL_ANN, (CE_WARN, 498 "ddi_intr_get_supported_types() failed")); 499 goto fail_attach; 500 } 501 502 con_log(CL_DLEVEL1, (CE_NOTE, 503 "ddi_intr_get_supported_types() ret: 0x%x", 504 intr_types)); 505 506 /* Initialize and Setup Interrupt handler */ 507 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) { 508 if (mrsas_add_intrs(instance, 509 DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) { 510 con_log(CL_ANN, (CE_WARN, 511 "MSIX interrupt query failed")); 512 goto fail_attach; 513 } 514 instance->intr_type = DDI_INTR_TYPE_MSIX; 515 } else if (msi_enable && (intr_types & 516 DDI_INTR_TYPE_MSI)) { 517 if (mrsas_add_intrs(instance, 518 DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 519 con_log(CL_ANN, (CE_WARN, 520 "MSI interrupt query failed")); 521 goto fail_attach; 522 } 523 instance->intr_type = DDI_INTR_TYPE_MSI; 524 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 525 msi_enable = 0; 526 if (mrsas_add_intrs(instance, 527 DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 528 con_log(CL_ANN, (CE_WARN, 529 "FIXED interrupt query failed")); 530 goto fail_attach; 531 } 532 instance->intr_type = DDI_INTR_TYPE_FIXED; 533 } else { 534 con_log(CL_ANN, (CE_WARN, "Device cannot " 535 "suppport either FIXED or MSI/X " 536 "interrupts")); 537 goto fail_attach; 538 } 539 540 added_isr_f = 1; 541 542 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 543 "mrsas-enable-ctio", &data) == DDI_SUCCESS) { 544 if (strncmp(data, "no", 3) 
== 0) { 545 ctio_enable = 0; 546 con_log(CL_ANN1, (CE_WARN, 547 "ctio_enable = %d disabled", 548 ctio_enable)); 549 } 550 ddi_prop_free(data); 551 } 552 553 con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d", 554 ctio_enable)); 555 556 /* setup the mfi based low level driver */ 557 if (init_mfi(instance) != DDI_SUCCESS) { 558 con_log(CL_ANN, (CE_WARN, "mr_sas: " 559 "could not initialize the low level driver")); 560 561 goto fail_attach; 562 } 563 564 /* Initialize all Mutex */ 565 INIT_LIST_HEAD(&instance->completed_pool_list); 566 mutex_init(&instance->completed_pool_mtx, 567 "completed_pool_mtx", MUTEX_DRIVER, 568 DDI_INTR_PRI(instance->intr_pri)); 569 570 mutex_init(&instance->app_cmd_pool_mtx, 571 "app_cmd_pool_mtx", MUTEX_DRIVER, 572 DDI_INTR_PRI(instance->intr_pri)); 573 574 mutex_init(&instance->cmd_pend_mtx, "cmd_pend_mtx", 575 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 576 577 mutex_init(&instance->ocr_flags_mtx, "ocr_flags_mtx", 578 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 579 580 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx", 581 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 582 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL); 583 584 mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx", 585 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 586 587 instance->timeout_id = (timeout_id_t)-1; 588 589 /* Register our soft-isr for highlevel interrupts. 
*/ 590 instance->isr_level = instance->intr_pri; 591 if (instance->isr_level == HIGH_LEVEL_INTR) { 592 if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH, 593 &instance->soft_intr_id, NULL, NULL, 594 mrsas_softintr, (caddr_t)instance) != 595 DDI_SUCCESS) { 596 con_log(CL_ANN, (CE_WARN, 597 " Software ISR did not register")); 598 599 goto fail_attach; 600 } 601 602 added_soft_isr_f = 1; 603 } 604 605 /* Allocate a transport structure */ 606 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); 607 608 if (tran == NULL) { 609 con_log(CL_ANN, (CE_WARN, 610 "scsi_hba_tran_alloc failed")); 611 goto fail_attach; 612 } 613 614 tran_alloc_f = 1; 615 616 instance->tran = tran; 617 618 tran->tran_hba_private = instance; 619 tran->tran_tgt_init = mrsas_tran_tgt_init; 620 tran->tran_tgt_probe = scsi_hba_probe; 621 tran->tran_tgt_free = mrsas_tran_tgt_free; 622 tran->tran_init_pkt = mrsas_tran_init_pkt; 623 tran->tran_start = mrsas_tran_start; 624 tran->tran_abort = mrsas_tran_abort; 625 tran->tran_reset = mrsas_tran_reset; 626 tran->tran_getcap = mrsas_tran_getcap; 627 tran->tran_setcap = mrsas_tran_setcap; 628 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt; 629 tran->tran_dmafree = mrsas_tran_dmafree; 630 tran->tran_sync_pkt = mrsas_tran_sync_pkt; 631 tran->tran_bus_config = mrsas_tran_bus_config; 632 633 if (mrsas_relaxed_ordering) 634 mrsas_generic_dma_attr.dma_attr_flags |= 635 DDI_DMA_RELAXED_ORDERING; 636 637 638 tran_dma_attr = mrsas_generic_dma_attr; 639 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge; 640 641 /* Attach this instance of the hba */ 642 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0) 643 != DDI_SUCCESS) { 644 con_log(CL_ANN, (CE_WARN, 645 "scsi_hba_attach failed")); 646 647 goto fail_attach; 648 } 649 650 /* create devctl node for cfgadm command */ 651 if (ddi_create_minor_node(dip, "devctl", 652 S_IFCHR, INST2DEVCTL(instance_no), 653 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) { 654 con_log(CL_ANN, (CE_WARN, 655 "mr_sas: failed to create devctl node.")); 
656 657 goto fail_attach; 658 } 659 660 create_devctl_node_f = 1; 661 662 /* create scsi node for cfgadm command */ 663 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, 664 INST2SCSI(instance_no), 665 DDI_NT_SCSI_ATTACHMENT_POINT, 0) == 666 DDI_FAILURE) { 667 con_log(CL_ANN, (CE_WARN, 668 "mr_sas: failed to create scsi node.")); 669 670 goto fail_attach; 671 } 672 673 create_scsi_node_f = 1; 674 675 (void) sprintf(instance->iocnode, "%d:lsirdctl", 676 instance_no); 677 678 /* 679 * Create a node for applications 680 * for issuing ioctl to the driver. 681 */ 682 if (ddi_create_minor_node(dip, instance->iocnode, 683 S_IFCHR, INST2LSIRDCTL(instance_no), 684 DDI_PSEUDO, 0) == DDI_FAILURE) { 685 con_log(CL_ANN, (CE_WARN, 686 "mr_sas: failed to create ioctl node.")); 687 688 goto fail_attach; 689 } 690 691 create_ioc_node_f = 1; 692 693 /* Create a taskq to handle dr events */ 694 if ((instance->taskq = ddi_taskq_create(dip, 695 "mrsas_dr_taskq", 1, 696 TASKQ_DEFAULTPRI, 0)) == NULL) { 697 con_log(CL_ANN, (CE_WARN, 698 "mr_sas: failed to create taskq ")); 699 instance->taskq = NULL; 700 goto fail_attach; 701 } 702 703 /* enable interrupt */ 704 instance->func_ptr->enable_intr(instance); 705 706 /* initiate AEN */ 707 if (start_mfi_aen(instance)) { 708 con_log(CL_ANN, (CE_WARN, 709 "mr_sas: failed to initiate AEN.")); 710 goto fail_initiate_aen; 711 } 712 713 con_log(CL_DLEVEL1, (CE_NOTE, 714 "AEN started for instance %d.", instance_no)); 715 716 /* Finally! We are on the air. 
*/ 717 ddi_report_dev(dip); 718 719 if (mrsas_check_acc_handle(instance->regmap_handle) != 720 DDI_SUCCESS) { 721 goto fail_attach; 722 } 723 if (mrsas_check_acc_handle(instance->pci_handle) != 724 DDI_SUCCESS) { 725 goto fail_attach; 726 } 727 instance->mr_ld_list = 728 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld), 729 KM_SLEEP); 730 break; 731 case DDI_PM_RESUME: 732 con_log(CL_ANN, (CE_NOTE, 733 "mr_sas: DDI_PM_RESUME")); 734 break; 735 case DDI_RESUME: 736 con_log(CL_ANN, (CE_NOTE, 737 "mr_sas: DDI_RESUME")); 738 break; 739 default: 740 con_log(CL_ANN, (CE_WARN, 741 "mr_sas: invalid attach cmd=%x", cmd)); 742 return (DDI_FAILURE); 743 } 744 745 return (DDI_SUCCESS); 746 747 fail_initiate_aen: 748 fail_attach: 749 if (create_devctl_node_f) { 750 ddi_remove_minor_node(dip, "devctl"); 751 } 752 753 if (create_scsi_node_f) { 754 ddi_remove_minor_node(dip, "scsi"); 755 } 756 757 if (create_ioc_node_f) { 758 ddi_remove_minor_node(dip, instance->iocnode); 759 } 760 761 if (tran_alloc_f) { 762 scsi_hba_tran_free(tran); 763 } 764 765 766 if (added_soft_isr_f) { 767 ddi_remove_softintr(instance->soft_intr_id); 768 } 769 770 if (added_isr_f) { 771 mrsas_rem_intrs(instance); 772 } 773 774 if (instance && instance->taskq) { 775 ddi_taskq_destroy(instance->taskq); 776 } 777 778 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 779 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 780 781 mrsas_fm_fini(instance); 782 783 pci_config_teardown(&instance->pci_handle); 784 785 ddi_soft_state_free(mrsas_state, instance_no); 786 787 con_log(CL_ANN, (CE_NOTE, 788 "mr_sas: return failure from mrsas_attach")); 789 790 return (DDI_FAILURE); 791 } 792 793 /*ARGSUSED*/ 794 static int 795 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp) 796 { 797 int rval; 798 int mrsas_minor = getminor((dev_t)arg); 799 800 struct mrsas_instance *instance; 801 802 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 803 804 switch (cmd) { 805 case 
DDI_INFO_DEVT2DEVINFO: 806 instance = (struct mrsas_instance *) 807 ddi_get_soft_state(mrsas_state, 808 MINOR2INST(mrsas_minor)); 809 810 if (instance == NULL) { 811 *resultp = NULL; 812 rval = DDI_FAILURE; 813 } else { 814 *resultp = instance->dip; 815 rval = DDI_SUCCESS; 816 } 817 break; 818 case DDI_INFO_DEVT2INSTANCE: 819 *resultp = (void *)(intptr_t) 820 (MINOR2INST(getminor((dev_t)arg))); 821 rval = DDI_SUCCESS; 822 break; 823 default: 824 *resultp = NULL; 825 rval = DDI_FAILURE; 826 } 827 828 return (rval); 829 } 830 831 static int 832 mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 833 { 834 int instance_no; 835 836 struct mrsas_instance *instance; 837 838 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 839 840 /* CONSTCOND */ 841 ASSERT(NO_COMPETING_THREADS); 842 843 instance_no = ddi_get_instance(dip); 844 845 instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state, 846 instance_no); 847 848 if (!instance) { 849 con_log(CL_ANN, (CE_WARN, 850 "mr_sas:%d could not get instance in detach", 851 instance_no)); 852 853 return (DDI_FAILURE); 854 } 855 856 con_log(CL_ANN, (CE_NOTE, 857 "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x", 858 instance_no, instance->vendor_id, instance->device_id, 859 instance->subsysvid, instance->subsysid)); 860 861 switch (cmd) { 862 case DDI_DETACH: 863 con_log(CL_ANN, (CE_NOTE, 864 "mrsas_detach: DDI_DETACH")); 865 866 if (scsi_hba_detach(dip) != DDI_SUCCESS) { 867 con_log(CL_ANN, (CE_WARN, 868 "mr_sas:%d failed to detach", 869 instance_no)); 870 871 return (DDI_FAILURE); 872 } 873 874 scsi_hba_tran_free(instance->tran); 875 876 flush_cache(instance); 877 878 if (abort_aen_cmd(instance, instance->aen_cmd)) { 879 con_log(CL_ANN, (CE_WARN, "mrsas_detach: " 880 "failed to abort prevous AEN command")); 881 882 return (DDI_FAILURE); 883 } 884 885 instance->func_ptr->disable_intr(instance); 886 887 if (instance->isr_level == HIGH_LEVEL_INTR) { 888 ddi_remove_softintr(instance->soft_intr_id); 889 } 
890 891 mrsas_rem_intrs(instance); 892 893 if (instance->taskq) { 894 ddi_taskq_destroy(instance->taskq); 895 } 896 kmem_free(instance->mr_ld_list, MRDRV_MAX_LD 897 * sizeof (struct mrsas_ld)); 898 free_space_for_mfi(instance); 899 900 mrsas_fm_fini(instance); 901 902 pci_config_teardown(&instance->pci_handle); 903 904 kmem_free(instance->func_ptr, 905 sizeof (struct mrsas_func_ptr)); 906 907 if (instance->timeout_id != (timeout_id_t)-1) { 908 (void) untimeout(instance->timeout_id); 909 instance->timeout_id = (timeout_id_t)-1; 910 } 911 ddi_soft_state_free(mrsas_state, instance_no); 912 break; 913 case DDI_PM_SUSPEND: 914 con_log(CL_ANN, (CE_NOTE, 915 "mrsas_detach: DDI_PM_SUSPEND")); 916 917 break; 918 case DDI_SUSPEND: 919 con_log(CL_ANN, (CE_NOTE, 920 "mrsas_detach: DDI_SUSPEND")); 921 922 break; 923 default: 924 con_log(CL_ANN, (CE_WARN, 925 "invalid detach command:0x%x", cmd)); 926 return (DDI_FAILURE); 927 } 928 929 return (DDI_SUCCESS); 930 } 931 932 /* 933 * ************************************************************************** * 934 * * 935 * common entry points - for character driver types * 936 * * 937 * ************************************************************************** * 938 */ 939 static int 940 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp) 941 { 942 int rval = 0; 943 944 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 945 946 /* Check root permissions */ 947 if (drv_priv(credp) != 0) { 948 con_log(CL_ANN, (CE_WARN, 949 "mr_sas: Non-root ioctl access denied!")); 950 return (EPERM); 951 } 952 953 /* Verify we are being opened as a character device */ 954 if (otyp != OTYP_CHR) { 955 con_log(CL_ANN, (CE_WARN, 956 "mr_sas: ioctl node must be a char node")); 957 return (EINVAL); 958 } 959 960 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev))) 961 == NULL) { 962 return (ENXIO); 963 } 964 965 if (scsi_hba_open) { 966 rval = scsi_hba_open(dev, openflags, otyp, credp); 967 } 968 969 return (rval); 970 } 
971 972 static int 973 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp) 974 { 975 int rval = 0; 976 977 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 978 979 /* no need for locks! */ 980 981 if (scsi_hba_close) { 982 rval = scsi_hba_close(dev, openflags, otyp, credp); 983 } 984 985 return (rval); 986 } 987 988 static int 989 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 990 int *rvalp) 991 { 992 int rval = 0; 993 994 struct mrsas_instance *instance; 995 struct mrsas_ioctl *ioctl; 996 struct mrsas_aen aen; 997 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 998 999 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev))); 1000 1001 if (instance == NULL) { 1002 /* invalid minor number */ 1003 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found.")); 1004 return (ENXIO); 1005 } 1006 1007 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl), 1008 KM_SLEEP); 1009 ASSERT(ioctl); 1010 1011 switch ((uint_t)cmd) { 1012 case MRSAS_IOCTL_FIRMWARE: 1013 if (ddi_copyin((void *)arg, ioctl, 1014 sizeof (struct mrsas_ioctl), mode)) { 1015 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: " 1016 "ERROR IOCTL copyin")); 1017 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 1018 return (EFAULT); 1019 } 1020 1021 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) { 1022 rval = handle_drv_ioctl(instance, ioctl, mode); 1023 } else { 1024 rval = handle_mfi_ioctl(instance, ioctl, mode); 1025 } 1026 1027 if (ddi_copyout((void *)ioctl, (void *)arg, 1028 (sizeof (struct mrsas_ioctl) - 1), mode)) { 1029 con_log(CL_ANN, (CE_WARN, 1030 "mrsas_ioctl: copy_to_user failed")); 1031 rval = 1; 1032 } 1033 1034 break; 1035 case MRSAS_IOCTL_AEN: 1036 if (ddi_copyin((void *) arg, &aen, 1037 sizeof (struct mrsas_aen), mode)) { 1038 con_log(CL_ANN, (CE_WARN, 1039 "mrsas_ioctl: ERROR AEN copyin")); 1040 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 1041 return (EFAULT); 1042 } 1043 1044 rval = 
handle_mfi_aen(instance, &aen);

		/* Hand the (possibly updated) AEN record back to userland. */
		if (ddi_copyout((void *) &aen, (void *)arg,
		    sizeof (struct mrsas_aen), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "mrsas_ioctl: copy_to_user failed"));
			rval = 1;
		}

		break;
	default:
		/* Unknown ioctl: defer to the generic SCSA HBA handler. */
		rval = scsi_hba_ioctl(dev, cmd, arg,
		    mode, credp, rvalp);

		con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
		    "scsi_hba_ioctl called, ret = %x.", rval));
	}

	kmem_free(ioctl, sizeof (struct mrsas_ioctl));
	return (rval);
}

/*
 * ************************************************************************** *
 *                                                                            *
 *               common entry points - for block driver types                 *
 *                                                                            *
 * ************************************************************************** *
 */
#ifdef __sparc
/*
 * mrsas_reset - DDI reset(9E) entry point (SPARC only).
 *
 * Quiets the adapter on system reset: masks interrupts and flushes the
 * controller cache so no dirty data is lost.  Returns DDI_FAILURE only if
 * the soft state for this instance cannot be found.
 */
/*ARGSUSED*/
static int
mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
	int instance_no;

	struct mrsas_instance *instance;

	instance_no = ddi_get_instance(dip);
	instance = (struct mrsas_instance *)ddi_get_soft_state
	    (mrsas_state, instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (!instance) {
		con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
		    "in reset", instance_no));
		return (DDI_FAILURE);
	}

	instance->func_ptr->disable_intr(instance);

	con_log(CL_ANN1, (CE_NOTE, "flushing cache for instance %d",
	    instance_no));

	flush_cache(instance);

	return (DDI_SUCCESS);
}
#else /* __sparc */
/*
 * mrsas_quiesce - DDI quiesce(9E) entry point (non-SPARC).
 *
 * Quiesces the HBA for fast reboot: aborts the outstanding AEN command,
 * masks interrupts, flushes the controller cache and waits for all
 * outstanding firmware commands to drain.  Fails if the instance cannot
 * be found, if the adapter is dead or mid-reset, or if commands do not
 * drain in time.
 */
/*ARGSUSED*/
static int
mrsas_quiesce(dev_info_t *dip)
{
	int instance_no;

	struct mrsas_instance *instance;

	instance_no = ddi_get_instance(dip);
	instance = (struct mrsas_instance *)ddi_get_soft_state
	    (mrsas_state, instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (!instance) {
		con_log(CL_ANN1, (CE_WARN,
		    "mr_sas:%d could not get adapter "
		    "in quiesce", instance_no));
		return (DDI_FAILURE);
	}
	if (instance->deadadapter || instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
		    "healthy state", instance_no));
		return (DDI_FAILURE);
	}

	if (abort_aen_cmd(instance, instance->aen_cmd)) {
		con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
		    "failed to abort prevous AEN command QUIESCE"));
	}

	instance->func_ptr->disable_intr(instance);

	con_log(CL_ANN1, (CE_NOTE, "flushing cache for instance %d",
	    instance_no));

	flush_cache(instance);

	if (wait_for_outstanding(instance)) {
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
#endif /* __sparc */

/*
 * ************************************************************************** *
 *                                                                            *
 *                          entry points (SCSI HBA)                           *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * mrsas_tran_tgt_init - tran_tgt_init(9E): per-target setup.
 *
 * Rejects non-persistent (.conf-merged) nodes after merging their
 * properties into the persistent node.  For LUN 0 of an in-range logical
 * drive, records the child dip in mr_ld_list when the target is bound to
 * "sd" and no dip is recorded yet.
 *
 * NOTE(review): the debug con_log below indexes mr_ld_list[tgt] before
 * the (tgt < MRDRV_MAX_LD) bounds check — looks like a potential
 * out-of-range read for large target numbers; confirm tgt range at callers.
 */
/*ARGSUSED*/
static int
mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
		scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct mrsas_instance *instance;
	uint16_t tgt = sd->sd_address.a_target;
	uint8_t lun = sd->sd_address.a_lun;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
	    tgt, lun));

	instance = ADDR2MR(&sd->sd_address);

	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mrsas_name_node);
		ddi_set_name_addr(tgt_dip, NULL);

		con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init in "
		    "ndi_dev_is_persistent_node DDI_FAILURE t = %d l = %d",
		    tgt, lun));
		return (DDI_FAILURE);
	}

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
	    (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));

	if (tgt < MRDRV_MAX_LD && lun == 0) {
		if (instance->mr_ld_list[tgt].dip == NULL &&
		    strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
			instance->mr_ld_list[tgt].dip = tgt_dip;
			instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
		}
	}
	return (DDI_SUCCESS);
}

/*
 * mrsas_tran_tgt_free - tran_tgt_free(9E): per-target teardown.
 *
 * Clears the recorded dip for LUN 0 of an in-range logical drive when the
 * departing child matches the one recorded by mrsas_tran_tgt_init().
 */
/*ARGSUSED*/
static void
mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct mrsas_instance *instance;
	int tgt = sd->sd_address.a_target;
	int lun = sd->sd_address.a_lun;

	instance = ADDR2MR(&sd->sd_address);

	con_log(CL_ANN1, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));

	if (tgt < MRDRV_MAX_LD && lun == 0) {
		if (instance->mr_ld_list[tgt].dip == tgt_dip) {
			instance->mr_ld_list[tgt].dip = NULL;
		}
	}
}

/*
 * mrsas_find_child - locate the child devinfo node for (tgt, lun).
 *
 * Walks the HBA's child list comparing each child's "t,l" unit address
 * (as formatted by mrsas_name_node) against the requested pair.
 * Returns the matching child, or NULL if none.
 */
static dev_info_t *
mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
{
	dev_info_t *child = NULL;
	char addr[SCSI_MAXNAMELEN];
	char tmp[MAXNAMELEN];

	(void) sprintf(addr, "%x,%x", tgt, lun);
	for (child = ddi_get_child(instance->dip); child;
	    child = ddi_get_next_sibling(child)) {

		if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
		    DDI_SUCCESS) {
			continue;
		}

		if (strcmp(addr, tmp) == 0) {
			break;
		}
	}
	con_log(CL_ANN1, (CE_NOTE, "mrsas_find_child: return child = %p",
	    (void *)child));
	return (child);
}

/*
 * mrsas_name_node - build a "tgt,lun" unit address for a child node.
 *
 * Reads the "target" and "lun" properties off the node; fails if either
 * is absent (-1).  On success formats "%x,%x" into name (len bytes).
 */
static int
mrsas_name_node(dev_info_t *dip, char *name, int len)
{
	int tgt, lun;

	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "target", -1);
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
	if (tgt == -1) {
		return (DDI_FAILURE);
	}
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "lun", -1);
	con_log(CL_ANN1,
	    (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
	if (lun == -1) {
		return (DDI_FAILURE);
	}
	(void) snprintf(name, len, "%x,%x", tgt, lun);
	return (DDI_SUCCESS);
}

/*
 * mrsas_tran_init_pkt - tran_init_pkt(9E): packet/DMA resource setup.
 *
 * Step 1: allocate the scsi_pkt (with an embedded scsa_cmd) if the caller
 * did not pass one in; step 2: bind or move DMA windows for the data buf.
 * On DMA allocation failure a packet allocated here is freed again before
 * returning NULL, so partially constructed packets never escape.
 */
static struct scsi_pkt *
mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
	int flags, int (*callback)(), caddr_t arg)
{
	struct scsa_cmd	*acmd;
	struct mrsas_instance	*instance;
	struct scsi_pkt	*new_pkt;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ADDR2MR(ap);

	/* step #1 : pkt allocation */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (struct scsa_cmd), callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		acmd = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		acmd->cmd_pkt		= pkt;
		acmd->cmd_flags		= 0;
		acmd->cmd_scblen	= statuslen;
		acmd->cmd_cdblen	= cmdlen;
		acmd->cmd_dmahandle	= NULL;
		acmd->cmd_ncookies	= 0;
		acmd->cmd_cookie	= 0;
		acmd->cmd_cookiecnt	= 0;
		acmd->cmd_nwin		= 0;

		pkt->pkt_address	= *ap;
		pkt->pkt_comp		= (void (*)())NULL;
		pkt->pkt_flags		= 0;
		pkt->pkt_time		= 0;
		pkt->pkt_resid		= 0;
		pkt->pkt_state		= 0;
		pkt->pkt_statistics	= 0;
		pkt->pkt_reason		= 0;
		new_pkt			= pkt;
	} else {
		acmd = PKT2CMD(pkt);
		new_pkt = NULL;
	}

	/* step #2 : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		if (acmd->cmd_dmahandle == NULL) {
			if (mrsas_dma_alloc(instance, pkt, bp, flags,
			    callback) == DDI_FAILURE) {
				if (new_pkt) {
					scsi_hba_pkt_free(ap, new_pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		} else {
			if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
				return ((struct scsi_pkt *)NULL);
			}
		}
	}

	return (pkt);
}

/*
 * mrsas_tran_start - tran_start(9E): submit a SCSI command.
 *
 * Returns TRAN_FATAL_ERROR when the adapter has been declared dead,
 * TRAN_BUSY while an online controller reset is in progress or when no
 * command slot is available, and TRAN_ACCEPT otherwise.  Interrupt-driven
 * commands are fired and forgotten; FLAG_NOINTR commands are issued in
 * poll mode and completed inline, translating the firmware status in the
 * frame header into pkt_reason/scb status before calling pkt_comp.
 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t		cmd_done = 0;

	struct mrsas_instance	*instance = ADDR2MR(ap);
	struct mrsas_cmd	*cmd;

	if (instance->deadadapter == 1) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_tran_start: return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesnt take any more IOs"));
		if (pkt) {
			pkt->pkt_reason		= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}

	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));

	pkt->pkt_reason	= CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/* Throttle: never exceed the firmware's command depth. */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
			DTRACE_PROBE2(start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		con_log(CL_ANN1, (CE_NOTE, "Push SCSI CDB[0]=0x%x"
		    "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		struct mrsas_header *hdr = &cmd->frame->hdr;

		cmd->sync_cmd = MRSAS_TRUE;

		instance->func_ptr-> issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason		= CMD_CMPLT;
		pkt->pkt_statistics	= 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Map firmware completion status onto the SCSA packet. */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			pkt->pkt_reason	= CMD_CMPLT;
			pkt->pkt_statistics = 0;

			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			pkt->pkt_reason		= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
			break;

		default:
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		(void) mrsas_common_check(instance, cmd);
		/*
		 * NOTE(review): hdr points into cmd's frame, which is read
		 * here after return_mfi_pkt() below in the DTRACE probe —
		 * looks like a read of a recycled command slot; confirm
		 * ordering is intentional.
		 */
		DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
		    uint8_t, hdr->cmd_status);
		return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}

/*
 * mrsas_tran_abort - tran_abort(9E).
 *
 * Always fails: the hardware provides no per-command abort.
 */
/*ARGSUSED*/
static int
mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* abort command not supported by H/W */

	return (DDI_FAILURE);
}

/*
 * mrsas_tran_reset - tran_reset(9E).
 *
 * Always fails: the hardware provides no target/bus reset primitive.
 */
/*ARGSUSED*/
static int
mrsas_tran_reset(struct scsi_address *ap, int level)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* reset command not supported by H/W */

	return (DDI_FAILURE);

}

/*
 * mrsas_tran_getcap - tran_getcap(9E): report capability values.
 *
 * Returns the fixed capability table below (-1 for unknown/unsupported,
 * the adapter's init_id for SCSI_CAP_INITIATOR_ID, and the global
 * transfer cap for SCSI_CAP_DMA_MAX).
 */
/*ARGSUSED*/
static int
mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int	rval = 0;

	struct mrsas_instance	*instance = ADDR2MR(ap);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* we do allow inquiring about capabilities for other targets */
	if (cap == NULL) {
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
		/* Limit to 16MB max transfer */
		rval = mrsas_max_cap_maxxfer;
		break;
	case SCSI_CAP_MSG_OUT:
		rval = 1;
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 0;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 0;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_PARITY:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = instance->init_id;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_LINKED_CMDS:
		rval = 0;
		break;
	case SCSI_CAP_RESET_NOTIFICATION:
		rval = 1;
		break;
	case SCSI_CAP_GEOMETRY:
		rval = -1;

		break;
	default:
		con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
		    scsi_hba_lookup_capstr(cap)));
		rval = -1;
		break;
	}

	return (rval);
}

/*
 * mrsas_tran_setcap - tran_setcap(9E): set capability values.
 *
 * Per-target capability setting is rejected (whom == 0 → -1).  Most
 * capabilities are not settable and return 1 unchanged (accepted as-is);
 * unknown capabilities return -1.
 */
/*ARGSUSED*/
static int
mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	int		rval = 1;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* We don't allow setting capabilities for other targets */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_INITIATOR_ID:
	case SCSI_CAP_ARQ:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		rval = 1;
		break;

	case SCSI_CAP_TOTAL_SECTORS:
		rval = 1;
		break;
	default:
		rval = -1;
		break;
	}

	return (rval);
}

/*
 * mrsas_tran_destroy_pkt - tran_destroy_pkt(9E).
 *
 * Unbinds and frees any DMA handle still attached to the command, then
 * frees the scsi_pkt allocated in mrsas_tran_init_pkt().
 */
static void
mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}

	/* free the pkt */
	scsi_hba_pkt_free(ap, pkt);
}

/*
 * mrsas_tran_dmafree - tran_dmafree(9E): release DMA resources only,
 * leaving the packet itself intact for reuse.
 */
/*ARGSUSED*/
static void
mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	register struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}
}

/*
 * mrsas_tran_sync_pkt - tran_sync_pkt(9E): sync the data DMA window in
 * the direction recorded in the command flags (CFLAG_DMASEND → device).
 */
/*ARGSUSED*/
static void
mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	register struct scsa_cmd	*acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
		    acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
}

/*
 * mrsas_isr(caddr_t)
 *
 * The Interrupt Service Routine
 *
 * Collect status for all completed commands and do callback
 *
 * Walks the firmware reply ring from consumer to producer, moving each
 * completed command onto completed_pool_list, then advances the consumer
 * index and hands further processing to the soft interrupt (or calls
 * mrsas_softintr() directly when not running at high interrupt level).
 */
static uint_t
mrsas_isr(struct mrsas_instance *instance)
{
	int		need_softintr;
	uint32_t	producer;
	uint32_t	consumer;
	uint32_t	context;

	struct mrsas_cmd	*cmd;
	struct mrsas_header	*hdr;
	struct scsi_pkt		*pkt;

	ASSERT(instance);
	/* For fixed interrupts, reject interrupts that are not ours. */
	if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
	    !instance->func_ptr->intr_ack(instance)) {
		return (DDI_INTR_UNCLAIMED);
	}

	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		/*
		 * NOTE(review): the message says UNCLAIMED but the code
		 * returns DDI_INTR_CLAIMED — message/behavior mismatch.
		 */
		con_log(CL_ANN1, (CE_WARN,
		    "mr_sas_isr(): FMA check, returning DDI_INTR_UNCLAIMED"));
		return (DDI_INTR_CLAIMED);
	}
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

#ifdef OCRDEBUG
	if (debug_consecutive_timeout_after_ocr_g == 1) {
		con_log(CL_ANN1, (CE_NOTE,
		    "simulating consecutive timeout after ocr"));
		return (DDI_INTR_CLAIMED);
	}
#endif

	if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
		con_log(CL_ANN1, (CE_NOTE, "Fw Fault State Detected "));
		/* Arm the watchdog if it is not already running. */
		if (instance->timeout_id == (timeout_id_t)-1) {
			con_log(CL_ANN1, (CE_NOTE,
			    "Trigger timeout in NON IO Case"));
			instance->timeout_id =
			    timeout(io_timeout_checker, (void *)instance,
			    drv_usectohz(MRSAS_1_SECOND));
		}
		return (DDI_INTR_CLAIMED);
	}

	mutex_enter(&instance->completed_pool_mtx);
	mutex_enter(&instance->cmd_pend_mtx);

	producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer);
	consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer);

	con_log(CL_ANN1, (CE_NOTE, " producer %x consumer %x ",
	    producer, consumer));
	if (producer == consumer) {
		/* Nothing completed: likely a shared/spurious interrupt. */
		con_log(CL_ANN1, (CE_WARN, "producer = consumer case"));
		DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
		    uint32_t, consumer);
		mutex_exit(&instance->completed_pool_mtx);
		mutex_exit(&instance->cmd_pend_mtx);
		return (DDI_INTR_CLAIMED);
	}

	while (consumer != producer) {
		context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
		    &instance->reply_queue[consumer]);
		cmd = instance->cmd_list[context];

		/* Unlink the command from the pending list before queuing. */
		if (cmd->sync_cmd == MRSAS_TRUE) {
			hdr = (struct mrsas_header *)&cmd->frame->hdr;
			if (hdr) {
				mlist_del_init(&cmd->list);
			}
		} else {
			pkt = cmd->pkt;
			if (pkt) {
				mlist_del_init(&cmd->list);
			}
		}

		mlist_add_tail(&cmd->list, &instance->completed_pool_list);

		consumer++;
		/* Reply ring holds max_fw_cmds + 1 entries; wrap around. */
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, consumer);
	mutex_exit(&instance->cmd_pend_mtx);
	mutex_exit(&instance->completed_pool_mtx);

	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	if (instance->softint_running) {
		need_softintr = 0;
	} else {
		need_softintr = 1;
	}

	if (instance->isr_level == HIGH_LEVEL_INTR) {
		if (need_softintr) {
			ddi_trigger_softintr(instance->soft_intr_id);
		}
	} else {
		/*
		 * Not a high-level interrupt, therefore call the soft level
		 * interrupt explicitly
		 */
		(void) mrsas_softintr(instance);
	}

	return (DDI_INTR_CLAIMED);
}


/*
 *
************************************************************************** *
 *                                                                          *
 *                                libraries                                 *
 *                                                                          *
 * ************************************************************************ *
 */
/*
 * get_mfi_pkt : Get a command from the free pool
 * After successful allocation, the caller of this routine
 * must clear the frame buffer (memset to zero) before
 * using the packet further.
 *
 * ***** Note *****
 * After clearing the frame buffer the context id of the
 * frame buffer SHOULD be restored back.
 *
 * Returns NULL when the free pool is empty.
 */
static struct mrsas_cmd *
get_mfi_pkt(struct mrsas_instance *instance)
{
	mlist_t			*head = &instance->cmd_pool_list;
	struct mrsas_cmd	*cmd = NULL;

	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	if (!mlist_empty(head)) {
		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
		mlist_del_init(head->next);
	}
	if (cmd != NULL) {
		cmd->pkt = NULL;
		cmd->retry_count_for_ocr = 0;
		cmd->drv_pkt_time = 0;
	}
	mutex_exit(&instance->cmd_pool_mtx);

	return (cmd);
}

/*
 * get_mfi_app_pkt : Get a command from the application-reserved pool
 * (slots set aside for ioctl/management traffic).  Returns NULL when
 * the reserved pool is empty.
 */
static struct mrsas_cmd *
get_mfi_app_pkt(struct mrsas_instance *instance)
{
	mlist_t				*head = &instance->app_cmd_pool_list;
	struct mrsas_cmd	*cmd = NULL;

	mutex_enter(&instance->app_cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));

	if (!mlist_empty(head)) {
		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
		mlist_del_init(head->next);
	}
	if (cmd != NULL)
		cmd->pkt = NULL;
	mutex_exit(&instance->app_cmd_pool_mtx);

	return (cmd);
}
/*
 * return_mfi_pkt : Return a cmd to free command pool
 */
static void
return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	mlist_add(&cmd->list, &instance->cmd_pool_list);

	mutex_exit(&instance->cmd_pool_mtx);
}

/*
 * return_mfi_app_pkt : Return a cmd to the application-reserved pool.
 */
static void
return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->app_cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));

	mlist_add(&cmd->list, &instance->app_cmd_pool_list);

	mutex_exit(&instance->app_cmd_pool_mtx);
}
/*
 * push_pending_mfi_pkt : Move a command onto the pending list so the
 * io_timeout_checker watchdog can track it.  Stamps the per-command
 * timeout (frame header timeout for sync/DCMD commands, drv_pkt_time for
 * regular I/O) from debug_timeout_g and arms the watchdog timeout if it
 * is not already running.
 */
static void
push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;
	con_log(CL_ANN1, (CE_NOTE, "push_pending_pkt(): Called\n"));
	mutex_enter(&instance->cmd_pend_mtx);
	ASSERT(mutex_owned(&instance->cmd_pend_mtx));
	mlist_del_init(&cmd->list);
	mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
	if (cmd->sync_cmd == MRSAS_TRUE) {
		hdr = (struct mrsas_header *)&cmd->frame->hdr;
		if (hdr) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x "
			    "time %llx",
			    (void *)cmd, cmd->index,
			    gethrtime()));
			/* Wait for specified interval */
			hdr->timeout = (unsigned int)debug_timeout_g;
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_pkt(): "
			    "Called IO Timeout Value %x\n",
			    hdr->timeout));
		}
		if (hdr && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	} else {
		pkt = cmd->pkt;
		if (pkt) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x pkt %p, "
			    "time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
		}
		if (pkt && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	}

	mutex_exit(&instance->cmd_pend_mtx);
}

/*
 * mrsas_print_pending_cmds : Debug walk of the pending list.
 *
 * Logs every pending command and re-stamps its timeout from
 * debug_timeout_g (giving in-flight commands a fresh deadline, e.g.
 * around an online controller reset).  Always returns DDI_SUCCESS.
 *
 * NOTE(review): the list is walked with cmd_pend_mtx dropped between
 * iterations; entries could move while unlocked — confirm callers only
 * invoke this while the adapter is quiesced.
 */
static int
mrsas_print_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header	*hdr;
	unsigned int		flag = 1;

	struct scsi_pkt *pkt;
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_print_pending_cmds(): Called"));
	while (flag) {
		mutex_enter(&instance->cmd_pend_mtx);
		tmp	=	tmp->next;
		if (tmp == head) {
			mutex_exit(&instance->cmd_pend_mtx);
			flag = 0;
			break;
		} else {
			cmd = mlist_entry(tmp, struct mrsas_cmd, list);
			mutex_exit(&instance->cmd_pend_mtx);
			if (cmd) {
				if (cmd->sync_cmd == MRSAS_TRUE) {
					hdr =
					    (struct mrsas_header *)&cmd->frame->hdr;
					if (hdr) {
						hdr->timeout =
						    (unsigned int)debug_timeout_g;
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index %x hdr %p",
						    (void *)cmd, cmd->index,
						    (void *)hdr));
					}
				} else {
					pkt = cmd->pkt;
					if (pkt) {
						cmd->drv_pkt_time =
						    (uint16_t)debug_timeout_g;
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index %x "
						    "pkt %p", (void *)cmd, cmd->index,
						    (void *)pkt));
					}
				}
			}
		}
	}
	con_log(CL_ANN1, (CE_NOTE, "mrsas_print_pending_cmds(): Done\n"));
	return (DDI_SUCCESS);
}


/*
 * mrsas_complete_pending_cmds : Fail back everything on the pending list.
 *
 * Used when the adapter is being given up on: regular I/O packets are
 * completed to SCSA with CMD_DEV_GONE/STAT_DISCON, and sync/DCMD commands
 * are completed to their waiters with MFI_STAT_INVALID_STATUS.  Each
 * command is unlinked from the pending list.  Always returns DDI_SUCCESS.
 */
static int
mrsas_complete_pending_cmds(struct mrsas_instance *instance)
{

	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;

	struct mlist_head		*pos, *next;

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_complete_pending_cmds(): Called"));

	mutex_enter(&instance->cmd_pend_mtx);
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);
		if (cmd) {
			pkt = cmd->pkt;
			if (pkt) { /* for IO */
				if (((pkt->pkt_flags & FLAG_NOINTR)
				    == 0) && pkt->pkt_comp) {
					pkt->pkt_reason
					    = CMD_DEV_GONE;
					pkt->pkt_statistics
					    = STAT_DISCON;
					con_log(CL_ANN1, (CE_NOTE,
					    "fail and posting to scsa "
					    "cmd %p index %x"
					    " pkt %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)pkt, gethrtime()));
					(*pkt->pkt_comp)(pkt);
				}
			} else { /* for DCMDS */
				if (cmd->sync_cmd == MRSAS_TRUE) {
					hdr =
					    (struct mrsas_header *)&cmd->frame->hdr;
					con_log(CL_ANN1, (CE_NOTE,
					    "posting invalid status to application "
					    "cmd %p index %x"
					    " hdr %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)hdr, gethrtime()));
					hdr->cmd_status = MFI_STAT_INVALID_STATUS;
					complete_cmd_in_sync_mode(instance, cmd);
				}
			}
			mlist_del_init(&cmd->list);
		} else {
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas_complete_pending_cmds:"
			    "NULL command\n"));
		}
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_complete_pending_cmds:"
		    "looping for more commands\n"));
	}
	mutex_exit(&instance->cmd_pend_mtx);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_complete_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}


/*
 * mrsas_issue_pending_cmds : Re-issue everything on the pending list,
 * typically after an online controller reset.  Each command's OCR retry
 * count is bumped; a command retried more than IO_RETRY_COUNT times
 * causes the adapter to be killed and DDI_FAILURE to be returned.
 * Sync commands are reissued in sync mode, others fire-and-forget.
 */
static int
mrsas_issue_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head	=	&instance->cmd_pend_list;
	mlist_t *tmp	=	head->next;
	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
	while (tmp != head) {
		mutex_enter(&instance->cmd_pend_mtx);
		cmd = mlist_entry(tmp, struct mrsas_cmd, list);
		tmp = tmp->next;
		mutex_exit(&instance->cmd_pend_mtx);
		if (cmd) {
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas_issue_pending_cmds(): "
			    "Got a cmd: cmd:%p\n", (void *)cmd));
			cmd->retry_count_for_ocr++;
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas_issue_pending_cmds(): "
			    "cmd retry count = %d\n",
			    cmd->retry_count_for_ocr));
			if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
				con_log(CL_ANN1, (CE_NOTE,
				    "mrsas_issue_pending_cmds():"
				    "Calling Kill Adapter\n"));
				(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}
			pkt = cmd->pkt;
			if (pkt) {
				con_log(CL_ANN1, (CE_NOTE,
				    "PENDING ISSUE: cmd %p index %x "
				    "pkt %p time %llx",
				    (void *)cmd, cmd->index,
				    (void *)pkt,
				    gethrtime()));

			}
			if (cmd->sync_cmd == MRSAS_TRUE) {
				instance->func_ptr->issue_cmd_in_sync_mode(
				    instance, cmd);
			} else {
				instance->func_ptr->issue_cmd(cmd, instance);
			}
		} else {
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas_issue_pending_cmds: NULL command\n"));
		}
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_issue_pending_cmds:"
		    "looping for more commands"));
	}
	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}

/*
 * destroy_mfi_frame_pool : Free the per-command MFI frame DMA objects
 * (including the extra flush_cache slot at index max_fw_cmds).
 */
static void
destroy_mfi_frame_pool(struct mrsas_instance *instance)
{
	int		i;
	uint32_t	max_cmd = instance->max_fw_cmds;

	struct mrsas_cmd	*cmd;

	/* return all frames to pool */
	for (i = 0; i < max_cmd+1; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
			(void) mrsas_free_dma_obj(instance,
			    cmd->frame_dma_obj);

		cmd->frame_dma_obj_status  = DMA_OBJ_FREED;
	}

}

/*
 * create_mfi_frame_pool : Allocate one DMA frame per command.
 *
 * Each frame is 64-byte aligned and sized to hold the MFI frame, the SGL
 * (max_num_sge IEEE SGEs) and a trailing SENSE_LENGTH sense buffer, and
 * must bind to a single DMA cookie.  The command's context id is written
 * into the frame so completions can be matched back to the command.
 * Returns DDI_FAILURE/ENOMEM on allocation failure (caller is expected
 * to tear down via destroy_mfi_frame_pool()).
 */
static int
create_mfi_frame_pool(struct mrsas_instance *instance)
{
	int		i = 0;
	int		cookie_cnt;
	uint16_t	max_cmd;
	uint16_t	sge_sz;
	uint32_t	sgl_sz;
	uint32_t	tot_frame_size;
	struct mrsas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;

	sge_sz	= sizeof (struct mrsas_sge_ieee);

	/* calculated the number of 64byte frames required for SGL */
	sgl_sz		= sge_sz * instance->max_num_sge;
	tot_frame_size	= sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
	    "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));

	while (i < max_cmd+1) {
		cmd = instance->cmd_list[i];

		cmd->frame_dma_obj.size	= tot_frame_size;
		cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
		cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
		cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;


		cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC);

		if (cookie_cnt == -1 || cookie_cnt > 1) {
			con_log(CL_ANN, (CE_WARN,
			    "create_mfi_frame_pool: could not alloc."));
			return (DDI_FAILURE);
		}

		bzero(cmd->frame_dma_obj.buffer, tot_frame_size);

		cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
		cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
		cmd->frame_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address;

		/* Sense buffer lives in the tail of the same allocation. */
		cmd->sense = (uint8_t *)(((unsigned long)
		    cmd->frame_dma_obj.buffer) +
		    tot_frame_size - SENSE_LENGTH);
		cmd->sense_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address +
		    tot_frame_size - SENSE_LENGTH;

		if (!cmd->frame || !cmd->sense) {
			con_log(CL_ANN, (CE_NOTE,
			    "mr_sas: pci_pool_alloc failed"));

			return (ENOMEM);
		}

		ddi_put32(cmd->frame_dma_obj.acc_handle,
		    &cmd->frame->io.context, cmd->index);
		i++;

		con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
		    cmd->index, cmd->frame_phys_addr));
	}

	return (DDI_SUCCESS);
}

/*
 * free_additional_dma_buffer : Release the internal reply-queue buffer
 * and the AEN event-detail buffer allocated by
 * alloc_additional_dma_buffer().
 */
static void
free_additional_dma_buffer(struct mrsas_instance *instance)
{
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}
}

/*
 * alloc_additional_dma_buffer : Allocate the shared internal DMA area
 * and the AEN event-detail buffer.
 *
 * The internal area (2 pages) is laid out as: producer index (4 bytes),
 * consumer index (4 bytes), the reply queue (max_fw_cmds + 1 entries
 * plus producer/consumer slots), and the remainder as a general-purpose
 * internal buffer (internal_buf).  Returns DDI_FAILURE if either DMA
 * allocation does not bind to exactly one cookie.
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t	reply_q_sz;
	uint32_t	internal_buf_size = PAGESIZE*2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas: could not alloc reply queue"));
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    (reply_q_sz + 8);
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer."));
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);
}

/*
 * free_space_for_mfi : Tear down everything alloc_space_for_mfi() built:
 * additional DMA buffers, the frame pool, each command structure and the
 * cmd_list array itself.  Safe to call twice (no-op once cmd_list is
 * NULL).  Re-initializes the three command list heads.
 */
static void
free_space_for_mfi(struct mrsas_instance *instance)
{
	int		i;
	uint32_t	max_cmd = instance->max_fw_cmds;

	/* already freed */
	if (instance->cmd_list == NULL) {
		return;
	}

	free_additional_dma_buffer(instance);

	/* first free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* free all the commands in the cmd_list */
	for (i = 0; i < instance->max_fw_cmds+1; i++) {
		kmem_free(instance->cmd_list[i],
		    sizeof (struct mrsas_cmd));

		instance->cmd_list[i] = NULL;
	}

	/* free the cmd_list buffer itself */
	kmem_free(instance->cmd_list,
	    sizeof (struct mrsas_cmd *) * (max_cmd+1));

	instance->cmd_list = NULL;

	INIT_LIST_HEAD(&instance->cmd_pool_list);
	INIT_LIST_HEAD(&instance->app_cmd_pool_list);
	INIT_LIST_HEAD(&instance->cmd_pend_list);
}

/*
 *
alloc_space_for_mfi
 */
static int
alloc_space_for_mfi(struct mrsas_instance *instance)
{
	int		i;
	uint32_t	max_cmd;
	uint32_t	reserve_cmd;
	size_t		sz;

	struct mrsas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;

	/* reserve 1 more slot for flush_cache */
	sz = sizeof (struct mrsas_cmd *) * (max_cmd+1);

	/*
	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
	ASSERT(instance->cmd_list);

	for (i = 0; i < max_cmd+1; i++) {
		instance->cmd_list[i] = kmem_zalloc(sizeof (struct mrsas_cmd),
		    KM_SLEEP);
		ASSERT(instance->cmd_list[i]);
	}

	INIT_LIST_HEAD(&instance->cmd_pool_list);
	INIT_LIST_HEAD(&instance->cmd_pend_list);
	/* add all the commands to command pool (instance->cmd_pool) */
	reserve_cmd = APP_RESERVE_CMDS;
	INIT_LIST_HEAD(&instance->app_cmd_pool_list);
	/* slots 0 .. APP_RESERVE_CMDS-2 go to the application pool */
	for (i = 0; i < reserve_cmd-1; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;
		mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
	}
	/*
	 * reserve slot instance->cmd_list[APP_RESERVE_CMDS-1]
	 * for abort_aen_cmd
	 */
	for (i = reserve_cmd; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;
		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
	}

	/* single slot for flush_cache won't be added in command pool */
	cmd = instance->cmd_list[max_cmd];
	cmd->index = i;		/* i == max_cmd here, after the loop above */

	/* create a frame pool and assign one frame to each cmd */
	if (create_mfi_frame_pool(instance)) {
		con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool"));
		return (DDI_FAILURE);
	}

	/* allocate the additional DMA buffers (reply queue, evt_detail) */
	if (alloc_additional_dma_buffer(instance)) {
		con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}


/*
 * get_ctrl_info - read the controller properties via
 * MR_DCMD_CTRL_GET_INFO into *ctrl_info, using the pre-allocated
 * internal DMA buffer as the transfer area.
 * Returns 0 on success, non-zero on failure.
 */
static int
get_ctrl_info(struct mrsas_instance *instance,
    struct mrsas_ctrl_info *ctrl_info)
{
	int ret = 0;

	struct mrsas_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	struct mrsas_ctrl_info *ci;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to get a cmd for ctrl info"));
		DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}
	cmd->retry_count_for_ocr = 0;
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* data is DMA'd into the instance-wide internal buffer */
	ci = (struct mrsas_ctrl_info *)instance->internal_buf;

	if (!ci) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to alloc mem for ctrl info"));
		return_mfi_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	(void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));

	/* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_ctrl_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_GET_INFO);
ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->internal_buf_dmac_add);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_ctrl_info));

	cmd->frame_count = 1;

	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		ret = 0;

		/* copy out only the fields the driver currently needs */
		ctrl_info->max_request_size = ddi_get32(
		    cmd->frame_dma_obj.acc_handle, &ci->max_request_size);

		ctrl_info->ld_present_count = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);

		/*
		 * ctrl_info->properties.on_off_properties.disable_online_ctrl_reset =
		 * ci->properties.on_off_properties.disable_online_ctrl_reset;
		 */
		ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
		    (uint8_t *)(ctrl_info->product_name),
		    (uint8_t *)(ci->product_name), 80 * sizeof (char),
		    DDI_DEV_AUTOINCR);
		/* should get more members of ci with ddi_get when needed */
	} else {
		con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed"));
		ret = -1;
	}

	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		ret = -1;
	}
	return_mfi_pkt(instance, cmd);

	return (ret);
}

/*
 * abort_aen_cmd - ask the firmware to abort the outstanding AEN command.
 * Uses the dedicated slot cmd_list[APP_RESERVE_CMDS-1], which
 * alloc_space_for_mfi() keeps out of both command pools for this purpose.
 */
static int
abort_aen_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd_to_abort)
{
	int ret = 0;

	struct mrsas_cmd *cmd;
	struct mrsas_abort_frame *abort_fr;

	cmd = instance->cmd_list[APP_RESERVE_CMDS-1];

	if (!cmd) {
		con_log(CL_ANN1, (CE_WARN,
		    "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
		DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}
	cmd->retry_count_for_ocr = 0;
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	abort_fr = &cmd->frame->abort;

	/* prepare and issue the abort frame */
	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->cmd, MFI_CMD_OP_ABORT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
	    MFI_CMD_STATUS_SYNC_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
	    cmd_to_abort->index);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_hi, 0);

	/* tell the AEN completion path that this abort is expected */
	instance->aen_cmd->abort_aen = 1;

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
		ret = -1;
	} else {
		ret = 0;
	}

	instance->aen_cmd->abort_aen = 1;
	instance->aen_cmd = 0;

	/* the aborted AEN command no longer counts as outstanding */
	atomic_add_16(&instance->fw_outstanding, (-1));

	return (ret);
}


/*
 * init_mfi - bring the firmware to READY state, size the command pool
 * from the FW status register, allocate the pool, and hand the reply
 * queue to the firmware with an MFI INIT frame issued in polled mode.
 */
static int
init_mfi(struct mrsas_instance *instance)
{
	struct mrsas_cmd		*cmd;
	struct mrsas_ctrl_info		ctrl_info;
	struct mrsas_init_frame		*init_frame;
	struct mrsas_init_queue_info	*initq_info;

	/* we expect the FW state to be READY */
	if (mfi_state_transition_to_ready(instance)) {
		con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
		goto fail_ready_state;
	}

	/* get various operational parameters from status register */
	instance->max_num_sge =
	    (instance->func_ptr->read_fw_status_reg(instance) &
	    0xFF0000) >> 0x10;
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	instance->max_fw_cmds =
	    instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
	instance->max_fw_cmds = instance->max_fw_cmds - 1;

	instance->max_num_sge =
	    (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
	    MRSAS_MAX_SGE_CNT : instance->max_num_sge;

	/* create a pool of commands */
	if (alloc_space_for_mfi(instance) != DDI_SUCCESS)
		goto fail_alloc_fw_space;

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	cmd = get_mfi_pkt(instance);
	cmd->retry_count_for_ocr = 0;

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	/* issue the init frame in polled mode */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
		return_mfi_pkt(instance, cmd);
		goto fail_fw_init;
	}

	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		return_mfi_pkt(instance, cmd);
		goto fail_fw_init;
	}
	return_mfi_pkt(instance, cmd);

	/* status register bit 0x04000000 advertises IEEE SGL support */
	if (ctio_enable &&
	    (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
		con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
		instance->flag_ieee = 1;
	} else {
		instance->flag_ieee = 0;
	}

	instance->disable_online_ctrl_reset = 0;
	/* gather misc FW related information */
	if (!get_ctrl_info(instance, &ctrl_info)) {
		instance->max_sectors_per_req = ctrl_info.max_request_size;
		con_log(CL_ANN1, (CE_NOTE,
		    "product name %s
ld present %d",
		    ctrl_info.product_name, ctrl_info.ld_present_count));
	} else {
		/* no ctrl info: fall back to a size derived from SGE limit */
		instance->max_sectors_per_req = instance->max_num_sge *
		    PAGESIZE / 512;
	}
	/*
	 * instance->disable_online_ctrl_reset =
	 * ctrl_info.properties.on_off_properties.disable_online_ctrl_reset;
	 */
	return (DDI_SUCCESS);

fail_fw_init:
fail_alloc_fw_space:

	free_space_for_mfi(instance);

fail_ready_state:
	ddi_regs_map_free(&instance->regmap_handle);

fail_mfi_reg_setup:
	return (DDI_FAILURE);
}


/*
 * mrsas_issue_init_mfi - issue an MFI INIT frame using a command taken
 * from the application command pool (the regular pool may be busy, e.g.
 * when re-initializing after an online controller reset).
 */
static int
mrsas_issue_init_mfi(struct mrsas_instance *instance)
{
	struct mrsas_cmd		*cmd;
	struct mrsas_init_frame		*init_frame;
	struct mrsas_init_queue_info	*initq_info;

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_issue_init_mfi: entry\n"));
	cmd = get_mfi_app_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_issue_init_mfi: get_pkt failed\n"));
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	/* issue the init frame in polled mode */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mfi():failed to "
		    "init firmware"));
		return_mfi_app_pkt(instance, cmd);
		return (DDI_FAILURE);
	}
	return_mfi_app_pkt(instance, cmd);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_init_mfi: Done"));
	return (DDI_SUCCESS);
}
/*
 * mfi_state_transition_to_ready : Move the FW to READY state
 *
 * @reg_set : MFI register
set 2778 */ 2779 static int 2780 mfi_state_transition_to_ready(struct mrsas_instance *instance) 2781 { 2782 int i; 2783 uint8_t max_wait; 2784 uint32_t fw_ctrl; 2785 uint32_t fw_state; 2786 uint32_t cur_state; 2787 uint32_t cur_abs_reg_val; 2788 uint32_t prev_abs_reg_val; 2789 2790 cur_abs_reg_val = 2791 instance->func_ptr->read_fw_status_reg(instance); 2792 fw_state = 2793 cur_abs_reg_val & MFI_STATE_MASK; 2794 con_log(CL_ANN1, (CE_NOTE, 2795 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state)); 2796 2797 while (fw_state != MFI_STATE_READY) { 2798 con_log(CL_ANN, (CE_NOTE, 2799 "mfi_state_transition_to_ready:FW state%x", fw_state)); 2800 2801 switch (fw_state) { 2802 case MFI_STATE_FAULT: 2803 con_log(CL_ANN1, (CE_NOTE, 2804 "mr_sas: FW in FAULT state!!")); 2805 2806 return (ENODEV); 2807 case MFI_STATE_WAIT_HANDSHAKE: 2808 /* set the CLR bit in IMR0 */ 2809 con_log(CL_ANN1, (CE_NOTE, 2810 "mr_sas: FW waiting for HANDSHAKE")); 2811 /* 2812 * PCI_Hot Plug: MFI F/W requires 2813 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 2814 * to be set 2815 */ 2816 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */ 2817 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE | 2818 MFI_INIT_HOTPLUG, instance); 2819 2820 max_wait = 2; 2821 cur_state = MFI_STATE_WAIT_HANDSHAKE; 2822 break; 2823 case MFI_STATE_BOOT_MESSAGE_PENDING: 2824 /* set the CLR bit in IMR0 */ 2825 con_log(CL_ANN1, (CE_NOTE, 2826 "mr_sas: FW state boot message pending")); 2827 /* 2828 * PCI_Hot Plug: MFI F/W requires 2829 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 2830 * to be set 2831 */ 2832 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance); 2833 2834 max_wait = 10; 2835 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 2836 break; 2837 case MFI_STATE_OPERATIONAL: 2838 /* bring it to READY state; assuming max wait 2 secs */ 2839 instance->func_ptr->disable_intr(instance); 2840 con_log(CL_ANN1, (CE_NOTE, 2841 "mr_sas: FW in OPERATIONAL state")); 2842 /* 2843 * PCI_Hot Plug: MFI F/W requires 2844 * (MFI_INIT_READY | 
MFI_INIT_MFIMODE | MFI_INIT_ABORT) 2845 * to be set 2846 */ 2847 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */ 2848 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance); 2849 2850 max_wait = 10; 2851 cur_state = MFI_STATE_OPERATIONAL; 2852 break; 2853 case MFI_STATE_UNDEFINED: 2854 /* this state should not last for more than 2 seconds */ 2855 con_log(CL_ANN1, (CE_NOTE, "FW state undefined")); 2856 2857 max_wait = 2; 2858 cur_state = MFI_STATE_UNDEFINED; 2859 break; 2860 case MFI_STATE_BB_INIT: 2861 max_wait = 2; 2862 cur_state = MFI_STATE_BB_INIT; 2863 break; 2864 case MFI_STATE_FW_INIT: 2865 max_wait = 2; 2866 cur_state = MFI_STATE_FW_INIT; 2867 break; 2868 case MFI_STATE_DEVICE_SCAN: 2869 max_wait = 180; 2870 cur_state = MFI_STATE_DEVICE_SCAN; 2871 prev_abs_reg_val = cur_abs_reg_val; 2872 con_log(CL_NONE, (CE_NOTE, 2873 "Device scan in progress ...\n")); 2874 break; 2875 default: 2876 con_log(CL_ANN1, (CE_NOTE, 2877 "mr_sas: Unknown state 0x%x", fw_state)); 2878 return (ENODEV); 2879 } 2880 2881 /* the cur_state should not last for more than max_wait secs */ 2882 for (i = 0; i < (max_wait * MILLISEC); i++) { 2883 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */ 2884 cur_abs_reg_val = 2885 instance->func_ptr->read_fw_status_reg(instance); 2886 fw_state = cur_abs_reg_val & MFI_STATE_MASK; 2887 2888 if (fw_state == cur_state) { 2889 delay(1 * drv_usectohz(MILLISEC)); 2890 } else { 2891 break; 2892 } 2893 } 2894 if (fw_state == MFI_STATE_DEVICE_SCAN) { 2895 if (prev_abs_reg_val != cur_abs_reg_val) { 2896 continue; 2897 } 2898 } 2899 2900 /* return error if fw_state hasn't changed after max_wait */ 2901 if (fw_state == cur_state) { 2902 con_log(CL_ANN1, (CE_NOTE, 2903 "FW state hasn't changed in %d secs", max_wait)); 2904 return (ENODEV); 2905 } 2906 }; 2907 2908 fw_ctrl = RD_IB_DOORBELL(instance); 2909 2910 con_log(CL_ANN1, (CE_NOTE, 2911 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl)); 2912 2913 /* 2914 * Write 0xF to the doorbell register to do the 
following. 2915 * - Abort all outstanding commands (bit 0). 2916 * - Transition from OPERATIONAL to READY state (bit 1). 2917 * - Discard (possible) low MFA posted in 64-bit mode (bit-2). 2918 * - Set to release FW to continue running (i.e. BIOS handshake 2919 * (bit 3). 2920 */ 2921 WR_IB_DOORBELL(0xF, instance); 2922 2923 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 2924 return (ENODEV); 2925 } 2926 return (DDI_SUCCESS); 2927 } 2928 2929 /* 2930 * get_seq_num 2931 */ 2932 static int 2933 get_seq_num(struct mrsas_instance *instance, 2934 struct mrsas_evt_log_info *eli) 2935 { 2936 int ret = DDI_SUCCESS; 2937 2938 dma_obj_t dcmd_dma_obj; 2939 struct mrsas_cmd *cmd; 2940 struct mrsas_dcmd_frame *dcmd; 2941 struct mrsas_evt_log_info *eli_tmp; 2942 cmd = get_mfi_pkt(instance); 2943 2944 if (!cmd) { 2945 cmn_err(CE_WARN, "mr_sas: failed to get a cmd"); 2946 DTRACE_PROBE2(seq_num_mfi_err, uint16_t, 2947 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 2948 return (ENOMEM); 2949 } 2950 cmd->retry_count_for_ocr = 0; 2951 /* Clear the frame buffer and assign back the context id */ 2952 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 2953 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 2954 cmd->index); 2955 2956 dcmd = &cmd->frame->dcmd; 2957 2958 /* allocate the data transfer buffer */ 2959 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info); 2960 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; 2961 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 2962 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 2963 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 2964 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 2965 2966 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, 2967 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 2968 con_log(CL_ANN, (CE_WARN, 2969 "get_seq_num: could not allocate data transfer buffer.")); 2970 return (DDI_FAILURE); 2971 } 2972 2973 (void) memset(dcmd_dma_obj.buffer, 0, 2974 sizeof 
(struct mrsas_evt_log_info)); 2975 2976 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 2977 2978 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 2979 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0); 2980 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1); 2981 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 2982 MFI_FRAME_DIR_READ); 2983 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 2984 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 2985 sizeof (struct mrsas_evt_log_info)); 2986 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 2987 MR_DCMD_CTRL_EVENT_GET_INFO); 2988 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, 2989 sizeof (struct mrsas_evt_log_info)); 2990 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr, 2991 dcmd_dma_obj.dma_cookie[0].dmac_address); 2992 2993 cmd->sync_cmd = MRSAS_TRUE; 2994 cmd->frame_count = 1; 2995 2996 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 2997 cmn_err(CE_WARN, "get_seq_num: " 2998 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO"); 2999 ret = DDI_FAILURE; 3000 } else { 3001 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer; 3002 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle, 3003 &eli_tmp->newest_seq_num); 3004 ret = DDI_SUCCESS; 3005 } 3006 3007 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 3008 ret = DDI_FAILURE; 3009 3010 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { 3011 ret = DDI_FAILURE; 3012 } 3013 3014 return_mfi_pkt(instance, cmd); 3015 3016 return (ret); 3017 } 3018 3019 /* 3020 * start_mfi_aen 3021 */ 3022 static int 3023 start_mfi_aen(struct mrsas_instance *instance) 3024 { 3025 int ret = 0; 3026 3027 struct mrsas_evt_log_info eli; 3028 union mrsas_evt_class_locale class_locale; 3029 3030 /* get the latest sequence number from FW */ 3031 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info)); 3032 3033 if 
(get_seq_num(instance, &eli)) {
		cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
		return (-1);
	}

	/* register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
	class_locale.members.class = MR_EVT_CLASS_INFO;
	class_locale.word = LE_32(class_locale.word);
	ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
	    class_locale.word);

	if (ret) {
		cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
		return (-1);
	}

	return (ret);
}

/*
 * flush_cache - ask the firmware to flush controller and disk caches
 * (MR_DCMD_CTRL_CACHE_FLUSH).  Uses the dedicated cmd_list[max_fw_cmds]
 * slot that alloc_space_for_mfi() keeps out of the command pools, so it
 * works even when the pools are exhausted (e.g. at shutdown).
 * Best effort: a failure is only logged.
 */
static void
flush_cache(struct mrsas_instance *instance)
{
	struct mrsas_cmd	*cmd = NULL;
	struct mrsas_dcmd_frame	*dcmd;
	uint32_t	max_cmd = instance->max_fw_cmds;

	cmd = instance->cmd_list[max_cmd];

	if (!cmd) {
		con_log(CL_ANN1, (CE_WARN,
		    "flush_cache():Failed to get a cmd for flush_cache"));
		DTRACE_PROBE2(flush_cache_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return;
	}
	dcmd = &cmd->frame->dcmd;

	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_NONE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_CACHE_FLUSH);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
	    MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);

	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
con_log(CL_ANN1, (CE_WARN,
		    "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
	}
	con_log(CL_ANN1, (CE_NOTE, "flush_cache done"));
}

/*
 * service_mfi_aen-	Completes an AEN command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * Logs the event to the sysevent queue, (un)configures any logical
 * drives whose state changed, then re-issues the AEN registration
 * frame with the next sequence number so event delivery continues.
 */
static void
service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	uint32_t seq_num;
	struct mrsas_evt_detail *evt_detail =
	    (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
	int rval = 0;
	int tgt = 0;
	ddi_acc_handle_t acc_handle;

	acc_handle = cmd->frame_dma_obj.acc_handle;

	cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);

	/* an ENODATA-valued status is treated as success */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int	instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas%d: Failed to log AEN event", instance_no));
	}
	/*
	 * Check for any ld devices that has changed state. i.e. online
	 * or offline.
	 */
	con_log(CL_ANN1, (CE_NOTE,
	    "AEN: code = %x class = %x locale = %x args = %x",
	    ddi_get32(acc_handle, &evt_detail->code),
	    evt_detail->cl.members.class,
	    ddi_get16(acc_handle, &evt_detail->cl.members.locale),
	    ddi_get8(acc_handle, &evt_detail->arg_type)));

	switch (ddi_get32(acc_handle, &evt_detail->code)) {
	case MR_EVT_CFG_CLEARED: {
		/* configuration wiped: unconfigure every attached LD */
		for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
			if (instance->mr_ld_list[tgt].dip != NULL) {
				rval = mrsas_service_evt(instance, tgt, 0,
				    MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: CFG CLEARED AEN rval = %d "
				    "tgt id = %d", rval, tgt));
			}
		}
		break;
	}

	case MR_EVT_LD_DELETED: {
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_UNCONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_DELETED */

	case MR_EVT_LD_CREATED: {
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_CONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_CREATED */
	} /* End of Main Switch */

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
	seq_num++;
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
	ddi_put32(acc_handle,
&cmd->frame->dcmd.mbox.w[0], seq_num);

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}

/*
 * complete_cmd_in_sync_mode -	Completes an internal command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * The issue_cmd_in_sync_mode() function waits for a command to complete
 * after it issues a command. This function wakes up that waiting routine by
 * calling wake_up() on the wait queue.
 */
static void
complete_cmd_in_sync_mode(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
	    &cmd->frame->io.cmd_status);

	cmd->sync_cmd = MRSAS_FALSE;

	/* an ENODATA-valued status is treated as success */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
	    (void *)cmd));

	/* wake all sync waiters; each re-checks its own command */
	cv_broadcast(&instance->int_cmd_cv);
}

/*
 * Call this function inside mrsas_softintr.
 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
 * @instance: Adapter soft state
 *
 * Returns ADAPTER_RESET_REQUIRED only when the FW is in FAULT state and
 * online controller reset has not been disabled for this instance.
 */

static uint32_t
mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
{
	uint32_t	cur_abs_reg_val;
	uint32_t	fw_state;

	cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
	fw_state = cur_abs_reg_val & MFI_STATE_MASK;
	if (fw_state == MFI_STATE_FAULT) {

		if (instance->disable_online_ctrl_reset == 1) {
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas_initiate_ocr_if_fw_is_faulty: "
			    "FW in Fault state, detected in ISR: "
			    "FW doesn't support ocr "));
			return (ADAPTER_RESET_NOT_REQUIRED);
		} else {
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas_initiate_ocr_if_fw_is_faulty: "
			    "FW in Fault state, detected in ISR: FW supports ocr "));
			return (ADAPTER_RESET_REQUIRED);
		}
	}
	return (ADAPTER_RESET_NOT_REQUIRED);
}

/*
 * mrsas_softintr - The Software ISR
 * @param arg : HBA soft state
 *
 * called from high-level interrupt if hi-level interrupt are not there,
 * otherwise triggered as a soft interrupt
 */
static uint_t
mrsas_softintr(struct mrsas_instance *instance)
{
	struct scsi_pkt		*pkt;
	struct scsa_cmd		*acmd;
	struct mrsas_cmd	*cmd;
	struct mlist_head	*pos, *next;
	mlist_t			process_list;
	struct mrsas_header	*hdr;
	struct scsi_arq_status	*arqstat;

	con_log(CL_ANN1, (CE_CONT, "mrsas_softintr called"));

	ASSERT(instance);

	mutex_enter(&instance->completed_pool_mtx);

	if (mlist_empty(&instance->completed_pool_list)) {
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_CLAIMED);
	}

	instance->softint_running = 1;

	/* detach the completed-command list under the lock */
	INIT_LIST_HEAD(&process_list);
	mlist_splice(&instance->completed_pool_list, &process_list);
	INIT_LIST_HEAD(&instance->completed_pool_list);

mutex_exit(&instance->completed_pool_mtx); 3293 3294 /* perform all callbacks first, before releasing the SCBs */ 3295 mlist_for_each_safe(pos, next, &process_list) { 3296 cmd = mlist_entry(pos, struct mrsas_cmd, list); 3297 3298 /* syncronize the Cmd frame for the controller */ 3299 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 3300 0, 0, DDI_DMA_SYNC_FORCPU); 3301 3302 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) != 3303 DDI_SUCCESS) { 3304 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 3305 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 3306 con_log(CL_ANN1, (CE_WARN, 3307 "mrsas_softintr: " 3308 "FMA check reports DMA handle failure")); 3309 return (DDI_INTR_CLAIMED); 3310 } 3311 3312 hdr = &cmd->frame->hdr; 3313 3314 /* remove the internal command from the process list */ 3315 mlist_del_init(&cmd->list); 3316 3317 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) { 3318 case MFI_CMD_OP_PD_SCSI: 3319 case MFI_CMD_OP_LD_SCSI: 3320 case MFI_CMD_OP_LD_READ: 3321 case MFI_CMD_OP_LD_WRITE: 3322 /* 3323 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI 3324 * could have been issued either through an 3325 * IO path or an IOCTL path. If it was via IOCTL, 3326 * we will send it to internal completion. 
3327 */ 3328 if (cmd->sync_cmd == MRSAS_TRUE) { 3329 complete_cmd_in_sync_mode(instance, cmd); 3330 break; 3331 } 3332 3333 /* regular commands */ 3334 acmd = cmd->cmd; 3335 pkt = CMD2PKT(acmd); 3336 3337 if (acmd->cmd_flags & CFLAG_DMAVALID) { 3338 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3339 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3340 acmd->cmd_dma_offset, 3341 acmd->cmd_dma_len, 3342 DDI_DMA_SYNC_FORCPU); 3343 } 3344 } 3345 3346 pkt->pkt_reason = CMD_CMPLT; 3347 pkt->pkt_statistics = 0; 3348 pkt->pkt_state = STATE_GOT_BUS 3349 | STATE_GOT_TARGET | STATE_SENT_CMD 3350 | STATE_XFERRED_DATA | STATE_GOT_STATUS; 3351 3352 con_log(CL_ANN1, (CE_CONT, 3353 "CDB[0] = %x completed for %s: size %lx context %x", 3354 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"), 3355 acmd->cmd_dmacount, hdr->context)); 3356 DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0], 3357 uint_t, acmd->cmd_cdblen, ulong_t, 3358 acmd->cmd_dmacount); 3359 3360 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) { 3361 struct scsi_inquiry *inq; 3362 3363 if (acmd->cmd_dmacount != 0) { 3364 bp_mapin(acmd->cmd_buf); 3365 inq = (struct scsi_inquiry *) 3366 acmd->cmd_buf->b_un.b_addr; 3367 3368 /* don't expose physical drives to OS */ 3369 if (acmd->islogical && 3370 (hdr->cmd_status == MFI_STAT_OK)) { 3371 display_scsi_inquiry( 3372 (caddr_t)inq); 3373 } else if ((hdr->cmd_status == 3374 MFI_STAT_OK) && inq->inq_dtype == 3375 DTYPE_DIRECT) { 3376 3377 display_scsi_inquiry( 3378 (caddr_t)inq); 3379 3380 /* for physical disk */ 3381 hdr->cmd_status = 3382 MFI_STAT_DEVICE_NOT_FOUND; 3383 } 3384 } 3385 } 3386 3387 DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd, 3388 uint8_t, hdr->cmd_status); 3389 3390 switch (hdr->cmd_status) { 3391 case MFI_STAT_OK: 3392 pkt->pkt_scbp[0] = STATUS_GOOD; 3393 break; 3394 case MFI_STAT_LD_CC_IN_PROGRESS: 3395 case MFI_STAT_LD_RECON_IN_PROGRESS: 3396 pkt->pkt_scbp[0] = STATUS_GOOD; 3397 break; 3398 case MFI_STAT_LD_INIT_IN_PROGRESS: 3399 con_log(CL_ANN, 3400 (CE_WARN, 
"Initialization in Progress")); 3401 pkt->pkt_reason = CMD_TRAN_ERR; 3402 3403 break; 3404 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3405 con_log(CL_ANN1, (CE_CONT, "scsi_done error")); 3406 3407 pkt->pkt_reason = CMD_CMPLT; 3408 ((struct scsi_status *) 3409 pkt->pkt_scbp)->sts_chk = 1; 3410 3411 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) { 3412 3413 con_log(CL_ANN, 3414 (CE_WARN, "TEST_UNIT_READY fail")); 3415 3416 } else { 3417 pkt->pkt_state |= STATE_ARQ_DONE; 3418 arqstat = (void *)(pkt->pkt_scbp); 3419 arqstat->sts_rqpkt_reason = CMD_CMPLT; 3420 arqstat->sts_rqpkt_resid = 0; 3421 arqstat->sts_rqpkt_state |= 3422 STATE_GOT_BUS | STATE_GOT_TARGET 3423 | STATE_SENT_CMD 3424 | STATE_XFERRED_DATA; 3425 *(uint8_t *)&arqstat->sts_rqpkt_status = 3426 STATUS_GOOD; 3427 ddi_rep_get8( 3428 cmd->frame_dma_obj.acc_handle, 3429 (uint8_t *) 3430 &(arqstat->sts_sensedata), 3431 cmd->sense, 3432 acmd->cmd_scblen - 3433 offsetof(struct scsi_arq_status, 3434 sts_sensedata), DDI_DEV_AUTOINCR); 3435 } 3436 break; 3437 case MFI_STAT_LD_OFFLINE: 3438 case MFI_STAT_DEVICE_NOT_FOUND: 3439 con_log(CL_ANN1, (CE_CONT, 3440 "mrsas_softintr:device not found error")); 3441 pkt->pkt_reason = CMD_DEV_GONE; 3442 pkt->pkt_statistics = STAT_DISCON; 3443 break; 3444 case MFI_STAT_LD_LBA_OUT_OF_RANGE: 3445 pkt->pkt_state |= STATE_ARQ_DONE; 3446 pkt->pkt_reason = CMD_CMPLT; 3447 ((struct scsi_status *) 3448 pkt->pkt_scbp)->sts_chk = 1; 3449 3450 arqstat = (void *)(pkt->pkt_scbp); 3451 arqstat->sts_rqpkt_reason = CMD_CMPLT; 3452 arqstat->sts_rqpkt_resid = 0; 3453 arqstat->sts_rqpkt_state |= STATE_GOT_BUS 3454 | STATE_GOT_TARGET | STATE_SENT_CMD 3455 | STATE_XFERRED_DATA; 3456 *(uint8_t *)&arqstat->sts_rqpkt_status = 3457 STATUS_GOOD; 3458 3459 arqstat->sts_sensedata.es_valid = 1; 3460 arqstat->sts_sensedata.es_key = 3461 KEY_ILLEGAL_REQUEST; 3462 arqstat->sts_sensedata.es_class = 3463 CLASS_EXTENDED_SENSE; 3464 3465 /* 3466 * LOGICAL BLOCK ADDRESS OUT OF RANGE: 3467 * ASC: 0x21h; ASCQ: 0x00h; 3468 */ 
3469 arqstat->sts_sensedata.es_add_code = 0x21; 3470 arqstat->sts_sensedata.es_qual_code = 0x00; 3471 3472 break; 3473 3474 default: 3475 con_log(CL_ANN, (CE_CONT, "Unknown status!")); 3476 pkt->pkt_reason = CMD_TRAN_ERR; 3477 3478 break; 3479 } 3480 3481 atomic_add_16(&instance->fw_outstanding, (-1)); 3482 3483 (void) mrsas_common_check(instance, cmd); 3484 3485 if (acmd->cmd_dmahandle) { 3486 if (mrsas_check_dma_handle( 3487 acmd->cmd_dmahandle) != DDI_SUCCESS) { 3488 ddi_fm_service_impact(instance->dip, 3489 DDI_SERVICE_UNAFFECTED); 3490 pkt->pkt_reason = CMD_TRAN_ERR; 3491 pkt->pkt_statistics = 0; 3492 } 3493 } 3494 3495 /* Call the callback routine */ 3496 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && 3497 pkt->pkt_comp) { 3498 3499 con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr: " 3500 "posting to scsa cmd %p index %x pkt %p " 3501 "time %llx", (void *)cmd, cmd->index, 3502 (void *)pkt, gethrtime())); 3503 (*pkt->pkt_comp)(pkt); 3504 3505 } 3506 return_mfi_pkt(instance, cmd); 3507 break; 3508 case MFI_CMD_OP_SMP: 3509 case MFI_CMD_OP_STP: 3510 complete_cmd_in_sync_mode(instance, cmd); 3511 break; 3512 case MFI_CMD_OP_DCMD: 3513 /* see if got an event notification */ 3514 if (ddi_get32(cmd->frame_dma_obj.acc_handle, 3515 &cmd->frame->dcmd.opcode) == 3516 MR_DCMD_CTRL_EVENT_WAIT) { 3517 if ((instance->aen_cmd == cmd) && 3518 (instance->aen_cmd->abort_aen)) { 3519 con_log(CL_ANN, (CE_WARN, 3520 "mrsas_softintr: " 3521 "aborted_aen returned")); 3522 } else { 3523 atomic_add_16(&instance->fw_outstanding, 3524 (-1)); 3525 service_mfi_aen(instance, cmd); 3526 } 3527 } else { 3528 complete_cmd_in_sync_mode(instance, cmd); 3529 } 3530 3531 break; 3532 case MFI_CMD_OP_ABORT: 3533 con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete")); 3534 /* 3535 * MFI_CMD_OP_ABORT successfully completed 3536 * in the synchronous mode 3537 */ 3538 complete_cmd_in_sync_mode(instance, cmd); 3539 break; 3540 default: 3541 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 3542 
ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 3543 3544 if (cmd->pkt != NULL) { 3545 pkt = cmd->pkt; 3546 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && 3547 pkt->pkt_comp) { 3548 3549 con_log(CL_ANN1, (CE_CONT, "posting to " 3550 "scsa cmd %p index %x pkt %p" 3551 "time %llx, default ", (void *)cmd, 3552 cmd->index, (void *)pkt, 3553 gethrtime())); 3554 3555 (*pkt->pkt_comp)(pkt); 3556 3557 } 3558 } 3559 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !")); 3560 break; 3561 } 3562 } 3563 3564 instance->softint_running = 0; 3565 3566 return (DDI_INTR_CLAIMED); 3567 } 3568 3569 /* 3570 * mrsas_alloc_dma_obj 3571 * 3572 * Allocate the memory and other resources for an dma object. 3573 */ 3574 static int 3575 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj, 3576 uchar_t endian_flags) 3577 { 3578 int i; 3579 size_t alen = 0; 3580 uint_t cookie_cnt; 3581 struct ddi_device_acc_attr tmp_endian_attr; 3582 3583 tmp_endian_attr = endian_attr; 3584 tmp_endian_attr.devacc_attr_endian_flags = endian_flags; 3585 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 3586 3587 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr, 3588 DDI_DMA_SLEEP, NULL, &obj->dma_handle); 3589 if (i != DDI_SUCCESS) { 3590 3591 switch (i) { 3592 case DDI_DMA_BADATTR : 3593 con_log(CL_ANN, (CE_WARN, 3594 "Failed ddi_dma_alloc_handle- Bad attribute")); 3595 break; 3596 case DDI_DMA_NORESOURCES : 3597 con_log(CL_ANN, (CE_WARN, 3598 "Failed ddi_dma_alloc_handle- No Resources")); 3599 break; 3600 default : 3601 con_log(CL_ANN, (CE_WARN, 3602 "Failed ddi_dma_alloc_handle: " 3603 "unknown status %d", i)); 3604 break; 3605 } 3606 3607 return (-1); 3608 } 3609 3610 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr, 3611 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, 3612 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) || 3613 alen < obj->size) { 3614 3615 ddi_dma_free_handle(&obj->dma_handle); 3616 3617 con_log(CL_ANN, (CE_WARN, "Failed : 
ddi_dma_mem_alloc")); 3618 3619 return (-1); 3620 } 3621 3622 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer, 3623 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, 3624 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) { 3625 3626 ddi_dma_mem_free(&obj->acc_handle); 3627 ddi_dma_free_handle(&obj->dma_handle); 3628 3629 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle")); 3630 3631 return (-1); 3632 } 3633 3634 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) { 3635 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 3636 return (-1); 3637 } 3638 3639 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) { 3640 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 3641 return (-1); 3642 } 3643 3644 return (cookie_cnt); 3645 } 3646 3647 /* 3648 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t) 3649 * 3650 * De-allocate the memory and other resources for an dma object, which must 3651 * have been alloated by a previous call to mrsas_alloc_dma_obj() 3652 */ 3653 static int 3654 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj) 3655 { 3656 3657 if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) { 3658 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 3659 return (DDI_FAILURE); 3660 } 3661 3662 if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) { 3663 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 3664 return (DDI_FAILURE); 3665 } 3666 3667 (void) ddi_dma_unbind_handle(obj.dma_handle); 3668 ddi_dma_mem_free(&obj.acc_handle); 3669 ddi_dma_free_handle(&obj.dma_handle); 3670 3671 return (DDI_SUCCESS); 3672 } 3673 3674 /* 3675 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *, 3676 * int, int (*)()) 3677 * 3678 * Allocate dma resources for a new scsi command 3679 */ 3680 static int 3681 mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt, 3682 struct buf *bp, int flags, int (*callback)()) 3683 { 3684 int 
/*
 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 * int, int (*)())
 *
 * Allocate dma resources for a new scsi command
 */
static int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int	dma_flags;
	int	(*cb)(caddr_t);
	int	i;

	ddi_dma_attr_t	tmp_dma_attr = mrsas_generic_dma_attr;
	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* transfer direction comes from the buf, not from 'flags' */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	/* honor the target driver's choice of sleeping vs. failing */
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* per-command attr: clamp SGL length to what the firmware supports */
	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);

		case DDI_DMA_NORESOURCES:
			bioerror(bp, 0);
			return (DDI_FAILURE);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "impossible result (0x%x)", i));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* partial map is only valid if we actually asked for it */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
			goto no_dma_cookies;
		}

		/* activate the current window to get its first cookie */
		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
			goto no_dma_cookies;
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		/* fully mapped: exactly one window covering the whole buf */
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

get_dma_cookies:
		/*
		 * Walk the cookies of this window, up to the lesser of the
		 * firmware SGE limit and the number of cookies bound; the
		 * first cookie is already in cmd_dmacookies[0].
		 */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		/* anything not covered by this window's cookies is resid */
		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (DDI_SUCCESS);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "impossible result (0x%x)", i));
		break;
	}

no_dma_cookies:
	/* bind failed (or was invalid): release the handle and clear state */
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (DDI_FAILURE);
}
/*
 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 */
static int
mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int	i = 0;

	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
	 */
	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
		/* single fully-mapped window: nothing to move, all done */
		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
			return (DDI_SUCCESS);
		}

		/* at last window, cannot move */
		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
			return (DDI_FAILURE);
		}

		/* activate the new window; its first cookie lands in [0] */
		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {
			return (DDI_FAILURE);
		}

		acmd->cmd_cookie = 0;
	} else {
		/* still more cookies in this window - get the next one */
		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[0]);
	}

	/* get remaining cookies in this window, up to our maximum */
	for (;;) {
		/*
		 * NOTE(review): cmd_dmacount accumulates across windows
		 * without being reset here - presumably intentional so that
		 * pkt_resid below reflects total progress; confirm against
		 * callers before changing.
		 */
		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
		acmd->cmd_cookie++;

		if (i == instance->max_num_sge ||
		    acmd->cmd_cookie == acmd->cmd_ncookies) {
			break;
		}

		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[i]);
	}

	acmd->cmd_cookiecnt = i;

	if (bp->b_bcount >= acmd->cmd_dmacount) {
		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
	} else {
		pkt->pkt_resid = 0;
	}

	return (DDI_SUCCESS);
}
*/ 3908 acmd->islogical = MRDRV_IS_LOGICAL(ap); 3909 acmd->device_id = MAP_DEVICE_ID(instance, ap); 3910 *cmd_done = 0; 3911 3912 /* get the command packet */ 3913 if (!(cmd = get_mfi_pkt(instance))) { 3914 DTRACE_PROBE2(build_cmd_mfi_err, uint16_t, 3915 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 3916 return (NULL); 3917 } 3918 3919 cmd->retry_count_for_ocr = 0; 3920 3921 acc_handle = cmd->frame_dma_obj.acc_handle; 3922 3923 /* Clear the frame buffer and assign back the context id */ 3924 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 3925 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index); 3926 3927 cmd->pkt = pkt; 3928 cmd->cmd = acmd; 3929 DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0], 3930 ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len); 3931 3932 /* lets get the command directions */ 3933 if (acmd->cmd_flags & CFLAG_DMASEND) { 3934 flags = MFI_FRAME_DIR_WRITE; 3935 3936 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3937 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3938 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3939 DDI_DMA_SYNC_FORDEV); 3940 } 3941 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) { 3942 flags = MFI_FRAME_DIR_READ; 3943 3944 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3945 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3946 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3947 DDI_DMA_SYNC_FORCPU); 3948 } 3949 } else { 3950 flags = MFI_FRAME_DIR_NONE; 3951 } 3952 3953 if (instance->flag_ieee) { 3954 flags |= MFI_FRAME_IEEE; 3955 } 3956 flags |= MFI_FRAME_SGL64; 3957 3958 switch (pkt->pkt_cdbp[0]) { 3959 3960 /* 3961 * case SCMD_SYNCHRONIZE_CACHE: 3962 * flush_cache(instance); 3963 * return_mfi_pkt(instance, cmd); 3964 * *cmd_done = 1; 3965 * 3966 * return (NULL); 3967 */ 3968 3969 case SCMD_READ: 3970 case SCMD_WRITE: 3971 case SCMD_READ_G1: 3972 case SCMD_WRITE_G1: 3973 if (acmd->islogical) { 3974 ldio = (struct mrsas_io_frame *)cmd->frame; 3975 3976 /* 3977 * preare the Logical IO frame: 3978 * 2nd bit is 
zero for all read cmds 3979 */ 3980 ddi_put8(acc_handle, &ldio->cmd, 3981 (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE 3982 : MFI_CMD_OP_LD_READ); 3983 ddi_put8(acc_handle, &ldio->cmd_status, 0x0); 3984 ddi_put8(acc_handle, &ldio->scsi_status, 0x0); 3985 ddi_put8(acc_handle, &ldio->target_id, acmd->device_id); 3986 ddi_put16(acc_handle, &ldio->timeout, 0); 3987 ddi_put8(acc_handle, &ldio->reserved_0, 0); 3988 ddi_put16(acc_handle, &ldio->pad_0, 0); 3989 ddi_put16(acc_handle, &ldio->flags, flags); 3990 3991 /* Initialize sense Information */ 3992 bzero(cmd->sense, SENSE_LENGTH); 3993 ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH); 3994 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0); 3995 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo, 3996 cmd->sense_phys_addr); 3997 ddi_put32(acc_handle, &ldio->start_lba_hi, 0); 3998 ddi_put8(acc_handle, &ldio->access_byte, 3999 (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0); 4000 ddi_put8(acc_handle, &ldio->sge_count, 4001 acmd->cmd_cookiecnt); 4002 if (instance->flag_ieee) { 4003 mfi_sgl_ieee = 4004 (struct mrsas_sge_ieee *)&ldio->sgl; 4005 } else { 4006 mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl; 4007 } 4008 4009 context = ddi_get32(acc_handle, &ldio->context); 4010 4011 if (acmd->cmd_cdblen == CDB_GROUP0) { 4012 ddi_put32(acc_handle, &ldio->lba_count, ( 4013 (uint16_t)(pkt->pkt_cdbp[4]))); 4014 4015 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 4016 ((uint32_t)(pkt->pkt_cdbp[3])) | 4017 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) | 4018 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) 4019 << 16))); 4020 } else if (acmd->cmd_cdblen == CDB_GROUP1) { 4021 ddi_put32(acc_handle, &ldio->lba_count, ( 4022 ((uint16_t)(pkt->pkt_cdbp[8])) | 4023 ((uint16_t)(pkt->pkt_cdbp[7]) << 8))); 4024 4025 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 4026 ((uint32_t)(pkt->pkt_cdbp[5])) | 4027 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 4028 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 4029 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 4030 } else if 
(acmd->cmd_cdblen == CDB_GROUP2) { 4031 ddi_put32(acc_handle, &ldio->lba_count, ( 4032 ((uint16_t)(pkt->pkt_cdbp[9])) | 4033 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) | 4034 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) | 4035 ((uint16_t)(pkt->pkt_cdbp[6]) << 24))); 4036 4037 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 4038 ((uint32_t)(pkt->pkt_cdbp[5])) | 4039 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 4040 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 4041 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 4042 } else if (acmd->cmd_cdblen == CDB_GROUP3) { 4043 ddi_put32(acc_handle, &ldio->lba_count, ( 4044 ((uint16_t)(pkt->pkt_cdbp[13])) | 4045 ((uint16_t)(pkt->pkt_cdbp[12]) << 8) | 4046 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) | 4047 ((uint16_t)(pkt->pkt_cdbp[10]) << 24))); 4048 4049 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 4050 ((uint32_t)(pkt->pkt_cdbp[9])) | 4051 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | 4052 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | 4053 ((uint32_t)(pkt->pkt_cdbp[6]) << 24))); 4054 4055 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 4056 ((uint32_t)(pkt->pkt_cdbp[5])) | 4057 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 4058 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 4059 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 4060 } 4061 4062 break; 4063 } 4064 /* fall through For all non-rd/wr cmds */ 4065 default: 4066 4067 switch (pkt->pkt_cdbp[0]) { 4068 case SCMD_MODE_SENSE: 4069 case SCMD_MODE_SENSE_G1: { 4070 union scsi_cdb *cdbp; 4071 uint16_t page_code; 4072 4073 cdbp = (void *)pkt->pkt_cdbp; 4074 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0]; 4075 switch (page_code) { 4076 case 0x3: 4077 case 0x4: 4078 (void) mrsas_mode_sense_build(pkt); 4079 return_mfi_pkt(instance, cmd); 4080 *cmd_done = 1; 4081 return (NULL); 4082 } 4083 break; 4084 } 4085 default: 4086 break; 4087 } 4088 4089 pthru = (struct mrsas_pthru_frame *)cmd->frame; 4090 4091 /* prepare the DCDB frame */ 4092 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ? 
4093 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI); 4094 ddi_put8(acc_handle, &pthru->cmd_status, 0x0); 4095 ddi_put8(acc_handle, &pthru->scsi_status, 0x0); 4096 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id); 4097 ddi_put8(acc_handle, &pthru->lun, 0); 4098 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen); 4099 ddi_put16(acc_handle, &pthru->timeout, 0); 4100 ddi_put16(acc_handle, &pthru->flags, flags); 4101 ddi_put32(acc_handle, &pthru->data_xfer_len, 4102 acmd->cmd_dmacount); 4103 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt); 4104 if (instance->flag_ieee) { 4105 mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl; 4106 } else { 4107 mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl; 4108 } 4109 4110 bzero(cmd->sense, SENSE_LENGTH); 4111 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH); 4112 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 4113 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 4114 cmd->sense_phys_addr); 4115 4116 context = ddi_get32(acc_handle, &pthru->context); 4117 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp, 4118 (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR); 4119 4120 break; 4121 } 4122 #ifdef lint 4123 context = context; 4124 #endif 4125 /* prepare the scatter-gather list for the firmware */ 4126 if (instance->flag_ieee) { 4127 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) { 4128 ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr, 4129 acmd->cmd_dmacookies[i].dmac_laddress); 4130 ddi_put32(acc_handle, &mfi_sgl_ieee->length, 4131 acmd->cmd_dmacookies[i].dmac_size); 4132 } 4133 sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt; 4134 } else { 4135 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) { 4136 ddi_put64(acc_handle, &mfi_sgl->phys_addr, 4137 acmd->cmd_dmacookies[i].dmac_laddress); 4138 ddi_put32(acc_handle, &mfi_sgl->length, 4139 acmd->cmd_dmacookies[i].dmac_size); 4140 } 4141 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt; 4142 } 4143 4144 
cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) + 4145 ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1; 4146 4147 if (cmd->frame_count >= 8) { 4148 cmd->frame_count = 8; 4149 } 4150 4151 return (cmd); 4152 } 4153 #ifndef __sparc 4154 static int 4155 wait_for_outstanding(struct mrsas_instance *instance) 4156 { 4157 int i; 4158 uint32_t wait_time = 90; 4159 4160 for (i = 0; i < wait_time; i++) { 4161 if (!instance->fw_outstanding) { 4162 break; 4163 } 4164 drv_usecwait(MILLISEC); /* wait for 1000 usecs */; 4165 } 4166 4167 if (instance->fw_outstanding) { 4168 return (1); 4169 } 4170 4171 return (0); 4172 } 4173 #endif /* __sparc */ 4174 /* 4175 * issue_mfi_pthru 4176 */ 4177 static int 4178 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4179 struct mrsas_cmd *cmd, int mode) 4180 { 4181 void *ubuf; 4182 uint32_t kphys_addr = 0; 4183 uint32_t xferlen = 0; 4184 uint_t model; 4185 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 4186 dma_obj_t pthru_dma_obj; 4187 struct mrsas_pthru_frame *kpthru; 4188 struct mrsas_pthru_frame *pthru; 4189 int i; 4190 pthru = &cmd->frame->pthru; 4191 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0]; 4192 4193 if (instance->adapterresetinprogress) { 4194 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: Reset flag set, " 4195 "returning mfi_pkt and setting TRAN_BUSY\n")); 4196 return (DDI_FAILURE); 4197 } 4198 model = ddi_model_convert_from(mode & FMODELS); 4199 if (model == DDI_MODEL_ILP32) { 4200 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 4201 4202 xferlen = kpthru->sgl.sge32[0].length; 4203 4204 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 4205 } else { 4206 #ifdef _ILP32 4207 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 4208 xferlen = kpthru->sgl.sge32[0].length; 4209 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 4210 #else 4211 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64")); 4212 xferlen = kpthru->sgl.sge64[0].length; 
4213 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr; 4214 #endif 4215 } 4216 4217 if (xferlen) { 4218 /* means IOCTL requires DMA */ 4219 /* allocate the data transfer buffer */ 4220 pthru_dma_obj.size = xferlen; 4221 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr; 4222 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 4223 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 4224 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1; 4225 pthru_dma_obj.dma_attr.dma_attr_align = 1; 4226 4227 /* allocate kernel buffer for DMA */ 4228 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj, 4229 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 4230 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 4231 "could not allocate data transfer buffer.")); 4232 return (DDI_FAILURE); 4233 } 4234 (void) memset(pthru_dma_obj.buffer, 0, xferlen); 4235 4236 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4237 if (kpthru->flags & MFI_FRAME_DIR_WRITE) { 4238 for (i = 0; i < xferlen; i++) { 4239 if (ddi_copyin((uint8_t *)ubuf+i, 4240 (uint8_t *)pthru_dma_obj.buffer+i, 4241 1, mode)) { 4242 con_log(CL_ANN, (CE_WARN, 4243 "issue_mfi_pthru : " 4244 "copy from user space failed")); 4245 return (DDI_FAILURE); 4246 } 4247 } 4248 } 4249 4250 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address; 4251 } 4252 4253 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd); 4254 ddi_put8(acc_handle, &pthru->sense_len, 0); 4255 ddi_put8(acc_handle, &pthru->cmd_status, 0); 4256 ddi_put8(acc_handle, &pthru->scsi_status, 0); 4257 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id); 4258 ddi_put8(acc_handle, &pthru->lun, kpthru->lun); 4259 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len); 4260 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count); 4261 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout); 4262 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len); 4263 4264 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 4265 /* pthru->sense_buf_phys_addr_lo = 
cmd->sense_phys_addr; */ 4266 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); 4267 4268 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb, 4269 pthru->cdb_len, DDI_DEV_AUTOINCR); 4270 4271 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64); 4272 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen); 4273 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr); 4274 4275 cmd->sync_cmd = MRSAS_TRUE; 4276 cmd->frame_count = 1; 4277 4278 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4279 con_log(CL_ANN, (CE_WARN, 4280 "issue_mfi_pthru: fw_ioctl failed")); 4281 } else { 4282 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) { 4283 for (i = 0; i < xferlen; i++) { 4284 if (ddi_copyout( 4285 (uint8_t *)pthru_dma_obj.buffer+i, 4286 (uint8_t *)ubuf+i, 1, mode)) { 4287 con_log(CL_ANN, (CE_WARN, 4288 "issue_mfi_pthru : " 4289 "copy to user space failed")); 4290 return (DDI_FAILURE); 4291 } 4292 } 4293 } 4294 } 4295 4296 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status); 4297 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status); 4298 4299 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, " 4300 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status)); 4301 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t, 4302 kpthru->cmd_status, uint8_t, kpthru->scsi_status); 4303 4304 if (xferlen) { 4305 /* free kernel buffer */ 4306 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS) 4307 return (DDI_FAILURE); 4308 } 4309 4310 return (DDI_SUCCESS); 4311 } 4312 4313 /* 4314 * issue_mfi_dcmd 4315 */ 4316 static int 4317 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4318 struct mrsas_cmd *cmd, int mode) 4319 { 4320 void *ubuf; 4321 uint32_t kphys_addr = 0; 4322 uint32_t xferlen = 0; 4323 uint32_t model; 4324 dma_obj_t dcmd_dma_obj; 4325 struct mrsas_dcmd_frame *kdcmd; 4326 struct mrsas_dcmd_frame *dcmd; 4327 ddi_acc_handle_t 
acc_handle = cmd->frame_dma_obj.acc_handle; 4328 int i; 4329 dcmd = &cmd->frame->dcmd; 4330 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; 4331 if (instance->adapterresetinprogress) { 4332 con_log(CL_ANN1, (CE_NOTE, "Reset flag set, " 4333 "returning mfi_pkt and setting TRAN_BUSY\n")); 4334 return (DDI_FAILURE); 4335 } 4336 model = ddi_model_convert_from(mode & FMODELS); 4337 if (model == DDI_MODEL_ILP32) { 4338 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 4339 4340 xferlen = kdcmd->sgl.sge32[0].length; 4341 4342 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4343 } else { 4344 #ifdef _ILP32 4345 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 4346 xferlen = kdcmd->sgl.sge32[0].length; 4347 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4348 #else 4349 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64")); 4350 xferlen = kdcmd->sgl.sge64[0].length; 4351 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 4352 #endif 4353 } 4354 if (xferlen) { 4355 /* means IOCTL requires DMA */ 4356 /* allocate the data transfer buffer */ 4357 dcmd_dma_obj.size = xferlen; 4358 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; 4359 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 4360 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 4361 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 4362 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 4363 4364 /* allocate kernel buffer for DMA */ 4365 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, 4366 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 4367 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 4368 "could not allocate data transfer buffer.")); 4369 return (DDI_FAILURE); 4370 } 4371 (void) memset(dcmd_dma_obj.buffer, 0, xferlen); 4372 4373 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4374 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) { 4375 for (i = 0; i < xferlen; i++) { 4376 if (ddi_copyin((uint8_t *)ubuf + i, 4377 (uint8_t *)dcmd_dma_obj.buffer + i, 4378 1, mode)) { 
4379 con_log(CL_ANN, (CE_WARN, 4380 "issue_mfi_dcmd : " 4381 "copy from user space failed")); 4382 return (DDI_FAILURE); 4383 } 4384 } 4385 } 4386 4387 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 4388 } 4389 4390 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd); 4391 ddi_put8(acc_handle, &dcmd->cmd_status, 0); 4392 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count); 4393 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout); 4394 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len); 4395 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode); 4396 4397 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b, 4398 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR); 4399 4400 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64); 4401 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen); 4402 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr); 4403 4404 cmd->sync_cmd = MRSAS_TRUE; 4405 cmd->frame_count = 1; 4406 4407 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4408 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed")); 4409 } else { 4410 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) { 4411 for (i = 0; i < xferlen; i++) { 4412 if (ddi_copyout( 4413 (uint8_t *)dcmd_dma_obj.buffer + i, 4414 (uint8_t *)ubuf + i, 4415 1, mode)) { 4416 con_log(CL_ANN, (CE_WARN, 4417 "issue_mfi_dcmd : " 4418 "copy to user space failed")); 4419 return (DDI_FAILURE); 4420 } 4421 } 4422 } 4423 } 4424 4425 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status); 4426 DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t, 4427 kdcmd->cmd, uint8_t, kdcmd->cmd_status); 4428 4429 if (xferlen) { 4430 /* free kernel buffer */ 4431 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 4432 return (DDI_FAILURE); 4433 } 4434 4435 return (DDI_SUCCESS); 4436 } 4437 4438 /* 4439 * issue_mfi_smp 4440 */ 4441 static int 4442 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 
    struct mrsas_cmd *cmd, int mode)
{
	void *request_ubuf;	/* user-space SMP request buffer */
	void *response_ubuf;	/* user-space SMP response buffer */
	uint32_t request_xferlen = 0;
	uint32_t response_xferlen = 0;
	uint_t model;
	dma_obj_t request_dma_obj;
	dma_obj_t response_dma_obj;
	ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
	struct mrsas_smp_frame *ksmp;	/* frame as supplied by the ioctl */
	struct mrsas_smp_frame *smp;	/* DMA-able frame sent to firmware */
	struct mrsas_sge32 *sge32;
#ifndef _ILP32
	struct mrsas_sge64 *sge64;
#endif
	int i;
	uint64_t tmp_sas_addr;

	smp = &cmd->frame->smp;
	ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];

	/* Refuse new passthrough work while an adapter reset is running */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (DDI_FAILURE);
	}
	/* Pick up the two user buffers from the SGL the app passed in */
	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));

		sge32 = &ksmp->sgl[0].sge32[0];
		response_xferlen = sge32[0].length;
		request_xferlen = sge32[1].length;
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
		    "response_xferlen = %x, request_xferlen = %x",
		    response_xferlen, request_xferlen));

		response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
		request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
		    "response_ubuf = %p, request_ubuf = %p",
		    response_ubuf, request_ubuf));
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));

		sge32 = &ksmp->sgl[0].sge32[0];
		response_xferlen = sge32[0].length;
		request_xferlen = sge32[1].length;
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
		    "response_xferlen = %x, request_xferlen = %x",
		    response_xferlen, request_xferlen));

		response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
		request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
		con_log(CL_ANN1, (CE_NOTE,
		    "issue_mfi_smp: "
		    "response_ubuf = %p, request_ubuf = %p",
		    response_ubuf, request_ubuf));
#else
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64"));

		sge64 = &ksmp->sgl[0].sge64[0];
		response_xferlen = sge64[0].length;
		request_xferlen = sge64[1].length;

		response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
		request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
#endif
	}
	if (request_xferlen) {
		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		request_dma_obj.size = request_xferlen;
		request_dma_obj.dma_attr = mrsas_generic_dma_attr;
		request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		request_dma_obj.dma_attr.dma_attr_sgllen = 1;
		request_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
			    "could not allocate data transfer buffer."));
			return (DDI_FAILURE);
		}
		(void) memset(request_dma_obj.buffer, 0, request_xferlen);

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		for (i = 0; i < request_xferlen; i++) {
			if (ddi_copyin((uint8_t *)request_ubuf + i,
			    (uint8_t *)request_dma_obj.buffer + i,
			    1, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
				    "copy from user space failed"));
				/*
				 * NOTE(review): request_dma_obj is leaked on
				 * this early return.
				 */
				return (DDI_FAILURE);
			}
		}
	}

	if (response_xferlen) {
		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		response_dma_obj.size = response_xferlen;
		response_dma_obj.dma_attr = mrsas_generic_dma_attr;
		response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		response_dma_obj.dma_attr.dma_attr_sgllen = 1;
		response_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
			    "could not allocate data transfer buffer."));
			return (DDI_FAILURE);
		}
		(void) memset(response_dma_obj.buffer, 0, response_xferlen);

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		for (i = 0; i < response_xferlen; i++) {
			if (ddi_copyin((uint8_t *)response_ubuf + i,
			    (uint8_t *)response_dma_obj.buffer + i,
			    1, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
				    "copy from user space failed"));
				/*
				 * NOTE(review): request_dma_obj and
				 * response_dma_obj are leaked on this
				 * early return.
				 */
				return (DDI_FAILURE);
			}
		}
	}

	/* Populate the DMA-able SMP frame from the sanitized copy */
	ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
	ddi_put8(acc_handle, &smp->cmd_status, 0);
	ddi_put8(acc_handle, &smp->connection_status, 0);
	ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
	/* smp->context = ksmp->context; */
	ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
	ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);

	/* bcopy avoids any alignment issue on the 64-bit SAS address */
	bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
	    sizeof (uint64_t));
	ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);

	ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);

	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE,
		    "issue_mfi_smp: DDI_MODEL_ILP32"));

		/*
		 * NOTE(review): when response_xferlen or request_xferlen is
		 * zero, the corresponding dma_obj was never allocated, yet
		 * its dma_cookie[0] is read here -- presumably reading an
		 * uninitialized stack object; verify.
		 */
		sge32 = &smp->sgl[0].sge32[0];
		ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
		ddi_put32(acc_handle, &sge32[0].phys_addr,
		    response_dma_obj.dma_cookie[0].dmac_address);
		ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
		ddi_put32(acc_handle, &sge32[1].phys_addr,
		    request_dma_obj.dma_cookie[0].dmac_address);
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE,
		    "issue_mfi_smp: DDI_MODEL_ILP32"));
		sge32 = &smp->sgl[0].sge32[0];
		ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
		ddi_put32(acc_handle, &sge32[0].phys_addr,
		    response_dma_obj.dma_cookie[0].dmac_address);
		ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
		ddi_put32(acc_handle, &sge32[1].phys_addr,
		    request_dma_obj.dma_cookie[0].dmac_address);
#else
		con_log(CL_ANN1, (CE_NOTE,
		    "issue_mfi_smp: DDI_MODEL_LP64"));
		sge64 = &smp->sgl[0].sge64[0];
		ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
		ddi_put64(acc_handle, &sge64[0].phys_addr,
		    response_dma_obj.dma_cookie[0].dmac_address);
		ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
		ddi_put64(acc_handle, &sge64[1].phys_addr,
		    request_dma_obj.dma_cookie[0].dmac_address);
#endif
	}
	/*
	 * NOTE(review): in the LP64 branch above sge32 is never assigned,
	 * but this debug log dereferences it -- if con_log evaluates its
	 * arguments at this debug level, that is an uninitialized pointer
	 * read; confirm con_log's argument-evaluation behavior.
	 */
	con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp : "
	    "smp->response_xferlen = %d, smp->request_xferlen = %d "
	    "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
	    ddi_get32(acc_handle, &sge32[1].length),
	    ddi_get32(acc_handle, &smp->data_xfer_len)));

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN,
		    "issue_mfi_smp: fw_ioctl failed"));
	} else {
		con_log(CL_ANN1, (CE_NOTE,
		    "issue_mfi_smp: copy to user space"));

		if (request_xferlen) {
			for (i = 0; i < request_xferlen; i++) {
				if (ddi_copyout(
				    (uint8_t *)request_dma_obj.buffer +
				    i, (uint8_t *)request_ubuf + i,
				    1, mode)) {
					con_log(CL_ANN, (CE_WARN,
					    "issue_mfi_smp : copy to user space"
					    " failed"));
					return (DDI_FAILURE);
				}
			}
		}

		if (response_xferlen) {
			for (i = 0; i < response_xferlen; i++) {
				if (ddi_copyout(
				    (uint8_t *)response_dma_obj.buffer
				    + i, (uint8_t *)response_ubuf
				    + i, 1, mode)) {
					con_log(CL_ANN, (CE_WARN,
					    "issue_mfi_smp : copy to "
					    "user space failed"));
					return (DDI_FAILURE);
				}
			}
		}
	}

	/* Reflect firmware completion status back to the application */
	ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
	con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
	    ddi_get8(acc_handle, &smp->cmd_status)));
	DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status);

	if (request_xferlen) {
		/* free kernel buffer */
		if (mrsas_free_dma_obj(instance, request_dma_obj) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	if (response_xferlen) {
		/* free kernel buffer */
		if (mrsas_free_dma_obj(instance, response_dma_obj) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * issue_mfi_stp - build and issue an MFI STP (SATA tunneling) passthrough
 * frame on behalf of an application ioctl, bouncing the FIS and data
 * payloads through driver-allocated DMA buffers.
 */
static int
issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
    struct mrsas_cmd *cmd, int mode)
{
	void *fis_ubuf;		/* user-space FIS buffer */
	void *data_ubuf;	/* user-space data buffer */
	uint32_t fis_xferlen = 0;
	uint32_t data_xferlen = 0;
	uint_t model;
	dma_obj_t fis_dma_obj;
	dma_obj_t data_dma_obj;
	struct mrsas_stp_frame *kstp;	/* frame as supplied by the ioctl */
	struct mrsas_stp_frame *stp;	/* DMA-able frame sent to firmware */
	ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
	int i;

	stp = &cmd->frame->stp;
	kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];

	/* Refuse new passthrough work while an adapter reset is running */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (DDI_FAILURE);
	}
	/* Pick up the two user buffers from the SGL the app passed in */
	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));

		fis_xferlen = kstp->sgl.sge32[0].length;
		data_xferlen = kstp->sgl.sge32[1].length;

		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
	}
	else
	{
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));

		fis_xferlen = kstp->sgl.sge32[0].length;
		data_xferlen = kstp->sgl.sge32[1].length;

		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
#else
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64"));

		fis_xferlen = kstp->sgl.sge64[0].length;
		data_xferlen = kstp->sgl.sge64[1].length;

		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
#endif
	}


	if (fis_xferlen) {
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: "
		    "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));

		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		fis_dma_obj.size = fis_xferlen;
		fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
		fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
		fis_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
			    "could not allocate data transfer buffer."));
			return (DDI_FAILURE);
		}
		(void) memset(fis_dma_obj.buffer, 0, fis_xferlen);

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		for (i = 0; i < fis_xferlen; i++) {
			if (ddi_copyin((uint8_t *)fis_ubuf + i,
			    (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
				    "copy from user space failed"));
				/*
				 * NOTE(review): fis_dma_obj is leaked on this
				 * early return.
				 */
				return (DDI_FAILURE);
			}
		}
	}

	if (data_xferlen) {
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p "
		    "data_xferlen = %x", data_ubuf, data_xferlen));

		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		data_dma_obj.size = data_xferlen;
		data_dma_obj.dma_attr = mrsas_generic_dma_attr;
		data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		data_dma_obj.dma_attr.dma_attr_sgllen = 1;
		data_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
			    "could not allocate data transfer buffer."));
			return (DDI_FAILURE);
		}
		(void) memset(data_dma_obj.buffer, 0, data_xferlen);

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		for (i = 0; i < data_xferlen; i++) {
			if (ddi_copyin((uint8_t *)data_ubuf + i,
			    (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
				    "copy from user space failed"));
				/*
				 * NOTE(review): fis_dma_obj and data_dma_obj
				 * are leaked on this early return.
				 */
				return (DDI_FAILURE);
			}
		}
	}

	/* Populate the DMA-able STP frame from the sanitized copy */
	ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
	ddi_put8(acc_handle, &stp->cmd_status, 0);
	ddi_put8(acc_handle, &stp->connection_status, 0);
	ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
	ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);

	ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
	ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);

	/* Copy the 10-word FIS into the frame */
	ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
	    DDI_DEV_AUTOINCR);

	ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
	ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
	/*
	 * NOTE(review): when fis_xferlen or data_xferlen is zero the
	 * corresponding dma_obj was never allocated, yet its dma_cookie[0]
	 * is read below -- presumably an uninitialized stack read; verify.
	 */
	ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
	ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
	    fis_dma_obj.dma_cookie[0].dmac_address);
	ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
	ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
	    data_dma_obj.dma_cookie[0].dmac_address);

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
	} else {

		if (fis_xferlen) {
			for (i = 0; i < fis_xferlen; i++) {
				if (ddi_copyout(
				    (uint8_t *)fis_dma_obj.buffer + i,
				    (uint8_t *)fis_ubuf + i, 1, mode)) {
					con_log(CL_ANN, (CE_WARN,
					    "issue_mfi_stp : copy to "
					    "user space failed"));
					return (DDI_FAILURE);
				}
			}
		}
	}
	/*
	 * NOTE(review): unlike the fis copyout above, this data copyout is
	 * outside the else-branch, so it runs even when fw_ioctl failed --
	 * inconsistent with the fis path; looks unintentional, confirm.
	 */
	if (data_xferlen) {
		for (i = 0; i < data_xferlen; i++) {
			if (ddi_copyout(
			    (uint8_t *)data_dma_obj.buffer + i,
			    (uint8_t *)data_ubuf + i, 1, mode)) {
				con_log(CL_ANN, (CE_WARN,
				    "issue_mfi_stp : copy to"
				    " user space failed"));
				return (DDI_FAILURE);
			}
		}
	}

	/* Reflect firmware completion status back to the application */
	kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
	DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status);

	if (fis_xferlen) {
		/* free kernel buffer */
		if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	if (data_xferlen) {
		/* free kernel buffer */
		if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * fill_up_drv_ver - populate a mrsas_drv_ver structure with the driver's
 * identification strings (signature, OS, name, version, release date).
 * Note: memcpy with strlen() deliberately omits the NUL terminators; the
 * preceding memset zero-fills the structure.
 */
static void
fill_up_drv_ver(struct mrsas_drv_ver *dv)
{
	(void) memset(dv, 0, sizeof (struct mrsas_drv_ver));

	(void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
	(void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
	(void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
	(void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
	(void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
	    strlen(MRSAS_RELDATE));
}

/*
 *
 handle_drv_ioctl - service driver-private ioctl opcodes (driver version
 * and PCI information queries) that are answered by the driver itself
 * without involving the firmware.
 */
static int
handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
    int mode)
{
	int i;
	int rval = DDI_SUCCESS;
	int *props = NULL;
	void *ubuf;		/* user buffer from the ioctl SGL */

	uint8_t *pci_conf_buf;
	uint32_t xferlen;
	uint32_t num_props;
	uint_t model;
	struct mrsas_dcmd_frame *kdcmd;
	struct mrsas_drv_ver dv;
	struct mrsas_pci_information pi;

	kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];

	/* Pick up the user buffer/length from the model-dependent SGL */
	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_ILP32"));

		xferlen = kdcmd->sgl.sge32[0].length;

		ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_ILP32"));
		xferlen = kdcmd->sgl.sge32[0].length;
		ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
#else
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_LP64"));
		xferlen = kdcmd->sgl.sge64[0].length;
		ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
#endif
	}
	con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
	    "dataBuf=%p size=%d bytes", ubuf, xferlen));

	/*
	 * NOTE(review): xferlen comes from user space and is used directly
	 * as the ddi_copyout() size below without being clamped to
	 * sizeof (dv) / sizeof (pi) -- confirm whether a bound check is
	 * needed here.
	 */
	switch (kdcmd->opcode) {
	case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
		con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
		    "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));

		fill_up_drv_ver(&dv);

		if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
			    "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
			    "copy to user space failed"));
			kdcmd->cmd_status = 1;
			rval = 1;
		} else {
			kdcmd->cmd_status = 0;
		}
		break;
	case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
		con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
		    "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
		    0, "reg", &props, &num_props)) {
			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
			    "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
			    "ddi_prop_look_int_array failed"));
			/*
			 * NOTE(review): on lookup failure the code falls
			 * through and still copies out pi with
			 * busNumber/deviceNumber/functionNumber never
			 * assigned -- partially uninitialized data reaches
			 * user space; looks unintentional, confirm.
			 */
			rval = DDI_FAILURE;
		} else {

			/* Decode bus/dev/func from the first "reg" entry */
			pi.busNumber = (props[0] >> 16) & 0xFF;
			pi.deviceNumber = (props[0] >> 11) & 0x1f;
			pi.functionNumber = (props[0] >> 8) & 0x7;
			ddi_prop_free((void *)props);
		}

		/* Snapshot PCI config space into the tail of pi */
		pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;

		for (i = 0; i < (sizeof (struct mrsas_pci_information) -
		    offsetof(struct mrsas_pci_information, pciHeaderInfo));
		    i++) {
			pci_conf_buf[i] =
			    pci_config_get8(instance->pci_handle, i);
		}

		if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
			    "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
			    "copy to user space failed"));
			kdcmd->cmd_status = 1;
			rval = 1;
		} else {
			kdcmd->cmd_status = 0;
		}
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
		    "invalid driver specific IOCTL opcode = 0x%x",
		    kdcmd->opcode));
		kdcmd->cmd_status = 1;
		rval = DDI_FAILURE;
		break;
	}

	return (rval);
}

/*
 * handle_mfi_ioctl - dispatch a firmware-bound ioctl frame to the proper
 * issue_mfi_* helper based on the MFI opcode in the frame header.
 */
static int
handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
    int mode)
{
	int rval = DDI_SUCCESS;

	struct mrsas_header *hdr;
	struct mrsas_cmd *cmd;

	/* Reserve a driver command packet for this request */
	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN, "mr_sas: "
		    "failed to get a cmd packet"));
		DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}
	cmd->retry_count_for_ocr = 0;

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	hdr = (struct mrsas_header *)&ioctl->frame[0];

	/* Route by the MFI command opcode supplied by the application */
	switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
	case MFI_CMD_OP_DCMD:
		rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_SMP:
		rval = issue_mfi_smp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_STP:
		rval = issue_mfi_stp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_LD_SCSI:
	case MFI_CMD_OP_PD_SCSI:
		rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
		    "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
		rval = DDI_FAILURE;
		break;
	}

	/* FMA check of the access/DMA handles used by this command */
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
		rval = DDI_FAILURE;

	return_mfi_pkt(instance, cmd);

	return (rval);
}

/*
 * AEN
 */
static int
handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
{
	int rval = 0;

	/* Register (or re-register) for async event notification */
	rval = register_mfi_aen(instance, instance->aen_seq_num,
	    aen->class_locale_word);

	aen->cmd_status = (uint8_t)rval;

	return (rval);
}

/*
 * register_mfi_aen - issue (or re-issue) the MR_DCMD_CTRL_EVENT_WAIT DCMD
 * that subscribes the driver to firmware async events for the given
 * class/locale, merging with any registration already outstanding.
 */
static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct mrsas_cmd *cmd, *aen_cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		/* Read the class/locale of the registration already posted */
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* Merge old and new subscriptions into a superset */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (ENOMEM);
	}
	cmd->retry_count_for_ocr = 0;
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* Clear the event-detail landing buffer the firmware will fill */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
	/*
	 * NOTE(review): the locale was already byte-swapped with LE_16 when
	 * curr_aen was first loaded above; swapping it again here (and then
	 * LE_32 on the whole word) looks like a double swap on the merged
	 * re-registration path -- verify against the firmware's expected
	 * byte order.
	 */
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	curr_aen.word = LE_32(curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
	    curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_detail));

	instance->aen_seq_num = seq_num;


	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16 (&instance->fw_outstanding, 1); */
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}

/*
 * display_scsi_inquiry - format the vendor/model/revision/type fields of
 * a standard SCSI INQUIRY response into a single debug log line.
 */
static void
display_scsi_inquiry(caddr_t scsi_inq)
{
#define	MAX_SCSI_DEVICE_CODE	14
	int i;
	char inquiry_buf[256] = {0};
	int len;
	/* Peripheral device type names, indexed by scsi_inq[0] & 0x1f */
	const char *const scsi_device_types[] = {
		"Direct-Access ",
		"Sequential-Access",
		"Printer ",
		"Processor ",
		"WORM ",
		"CD-ROM ",
		"Scanner ",
		"Optical Device ",
		"Medium Changer ",
		"Communications ",
		"Unknown ",
		"Unknown ",
		"Unknown ",
		"Enclosure ",
	};

	len = 0;

	/*
	 * NOTE(review): every snprintf below is bounded by 265 - len, but
	 * inquiry_buf is only 256 bytes -- the size argument exceeds the
	 * buffer by 9 bytes (likely a 256 typo), a potential overflow when
	 * the text approaches the buffer end; confirm and fix the bound.
	 */
	len += snprintf(inquiry_buf + len, 265 - len, "  Vendor: ");
	for (i = 8; i < 16; i++) {
		len += snprintf(inquiry_buf + len, 265 - len, "%c",
		    scsi_inq[i]);
	}

	len += snprintf(inquiry_buf + len, 265 - len, "  Model: ");

	for (i = 16; i < 32; i++) {
		len += snprintf(inquiry_buf + len, 265 - len, "%c",
		    scsi_inq[i]);
	}

	len += snprintf(inquiry_buf + len, 265 - len, "  Rev: ");

	for (i = 32; i < 36; i++) {
		len += snprintf(inquiry_buf
		    + len, 265 - len, "%c",
		    scsi_inq[i]);
	}

	len += snprintf(inquiry_buf + len, 265 - len, "\n");


	/* Peripheral device type code from byte 0 */
	i = scsi_inq[0] & 0x1f;


	len += snprintf(inquiry_buf + len, 265 - len, "  Type:   %s ",
	    i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
	    "Unknown          ");


	len += snprintf(inquiry_buf + len, 265 - len,
	    "  ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);

	if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
		len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
	} else {
		len += snprintf(inquiry_buf + len, 265 - len, "\n");
	}

	/*
	 * NOTE(review): inquiry_buf is passed as the format string itself;
	 * INQUIRY data could contain '%' characters. Safer would be
	 * con_log(CL_ANN1, (CE_CONT, "%s", inquiry_buf)).
	 */
	con_log(CL_ANN1, (CE_CONT, inquiry_buf));
}

/*
 * io_timeout_checker - periodic (1 second) watchdog over the pending
 * command list. Decrements per-command timeouts and, on expiry, either
 * triggers online controller reset (OCR) or kills the adapter, then
 * reschedules itself while commands remain pending.
 */
static void
io_timeout_checker(void *arg)
{
	struct scsi_pkt *pkt;
	struct mrsas_instance *instance = arg;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	int time = 0;
	int counter = 0;	/* number of timed-out commands found */
	struct mlist_head *pos, *next;
	mlist_t process_list;

	/* Mark the timer as not armed while we run */
	instance->timeout_id = (timeout_id_t)-1;
	if (instance->adapterresetinprogress == 1) {
		con_log(CL_ANN1, (CE_NOTE, "io_timeout_checker"
		    " reset in progress"));
		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		return;
	}

	/* See if this check needs to be in the beginning or last in ISR */
	if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
		con_log(CL_ANN1, (CE_NOTE,
		    "Fw Fault state Handling in io_timeout_checker"));
		if (instance->adapterresetinprogress == 0) {
			(void) mrsas_reset_ppc(instance);
		}
		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		return;
	}

	INIT_LIST_HEAD(&process_list);

	/* Walk the pending list looking for the first expired command */
	mutex_enter(&instance->cmd_pend_mtx);
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		if (cmd == NULL) {
			continue;
		}

		if (cmd->sync_cmd == MRSAS_TRUE) {
			/* Internal/sync command: timeout lives in the frame */
			hdr = (struct mrsas_header *)&cmd->frame->hdr;
			if (hdr == NULL) {
				continue;
			}
			time = --hdr->timeout;
		} else {
			/* SCSA packet: timeout tracked in the driver */
			pkt = cmd->pkt;
			if (pkt == NULL) {
				continue;
			}
			time = --cmd->drv_pkt_time;
		}
		if (time <= 0) {
			/*
			 * NOTE(review): for a sync command, pkt is never
			 * assigned in this iteration, so the %p below may
			 * log an uninitialized pointer -- harmless for the
			 * log but worth tidying.
			 */
			con_log(CL_ANN1, (CE_NOTE, "%llx: "
			    "io_timeout_checker: TIMING OUT: pkt "
			    ": %p, cmd %p", gethrtime(), (void *)pkt,
			    (void *)cmd));
			counter++;
			break;
		}
	}
	mutex_exit(&instance->cmd_pend_mtx);

	if (counter) {
		con_log(CL_ANN1, (CE_NOTE,
		    "io_timeout_checker "
		    "cmd->retrycount_for_ocr %d, "
		    "cmd index %d , cmd address %p ",
		    cmd->retry_count_for_ocr+1, cmd->index, (void *)cmd));

		if (instance->disable_online_ctrl_reset == 1) {
			/* Firmware forbids OCR: fail everything outstanding */
			con_log(CL_ANN1, (CE_NOTE, "mrsas: "
			    "OCR is not supported by the Firmware "
			    "Failing all the queued packets \n"));

			(void) mrsas_kill_adapter(instance);
		} else {
			if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
				if (instance->adapterresetinprogress == 0) {
					con_log(CL_ANN1, (CE_NOTE, "mrsas: "
					    "OCR is supported by FW "
					    "triggering mrsas_reset_ppc"));
					(void) mrsas_reset_ppc(instance);
				}
			} else {
				/* Retries exhausted: give up on the HBA */
				con_log(CL_ANN1, (CE_NOTE,
				    "io_timeout_checker:"
				    " cmdindex: %d,cmd address: %p "
				    "timed out even after 3 resets: "
				    "so kill adapter", cmd->index,
				    (void *)cmd));
				(void) mrsas_kill_adapter(instance);
				return;
			}
		}
	}


	/* Re-arm the watchdog while work remains pending */
	if (!mlist_empty(&instance->cmd_pend_list)) {
		con_log(CL_ANN1, (CE_NOTE, "mrsas: "
		    "schedule next timeout check: "
		    "do timeout \n"));
		if (instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id =
			    timeout(io_timeout_checker, (void *)instance,
			    drv_usectohz(MRSAS_1_SECOND));
		}
	}

}

/*
 * read_fw_status_reg_ppc - return the firmware status from outbound
 * scratch pad register 0.
 */
static int
read_fw_status_reg_ppc(struct
    mrsas_instance *instance)
{
	return ((int)RD_OB_SCRATCH_PAD_0(instance));
}

/*
 * issue_cmd_ppc - post a command frame to the firmware via the inbound
 * queue port (fire-and-forget; completion arrives through the ISR).
 */
static void
issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	atomic_add_16(&instance->fw_outstanding, 1);

	pkt = cmd->pkt;
	if (pkt) {
		con_log(CL_ANN1, (CE_CONT, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd:"
		    ": %p instance : %p pkt : %p pkt_time : %x\n",
		    gethrtime(), (void *)cmd, (void *)instance,
		    (void *)pkt, cmd->drv_pkt_time));
		/*
		 * Both arms set drv_pkt_time identically; only the non-reset
		 * path adds the cmd to the pending list for the watchdog.
		 */
		if (instance->adapterresetinprogress) {
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
		} else {
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			push_pending_mfi_pkt(instance, cmd);
		}

		/* NOTE(review): pkt is already known non-NULL here */
		if (pkt) {
			con_log(CL_ANN1, (CE_NOTE,
			    "TO ISSUE: cmd %p index %x "
			    "pkt %p time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
		}
	} else {
		con_log(CL_ANN1, (CE_CONT, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
	}
	/* Issue the command to the FW */
	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
}

/*
 * issue_cmd_in_sync_mode - post a frame and block on the internal-command
 * condition variable until the ISR marks it complete (or the wakeup budget
 * is exhausted).
 */
static int
issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));

	if (instance->adapterresetinprogress) {
		/* During reset: just post the frame, don't wait */
		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
		    "issue and return in reset case\n"));
		WR_IB_QPORT((cmd->frame_phys_addr) |
		    (((cmd->frame_count - 1) << 1) | 1), instance);
		return (DDI_SUCCESS);
	} else {
		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
		push_pending_mfi_pkt(instance, cmd);
	}

	/* Sentinel: ISR overwrites cmd_status on completion */
	cmd->cmd_status = ENODATA;

	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	mutex_enter(&instance->int_cmd_mtx);

	/*
	 * NOTE(review): cv_wait() has no timeout, so i counts condition-
	 * variable wakeups, not milliseconds; the i < (msecs - 1) success
	 * test below therefore conflates wakeup count with elapsed time.
	 * cv_timedwait() would make the bound a real timeout -- confirm
	 * intent.
	 */
	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}

	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));

	if (i < (msecs -1)) {
		return (DDI_SUCCESS);
	} else {
		return (DDI_FAILURE);
	}
}

/*
 * issue_cmd_in_poll_mode - post a frame with DONT_POST_IN_REPLY_QUEUE set
 * and busy-poll cmd_status until the firmware overwrites the poll-mode
 * sentinel, up to MFI_POLL_TIMEOUT_SECS.
 */
static int
issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint16_t flags;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct mrsas_header *frame_hdr;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));

	frame_hdr = (struct mrsas_header *)cmd->frame;
	/* Sentinel value the firmware replaces on completion */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
	flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	/* issue the frame using inbound queue port */
	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	/* wait for cmd_status to change from 0xFF */
	for (i = 0; i < msecs && (
	    ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
	    == MFI_CMD_STATUS_POLL_MODE); i++) {
		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
	}

	if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
	    == MFI_CMD_STATUS_POLL_MODE) {
		con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode: "
		    "cmd polling timed out"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * enable_intr_ppc - clear any latched doorbell state and unmask the
 * reply-message interrupt in the outbound interrupt mask register.
 */
static void
enable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));

	/* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
	WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);

	/* WR_OB_INTR_MASK(~0x80000000, instance); */
	WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
	    "outbound_intr_mask = 0x%x", mask));
}

/*
 * disable_intr_ppc - mask all outbound interrupts from the controller.
 */
static void
disable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
	WR_OB_INTR_MASK(OB_INTR_MASK, instance);

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);
#ifdef lint
	mask = mask;
#endif
}

/*
 * intr_ack_ppc - determine whether the pending interrupt belongs to this
 * controller and acknowledge it; returns DDI_INTR_CLAIMED/UNCLAIMED.
 */
static int
intr_ack_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	int ret = DDI_INTR_CLAIMED;

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));

	/* check if it is our interrupt */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));

	if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
		ret = DDI_INTR_UNCLAIMED;
	}

	/* FMA: a bad register handle also means we can't claim it */
	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		ret = DDI_INTR_UNCLAIMED;
	}

	if (ret == DDI_INTR_UNCLAIMED
    {
        return (ret);
    }
    /* clear the interrupt by writing back the same value */
    WR_OB_DOORBELL_CLEAR(status, instance);

    /* dummy READ to force the PCI flush of the doorbell clear */
    status = RD_OB_INTR_STATUS(instance);

    con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));

    return (ret);
}

/*
 * Marks HBA as bad. This will be called either when an
 * IO packet times out even after 3 FW resets
 * or FW is found to be fault even after 3 continuous resets.
 */

static int
mrsas_kill_adapter(struct mrsas_instance *instance)
{
    /* Already marked dead: nothing more to do. */
    if (instance->deadadapter == 1)
        return (DDI_FAILURE);

    con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
        "Writing to doorbell with MFI_STOP_ADP "));
    mutex_enter(&instance->ocr_flags_mtx);
    instance->deadadapter = 1;
    mutex_exit(&instance->ocr_flags_mtx);
    instance->func_ptr->disable_intr(instance);
    /* Tell the firmware to stop, then fail back everything queued. */
    WR_IB_DOORBELL(MFI_STOP_ADP, instance);
    (void) mrsas_complete_pending_cmds(instance);
    return (DDI_SUCCESS);
}

/*
 * mrsas_reset_ppc - online controller reset (OCR).
 * Writes the diagnostic unlock sequence, asserts DIAG_RESET_ADAPTER,
 * waits for the firmware to become ready again, reinitializes the
 * producer/consumer indexes and MFI state, then replays pending commands
 * and re-registers the AEN command.  Kills the adapter after
 * MAX_FW_RESET_COUNT consecutive post-reset firmware faults.
 */
static int
mrsas_reset_ppc(struct mrsas_instance *instance)
{
    uint32_t status;
    uint32_t retry = 0;
    uint32_t cur_abs_reg_val;
    uint32_t fw_state;

    if (instance->deadadapter == 1) {
        con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
            "no more resets as HBA has been marked dead "));
        return (DDI_FAILURE);
    }
    mutex_enter(&instance->ocr_flags_mtx);
    instance->adapterresetinprogress = 1;
    mutex_exit(&instance->ocr_flags_mtx);
    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress "
        "flag set, time %llx", gethrtime()));
    instance->func_ptr->disable_intr(instance);
retry_reset:
    /* Magic unlock sequence for the diagnostic write-enable register. */
    WR_IB_WRITE_SEQ(0, instance);
    WR_IB_WRITE_SEQ(4, instance);
    WR_IB_WRITE_SEQ(0xb, instance);
    WR_IB_WRITE_SEQ(2, instance);
    WR_IB_WRITE_SEQ(7, instance);
    WR_IB_WRITE_SEQ(0xd, instance);
    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
        "to write sequence register\n"));
    delay(100 * drv_usectohz(MILLISEC));
    status = RD_OB_DRWE(instance);

    while (!(status & DIAG_WRITE_ENABLE)) {
        delay(100 * drv_usectohz(MILLISEC));
        status = RD_OB_DRWE(instance);
        if (retry++ == 100) {
            con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: DRWE bit "
                "check retry count %d\n", retry));
            /*
             * NOTE(review): this path returns with
             * adapterresetinprogress still set — confirm callers
             * recover, or clear the flag before returning.
             */
            return (DDI_FAILURE);
        }
    }
    WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
    delay(100 * drv_usectohz(MILLISEC));
    status = RD_OB_DRWE(instance);
    /* Wait for the reset bit to self-clear; give up after 100 polls. */
    while (status & DIAG_RESET_ADAPTER) {
        delay(100 * drv_usectohz(MILLISEC));
        status = RD_OB_DRWE(instance);
        if (retry++ == 100) {
            (void) mrsas_kill_adapter(instance);
            return (DDI_FAILURE);
        }
    }
    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
        "Calling mfi_state_transition_to_ready"));

    /* Mark HBA as bad, if FW is fault after 3 continuous resets */
    if (mfi_state_transition_to_ready(instance) ||
        debug_fw_faults_after_ocr_g == 1) {
        cur_abs_reg_val =
            instance->func_ptr->read_fw_status_reg(instance);
        fw_state = cur_abs_reg_val & MFI_STATE_MASK;

#ifdef OCRDEBUG
        con_log(CL_ANN1, (CE_NOTE,
            "mrsas_reset_ppc :before fake: FW is not ready "
            "FW state = 0x%x", fw_state));
        /* Debug knob: pretend the firmware faulted after the reset. */
        if (debug_fw_faults_after_ocr_g == 1)
            fw_state = MFI_STATE_FAULT;
#endif

        con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready "
            "FW state = 0x%x", fw_state));

        if (fw_state == MFI_STATE_FAULT) {
            /* increment the count */
            instance->fw_fault_count_after_ocr++;
            if (instance->fw_fault_count_after_ocr
                < MAX_FW_RESET_COUNT) {
                con_log(CL_ANN1, (CE_WARN, "mrsas_reset_ppc: "
                    "FW is in fault after OCR count %d ",
                    instance->fw_fault_count_after_ocr));
                goto retry_reset;

            } else {
                con_log(CL_ANN1, (CE_WARN, "mrsas_reset_ppc: "
                    "Max Reset Count exceeded "
                    "Mark HBA as bad"));
                (void) mrsas_kill_adapter(instance);
                return (DDI_FAILURE);
            }
        }
    }
    /* reset the counter as FW is up after OCR */
    instance->fw_fault_count_after_ocr = 0;

    /* Reset the reply queue producer/consumer indexes. */
    ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
        instance->producer, 0);

    ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
        instance->consumer, 0);

    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
        " after resetting produconsumer chck indexs:"
        "producer %x consumer %x", *instance->producer,
        *instance->consumer));

    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
        "Calling mrsas_issue_init_mfi"));
    (void) mrsas_issue_init_mfi(instance);
    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
        "mrsas_issue_init_mfi Done"));
    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
        "Calling mrsas_print_pending_cmd\n"));
    (void) mrsas_print_pending_cmds(instance);
    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
        "mrsas_print_pending_cmd done\n"));
    instance->func_ptr->enable_intr(instance);
    instance->fw_outstanding = 0;
    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
        "Calling mrsas_issue_pending_cmds"));
    (void) mrsas_issue_pending_cmds(instance);
    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
        "Complete"));
    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
        "Calling aen registration"));
    instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
    con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));
    mutex_enter(&instance->ocr_flags_mtx);
    instance->adapterresetinprogress = 0;
    mutex_exit(&instance->ocr_flags_mtx);
    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
        "adpterresetinprogress flag unset"));
    con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
    return (DDI_SUCCESS);
}
static
int
mrsas_common_check(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
    /*
     * FMA post-I/O check: verify every DMA and register access handle
     * the command touched.  Any failure marks the packet CMD_TRAN_ERR
     * and reports DDI_SERVICE_UNAFFECTED; returns DDI_FAILURE if any
     * handle was bad, DDI_SUCCESS otherwise.
     */
    int ret = DDI_SUCCESS;

    if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
        DDI_SUCCESS) {
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
        if (cmd->pkt != NULL) {
            cmd->pkt->pkt_reason = CMD_TRAN_ERR;
            cmd->pkt->pkt_statistics = 0;
        }
        ret = DDI_FAILURE;
    }
    if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
        != DDI_SUCCESS) {
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
        if (cmd->pkt != NULL) {
            cmd->pkt->pkt_reason = CMD_TRAN_ERR;
            cmd->pkt->pkt_statistics = 0;
        }
        ret = DDI_FAILURE;
    }
    if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
        DDI_SUCCESS) {
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
        if (cmd->pkt != NULL) {
            cmd->pkt->pkt_reason = CMD_TRAN_ERR;
            cmd->pkt->pkt_statistics = 0;
        }
        ret = DDI_FAILURE;
    }
    if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);

        /* Clear the latched access error so later checks start clean. */
        ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);

        if (cmd->pkt != NULL) {
            cmd->pkt->pkt_reason = CMD_TRAN_ERR;
            cmd->pkt->pkt_statistics = 0;
        }
        ret = DDI_FAILURE;
    }

    return (ret);
}

/*
 * mrsas_fm_error_cb - FMA error callback registered via
 * ddi_fm_handler_register().
 */
/*ARGSUSED*/
static int
mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
    /*
     * as the driver can always deal with an error in any dma or
     * access handle, we can just return the fme_status value.
     */
    pci_ereport_post(dip, err, NULL);
    return (err->fme_status);
}

/*
 * mrsas_fm_init - register with IO Fault Services according to the
 * capabilities in instance->fm_capabilities, adjusting the shared access
 * and DMA attributes to the FMA-aware variants.
 */
static void
mrsas_fm_init(struct mrsas_instance *instance)
{
    /* Need to change iblock to priority for new MSI intr */
    ddi_iblock_cookie_t fm_ibc;

    /* Only register with IO Fault Services if we have some capability */
    if (instance->fm_capabilities) {
        /* Adjust access and dma attributes for FMA */
        endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
        mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;

        /*
         * Register capabilities with IO Fault Services.
         * fm_capabilities will be updated to indicate
         * capabilities actually supported (not requested.)
         */

        ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);

        /*
         * Initialize pci ereport capabilities if ereport
         * capable (should always be.)
         */

        if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
            DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
            pci_ereport_setup(instance->dip);
        }

        /*
         * Register error callback if error callback capable.
         */
        if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
            ddi_fm_handler_register(instance->dip,
                mrsas_fm_error_cb, (void*) instance);
        }
    } else {
        /* No FMA: fall back to the default, non-flagging attributes. */
        endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
        mrsas_generic_dma_attr.dma_attr_flags = 0;
    }
}

/*
 * mrsas_fm_fini - undo mrsas_fm_init() in reverse order.
 */
static void
mrsas_fm_fini(struct mrsas_instance *instance)
{
    /* Only unregister FMA capabilities if registered */
    if (instance->fm_capabilities) {
        /*
         * Un-register error callback if error callback capable.
         */
        if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
            ddi_fm_handler_unregister(instance->dip);
        }

        /*
         * Release any resources allocated by pci_ereport_setup()
         */
        if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
            DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
            pci_ereport_teardown(instance->dip);
        }

        /* Unregister from IO Fault Services */
        ddi_fm_fini(instance->dip);

        /* Adjust access and dma attributes for FMA */
        endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
        mrsas_generic_dma_attr.dma_attr_flags = 0;
    }
}

/*
 * mrsas_check_acc_handle - return the FMA status of an access handle,
 * or DDI_FAILURE for a NULL handle.
 */
int
mrsas_check_acc_handle(ddi_acc_handle_t handle)
{
    ddi_fm_error_t de;

    if (handle == NULL) {
        return (DDI_FAILURE);
    }

    ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);

    return (de.fme_status);
}

/*
 * mrsas_check_dma_handle - return the FMA status of a DMA handle,
 * or DDI_FAILURE for a NULL handle.
 */
int
mrsas_check_dma_handle(ddi_dma_handle_t handle)
{
    ddi_fm_error_t de;

    if (handle == NULL) {
        return (DDI_FAILURE);
    }

    ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);

    return (de.fme_status);
}

/*
 * mrsas_fm_ereport - post a "DDI_FM_DEVICE.<detail>" ereport if this
 * instance is ereport-capable.
 */
void
mrsas_fm_ereport(struct mrsas_instance *instance, char *detail)
{
    uint64_t ena;
    char buf[FM_MAX_CLASS];

    (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
    ena = fm_ena_generate(0, FM_ENA_FMT1);
    if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
        ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
            FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
    }
}

/*
 * mrsas_add_intrs - allocate, set up and enable one interrupt of the
 * given type (FIXED/MSI/MSI-X) and attach mrsas_isr as its handler.
 */
static int
mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
{

    dev_info_t *dip = instance->dip;
    int avail, actual, count;
    int i, flag, ret;

    con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: intr_type = %x",
        intr_type));

    /* Get number of interrupts */
    ret = ddi_intr_get_nintrs(dip, intr_type, &count);
    if
    ((ret != DDI_SUCCESS) || (count == 0)) {
        con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
            "ret %d count %d", ret, count));

        return (DDI_FAILURE);
    }

    con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: count = %d ", count));

    /* Get number of available interrupts */
    ret = ddi_intr_get_navail(dip, intr_type, &avail);
    if ((ret != DDI_SUCCESS) || (avail == 0)) {
        con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
            "ret %d avail %d", ret, avail));

        return (DDI_FAILURE);
    }
    con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: avail = %d ", avail));

    /* Only one interrupt routine. So limit the count to 1 */
    if (count > 1) {
        count = 1;
    }

    /*
     * Allocate an array of interrupt handlers. Currently we support
     * only one interrupt. The framework can be extended later.
     */
    instance->intr_size = count * sizeof (ddi_intr_handle_t);
    instance->intr_htable = kmem_zalloc(instance->intr_size, KM_SLEEP);
    ASSERT(instance->intr_htable);

    /* MSI/MSI-X must be all-or-nothing; FIXED can take what it gets. */
    flag = ((intr_type == DDI_INTR_TYPE_MSI) || (intr_type ==
        DDI_INTR_TYPE_MSIX)) ? DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;

    /* Allocate interrupt */
    ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
        count, &actual, flag);

    if ((ret != DDI_SUCCESS) || (actual == 0)) {
        con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
            "avail = %d", avail));
        kmem_free(instance->intr_htable, instance->intr_size);
        return (DDI_FAILURE);
    }
    if (actual < count) {
        con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
            "Requested = %d Received = %d", count, actual));
    }
    instance->intr_cnt = actual;

    /*
     * Get the priority of the interrupt allocated.
     */
    if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
        &instance->intr_pri)) != DDI_SUCCESS) {
        con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
            "get priority call failed"));

        for (i = 0; i < actual; i++) {
            (void) ddi_intr_free(instance->intr_htable[i]);
        }
        kmem_free(instance->intr_htable, instance->intr_size);
        return (DDI_FAILURE);
    }

    /*
     * Test for high level mutex. we don't support them.
     */
    if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
        con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
            "High level interrupts not supported."));

        for (i = 0; i < actual; i++) {
            (void) ddi_intr_free(instance->intr_htable[i]);
        }
        kmem_free(instance->intr_htable, instance->intr_size);
        return (DDI_FAILURE);
    }

    con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
        instance->intr_pri));

    /* Call ddi_intr_add_handler() */
    for (i = 0; i < actual; i++) {
        ret = ddi_intr_add_handler(instance->intr_htable[i],
            (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
            (caddr_t)(uintptr_t)i);

        if (ret != DDI_SUCCESS) {
            con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
                "failed %d", ret));

            /*
             * NOTE(review): this cleanup loop reuses the outer
             * index 'i' and frees handles without removing the
             * handlers already added — confirm this is acceptable
             * for the single-interrupt (actual == 1) case this
             * driver restricts itself to.
             */
            for (i = 0; i < actual; i++) {
                (void) ddi_intr_free(instance->intr_htable[i]);
            }
            kmem_free(instance->intr_htable, instance->intr_size);
            return (DDI_FAILURE);
        }

    }

    con_log(CL_DLEVEL1, (CE_WARN, " ddi_intr_add_handler done"));

    if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
        &instance->intr_cap)) != DDI_SUCCESS) {
        con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
            ret));

        /* Free already allocated intr */
        for (i = 0; i < actual; i++) {
            (void) ddi_intr_remove_handler(
                instance->intr_htable[i]);
            (void) ddi_intr_free(instance->intr_htable[i]);
        }
        kmem_free(instance->intr_htable, instance->intr_size);
6080 return (DDI_FAILURE); 6081 } 6082 6083 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 6084 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable")); 6085 6086 (void) ddi_intr_block_enable(instance->intr_htable, 6087 instance->intr_cnt); 6088 } else { 6089 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable")); 6090 6091 for (i = 0; i < instance->intr_cnt; i++) { 6092 (void) ddi_intr_enable(instance->intr_htable[i]); 6093 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns " 6094 "%d", i)); 6095 } 6096 } 6097 6098 return (DDI_SUCCESS); 6099 6100 } 6101 6102 6103 static void 6104 mrsas_rem_intrs(struct mrsas_instance *instance) 6105 { 6106 int i; 6107 6108 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called")); 6109 6110 /* Disable all interrupts first */ 6111 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 6112 (void) ddi_intr_block_disable(instance->intr_htable, 6113 instance->intr_cnt); 6114 } else { 6115 for (i = 0; i < instance->intr_cnt; i++) { 6116 (void) ddi_intr_disable(instance->intr_htable[i]); 6117 } 6118 } 6119 6120 /* Remove all the handlers */ 6121 6122 for (i = 0; i < instance->intr_cnt; i++) { 6123 (void) ddi_intr_remove_handler(instance->intr_htable[i]); 6124 (void) ddi_intr_free(instance->intr_htable[i]); 6125 } 6126 6127 kmem_free(instance->intr_htable, instance->intr_size); 6128 } 6129 6130 static int 6131 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags, 6132 ddi_bus_config_op_t op, void *arg, dev_info_t **childp) 6133 { 6134 struct mrsas_instance *instance; 6135 int config; 6136 int rval; 6137 6138 char *ptr = NULL; 6139 int tgt, lun; 6140 6141 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op)); 6142 6143 if ((instance = ddi_get_soft_state(mrsas_state, 6144 ddi_get_instance(parent))) == NULL) { 6145 return (NDI_FAILURE); 6146 } 6147 6148 /* Hold nexus during bus_config */ 6149 ndi_devi_enter(parent, &config); 6150 switch (op) { 6151 case BUS_CONFIG_ONE: { 6152 6153 /* parse wwid/target name out of name given */ 6154 
if ((ptr = strchr((char *)arg, '@')) == NULL) { 6155 rval = NDI_FAILURE; 6156 break; 6157 } 6158 ptr++; 6159 6160 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) { 6161 rval = NDI_FAILURE; 6162 break; 6163 } 6164 6165 if (lun == 0) { 6166 rval = mrsas_config_ld(instance, tgt, lun, childp); 6167 } else { 6168 rval = NDI_FAILURE; 6169 } 6170 6171 break; 6172 } 6173 case BUS_CONFIG_DRIVER: 6174 case BUS_CONFIG_ALL: { 6175 6176 rval = mrsas_config_all_devices(instance); 6177 6178 rval = NDI_SUCCESS; 6179 break; 6180 } 6181 } 6182 6183 if (rval == NDI_SUCCESS) { 6184 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 6185 6186 } 6187 ndi_devi_exit(parent, config); 6188 6189 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x", 6190 rval)); 6191 return (rval); 6192 } 6193 6194 static int 6195 mrsas_config_all_devices(struct mrsas_instance *instance) 6196 { 6197 int rval, tgt; 6198 6199 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) { 6200 (void) mrsas_config_ld(instance, tgt, 0, NULL); 6201 6202 } 6203 6204 rval = NDI_SUCCESS; 6205 return (rval); 6206 } 6207 6208 static int 6209 mrsas_parse_devname(char *devnm, int *tgt, int *lun) 6210 { 6211 char devbuf[SCSI_MAXNAMELEN]; 6212 char *addr; 6213 char *p, *tp, *lp; 6214 long num; 6215 6216 /* Parse dev name and address */ 6217 (void) strcpy(devbuf, devnm); 6218 addr = ""; 6219 for (p = devbuf; *p != '\0'; p++) { 6220 if (*p == '@') { 6221 addr = p + 1; 6222 *p = '\0'; 6223 } else if (*p == ':') { 6224 *p = '\0'; 6225 break; 6226 } 6227 } 6228 6229 /* Parse target and lun */ 6230 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 6231 if (*p == ',') { 6232 lp = p + 1; 6233 *p = '\0'; 6234 break; 6235 } 6236 } 6237 if (tgt && tp) { 6238 if (ddi_strtol(tp, NULL, 0x10, &num)) { 6239 return (DDI_FAILURE); /* Can declare this as constant */ 6240 } 6241 *tgt = (int)num; 6242 } 6243 if (lun && lp) { 6244 if (ddi_strtol(lp, NULL, 0x10, &num)) { 6245 return (DDI_FAILURE); 6246 } 6247 *lun = (int)num; 6248 } 6249 return 
(DDI_SUCCESS); /* Success case */ 6250 } 6251 6252 static int 6253 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt, 6254 uint8_t lun, dev_info_t **ldip) 6255 { 6256 struct scsi_device *sd; 6257 dev_info_t *child; 6258 int rval; 6259 6260 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d", 6261 tgt, lun)); 6262 6263 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) { 6264 if (ldip) { 6265 *ldip = child; 6266 } 6267 con_log(CL_ANN1, (CE_NOTE, 6268 "mrsas_config_ld: Child = %p found t = %d l = %d", 6269 (void *)child, tgt, lun)); 6270 return (NDI_SUCCESS); 6271 } 6272 6273 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP); 6274 sd->sd_address.a_hba_tran = instance->tran; 6275 sd->sd_address.a_target = (uint16_t)tgt; 6276 sd->sd_address.a_lun = (uint8_t)lun; 6277 6278 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) 6279 rval = mrsas_config_scsi_device(instance, sd, ldip); 6280 else 6281 rval = NDI_FAILURE; 6282 6283 /* sd_unprobe is blank now. Free buffer manually */ 6284 if (sd->sd_inq) { 6285 kmem_free(sd->sd_inq, SUN_INQSIZE); 6286 sd->sd_inq = (struct scsi_inquiry *)NULL; 6287 } 6288 6289 kmem_free(sd, sizeof (struct scsi_device)); 6290 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: return rval = %d", 6291 rval)); 6292 return (rval); 6293 } 6294 6295 static int 6296 mrsas_config_scsi_device(struct mrsas_instance *instance, 6297 struct scsi_device *sd, dev_info_t **dipp) 6298 { 6299 char *nodename = NULL; 6300 char **compatible = NULL; 6301 int ncompatible = 0; 6302 char *childname; 6303 dev_info_t *ldip = NULL; 6304 int tgt = sd->sd_address.a_target; 6305 int lun = sd->sd_address.a_lun; 6306 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 6307 int rval; 6308 6309 con_log(CL_ANN1, (CE_WARN, "mr_sas: scsi_device t%dL%d", tgt, lun)); 6310 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 6311 NULL, &nodename, &compatible, &ncompatible); 6312 6313 if (nodename == NULL) { 6314 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no 
compatible driver " 6315 "for t%dL%d", tgt, lun)); 6316 rval = NDI_FAILURE; 6317 goto finish; 6318 } 6319 6320 childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename; 6321 con_log(CL_ANN1, (CE_WARN, 6322 "mr_sas: Childname = %2s nodename = %s", childname, nodename)); 6323 6324 /* Create a dev node */ 6325 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip); 6326 con_log(CL_ANN1, (CE_WARN, 6327 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval)); 6328 if (rval == NDI_SUCCESS) { 6329 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) != 6330 DDI_PROP_SUCCESS) { 6331 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 6332 "property for t%dl%d target", tgt, lun)); 6333 rval = NDI_FAILURE; 6334 goto finish; 6335 } 6336 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) != 6337 DDI_PROP_SUCCESS) { 6338 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 6339 "property for t%dl%d lun", tgt, lun)); 6340 rval = NDI_FAILURE; 6341 goto finish; 6342 } 6343 6344 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 6345 "compatible", compatible, ncompatible) != 6346 DDI_PROP_SUCCESS) { 6347 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 6348 "property for t%dl%d compatible", tgt, lun)); 6349 rval = NDI_FAILURE; 6350 goto finish; 6351 } 6352 6353 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 6354 if (rval != NDI_SUCCESS) { 6355 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online " 6356 "t%dl%d", tgt, lun)); 6357 ndi_prop_remove_all(ldip); 6358 (void) ndi_devi_free(ldip); 6359 } else { 6360 con_log(CL_ANN1, (CE_WARN, "mr_sas: online Done :" 6361 "0 t%dl%d", tgt, lun)); 6362 } 6363 6364 } 6365 finish: 6366 if (dipp) { 6367 *dipp = ldip; 6368 } 6369 6370 con_log(CL_DLEVEL1, (CE_WARN, 6371 "mr_sas: config_scsi_device rval = %d t%dL%d", 6372 rval, tgt, lun)); 6373 scsi_hba_nodename_compatible_free(nodename, compatible); 6374 return (rval); 6375 } 6376 6377 /*ARGSUSED*/ 6378 static int 6379 mrsas_service_evt(struct 
mrsas_instance *instance, int tgt, int lun, int event, 6380 uint64_t wwn) 6381 { 6382 struct mrsas_eventinfo *mrevt = NULL; 6383 6384 con_log(CL_ANN1, (CE_NOTE, 6385 "mrsas_service_evt called for t%dl%d event = %d", 6386 tgt, lun, event)); 6387 6388 if ((instance->taskq == NULL) || (mrevt = 6389 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) { 6390 return (ENOMEM); 6391 } 6392 6393 mrevt->instance = instance; 6394 mrevt->tgt = tgt; 6395 mrevt->lun = lun; 6396 mrevt->event = event; 6397 6398 if ((ddi_taskq_dispatch(instance->taskq, 6399 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) != 6400 DDI_SUCCESS) { 6401 con_log(CL_ANN1, (CE_NOTE, 6402 "mr_sas: Event task failed for t%dl%d event = %d", 6403 tgt, lun, event)); 6404 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 6405 return (DDI_FAILURE); 6406 } 6407 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event); 6408 return (DDI_SUCCESS); 6409 } 6410 6411 static void 6412 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt) 6413 { 6414 struct mrsas_instance *instance = mrevt->instance; 6415 dev_info_t *dip, *pdip; 6416 int circ1 = 0; 6417 char *devname; 6418 6419 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for" 6420 " tgt %d lun %d event %d", 6421 mrevt->tgt, mrevt->lun, mrevt->event)); 6422 6423 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) { 6424 dip = instance->mr_ld_list[mrevt->tgt].dip; 6425 } else { 6426 return; 6427 } 6428 6429 ndi_devi_enter(instance->dip, &circ1); 6430 switch (mrevt->event) { 6431 case MRSAS_EVT_CONFIG_TGT: 6432 if (dip == NULL) { 6433 6434 if (mrevt->lun == 0) { 6435 (void) mrsas_config_ld(instance, mrevt->tgt, 6436 0, NULL); 6437 } 6438 con_log(CL_ANN1, (CE_NOTE, 6439 "mr_sas: EVT_CONFIG_TGT called:" 6440 " for tgt %d lun %d event %d", 6441 mrevt->tgt, mrevt->lun, mrevt->event)); 6442 6443 } else { 6444 con_log(CL_ANN1, (CE_NOTE, 6445 "mr_sas: EVT_CONFIG_TGT dip != NULL:" 6446 " for tgt %d lun %d event %d", 6447 mrevt->tgt, 
mrevt->lun, mrevt->event)); 6448 } 6449 break; 6450 case MRSAS_EVT_UNCONFIG_TGT: 6451 if (dip) { 6452 if (i_ddi_devi_attached(dip)) { 6453 6454 pdip = ddi_get_parent(dip); 6455 6456 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP); 6457 (void) ddi_deviname(dip, devname); 6458 6459 (void) devfs_clean(pdip, devname + 1, 6460 DV_CLEAN_FORCE); 6461 kmem_free(devname, MAXNAMELEN + 1); 6462 } 6463 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 6464 con_log(CL_ANN1, (CE_NOTE, 6465 "mr_sas: EVT_UNCONFIG_TGT called:" 6466 " for tgt %d lun %d event %d", 6467 mrevt->tgt, mrevt->lun, mrevt->event)); 6468 } else { 6469 con_log(CL_ANN1, (CE_NOTE, 6470 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:" 6471 " for tgt %d lun %d event %d", 6472 mrevt->tgt, mrevt->lun, mrevt->event)); 6473 } 6474 break; 6475 } 6476 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 6477 ndi_devi_exit(instance->dip, circ1); 6478 } 6479 6480 static int 6481 mrsas_mode_sense_build(struct scsi_pkt *pkt) 6482 { 6483 union scsi_cdb *cdbp; 6484 uint16_t page_code; 6485 struct scsa_cmd *acmd; 6486 struct buf *bp; 6487 struct mode_header *modehdrp; 6488 6489 cdbp = (void *)pkt->pkt_cdbp; 6490 page_code = cdbp->cdb_un.sg.scsi[0]; 6491 acmd = PKT2CMD(pkt); 6492 bp = acmd->cmd_buf; 6493 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) { 6494 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command")); 6495 /* ADD pkt statistics as Command failed. 
*/ 6496 return (NULL); 6497 } 6498 6499 bp_mapin(bp); 6500 bzero(bp->b_un.b_addr, bp->b_bcount); 6501 6502 switch (page_code) { 6503 case 0x3: { 6504 struct mode_format *page3p = NULL; 6505 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 6506 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 6507 6508 page3p = (void *)((caddr_t)modehdrp + 6509 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 6510 page3p->mode_page.code = 0x3; 6511 page3p->mode_page.length = 6512 (uchar_t)(sizeof (struct mode_format)); 6513 page3p->data_bytes_sect = 512; 6514 page3p->sect_track = 63; 6515 break; 6516 } 6517 case 0x4: { 6518 struct mode_geometry *page4p = NULL; 6519 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 6520 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 6521 6522 page4p = (void *)((caddr_t)modehdrp + 6523 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 6524 page4p->mode_page.code = 0x4; 6525 page4p->mode_page.length = 6526 (uchar_t)(sizeof (struct mode_geometry)); 6527 page4p->heads = 255; 6528 page4p->rpm = 10000; 6529 break; 6530 } 6531 default: 6532 break; 6533 } 6534 return (NULL); 6535 } 6536