/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2005-08 Adaptec, Inc.
 * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/devops.h>
#include <sys/pci.h>
#include <sys/types.h>
#include <sys/ddidmareq.h>
#include <sys/scsi/scsi.h>
#include <sys/ksynch.h>
#include <sys/sunddi.h>
#include <sys/byteorder.h>
#include "aac_regs.h"
#include "aac.h"

/*
 * FMA header files
 */
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>

/*
 * For minor nodes created by the SCSA framework, minor numbers are
 * formed by left-shifting the instance by INST_MINOR_SHIFT and ORing
 * in a number less than 64.
 *
 * To support cfgadm, we need to conform to the SCSA framework by
 * creating devctl/scsi and driver-specific minor nodes in SCSA format,
 * and by calling the scsi_hba_xxx() functions accordingly.
 */

#define	AAC_MINOR		32
#define	INST2AAC(x)		(((x) << INST_MINOR_SHIFT) | AAC_MINOR)
#define	AAC_SCSA_MINOR(x)	((x) & TRAN_MINOR_MASK)
#define	AAC_IS_SCSA_NODE(x)	((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)

#define	SD2TRAN(sd)		((sd)->sd_address.a_hba_tran)
#define	AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
#define	AAC_DIP2TRAN(dip)	((scsi_hba_tran_t *)ddi_get_driver_private(dip))
#define	AAC_DIP2SOFTS(dip)	(AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
#define	SD2AAC(sd)		(AAC_TRAN2SOFTS(SD2TRAN(sd)))
#define	AAC_PD(t)		((t) - AAC_MAX_LD)
#define	AAC_DEV(softs, t)	(((t) < AAC_MAX_LD) ? \
	&(softs)->containers[(t)].dev : \
	((t) < AAC_MAX_DEV(softs)) ?
\ 81 &(softs)->nondasds[AAC_PD(t)].dev : NULL) 82 #define AAC_DEVCFG_BEGIN(softs, tgt) \ 83 aac_devcfg((softs), (tgt), 1) 84 #define AAC_DEVCFG_END(softs, tgt) \ 85 aac_devcfg((softs), (tgt), 0) 86 #define PKT2AC(pkt) ((struct aac_cmd *)(pkt)->pkt_ha_private) 87 #define AAC_BUSYWAIT(cond, timeout /* in millisecond */) { \ 88 if (!(cond)) { \ 89 int count = (timeout) * 10; \ 90 while (count) { \ 91 drv_usecwait(100); \ 92 if (cond) \ 93 break; \ 94 count--; \ 95 } \ 96 (timeout) = (count + 9) / 10; \ 97 } \ 98 } 99 100 #define AAC_SENSE_DATA_DESCR_LEN \ 101 (sizeof (struct scsi_descr_sense_hdr) + \ 102 sizeof (struct scsi_information_sense_descr)) 103 #define AAC_ARQ64_LENGTH \ 104 (sizeof (struct scsi_arq_status) + \ 105 AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH) 106 107 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 108 #define AAC_GETGXADDR(cmdlen, cdbp) \ 109 ((cmdlen == 6) ? GETG0ADDR(cdbp) : \ 110 (cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \ 111 ((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp)) 112 113 #define AAC_CDB_INQUIRY_CMDDT 0x02 114 #define AAC_CDB_INQUIRY_EVPD 0x01 115 #define AAC_VPD_PAGE_CODE 1 116 #define AAC_VPD_PAGE_LENGTH 3 117 #define AAC_VPD_PAGE_DATA 4 118 #define AAC_VPD_ID_CODESET 0 119 #define AAC_VPD_ID_TYPE 1 120 #define AAC_VPD_ID_LENGTH 3 121 #define AAC_VPD_ID_DATA 4 122 123 #define AAC_SCSI_RPTLUNS_HEAD_SIZE 0x08 124 #define AAC_SCSI_RPTLUNS_ADDR_SIZE 0x08 125 #define AAC_SCSI_RPTLUNS_ADDR_MASK 0xC0 126 /* 00b - peripheral device addressing method */ 127 #define AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL 0x00 128 /* 01b - flat space addressing method */ 129 #define AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE 0x40 130 /* 10b - logical unit addressing method */ 131 #define AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT 0x80 132 133 /* Return the size of FIB with data part type data_type */ 134 #define AAC_FIB_SIZEOF(data_type) \ 135 (sizeof (struct aac_fib_header) + sizeof (data_type)) 136 /* Return the container size defined in mir */ 137 #define AAC_MIR_SIZE(softs, acc, mir) \ 138 (((softs)->flags & AAC_FLAGS_LBA_64BIT) ? 
\ 139 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \ 140 ((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \ 141 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity)) 142 143 /* The last entry of aac_cards[] is for unknown cards */ 144 #define AAC_UNKNOWN_CARD \ 145 (sizeof (aac_cards) / sizeof (struct aac_card_type) - 1) 146 #define CARD_IS_UNKNOWN(i) (i == AAC_UNKNOWN_CARD) 147 #define BUF_IS_READ(bp) ((bp)->b_flags & B_READ) 148 #define AAC_IS_Q_EMPTY(q) ((q)->q_head == NULL) 149 #define AAC_CMDQ(acp) (!((acp)->flags & AAC_CMD_SYNC)) 150 151 #define PCI_MEM_GET32(softs, off) \ 152 ddi_get32((softs)->pci_mem_handle, \ 153 (void *)((softs)->pci_mem_base_vaddr + (off))) 154 #define PCI_MEM_PUT32(softs, off, val) \ 155 ddi_put32((softs)->pci_mem_handle, \ 156 (void *)((softs)->pci_mem_base_vaddr + (off)), \ 157 (uint32_t)(val)) 158 #define PCI_MEM_GET16(softs, off) \ 159 ddi_get16((softs)->pci_mem_handle, \ 160 (void *)((softs)->pci_mem_base_vaddr + (off))) 161 #define PCI_MEM_PUT16(softs, off, val) \ 162 ddi_put16((softs)->pci_mem_handle, \ 163 (void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val)) 164 /* Write host data at valp to device mem[off] repeatedly count times */ 165 #define PCI_MEM_REP_PUT8(softs, off, valp, count) \ 166 ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \ 167 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \ 168 count, DDI_DEV_AUTOINCR) 169 /* Read device data at mem[off] to host addr valp repeatedly count times */ 170 #define PCI_MEM_REP_GET8(softs, off, valp, count) \ 171 ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \ 172 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \ 173 count, DDI_DEV_AUTOINCR) 174 #define AAC_GET_FIELD8(acc, d, s, field) \ 175 (d)->field = ddi_get8(acc, (uint8_t *)&(s)->field) 176 #define AAC_GET_FIELD32(acc, d, s, field) \ 177 (d)->field = ddi_get32(acc, (uint32_t *)&(s)->field) 178 #define AAC_GET_FIELD64(acc, d, s, field) \ 179 (d)->field = ddi_get64(acc, (uint64_t *)&(s)->field) 180 #define AAC_REP_GET_FIELD8(acc, d, s, field, r) \ 181 ddi_rep_get8((acc), (uint8_t *)&(d)->field, \ 182 (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR) 183 #define AAC_REP_GET_FIELD32(acc, d, s, field, r) \ 184 ddi_rep_get32((acc), (uint32_t *)&(d)->field, \ 185 (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR) 186 187 #define AAC_ENABLE_INTR(softs) { \ 188 if (softs->flags & AAC_FLAGS_NEW_COMM) \ 189 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \ 190 else \ 191 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \ 192 softs->state |= AAC_STATE_INTR; \ 193 } 194 195 #define AAC_DISABLE_INTR(softs) { \ 196 PCI_MEM_PUT32(softs, AAC_OIMR, ~0); \ 197 softs->state &= ~AAC_STATE_INTR; \ 198 } 199 #define AAC_STATUS_CLR(softs, mask) PCI_MEM_PUT32(softs, AAC_ODBR, mask) 200 #define AAC_STATUS_GET(softs) PCI_MEM_GET32(softs, AAC_ODBR) 201 #define AAC_NOTIFY(softs, val) PCI_MEM_PUT32(softs, AAC_IDBR, val) 202 #define AAC_OUTB_GET(softs) PCI_MEM_GET32(softs, AAC_OQUE) 203 #define AAC_OUTB_SET(softs, val) PCI_MEM_PUT32(softs, AAC_OQUE, val) 204 #define AAC_FWSTATUS_GET(softs) \ 205 ((softs)->aac_if.aif_get_fwstatus(softs)) 206 #define AAC_MAILBOX_GET(softs, mb) \ 207 ((softs)->aac_if.aif_get_mailbox((softs), (mb))) 208 #define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \ 209 ((softs)->aac_if.aif_set_mailbox((softs), (cmd), \ 210 (arg0), (arg1), (arg2), (arg3))) 211 212 #define AAC_MGT_SLOT_NUM 2 213 #define AAC_THROTTLE_DRAIN -1 214 215 #define AAC_QUIESCE_TICK 1 /* 1 second */ 216 #define 
AAC_QUIESCE_TIMEOUT 180 /* 180 seconds */ 217 #define AAC_DEFAULT_TICK 10 /* 10 seconds */ 218 #define AAC_SYNC_TICK (30*60) /* 30 minutes */ 219 220 /* Poll time for aac_do_poll_io() */ 221 #define AAC_POLL_TIME 60 /* 60 seconds */ 222 223 /* IOP reset */ 224 #define AAC_IOP_RESET_SUCCEED 0 /* IOP reset succeed */ 225 #define AAC_IOP_RESET_FAILED -1 /* IOP reset failed */ 226 #define AAC_IOP_RESET_ABNORMAL -2 /* Reset operation abnormal */ 227 228 /* 229 * Hardware access functions 230 */ 231 static int aac_rx_get_fwstatus(struct aac_softstate *); 232 static int aac_rx_get_mailbox(struct aac_softstate *, int); 233 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t, 234 uint32_t, uint32_t, uint32_t); 235 static int aac_rkt_get_fwstatus(struct aac_softstate *); 236 static int aac_rkt_get_mailbox(struct aac_softstate *, int); 237 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t, 238 uint32_t, uint32_t, uint32_t); 239 240 /* 241 * SCSA function prototypes 242 */ 243 static int aac_attach(dev_info_t *, ddi_attach_cmd_t); 244 static int aac_detach(dev_info_t *, ddi_detach_cmd_t); 245 static int aac_reset(dev_info_t *, ddi_reset_cmd_t); 246 static int aac_quiesce(dev_info_t *); 247 static int aac_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **); 248 249 /* 250 * Interrupt handler functions 251 */ 252 static int aac_query_intrs(struct aac_softstate *, int); 253 static int aac_add_intrs(struct aac_softstate *); 254 static void aac_remove_intrs(struct aac_softstate *); 255 static int aac_enable_intrs(struct aac_softstate *); 256 static int aac_disable_intrs(struct aac_softstate *); 257 static uint_t aac_intr_old(caddr_t); 258 static uint_t aac_intr_new(caddr_t); 259 static uint_t aac_softintr(caddr_t); 260 261 /* 262 * Internal functions in attach 263 */ 264 static int aac_check_card_type(struct aac_softstate *); 265 static int aac_check_firmware(struct aac_softstate *); 266 static int aac_common_attach(struct aac_softstate *); 267 static void aac_common_detach(struct aac_softstate *); 268 static int aac_probe_containers(struct aac_softstate *); 269 static int aac_alloc_comm_space(struct aac_softstate *); 270 static int aac_setup_comm_space(struct aac_softstate *); 271 static void aac_free_comm_space(struct aac_softstate *); 272 static int aac_hba_setup(struct aac_softstate *); 273 274 /* 275 * Sync FIB operation functions 276 */ 277 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t, 278 uint32_t, uint32_t, uint32_t, uint32_t *); 279 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t); 280 281 /* 282 * Command queue operation functions 283 */ 284 static void aac_cmd_initq(struct aac_cmd_queue *); 285 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *); 286 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *); 287 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *); 288 289 /* 290 * FIB queue operation functions 291 */ 292 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t); 293 static int aac_fib_dequeue(struct aac_softstate *, int, int *); 294 295 /* 296 * Slot operation functions 297 */ 298 static int aac_create_slots(struct aac_softstate *); 299 static void aac_destroy_slots(struct aac_softstate *); 300 static void aac_alloc_fibs(struct aac_softstate *); 301 static void aac_destroy_fibs(struct aac_softstate *); 302 static struct aac_slot *aac_get_slot(struct aac_softstate *); 303 static void aac_release_slot(struct aac_softstate *, struct 
aac_slot *); 304 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *); 305 static void aac_free_fib(struct aac_slot *); 306 307 /* 308 * Internal functions 309 */ 310 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_cmd *, 311 uint16_t); 312 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *); 313 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *); 314 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *); 315 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *); 316 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *); 317 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *); 318 static void aac_cmd_fib_startstop(struct aac_softstate *, struct aac_cmd *); 319 static void aac_start_waiting_io(struct aac_softstate *); 320 static void aac_drain_comp_q(struct aac_softstate *); 321 int aac_do_io(struct aac_softstate *, struct aac_cmd *); 322 static int aac_sync_fib_slot_bind(struct aac_softstate *, struct aac_cmd *); 323 static void aac_sync_fib_slot_release(struct aac_softstate *, struct aac_cmd *); 324 static void aac_start_io(struct aac_softstate *, struct aac_cmd *); 325 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *); 326 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *); 327 static int aac_send_command(struct aac_softstate *, struct aac_slot *); 328 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *); 329 static int aac_dma_sync_ac(struct aac_cmd *); 330 static int aac_shutdown(struct aac_softstate *); 331 static int aac_reset_adapter(struct aac_softstate *); 332 static int aac_do_quiesce(struct aac_softstate *softs); 333 static int aac_do_unquiesce(struct aac_softstate *softs); 334 static void aac_unhold_bus(struct aac_softstate *, int); 335 static void aac_set_throttle(struct aac_softstate *, struct aac_device *, 336 int, int); 337 338 /* 339 * Adapter Initiated FIB handling function 340 */ 341 static void aac_save_aif(struct aac_softstate *, ddi_acc_handle_t, 342 struct aac_fib *, int); 343 static int aac_handle_aif(struct aac_softstate *, struct aac_aif_command *); 344 345 /* 346 * Event handling related functions 347 */ 348 static void aac_timer(void *); 349 static void aac_event_thread(struct aac_softstate *); 350 static void aac_event_disp(struct aac_softstate *, int); 351 352 /* 353 * IOCTL interface related functions 354 */ 355 static int aac_open(dev_t *, int, int, cred_t *); 356 static int aac_close(dev_t, int, int, cred_t *); 357 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *); 358 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int); 359 360 /* 361 * FMA Prototypes 362 */ 363 static void aac_fm_init(struct aac_softstate *); 364 static void aac_fm_fini(struct aac_softstate *); 365 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *); 366 int aac_check_acc_handle(ddi_acc_handle_t); 367 int aac_check_dma_handle(ddi_dma_handle_t); 368 void aac_fm_ereport(struct aac_softstate *, char *); 369 370 /* 371 * Auto enumeration functions 372 */ 373 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t); 374 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t, 375 void *, dev_info_t **); 376 static int aac_handle_dr(struct aac_softstate *, int, int, int); 377 378 extern pri_t minclsyspri; 379 380 #ifdef DEBUG 381 /* 382 * UART debug output support 383 */ 384 385 #define 
AAC_PRINT_BUFFER_SIZE 512 386 #define AAC_PRINT_TIMEOUT 250 /* 1/4 sec. = 250 msec. */ 387 388 #define AAC_FW_DBG_STRLEN_OFFSET 0x00 389 #define AAC_FW_DBG_FLAGS_OFFSET 0x04 390 #define AAC_FW_DBG_BLED_OFFSET 0x08 391 392 static int aac_get_fw_debug_buffer(struct aac_softstate *); 393 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *); 394 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *); 395 396 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE]; 397 static char aac_fmt[] = " %s"; 398 static char aac_fmt_header[] = " %s.%d: %s"; 399 static kmutex_t aac_prt_mutex; 400 401 /* 402 * Debug flags to be put into the softstate flags field 403 * when initialized 404 */ 405 uint32_t aac_debug_flags = 406 /* AACDB_FLAGS_KERNEL_PRINT | */ 407 /* AACDB_FLAGS_FW_PRINT | */ 408 /* AACDB_FLAGS_MISC | */ 409 /* AACDB_FLAGS_FUNC1 | */ 410 /* AACDB_FLAGS_FUNC2 | */ 411 /* AACDB_FLAGS_SCMD | */ 412 /* AACDB_FLAGS_AIF | */ 413 /* AACDB_FLAGS_FIB | */ 414 /* AACDB_FLAGS_IOCTL | */ 415 0; 416 uint32_t aac_debug_fib_flags = 417 /* AACDB_FLAGS_FIB_RW | */ 418 /* AACDB_FLAGS_FIB_IOCTL | */ 419 /* AACDB_FLAGS_FIB_SRB | */ 420 /* AACDB_FLAGS_FIB_SYNC | */ 421 /* AACDB_FLAGS_FIB_HEADER | */ 422 /* AACDB_FLAGS_FIB_TIMEOUT | */ 423 0; 424 425 #endif /* DEBUG */ 426 427 static struct cb_ops aac_cb_ops = { 428 aac_open, /* open */ 429 aac_close, /* close */ 430 nodev, /* strategy */ 431 nodev, /* print */ 432 nodev, /* dump */ 433 nodev, /* read */ 434 nodev, /* write */ 435 aac_ioctl, /* ioctl */ 436 nodev, /* devmap */ 437 nodev, /* mmap */ 438 nodev, /* segmap */ 439 nochpoll, /* poll */ 440 ddi_prop_op, /* cb_prop_op */ 441 NULL, /* streamtab */ 442 D_64BIT | D_NEW | D_MP | D_HOTPLUG, /* cb_flag */ 443 CB_REV, /* cb_rev */ 444 nodev, /* async I/O read entry point */ 445 nodev /* async I/O write entry point */ 446 }; 447 448 static struct dev_ops aac_dev_ops = { 449 DEVO_REV, 450 0, 451 aac_getinfo, 452 nulldev, 453 nulldev, 454 aac_attach, 455 aac_detach, 456 aac_reset, 457 &aac_cb_ops, 458 NULL, 459 NULL, 460 aac_quiesce, 461 }; 462 463 static struct modldrv aac_modldrv = { 464 &mod_driverops, 465 "AAC Driver " AAC_DRIVER_VERSION, 466 &aac_dev_ops, 467 }; 468 469 static struct modlinkage aac_modlinkage = { 470 MODREV_1, 471 &aac_modldrv, 472 NULL 473 }; 474 475 static struct aac_softstate *aac_softstatep; 476 477 /* 478 * Supported card list 479 * ordered in vendor id, subvendor id, subdevice id, and device id 480 */ 481 static struct aac_card_type aac_cards[] = { 482 {0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX, 483 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 484 "Dell", "PERC 3/Di"}, 485 {0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX, 486 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 487 "Dell", "PERC 3/Di"}, 488 {0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX, 489 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 490 "Dell", "PERC 3/Si"}, 491 {0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX, 492 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 493 "Dell", "PERC 3/Di"}, 494 {0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX, 495 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 496 "Dell", "PERC 3/Si"}, 497 {0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX, 498 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 499 "Dell", "PERC 3/Di"}, 500 {0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX, 501 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 502 "Dell", "PERC 3/Di"}, 503 {0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX, 504 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 505 "Dell", "PERC 3/Di"}, 506 
{0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX, 507 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 508 "Dell", "PERC 3/Di"}, 509 {0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX, 510 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI, 511 "Dell", "PERC 3/Di"}, 512 {0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX, 513 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 514 "Dell", "PERC 320/DC"}, 515 {0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX, 516 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"}, 517 518 {0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX, 519 0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"}, 520 {0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX, 521 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"}, 522 {0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT, 523 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"}, 524 525 {0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX, 526 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"}, 527 {0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX, 528 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"}, 529 530 {0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX, 531 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 532 "Adaptec", "2200S"}, 533 {0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX, 534 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 535 "Adaptec", "2120S"}, 536 {0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX, 537 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI, 538 "Adaptec", "2200S"}, 539 {0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX, 540 0, AAC_TYPE_SCSI, "Adaptec", "3230S"}, 541 {0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX, 542 0, AAC_TYPE_SCSI, "Adaptec", "3240S"}, 543 {0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX, 544 0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"}, 545 {0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX, 546 0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"}, 547 {0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT, 548 0, AAC_TYPE_SCSI, "Adaptec", "2230S"}, 549 {0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT, 550 0, AAC_TYPE_SCSI, "Adaptec", "2130S"}, 551 {0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX, 552 0, AAC_TYPE_SATA, "Adaptec", "2020SA"}, 553 {0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX, 554 0, AAC_TYPE_SATA, "Adaptec", "2025SA"}, 555 {0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX, 556 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"}, 557 {0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX, 558 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"}, 559 {0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX, 560 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"}, 561 {0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX, 562 0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"}, 563 {0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX, 564 0, AAC_TYPE_SCSI, "Adaptec", "2240S"}, 565 {0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX, 566 0, AAC_TYPE_SAS, "Adaptec", "4005SAS"}, 567 {0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX, 568 0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"}, 569 {0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX, 570 0, AAC_TYPE_SAS, "Adaptec", "4800SAS"}, 571 {0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX, 572 0, AAC_TYPE_SAS, "Adaptec", "4805SAS"}, 573 {0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT, 574 0, AAC_TYPE_SATA, "Adaptec", "2820SA"}, 575 {0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT, 576 0, AAC_TYPE_SATA, "Adaptec", "2620SA"}, 577 {0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT, 578 0, AAC_TYPE_SATA, "Adaptec", "2420SA"}, 579 {0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT, 580 0, AAC_TYPE_SATA, "ICP", "9024RO"}, 581 {0x9005, 0x286, 0x9005, 0x29f, 
AAC_HWIF_RKT, 582 0, AAC_TYPE_SATA, "ICP", "9014RO"}, 583 {0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT, 584 0, AAC_TYPE_SATA, "ICP", "9047MA"}, 585 {0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT, 586 0, AAC_TYPE_SATA, "ICP", "9087MA"}, 587 {0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX, 588 0, AAC_TYPE_SAS, "ICP", "9085LI"}, 589 {0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX, 590 0, AAC_TYPE_SAS, "ICP", "5085BR"}, 591 {0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT, 592 0, AAC_TYPE_SATA, "ICP", "9067MA"}, 593 {0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX, 594 0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"}, 595 {0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX, 596 0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"}, 597 {0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX, 598 0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"}, 599 {0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX, 600 0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"}, 601 {0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX, 602 0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"}, 603 {0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX, 604 0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"}, 605 606 {0, 0, 0, 0, AAC_HWIF_UNKNOWN, 607 0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"}, 608 }; 609 610 /* 611 * Hardware access functions for i960 based cards 612 */ 613 static struct aac_interface aac_rx_interface = { 614 aac_rx_get_fwstatus, 615 aac_rx_get_mailbox, 616 aac_rx_set_mailbox 617 }; 618 619 /* 620 * Hardware access functions for Rocket based cards 621 */ 622 static struct aac_interface aac_rkt_interface = { 623 aac_rkt_get_fwstatus, 624 aac_rkt_get_mailbox, 625 aac_rkt_set_mailbox 626 }; 627 628 ddi_device_acc_attr_t aac_acc_attr = { 629 DDI_DEVICE_ATTR_V1, 630 DDI_STRUCTURE_LE_ACC, 631 DDI_STRICTORDER_ACC, 632 DDI_DEFAULT_ACC 633 }; 634 635 static struct { 636 int size; 637 int notify; 638 } aac_qinfo[] = { 639 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, 640 {AAC_HOST_HIGH_CMD_ENTRIES, 0}, 641 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, 642 {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, 643 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, 644 {AAC_HOST_HIGH_RESP_ENTRIES, 0}, 645 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, 646 {AAC_ADAP_HIGH_RESP_ENTRIES, 0} 647 }; 648 649 /* 650 * Default aac dma attributes 651 */ 652 static ddi_dma_attr_t aac_dma_attr = { 653 DMA_ATTR_V0, 654 0, /* lowest usable address */ 655 0xffffffffull, /* high DMA address range */ 656 0xffffffffull, /* DMA counter register */ 657 AAC_DMA_ALIGN, /* DMA address alignment */ 658 1, /* DMA burstsizes */ 659 1, /* min effective DMA size */ 660 0xffffffffull, /* max DMA xfer size */ 661 0xffffffffull, /* segment boundary */ 662 1, /* s/g list length */ 663 AAC_BLK_SIZE, /* granularity of device */ 664 0 /* DMA transfer flags */ 665 }; 666 667 static int aac_tick = AAC_DEFAULT_TICK; /* tick for the internal timer */ 668 static uint32_t aac_timebase = 0; /* internal timer in seconds */ 669 670 /* 671 * Warlock directives 672 * 673 * Different variables with the same types have to be protected by the 674 * same mutex; otherwise, warlock will complain with "variables don't 675 * seem to be protected consistently". For example, 676 * aac_softstate::{q_wait, q_comp} are type of aac_cmd_queue, and protected 677 * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to 678 * declare them as protected explictly at aac_cmd_dequeue(). 
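 *
 * The SCHEME_PROTECTS_DATA() notes below tell warlock that the listed
 * structures are serialized by some scheme other than a mutex (for
 * example, exclusive per-packet or per-FIB access), so no particular
 * lock needs to be asserted for them.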
679 */ 680 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \ 681 scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \ 682 mode_format mode_geometry mode_header aac_cmd)) 683 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \ 684 aac_sge)) 685 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \ 686 aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \ 687 aac_sg_table aac_srb)) 688 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry)) 689 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address)) 690 _NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf)) 691 692 int 693 _init(void) 694 { 695 int rval = 0; 696 697 #ifdef DEBUG 698 mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL); 699 #endif 700 DBCALLED(NULL, 1); 701 702 if ((rval = ddi_soft_state_init((void *)&aac_softstatep, 703 sizeof (struct aac_softstate), 0)) != 0) 704 goto error; 705 706 if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) { 707 ddi_soft_state_fini((void *)&aac_softstatep); 708 goto error; 709 } 710 711 if ((rval = mod_install(&aac_modlinkage)) != 0) { 712 ddi_soft_state_fini((void *)&aac_softstatep); 713 scsi_hba_fini(&aac_modlinkage); 714 goto error; 715 } 716 return (rval); 717 718 error: 719 AACDB_PRINT(NULL, CE_WARN, "Mod init error!"); 720 #ifdef DEBUG 721 mutex_destroy(&aac_prt_mutex); 722 #endif 723 return (rval); 724 } 725 726 int 727 _info(struct modinfo *modinfop) 728 { 729 DBCALLED(NULL, 1); 730 return (mod_info(&aac_modlinkage, modinfop)); 731 } 732 733 /* 734 * An HBA driver cannot be unload unless you reboot, 735 * so this function will be of no use. 736 */ 737 int 738 _fini(void) 739 { 740 int rval; 741 742 DBCALLED(NULL, 1); 743 744 if ((rval = mod_remove(&aac_modlinkage)) != 0) 745 goto error; 746 747 scsi_hba_fini(&aac_modlinkage); 748 ddi_soft_state_fini((void *)&aac_softstatep); 749 #ifdef DEBUG 750 mutex_destroy(&aac_prt_mutex); 751 #endif 752 return (0); 753 754 error: 755 AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!"); 756 return (rval); 757 } 758 759 static int 760 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 761 { 762 int instance, i; 763 struct aac_softstate *softs = NULL; 764 int attach_state = 0; 765 char *data; 766 767 DBCALLED(NULL, 1); 768 769 switch (cmd) { 770 case DDI_ATTACH: 771 break; 772 case DDI_RESUME: 773 return (DDI_FAILURE); 774 default: 775 return (DDI_FAILURE); 776 } 777 778 instance = ddi_get_instance(dip); 779 780 /* Get soft state */ 781 if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) { 782 AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state"); 783 goto error; 784 } 785 softs = ddi_get_soft_state(aac_softstatep, instance); 786 attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED; 787 788 softs->instance = instance; 789 softs->devinfo_p = dip; 790 softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr; 791 softs->addr_dma_attr.dma_attr_granular = 1; 792 softs->acc_attr = aac_acc_attr; 793 softs->reg_attr = aac_acc_attr; 794 softs->card = AAC_UNKNOWN_CARD; 795 #ifdef DEBUG 796 softs->debug_flags = aac_debug_flags; 797 softs->debug_fib_flags = aac_debug_fib_flags; 798 #endif 799 800 /* Initialize FMA */ 801 aac_fm_init(softs); 802 803 /* Check the card type */ 804 if (aac_check_card_type(softs) == AACERR) { 805 AACDB_PRINT(softs, CE_WARN, "Card not supported"); 806 goto error; 807 } 808 /* We have found the right card and everything is OK */ 809 attach_state |= AAC_ATTACH_CARD_DETECTED; 810 811 
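	/*
	 * Each AAC_ATTACH_* bit recorded in attach_state marks a resource
	 * that has been set up, so the error path at the end of
	 * aac_attach() tears down only what was actually allocated.
	 */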
/* Map PCI mem space */ 812 if (ddi_regs_map_setup(dip, 1, 813 (caddr_t *)&softs->pci_mem_base_vaddr, 0, 814 softs->map_size_min, &softs->reg_attr, 815 &softs->pci_mem_handle) != DDI_SUCCESS) 816 goto error; 817 818 softs->map_size = softs->map_size_min; 819 attach_state |= AAC_ATTACH_PCI_MEM_MAPPED; 820 821 AAC_DISABLE_INTR(softs); 822 823 /* Init mutexes and condvars */ 824 mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER, 825 DDI_INTR_PRI(softs->intr_pri)); 826 mutex_init(&softs->q_comp_mutex, NULL, MUTEX_DRIVER, 827 DDI_INTR_PRI(softs->intr_pri)); 828 mutex_init(&softs->time_mutex, NULL, MUTEX_DRIVER, 829 DDI_INTR_PRI(softs->intr_pri)); 830 mutex_init(&softs->ev_lock, NULL, MUTEX_DRIVER, 831 DDI_INTR_PRI(softs->intr_pri)); 832 mutex_init(&softs->aifq_mutex, NULL, 833 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri)); 834 cv_init(&softs->event, NULL, CV_DRIVER, NULL); 835 cv_init(&softs->sync_fib_cv, NULL, CV_DRIVER, NULL); 836 cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL); 837 cv_init(&softs->event_wait_cv, NULL, CV_DRIVER, NULL); 838 cv_init(&softs->event_disp_cv, NULL, CV_DRIVER, NULL); 839 cv_init(&softs->aifq_cv, NULL, CV_DRIVER, NULL); 840 attach_state |= AAC_ATTACH_KMUTEX_INITED; 841 842 /* Init the cmd queues */ 843 for (i = 0; i < AAC_CMDQ_NUM; i++) 844 aac_cmd_initq(&softs->q_wait[i]); 845 aac_cmd_initq(&softs->q_busy); 846 aac_cmd_initq(&softs->q_comp); 847 848 /* Check for legacy device naming support */ 849 softs->legacy = 1; /* default to use legacy name */ 850 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 851 "legacy-name-enable", &data) == DDI_SUCCESS)) { 852 if (strcmp(data, "no") == 0) { 853 AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled"); 854 softs->legacy = 0; 855 } 856 ddi_prop_free(data); 857 } 858 859 /* 860 * Everything has been set up till now, 861 * we will do some common attach. 862 */ 863 mutex_enter(&softs->io_lock); 864 if (aac_common_attach(softs) == AACERR) { 865 mutex_exit(&softs->io_lock); 866 goto error; 867 } 868 mutex_exit(&softs->io_lock); 869 attach_state |= AAC_ATTACH_COMM_SPACE_SETUP; 870 871 /* Check for buf breakup support */ 872 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 873 "breakup-enable", &data) == DDI_SUCCESS)) { 874 if (strcmp(data, "yes") == 0) { 875 AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled"); 876 softs->flags |= AAC_FLAGS_BRKUP; 877 } 878 ddi_prop_free(data); 879 } 880 softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer; 881 if (softs->flags & AAC_FLAGS_BRKUP) { 882 softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 883 DDI_PROP_DONTPASS, "dma-max", softs->dma_max); 884 } 885 886 if (aac_hba_setup(softs) != AACOK) 887 goto error; 888 attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP; 889 890 /* Create devctl/scsi nodes for cfgadm */ 891 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 892 INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) { 893 AACDB_PRINT(softs, CE_WARN, "failed to create devctl node"); 894 goto error; 895 } 896 attach_state |= AAC_ATTACH_CREATE_DEVCTL; 897 898 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance), 899 DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 900 AACDB_PRINT(softs, CE_WARN, "failed to create scsi node"); 901 goto error; 902 } 903 attach_state |= AAC_ATTACH_CREATE_SCSI; 904 905 /* Create aac node for app. 
to issue ioctls */ 906 if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance), 907 DDI_PSEUDO, 0) != DDI_SUCCESS) { 908 AACDB_PRINT(softs, CE_WARN, "failed to create aac node"); 909 goto error; 910 } 911 912 /* Common attach is OK, so we are attached! */ 913 softs->state |= AAC_STATE_RUN; 914 915 /* Create event thread */ 916 softs->fibctx_p = &softs->aifctx; 917 if ((softs->event_thread = thread_create(NULL, 0, aac_event_thread, 918 softs, 0, &p0, TS_RUN, minclsyspri)) == NULL) { 919 AACDB_PRINT(softs, CE_WARN, "aif thread create failed"); 920 softs->state &= ~AAC_STATE_RUN; 921 goto error; 922 } 923 924 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 925 926 /* Create a thread for command timeout */ 927 softs->timeout_id = timeout(aac_timer, (void *)softs, 928 (aac_tick * drv_usectohz(1000000))); 929 930 /* Common attach is OK, so we are attached! */ 931 ddi_report_dev(dip); 932 AACDB_PRINT(softs, CE_NOTE, "aac attached ok"); 933 return (DDI_SUCCESS); 934 935 error: 936 if (attach_state & AAC_ATTACH_CREATE_SCSI) 937 ddi_remove_minor_node(dip, "scsi"); 938 if (attach_state & AAC_ATTACH_CREATE_DEVCTL) 939 ddi_remove_minor_node(dip, "devctl"); 940 if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP) 941 aac_common_detach(softs); 942 if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) { 943 (void) scsi_hba_detach(dip); 944 scsi_hba_tran_free(AAC_DIP2TRAN(dip)); 945 } 946 if (attach_state & AAC_ATTACH_KMUTEX_INITED) { 947 mutex_destroy(&softs->io_lock); 948 mutex_destroy(&softs->q_comp_mutex); 949 mutex_destroy(&softs->time_mutex); 950 mutex_destroy(&softs->ev_lock); 951 mutex_destroy(&softs->aifq_mutex); 952 cv_destroy(&softs->event); 953 cv_destroy(&softs->sync_fib_cv); 954 cv_destroy(&softs->drain_cv); 955 cv_destroy(&softs->event_wait_cv); 956 cv_destroy(&softs->event_disp_cv); 957 cv_destroy(&softs->aifq_cv); 958 } 959 if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED) 960 ddi_regs_map_free(&softs->pci_mem_handle); 961 aac_fm_fini(softs); 962 if (attach_state & AAC_ATTACH_CARD_DETECTED) 963 softs->card = AACERR; 964 if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED) 965 ddi_soft_state_free(aac_softstatep, instance); 966 return (DDI_FAILURE); 967 } 968 969 static int 970 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 971 { 972 scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip); 973 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 974 975 DBCALLED(softs, 1); 976 977 switch (cmd) { 978 case DDI_DETACH: 979 break; 980 case DDI_SUSPEND: 981 return (DDI_FAILURE); 982 default: 983 return (DDI_FAILURE); 984 } 985 986 mutex_enter(&softs->io_lock); 987 AAC_DISABLE_INTR(softs); 988 softs->state = AAC_STATE_STOPPED; 989 990 ddi_remove_minor_node(dip, "aac"); 991 ddi_remove_minor_node(dip, "scsi"); 992 ddi_remove_minor_node(dip, "devctl"); 993 mutex_exit(&softs->io_lock); 994 995 aac_common_detach(softs); 996 997 mutex_enter(&softs->io_lock); 998 (void) scsi_hba_detach(dip); 999 scsi_hba_tran_free(tran); 1000 mutex_exit(&softs->io_lock); 1001 1002 /* Stop timer */ 1003 mutex_enter(&softs->time_mutex); 1004 if (softs->timeout_id) { 1005 timeout_id_t tid = softs->timeout_id; 1006 softs->timeout_id = 0; 1007 1008 mutex_exit(&softs->time_mutex); 1009 (void) untimeout(tid); 1010 mutex_enter(&softs->time_mutex); 1011 } 1012 mutex_exit(&softs->time_mutex); 1013 1014 /* Destroy event thread */ 1015 mutex_enter(&softs->ev_lock); 1016 cv_signal(&softs->event_disp_cv); 1017 cv_wait(&softs->event_wait_cv, &softs->ev_lock); 1018 mutex_exit(&softs->ev_lock); 1019 1020 cv_destroy(&softs->aifq_cv); 1021 
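	/*
	 * The remaining condvars and mutexes are destroyed in the reverse
	 * order of their initialization in aac_attach().
	 */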
cv_destroy(&softs->event_disp_cv); 1022 cv_destroy(&softs->event_wait_cv); 1023 cv_destroy(&softs->drain_cv); 1024 cv_destroy(&softs->sync_fib_cv); 1025 cv_destroy(&softs->event); 1026 mutex_destroy(&softs->aifq_mutex); 1027 mutex_destroy(&softs->ev_lock); 1028 mutex_destroy(&softs->time_mutex); 1029 mutex_destroy(&softs->q_comp_mutex); 1030 mutex_destroy(&softs->io_lock); 1031 1032 ddi_regs_map_free(&softs->pci_mem_handle); 1033 aac_fm_fini(softs); 1034 softs->hwif = AAC_HWIF_UNKNOWN; 1035 softs->card = AAC_UNKNOWN_CARD; 1036 ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip)); 1037 1038 return (DDI_SUCCESS); 1039 } 1040 1041 /*ARGSUSED*/ 1042 static int 1043 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1044 { 1045 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 1046 1047 DBCALLED(softs, 1); 1048 1049 mutex_enter(&softs->io_lock); 1050 AAC_DISABLE_INTR(softs); 1051 (void) aac_shutdown(softs); 1052 mutex_exit(&softs->io_lock); 1053 1054 return (DDI_SUCCESS); 1055 } 1056 1057 /* 1058 * quiesce(9E) entry point. 1059 * 1060 * This function is called when the system is single-threaded at high 1061 * PIL with preemption disabled. Therefore, this function must not be 1062 * blocked. 1063 * 1064 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 1065 * DDI_FAILURE indicates an error condition and should almost never happen. 1066 */ 1067 static int 1068 aac_quiesce(dev_info_t *dip) 1069 { 1070 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 1071 1072 if (softs == NULL) 1073 return (DDI_FAILURE); 1074 1075 _NOTE(ASSUMING_PROTECTED(softs->state)) 1076 AAC_DISABLE_INTR(softs); 1077 1078 return (DDI_SUCCESS); 1079 } 1080 1081 /* ARGSUSED */ 1082 static int 1083 aac_getinfo(dev_info_t *self, ddi_info_cmd_t infocmd, void *arg, 1084 void **result) 1085 { 1086 int error = DDI_SUCCESS; 1087 1088 switch (infocmd) { 1089 case DDI_INFO_DEVT2INSTANCE: 1090 *result = (void *)(intptr_t)(MINOR2INST(getminor((dev_t)arg))); 1091 break; 1092 default: 1093 error = DDI_FAILURE; 1094 } 1095 return (error); 1096 } 1097 1098 /* 1099 * Bring the controller down to a dormant state and detach all child devices. 1100 * This function is called before detach or system shutdown. 1101 * Note: we can assume that the q_wait on the controller is empty, as we 1102 * won't allow shutdown if any device is open. 1103 */ 1104 static int 1105 aac_shutdown(struct aac_softstate *softs) 1106 { 1107 ddi_acc_handle_t acc; 1108 struct aac_close_command *cc; 1109 int rval; 1110 1111 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 1112 acc = softs->sync_ac.slotp->fib_acc_handle; 1113 1114 cc = (struct aac_close_command *)&softs->sync_ac.slotp->fibp->data[0]; 1115 1116 ddi_put32(acc, &cc->Command, VM_CloseAll); 1117 ddi_put32(acc, &cc->ContainerId, 0xfffffffful); 1118 1119 /* Flush all caches, set FW to write through mode */ 1120 rval = aac_sync_fib(softs, ContainerCommand, 1121 AAC_FIB_SIZEOF(struct aac_close_command)); 1122 aac_sync_fib_slot_release(softs, &softs->sync_ac); 1123 1124 AACDB_PRINT(softs, CE_NOTE, 1125 "shutting down aac %s", (rval == AACOK) ? 
"ok" : "fail"); 1126 return (rval); 1127 } 1128 1129 static uint_t 1130 aac_softintr(caddr_t arg) 1131 { 1132 struct aac_softstate *softs = (void *)arg; 1133 1134 if (!AAC_IS_Q_EMPTY(&softs->q_comp)) { 1135 aac_drain_comp_q(softs); 1136 } 1137 return (DDI_INTR_CLAIMED); 1138 } 1139 1140 /* 1141 * Setup auto sense data for pkt 1142 */ 1143 static void 1144 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key, 1145 uchar_t add_code, uchar_t qual_code, uint64_t info) 1146 { 1147 struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp); 1148 1149 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */ 1150 pkt->pkt_state |= STATE_ARQ_DONE; 1151 1152 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1153 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1154 arqstat->sts_rqpkt_resid = 0; 1155 arqstat->sts_rqpkt_state = 1156 STATE_GOT_BUS | 1157 STATE_GOT_TARGET | 1158 STATE_SENT_CMD | 1159 STATE_XFERRED_DATA; 1160 arqstat->sts_rqpkt_statistics = 0; 1161 1162 if (info <= 0xfffffffful) { 1163 arqstat->sts_sensedata.es_valid = 1; 1164 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE; 1165 arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT; 1166 arqstat->sts_sensedata.es_key = key; 1167 arqstat->sts_sensedata.es_add_code = add_code; 1168 arqstat->sts_sensedata.es_qual_code = qual_code; 1169 1170 arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF; 1171 arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF; 1172 arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF; 1173 arqstat->sts_sensedata.es_info_4 = info & 0xFF; 1174 } else { /* 64-bit LBA */ 1175 struct scsi_descr_sense_hdr *dsp; 1176 struct scsi_information_sense_descr *isd; 1177 1178 dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata; 1179 dsp->ds_class = CLASS_EXTENDED_SENSE; 1180 dsp->ds_code = CODE_FMT_DESCR_CURRENT; 1181 dsp->ds_key = key; 1182 dsp->ds_add_code = add_code; 1183 dsp->ds_qual_code = qual_code; 1184 dsp->ds_addl_sense_length = 1185 sizeof (struct scsi_information_sense_descr); 1186 1187 isd = (struct scsi_information_sense_descr *)(dsp+1); 1188 isd->isd_descr_type = DESCR_INFORMATION; 1189 isd->isd_valid = 1; 1190 isd->isd_information[0] = (info >> 56) & 0xFF; 1191 isd->isd_information[1] = (info >> 48) & 0xFF; 1192 isd->isd_information[2] = (info >> 40) & 0xFF; 1193 isd->isd_information[3] = (info >> 32) & 0xFF; 1194 isd->isd_information[4] = (info >> 24) & 0xFF; 1195 isd->isd_information[5] = (info >> 16) & 0xFF; 1196 isd->isd_information[6] = (info >> 8) & 0xFF; 1197 isd->isd_information[7] = (info) & 0xFF; 1198 } 1199 } 1200 1201 /* 1202 * Setup auto sense data for HARDWARE ERROR 1203 */ 1204 static void 1205 aac_set_arq_data_hwerr(struct aac_cmd *acp) 1206 { 1207 union scsi_cdb *cdbp; 1208 uint64_t err_blkno; 1209 1210 cdbp = (void *)acp->pkt->pkt_cdbp; 1211 err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp); 1212 aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno); 1213 } 1214 1215 /* 1216 * Send a command to the adapter in New Comm. 
interface 1217 */ 1218 static int 1219 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp) 1220 { 1221 uint32_t index, device; 1222 1223 index = PCI_MEM_GET32(softs, AAC_IQUE); 1224 if (index == 0xffffffffUL) { 1225 index = PCI_MEM_GET32(softs, AAC_IQUE); 1226 if (index == 0xffffffffUL) 1227 return (AACERR); 1228 } 1229 1230 device = index; 1231 PCI_MEM_PUT32(softs, device, 1232 (uint32_t)(slotp->fib_phyaddr & 0xfffffffful)); 1233 device += 4; 1234 PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32)); 1235 device += 4; 1236 PCI_MEM_PUT32(softs, device, slotp->acp->fib_size); 1237 PCI_MEM_PUT32(softs, AAC_IQUE, index); 1238 return (AACOK); 1239 } 1240 1241 static void 1242 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp) 1243 { 1244 struct aac_device *dvp = acp->dvp; 1245 int q = AAC_CMDQ(acp); 1246 1247 if (acp->slotp) { /* outstanding cmd */ 1248 if (!(acp->flags & AAC_CMD_IN_SYNC_SLOT)) { 1249 aac_release_slot(softs, acp->slotp); 1250 acp->slotp = NULL; 1251 } 1252 if (dvp) { 1253 dvp->ncmds[q]--; 1254 if (dvp->throttle[q] == AAC_THROTTLE_DRAIN && 1255 dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC) 1256 aac_set_throttle(softs, dvp, q, 1257 softs->total_slots); 1258 /* 1259 * Setup auto sense data for UNIT ATTENTION 1260 * Each lun should generate a unit attention 1261 * condition when reset. 1262 * Phys. drives are treated as logical ones 1263 * during error recovery. 1264 */ 1265 if (dvp->type == AAC_DEV_LD) { 1266 struct aac_container *ctp = 1267 (struct aac_container *)dvp; 1268 if (ctp->reset == 0) 1269 goto noreset; 1270 1271 AACDB_PRINT(softs, CE_NOTE, 1272 "Unit attention: reset"); 1273 ctp->reset = 0; 1274 aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION, 1275 0x29, 0x02, 0); 1276 } 1277 } 1278 noreset: 1279 softs->bus_ncmds[q]--; 1280 aac_cmd_delete(&softs->q_busy, acp); 1281 } else { /* cmd in waiting queue */ 1282 aac_cmd_delete(&softs->q_wait[q], acp); 1283 } 1284 1285 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */ 1286 mutex_enter(&softs->q_comp_mutex); 1287 aac_cmd_enqueue(&softs->q_comp, acp); 1288 mutex_exit(&softs->q_comp_mutex); 1289 } else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */ 1290 cv_broadcast(&softs->event); 1291 } 1292 } 1293 1294 static void 1295 aac_handle_io(struct aac_softstate *softs, int index) 1296 { 1297 struct aac_slot *slotp; 1298 struct aac_cmd *acp; 1299 uint32_t fast; 1300 1301 fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE; 1302 index >>= 2; 1303 1304 /* Make sure firmware reported index is valid */ 1305 ASSERT(index >= 0 && index < softs->total_slots); 1306 slotp = &softs->io_slot[index]; 1307 ASSERT(slotp->index == index); 1308 acp = slotp->acp; 1309 1310 if (acp == NULL || acp->slotp != slotp) { 1311 cmn_err(CE_WARN, 1312 "Firmware error: invalid slot index received from FW"); 1313 return; 1314 } 1315 1316 acp->flags |= AAC_CMD_CMPLT; 1317 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU); 1318 1319 if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) { 1320 /* 1321 * For fast response IO, the firmware do not return any FIB 1322 * data, so we need to fill in the FIB status and state so that 1323 * FIB users can handle it correctly. 
		 */
		if (fast) {
			uint32_t state;

			state = ddi_get32(slotp->fib_acc_handle,
			    &slotp->fibp->Header.XferState);
			/*
			 * Update state for the CPU, not for the device;
			 * no DMA sync needed
			 */
			ddi_put32(slotp->fib_acc_handle,
			    &slotp->fibp->Header.XferState,
			    state | AAC_FIBSTATE_DONEADAP);
			ddi_put32(slotp->fib_acc_handle,
			    (void *)&slotp->fibp->data[0], ST_OK);
		}

		/* Handle completed ac */
		acp->ac_comp(softs, acp);
	} else {
		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
		acp->flags |= AAC_CMD_ERR;
		if (acp->pkt) {
			acp->pkt->pkt_reason = CMD_TRAN_ERR;
			acp->pkt->pkt_statistics = 0;
		}
	}
	aac_end_io(softs, acp);
}

/*
 * Interrupt handler for the New Comm. interface
 * The New Comm. interface uses a different interrupt mechanism: there are no
 * explicit message queues, and the driver only needs to access the mapped PCI
 * memory space to find the completed FIB or AIF.
 */
static int
aac_process_intr_new(struct aac_softstate *softs)
{
	uint32_t index;

	index = AAC_OUTB_GET(softs);
	if (index == 0xfffffffful)
		index = AAC_OUTB_GET(softs);
	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
		return (0);
	}
	if (index != 0xfffffffful) {
		do {
			if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
				aac_handle_io(softs, index);
			} else if (index != 0xfffffffeul) {
				struct aac_fib *fibp;	/* FIB in AIF queue */
				uint16_t fib_size;

				/*
				 * 0xfffffffe means that the controller wants
				 * more work; ignore it for now. Otherwise,
				 * an AIF was received.
				 */
				index &= ~2;

				fibp = (struct aac_fib *)(softs-> \
				    pci_mem_base_vaddr + index);
				fib_size = PCI_MEM_GET16(softs, index + \
				    offsetof(struct aac_fib, Header.Size));

				aac_save_aif(softs, softs->pci_mem_handle,
				    fibp, fib_size);

				/*
				 * AIF memory is owned by the adapter, so let it
				 * know that we are done with it.
				 */
				AAC_OUTB_SET(softs, index);
				AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
			}

			index = AAC_OUTB_GET(softs);
		} while (index != 0xfffffffful);

		/*
		 * Process waiting cmds before starting new ones to
		 * ensure first IOs are serviced first.
		 */
		aac_start_waiting_io(softs);
		return (AAC_DB_COMMAND_READY);
	} else {
		return (0);
	}
}

static uint_t
aac_intr_new(caddr_t arg)
{
	struct aac_softstate *softs = (void *)arg;
	uint_t rval;

	mutex_enter(&softs->io_lock);
	if (aac_process_intr_new(softs))
		rval = DDI_INTR_CLAIMED;
	else
		rval = DDI_INTR_UNCLAIMED;
	mutex_exit(&softs->io_lock);

	aac_drain_comp_q(softs);
	return (rval);
}

/*
 * Interrupt handler for the old interface
 * Explicit message queues are used to send FIBs to and get completed FIBs
 * from the adapter. The driver and adapter maintain the queues in a
 * producer/consumer manner. The driver has to query the queues to find the
 * completed FIB.
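 * See aac_fib_enqueue() and aac_fib_dequeue() for how the driver accesses
 * these queues.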
1439 */ 1440 static int 1441 aac_process_intr_old(struct aac_softstate *softs) 1442 { 1443 uint16_t status; 1444 1445 status = AAC_STATUS_GET(softs); 1446 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 1447 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 1448 return (DDI_INTR_UNCLAIMED); 1449 } 1450 if (status & AAC_DB_RESPONSE_READY) { 1451 int slot_idx; 1452 1453 /* ACK the intr */ 1454 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY); 1455 (void) AAC_STATUS_GET(softs); 1456 while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q, 1457 &slot_idx) == AACOK) 1458 aac_handle_io(softs, slot_idx); 1459 1460 /* 1461 * Process waiting cmds before start new ones to 1462 * ensure first IOs are serviced first. 1463 */ 1464 aac_start_waiting_io(softs); 1465 return (AAC_DB_RESPONSE_READY); 1466 } else if (status & AAC_DB_COMMAND_READY) { 1467 int aif_idx; 1468 1469 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY); 1470 (void) AAC_STATUS_GET(softs); 1471 if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) == 1472 AACOK) { 1473 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 1474 struct aac_fib *fibp; /* FIB in communication space */ 1475 uint16_t fib_size; 1476 uint32_t fib_xfer_state; 1477 uint32_t addr, size; 1478 1479 ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS)); 1480 1481 #define AAC_SYNC_AIF(softs, aif_idx, type) \ 1482 { (void) ddi_dma_sync((softs)->comm_space_dma_handle, \ 1483 offsetof(struct aac_comm_space, \ 1484 adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \ 1485 (type)); } 1486 1487 /* Copy AIF from adapter to the empty AIF slot */ 1488 AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU); 1489 fibp = &softs->comm_space->adapter_fibs[aif_idx]; 1490 fib_size = ddi_get16(acc, &fibp->Header.Size); 1491 1492 aac_save_aif(softs, acc, fibp, fib_size); 1493 1494 /* Complete AIF back to adapter with good status */ 1495 fib_xfer_state = LE_32(fibp->Header.XferState); 1496 if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) { 1497 ddi_put32(acc, &fibp->Header.XferState, 1498 fib_xfer_state | AAC_FIBSTATE_DONEHOST); 1499 ddi_put32(acc, (void *)&fibp->data[0], ST_OK); 1500 if (fib_size > AAC_FIB_SIZE) 1501 ddi_put16(acc, &fibp->Header.Size, 1502 AAC_FIB_SIZE); 1503 AAC_SYNC_AIF(softs, aif_idx, 1504 DDI_DMA_SYNC_FORDEV); 1505 } 1506 1507 /* Put the AIF response on the response queue */ 1508 addr = ddi_get32(acc, 1509 &softs->comm_space->adapter_fibs[aif_idx]. \ 1510 Header.SenderFibAddress); 1511 size = (uint32_t)ddi_get16(acc, 1512 &softs->comm_space->adapter_fibs[aif_idx]. \ 1513 Header.Size); 1514 ddi_put32(acc, 1515 &softs->comm_space->adapter_fibs[aif_idx]. 
\ 1516 Header.ReceiverFibAddress, addr); 1517 if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q, 1518 addr, size) == AACERR) 1519 cmn_err(CE_NOTE, "!AIF ack failed"); 1520 } 1521 return (AAC_DB_COMMAND_READY); 1522 } else if (status & AAC_DB_PRINTF_READY) { 1523 /* ACK the intr */ 1524 AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY); 1525 (void) AAC_STATUS_GET(softs); 1526 (void) ddi_dma_sync(softs->comm_space_dma_handle, 1527 offsetof(struct aac_comm_space, adapter_print_buf), 1528 AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU); 1529 if (aac_check_dma_handle(softs->comm_space_dma_handle) == 1530 DDI_SUCCESS) 1531 cmn_err(CE_NOTE, "MSG From Adapter: %s", 1532 softs->comm_space->adapter_print_buf); 1533 else 1534 ddi_fm_service_impact(softs->devinfo_p, 1535 DDI_SERVICE_UNAFFECTED); 1536 AAC_NOTIFY(softs, AAC_DB_PRINTF_READY); 1537 return (AAC_DB_PRINTF_READY); 1538 } else if (status & AAC_DB_COMMAND_NOT_FULL) { 1539 /* 1540 * Without these two condition statements, the OS could hang 1541 * after a while, especially if there are a lot of AIF's to 1542 * handle, for instance if a drive is pulled from an array 1543 * under heavy load. 1544 */ 1545 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1546 return (AAC_DB_COMMAND_NOT_FULL); 1547 } else if (status & AAC_DB_RESPONSE_NOT_FULL) { 1548 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL); 1549 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL); 1550 return (AAC_DB_RESPONSE_NOT_FULL); 1551 } else { 1552 return (0); 1553 } 1554 } 1555 1556 static uint_t 1557 aac_intr_old(caddr_t arg) 1558 { 1559 struct aac_softstate *softs = (void *)arg; 1560 int rval; 1561 1562 mutex_enter(&softs->io_lock); 1563 if (aac_process_intr_old(softs)) 1564 rval = DDI_INTR_CLAIMED; 1565 else 1566 rval = DDI_INTR_UNCLAIMED; 1567 mutex_exit(&softs->io_lock); 1568 1569 aac_drain_comp_q(softs); 1570 return (rval); 1571 } 1572 1573 /* 1574 * Query FIXED or MSI interrupts 1575 */ 1576 static int 1577 aac_query_intrs(struct aac_softstate *softs, int intr_type) 1578 { 1579 dev_info_t *dip = softs->devinfo_p; 1580 int avail, actual, count; 1581 int i, flag, ret; 1582 1583 AACDB_PRINT(softs, CE_NOTE, 1584 "aac_query_intrs:interrupt type 0x%x", intr_type); 1585 1586 /* Get number of interrupts */ 1587 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 1588 if ((ret != DDI_SUCCESS) || (count == 0)) { 1589 AACDB_PRINT(softs, CE_WARN, 1590 "ddi_intr_get_nintrs() failed, ret %d count %d", 1591 ret, count); 1592 return (DDI_FAILURE); 1593 } 1594 1595 /* Get number of available interrupts */ 1596 ret = ddi_intr_get_navail(dip, intr_type, &avail); 1597 if ((ret != DDI_SUCCESS) || (avail == 0)) { 1598 AACDB_PRINT(softs, CE_WARN, 1599 "ddi_intr_get_navail() failed, ret %d avail %d", 1600 ret, avail); 1601 return (DDI_FAILURE); 1602 } 1603 1604 AACDB_PRINT(softs, CE_NOTE, 1605 "ddi_intr_get_nvail returned %d, navail() returned %d", 1606 count, avail); 1607 1608 /* Allocate an array of interrupt handles */ 1609 softs->intr_size = count * sizeof (ddi_intr_handle_t); 1610 softs->htable = kmem_alloc(softs->intr_size, KM_SLEEP); 1611 1612 if (intr_type == DDI_INTR_TYPE_MSI) { 1613 count = 1; /* only one vector needed by now */ 1614 flag = DDI_INTR_ALLOC_STRICT; 1615 } else { /* must be DDI_INTR_TYPE_FIXED */ 1616 flag = DDI_INTR_ALLOC_NORMAL; 1617 } 1618 1619 /* Call ddi_intr_alloc() */ 1620 ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0, 1621 count, &actual, flag); 1622 1623 if ((ret != DDI_SUCCESS) || (actual == 0)) { 1624 AACDB_PRINT(softs, CE_WARN, 1625 "ddi_intr_alloc() failed, ret = %d", 
ret); 1626 actual = 0; 1627 goto error; 1628 } 1629 1630 if (actual < count) { 1631 AACDB_PRINT(softs, CE_NOTE, 1632 "Requested: %d, Received: %d", count, actual); 1633 goto error; 1634 } 1635 1636 softs->intr_cnt = actual; 1637 1638 /* Get priority for first msi, assume remaining are all the same */ 1639 if ((ret = ddi_intr_get_pri(softs->htable[0], 1640 &softs->intr_pri)) != DDI_SUCCESS) { 1641 AACDB_PRINT(softs, CE_WARN, 1642 "ddi_intr_get_pri() failed, ret = %d", ret); 1643 goto error; 1644 } 1645 1646 /* Test for high level mutex */ 1647 if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) { 1648 AACDB_PRINT(softs, CE_WARN, 1649 "aac_query_intrs: Hi level interrupt not supported"); 1650 goto error; 1651 } 1652 1653 return (DDI_SUCCESS); 1654 1655 error: 1656 /* Free already allocated intr */ 1657 for (i = 0; i < actual; i++) 1658 (void) ddi_intr_free(softs->htable[i]); 1659 1660 kmem_free(softs->htable, softs->intr_size); 1661 return (DDI_FAILURE); 1662 } 1663 1664 1665 /* 1666 * Register FIXED or MSI interrupts, and enable them 1667 */ 1668 static int 1669 aac_add_intrs(struct aac_softstate *softs) 1670 { 1671 int i, ret; 1672 int actual; 1673 ddi_intr_handler_t *aac_intr; 1674 1675 actual = softs->intr_cnt; 1676 aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ? 1677 aac_intr_new : aac_intr_old); 1678 1679 /* Call ddi_intr_add_handler() */ 1680 for (i = 0; i < actual; i++) { 1681 if ((ret = ddi_intr_add_handler(softs->htable[i], 1682 aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) { 1683 cmn_err(CE_WARN, 1684 "ddi_intr_add_handler() failed ret = %d", ret); 1685 1686 /* Free already allocated intr */ 1687 for (i = 0; i < actual; i++) 1688 (void) ddi_intr_free(softs->htable[i]); 1689 1690 kmem_free(softs->htable, softs->intr_size); 1691 return (DDI_FAILURE); 1692 } 1693 } 1694 1695 if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap)) 1696 != DDI_SUCCESS) { 1697 cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret); 1698 1699 /* Free already allocated intr */ 1700 for (i = 0; i < actual; i++) 1701 (void) ddi_intr_free(softs->htable[i]); 1702 1703 kmem_free(softs->htable, softs->intr_size); 1704 return (DDI_FAILURE); 1705 } 1706 1707 return (DDI_SUCCESS); 1708 } 1709 1710 /* 1711 * Unregister FIXED or MSI interrupts 1712 */ 1713 static void 1714 aac_remove_intrs(struct aac_softstate *softs) 1715 { 1716 int i; 1717 1718 /* Disable all interrupts */ 1719 (void) aac_disable_intrs(softs); 1720 /* Call ddi_intr_remove_handler() */ 1721 for (i = 0; i < softs->intr_cnt; i++) { 1722 (void) ddi_intr_remove_handler(softs->htable[i]); 1723 (void) ddi_intr_free(softs->htable[i]); 1724 } 1725 1726 kmem_free(softs->htable, softs->intr_size); 1727 } 1728 1729 static int 1730 aac_enable_intrs(struct aac_softstate *softs) 1731 { 1732 int rval = AACOK; 1733 1734 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) { 1735 /* for MSI block enable */ 1736 if (ddi_intr_block_enable(softs->htable, softs->intr_cnt) != 1737 DDI_SUCCESS) 1738 rval = AACERR; 1739 } else { 1740 int i; 1741 1742 /* Call ddi_intr_enable() for legacy/MSI non block enable */ 1743 for (i = 0; i < softs->intr_cnt; i++) { 1744 if (ddi_intr_enable(softs->htable[i]) != DDI_SUCCESS) 1745 rval = AACERR; 1746 } 1747 } 1748 return (rval); 1749 } 1750 1751 static int 1752 aac_disable_intrs(struct aac_softstate *softs) 1753 { 1754 int rval = AACOK; 1755 1756 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) { 1757 /* Call ddi_intr_block_disable() */ 1758 if (ddi_intr_block_disable(softs->htable, softs->intr_cnt) != 1759 
DDI_SUCCESS) 1760 rval = AACERR; 1761 } else { 1762 int i; 1763 1764 for (i = 0; i < softs->intr_cnt; i++) { 1765 if (ddi_intr_disable(softs->htable[i]) != DDI_SUCCESS) 1766 rval = AACERR; 1767 } 1768 } 1769 return (rval); 1770 } 1771 1772 /* 1773 * Set pkt_reason and OR in pkt_statistics flag 1774 */ 1775 static void 1776 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp, 1777 uchar_t reason, uint_t stat) 1778 { 1779 #ifndef __lock_lint 1780 _NOTE(ARGUNUSED(softs)) 1781 #endif 1782 if (acp->pkt->pkt_reason == CMD_CMPLT) 1783 acp->pkt->pkt_reason = reason; 1784 acp->pkt->pkt_statistics |= stat; 1785 } 1786 1787 /* 1788 * Handle a finished pkt of soft SCMD 1789 */ 1790 static void 1791 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp) 1792 { 1793 ASSERT(acp->pkt); 1794 1795 acp->flags |= AAC_CMD_CMPLT; 1796 1797 acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \ 1798 STATE_SENT_CMD | STATE_GOT_STATUS; 1799 if (acp->pkt->pkt_state & STATE_XFERRED_DATA) 1800 acp->pkt->pkt_resid = 0; 1801 1802 /* AAC_CMD_NO_INTR means no complete callback */ 1803 if (!(acp->flags & AAC_CMD_NO_INTR)) { 1804 mutex_enter(&softs->q_comp_mutex); 1805 aac_cmd_enqueue(&softs->q_comp, acp); 1806 mutex_exit(&softs->q_comp_mutex); 1807 ddi_trigger_softintr(softs->softint_id); 1808 } 1809 } 1810 1811 /* 1812 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old() 1813 */ 1814 1815 /* 1816 * Handle completed logical device IO command 1817 */ 1818 /*ARGSUSED*/ 1819 static void 1820 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1821 { 1822 struct aac_slot *slotp = acp->slotp; 1823 struct aac_blockread_response *resp; 1824 uint32_t status; 1825 1826 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1827 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1828 1829 acp->pkt->pkt_state |= STATE_GOT_STATUS; 1830 1831 /* 1832 * block_read/write has a similar response header, use blockread 1833 * response for both. 1834 */ 1835 resp = (struct aac_blockread_response *)&slotp->fibp->data[0]; 1836 status = ddi_get32(slotp->fib_acc_handle, &resp->Status); 1837 if (status == ST_OK) { 1838 acp->pkt->pkt_resid = 0; 1839 acp->pkt->pkt_state |= STATE_XFERRED_DATA; 1840 } else { 1841 aac_set_arq_data_hwerr(acp); 1842 } 1843 } 1844 1845 /* 1846 * Handle completed phys. 
device IO command 1847 */ 1848 static void 1849 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp) 1850 { 1851 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 1852 struct aac_fib *fibp = acp->slotp->fibp; 1853 struct scsi_pkt *pkt = acp->pkt; 1854 struct aac_srb_reply *resp; 1855 uint32_t resp_status; 1856 1857 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 1858 ASSERT(!(acp->flags & AAC_CMD_NO_CB)); 1859 1860 resp = (struct aac_srb_reply *)&fibp->data[0]; 1861 resp_status = ddi_get32(acc, &resp->status); 1862 1863 /* First check FIB status */ 1864 if (resp_status == ST_OK) { 1865 uint32_t scsi_status; 1866 uint32_t srb_status; 1867 uint32_t data_xfer_length; 1868 1869 scsi_status = ddi_get32(acc, &resp->scsi_status); 1870 srb_status = ddi_get32(acc, &resp->srb_status); 1871 data_xfer_length = ddi_get32(acc, &resp->data_xfer_length); 1872 1873 *pkt->pkt_scbp = (uint8_t)scsi_status; 1874 pkt->pkt_state |= STATE_GOT_STATUS; 1875 if (scsi_status == STATUS_GOOD) { 1876 uchar_t cmd = ((union scsi_cdb *)(void *) 1877 (pkt->pkt_cdbp))->scc_cmd; 1878 1879 /* Next check SRB status */ 1880 switch (srb_status & 0x3f) { 1881 case SRB_STATUS_DATA_OVERRUN: 1882 AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \ 1883 "scmd=%d, xfer=%d, buflen=%d", 1884 (uint32_t)cmd, data_xfer_length, 1885 acp->bcount); 1886 1887 switch (cmd) { 1888 case SCMD_READ: 1889 case SCMD_WRITE: 1890 case SCMD_READ_G1: 1891 case SCMD_WRITE_G1: 1892 case SCMD_READ_G4: 1893 case SCMD_WRITE_G4: 1894 case SCMD_READ_G5: 1895 case SCMD_WRITE_G5: 1896 aac_set_pkt_reason(softs, acp, 1897 CMD_DATA_OVR, 0); 1898 break; 1899 } 1900 /*FALLTHRU*/ 1901 case SRB_STATUS_ERROR_RECOVERY: 1902 case SRB_STATUS_PENDING: 1903 case SRB_STATUS_SUCCESS: 1904 /* 1905 * pkt_resid should only be calculated if the 1906 * status is ERROR_RECOVERY/PENDING/SUCCESS/ 1907 * OVERRUN/UNDERRUN 1908 */ 1909 if (data_xfer_length) { 1910 pkt->pkt_state |= STATE_XFERRED_DATA; 1911 pkt->pkt_resid = acp->bcount - \ 1912 data_xfer_length; 1913 ASSERT(pkt->pkt_resid >= 0); 1914 } 1915 break; 1916 case SRB_STATUS_ABORTED: 1917 AACDB_PRINT(softs, CE_NOTE, 1918 "SRB_STATUS_ABORTED, xfer=%d, resid=%d", 1919 data_xfer_length, pkt->pkt_resid); 1920 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 1921 STAT_ABORTED); 1922 break; 1923 case SRB_STATUS_ABORT_FAILED: 1924 AACDB_PRINT(softs, CE_NOTE, 1925 "SRB_STATUS_ABORT_FAILED, xfer=%d, " \ 1926 "resid=%d", data_xfer_length, 1927 pkt->pkt_resid); 1928 aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL, 1929 0); 1930 break; 1931 case SRB_STATUS_PARITY_ERROR: 1932 AACDB_PRINT(softs, CE_NOTE, 1933 "SRB_STATUS_PARITY_ERROR, xfer=%d, " \ 1934 "resid=%d", data_xfer_length, 1935 pkt->pkt_resid); 1936 aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0); 1937 break; 1938 case SRB_STATUS_NO_DEVICE: 1939 case SRB_STATUS_INVALID_PATH_ID: 1940 case SRB_STATUS_INVALID_TARGET_ID: 1941 case SRB_STATUS_INVALID_LUN: 1942 case SRB_STATUS_SELECTION_TIMEOUT: 1943 #ifdef DEBUG 1944 if (AAC_DEV_IS_VALID(acp->dvp)) { 1945 AACDB_PRINT(softs, CE_NOTE, 1946 "SRB_STATUS_NO_DEVICE(%d), " \ 1947 "xfer=%d, resid=%d ", 1948 srb_status & 0x3f, 1949 data_xfer_length, pkt->pkt_resid); 1950 } 1951 #endif 1952 aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0); 1953 break; 1954 case SRB_STATUS_COMMAND_TIMEOUT: 1955 case SRB_STATUS_TIMEOUT: 1956 AACDB_PRINT(softs, CE_NOTE, 1957 "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \ 1958 "resid=%d", data_xfer_length, 1959 pkt->pkt_resid); 1960 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 1961 STAT_TIMEOUT); 1962 break; 1963 case 
SRB_STATUS_BUS_RESET: 1964 AACDB_PRINT(softs, CE_NOTE, 1965 "SRB_STATUS_BUS_RESET, xfer=%d, " \ 1966 "resid=%d", data_xfer_length, 1967 pkt->pkt_resid); 1968 aac_set_pkt_reason(softs, acp, CMD_RESET, 1969 STAT_BUS_RESET); 1970 break; 1971 default: 1972 AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \ 1973 "xfer=%d, resid=%d", srb_status & 0x3f, 1974 data_xfer_length, pkt->pkt_resid); 1975 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 1976 break; 1977 } 1978 } else if (scsi_status == STATUS_CHECK) { 1979 /* CHECK CONDITION */ 1980 struct scsi_arq_status *arqstat = 1981 (void *)(pkt->pkt_scbp); 1982 uint32_t sense_data_size; 1983 1984 pkt->pkt_state |= STATE_ARQ_DONE; 1985 1986 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; 1987 arqstat->sts_rqpkt_reason = CMD_CMPLT; 1988 arqstat->sts_rqpkt_resid = 0; 1989 arqstat->sts_rqpkt_state = 1990 STATE_GOT_BUS | 1991 STATE_GOT_TARGET | 1992 STATE_SENT_CMD | 1993 STATE_XFERRED_DATA; 1994 arqstat->sts_rqpkt_statistics = 0; 1995 1996 sense_data_size = ddi_get32(acc, 1997 &resp->sense_data_size); 1998 ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE); 1999 AACDB_PRINT(softs, CE_NOTE, 2000 "CHECK CONDITION: sense len=%d, xfer len=%d", 2001 sense_data_size, data_xfer_length); 2002 2003 if (sense_data_size > SENSE_LENGTH) 2004 sense_data_size = SENSE_LENGTH; 2005 ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata, 2006 (uint8_t *)resp->sense_data, sense_data_size, 2007 DDI_DEV_AUTOINCR); 2008 } else { 2009 AACDB_PRINT(softs, CE_WARN, "invaild scsi status: " \ 2010 "scsi_status=%d, srb_status=%d", 2011 scsi_status, srb_status); 2012 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 2013 } 2014 } else { 2015 AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d", 2016 resp_status); 2017 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0); 2018 } 2019 } 2020 2021 /* 2022 * Handle completed IOCTL command 2023 */ 2024 /*ARGSUSED*/ 2025 void 2026 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2027 { 2028 struct aac_slot *slotp = acp->slotp; 2029 2030 /* 2031 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb() 2032 * may wait on softs->event, so use cv_broadcast() instead 2033 * of cv_signal(). 
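* The cv_broadcast() itself is expected to be issued by the completion
* path that invokes this handler; all this routine does is copy the
* reply FIB out of the hardware slot for the waiter, using the reply's
* own Header.Size (bounded by aac_max_fib_size below) as the copy
* length.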
2034 */ 2035 ASSERT(acp->flags & AAC_CMD_SYNC); 2036 ASSERT(acp->flags & AAC_CMD_NO_CB); 2037 2038 /* Get the size of the response FIB from its FIB.Header.Size field */ 2039 acp->fib_size = ddi_get16(slotp->fib_acc_handle, 2040 &slotp->fibp->Header.Size); 2041 2042 ASSERT(acp->fib_size <= softs->aac_max_fib_size); 2043 ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp, 2044 (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR); 2045 } 2046 2047 /* 2048 * Handle completed sync fib command 2049 */ 2050 /*ARGSUSED*/ 2051 void 2052 aac_sync_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2053 { 2054 } 2055 2056 /* 2057 * Handle completed Flush command 2058 */ 2059 /*ARGSUSED*/ 2060 static void 2061 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2062 { 2063 struct aac_slot *slotp = acp->slotp; 2064 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2065 struct aac_synchronize_reply *resp; 2066 uint32_t status; 2067 2068 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2069 2070 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2071 2072 resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0]; 2073 status = ddi_get32(acc, &resp->Status); 2074 if (status != CT_OK) 2075 aac_set_arq_data_hwerr(acp); 2076 } 2077 2078 /*ARGSUSED*/ 2079 static void 2080 aac_startstop_complete(struct aac_softstate *softs, struct aac_cmd *acp) 2081 { 2082 struct aac_slot *slotp = acp->slotp; 2083 ddi_acc_handle_t acc = slotp->fib_acc_handle; 2084 struct aac_Container_resp *resp; 2085 uint32_t status; 2086 2087 ASSERT(!(acp->flags & AAC_CMD_SYNC)); 2088 2089 acp->pkt->pkt_state |= STATE_GOT_STATUS; 2090 2091 resp = (struct aac_Container_resp *)&slotp->fibp->data[0]; 2092 status = ddi_get32(acc, &resp->Status); 2093 if (status != 0) { 2094 AACDB_PRINT(softs, CE_WARN, "Cannot start/stop a unit"); 2095 aac_set_arq_data_hwerr(acp); 2096 } 2097 } 2098 2099 /* 2100 * Access PCI space to see if the driver can support the card 2101 */ 2102 static int 2103 aac_check_card_type(struct aac_softstate *softs) 2104 { 2105 ddi_acc_handle_t pci_config_handle; 2106 int card_index; 2107 uint32_t pci_cmd; 2108 2109 /* Map pci configuration space */ 2110 if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) != 2111 DDI_SUCCESS) { 2112 AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space"); 2113 return (AACERR); 2114 } 2115 2116 softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID); 2117 softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID); 2118 softs->subvendid = pci_config_get16(pci_config_handle, 2119 PCI_CONF_SUBVENID); 2120 softs->subsysid = pci_config_get16(pci_config_handle, 2121 PCI_CONF_SUBSYSID); 2122 2123 card_index = 0; 2124 while (!CARD_IS_UNKNOWN(card_index)) { 2125 if ((aac_cards[card_index].vendor == softs->vendid) && 2126 (aac_cards[card_index].device == softs->devid) && 2127 (aac_cards[card_index].subvendor == softs->subvendid) && 2128 (aac_cards[card_index].subsys == softs->subsysid)) { 2129 break; 2130 } 2131 card_index++; 2132 } 2133 2134 softs->card = card_index; 2135 softs->hwif = aac_cards[card_index].hwif; 2136 2137 /* 2138 * Unknown aac card 2139 * do a generic match based on the VendorID and DeviceID to 2140 * support the new cards in the aac family 2141 */ 2142 if (CARD_IS_UNKNOWN(card_index)) { 2143 if (softs->vendid != 0x9005) { 2144 AACDB_PRINT(softs, CE_WARN, 2145 "Unknown vendor 0x%x", softs->vendid); 2146 goto error; 2147 } 2148 switch (softs->devid) { 2149 case 0x285: 2150 softs->hwif = AAC_HWIF_I960RX; 2151 break; 2152 case 0x286: 2153 
softs->hwif = AAC_HWIF_RKT; 2154 break; 2155 default: 2156 AACDB_PRINT(softs, CE_WARN, 2157 "Unknown device \"pci9005,%x\"", softs->devid); 2158 goto error; 2159 } 2160 } 2161 2162 /* Set hardware dependent interface */ 2163 switch (softs->hwif) { 2164 case AAC_HWIF_I960RX: 2165 softs->aac_if = aac_rx_interface; 2166 softs->map_size_min = AAC_MAP_SIZE_MIN_RX; 2167 break; 2168 case AAC_HWIF_RKT: 2169 softs->aac_if = aac_rkt_interface; 2170 softs->map_size_min = AAC_MAP_SIZE_MIN_RKT; 2171 break; 2172 default: 2173 AACDB_PRINT(softs, CE_WARN, 2174 "Unknown hardware interface %d", softs->hwif); 2175 goto error; 2176 } 2177 2178 /* Set card names */ 2179 (void) strncpy(softs->vendor_name, aac_cards[card_index].vid, 2180 AAC_VENDOR_LEN); 2181 (void) strncpy(softs->product_name, aac_cards[card_index].desc, 2182 AAC_PRODUCT_LEN); 2183 2184 /* Set up quirks */ 2185 softs->flags = aac_cards[card_index].quirks; 2186 2187 /* Force the busmaster enable bit on */ 2188 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 2189 if ((pci_cmd & PCI_COMM_ME) == 0) { 2190 pci_cmd |= PCI_COMM_ME; 2191 pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd); 2192 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM); 2193 if ((pci_cmd & PCI_COMM_ME) == 0) { 2194 cmn_err(CE_CONT, "?Cannot enable busmaster bit"); 2195 goto error; 2196 } 2197 } 2198 2199 /* Set memory base to map */ 2200 softs->pci_mem_base_paddr = 0xfffffff0UL & \ 2201 pci_config_get32(pci_config_handle, PCI_CONF_BASE0); 2202 2203 pci_config_teardown(&pci_config_handle); 2204 2205 return (AACOK); /* card type detected */ 2206 error: 2207 pci_config_teardown(&pci_config_handle); 2208 return (AACERR); /* no matched card found */ 2209 } 2210 2211 /* 2212 * Do the usual interrupt handler setup stuff. 2213 */ 2214 static int 2215 aac_register_intrs(struct aac_softstate *softs) 2216 { 2217 dev_info_t *dip; 2218 int intr_types; 2219 2220 ASSERT(softs->devinfo_p); 2221 dip = softs->devinfo_p; 2222 2223 /* Get the type of device interrupts */ 2224 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) { 2225 AACDB_PRINT(softs, CE_WARN, 2226 "ddi_intr_get_supported_types() failed"); 2227 return (AACERR); 2228 } 2229 AACDB_PRINT(softs, CE_NOTE, 2230 "ddi_intr_get_supported_types() ret: 0x%x", intr_types); 2231 2232 /* Query interrupts, and alloc/init all needed structs */ 2233 if (intr_types & DDI_INTR_TYPE_MSI) { 2234 if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI) 2235 != DDI_SUCCESS) { 2236 AACDB_PRINT(softs, CE_WARN, 2237 "MSI interrupt query failed"); 2238 return (AACERR); 2239 } 2240 softs->intr_type = DDI_INTR_TYPE_MSI; 2241 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 2242 if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED) 2243 != DDI_SUCCESS) { 2244 AACDB_PRINT(softs, CE_WARN, 2245 "FIXED interrupt query failed"); 2246 return (AACERR); 2247 } 2248 softs->intr_type = DDI_INTR_TYPE_FIXED; 2249 } else { 2250 AACDB_PRINT(softs, CE_WARN, 2251 "Device supports neither FIXED nor MSI interrupts"); 2252 return (AACERR); 2253 } 2254 2255 /* Connect interrupt handlers */ 2256 if (aac_add_intrs(softs) != DDI_SUCCESS) { 2257 AACDB_PRINT(softs, CE_WARN, 2258 "Interrupt registration failed, intr type: %s", 2259 softs->intr_type == DDI_INTR_TYPE_MSI ?
"MSI" : "FIXED"); 2260 return (AACERR); 2261 } 2262 (void) aac_enable_intrs(softs); 2263 2264 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id, 2265 NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) { 2266 AACDB_PRINT(softs, CE_WARN, 2267 "Can not setup soft interrupt handler!"); 2268 aac_remove_intrs(softs); 2269 return (AACERR); 2270 } 2271 2272 return (AACOK); 2273 } 2274 2275 static void 2276 aac_unregister_intrs(struct aac_softstate *softs) 2277 { 2278 aac_remove_intrs(softs); 2279 ddi_remove_softintr(softs->softint_id); 2280 } 2281 2282 /* 2283 * Check the firmware to determine the features to support and the FIB 2284 * parameters to use. 2285 */ 2286 static int 2287 aac_check_firmware(struct aac_softstate *softs) 2288 { 2289 uint32_t options; 2290 uint32_t atu_size; 2291 ddi_acc_handle_t pci_handle; 2292 uint8_t *data; 2293 uint32_t max_fibs; 2294 uint32_t max_fib_size; 2295 uint32_t sg_tablesize; 2296 uint32_t max_sectors; 2297 uint32_t status; 2298 2299 /* Get supported options */ 2300 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0, 2301 &status)) != AACOK) { 2302 if (status != SRB_STATUS_INVALID_REQUEST) { 2303 cmn_err(CE_CONT, 2304 "?Fatal error: request adapter info error"); 2305 return (AACERR); 2306 } 2307 options = 0; 2308 atu_size = 0; 2309 } else { 2310 options = AAC_MAILBOX_GET(softs, 1); 2311 atu_size = AAC_MAILBOX_GET(softs, 2); 2312 } 2313 2314 if (softs->state & AAC_STATE_RESET) { 2315 if ((softs->support_opt == options) && 2316 (softs->atu_size == atu_size)) 2317 return (AACOK); 2318 2319 cmn_err(CE_WARN, 2320 "?Fatal error: firmware changed, system needs reboot"); 2321 return (AACERR); 2322 } 2323 2324 /* 2325 * The following critical settings are initialized only once during 2326 * driver attachment. 2327 */ 2328 softs->support_opt = options; 2329 softs->atu_size = atu_size; 2330 2331 /* Process supported options */ 2332 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && 2333 (softs->flags & AAC_FLAGS_NO4GB) == 0) { 2334 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window"); 2335 softs->flags |= AAC_FLAGS_4GB_WINDOW; 2336 } else { 2337 /* 2338 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space 2339 * only. IO is handled by the DMA engine which does not suffer 2340 * from the ATU window programming workarounds necessary for 2341 * CPU copy operations. 
2342 */ 2343 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull; 2344 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull; 2345 } 2346 2347 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) { 2348 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address"); 2349 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; 2350 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull; 2351 softs->flags |= AAC_FLAGS_SG_64BIT; 2352 } 2353 2354 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) { 2355 softs->flags |= AAC_FLAGS_ARRAY_64BIT; 2356 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size"); 2357 } 2358 2359 if (options & AAC_SUPPORTED_NONDASD) { 2360 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0, 2361 "nondasd-enable", (char **)&data) == DDI_SUCCESS)) { 2362 if (strcmp((char *)data, "yes") == 0) { 2363 AACDB_PRINT(softs, CE_NOTE, 2364 "!Enable Non-DASD access"); 2365 softs->flags |= AAC_FLAGS_NONDASD; 2366 } 2367 ddi_prop_free(data); 2368 } 2369 } 2370 2371 /* Read preferred settings */ 2372 max_fib_size = 0; 2373 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF, 2374 0, 0, 0, 0, NULL)) == AACOK) { 2375 options = AAC_MAILBOX_GET(softs, 1); 2376 max_fib_size = (options & 0xffff); 2377 max_sectors = (options >> 16) << 1; 2378 options = AAC_MAILBOX_GET(softs, 2); 2379 sg_tablesize = (options >> 16); 2380 options = AAC_MAILBOX_GET(softs, 3); 2381 max_fibs = (options & 0xffff); 2382 } 2383 2384 /* Enable new comm. and rawio at the same time */ 2385 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) && 2386 (max_fib_size != 0)) { 2387 /* read out and save PCI MBR */ 2388 if ((atu_size > softs->map_size) && 2389 (ddi_regs_map_setup(softs->devinfo_p, 1, 2390 (caddr_t *)&data, 0, atu_size, &softs->reg_attr, 2391 &pci_handle) == DDI_SUCCESS)) { 2392 ddi_regs_map_free(&softs->pci_mem_handle); 2393 softs->pci_mem_handle = pci_handle; 2394 softs->pci_mem_base_vaddr = data; 2395 softs->map_size = atu_size; 2396 } 2397 if (atu_size == softs->map_size) { 2398 softs->flags |= AAC_FLAGS_NEW_COMM; 2399 AACDB_PRINT(softs, CE_NOTE, 2400 "!Enable New Comm. interface"); 2401 } 2402 } 2403 2404 /* Set FIB parameters */ 2405 if (softs->flags & AAC_FLAGS_NEW_COMM) { 2406 softs->aac_max_fibs = max_fibs; 2407 softs->aac_max_fib_size = max_fib_size; 2408 softs->aac_max_sectors = max_sectors; 2409 softs->aac_sg_tablesize = sg_tablesize; 2410 2411 softs->flags |= AAC_FLAGS_RAW_IO; 2412 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO"); 2413 } else { 2414 softs->aac_max_fibs = 2415 (softs->flags & AAC_FLAGS_256FIBS) ? 
256 : 512; 2416 softs->aac_max_fib_size = AAC_FIB_SIZE; 2417 softs->aac_max_sectors = 128; /* 64K */ 2418 if (softs->flags & AAC_FLAGS_17SG) 2419 softs->aac_sg_tablesize = 17; 2420 else if (softs->flags & AAC_FLAGS_34SG) 2421 softs->aac_sg_tablesize = 34; 2422 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2423 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2424 sizeof (struct aac_blockwrite64) + 2425 sizeof (struct aac_sg_entry64)) / 2426 sizeof (struct aac_sg_entry64); 2427 else 2428 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE - 2429 sizeof (struct aac_blockwrite) + 2430 sizeof (struct aac_sg_entry)) / 2431 sizeof (struct aac_sg_entry); 2432 } 2433 2434 if ((softs->flags & AAC_FLAGS_RAW_IO) && 2435 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) { 2436 softs->flags |= AAC_FLAGS_LBA_64BIT; 2437 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array"); 2438 } 2439 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize; 2440 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9; 2441 /* 2442 * 64K maximum segment size in scatter gather list is controlled by 2443 * the NEW_COMM bit in the adapter information. If not set, the card 2444 * can only accept a maximum of 64K. It is not recommended to permit 2445 * more than 128KB of total transfer size to the adapters because 2446 * performance is negatively impacted. 2447 * 2448 * For new comm, segment size equals max xfer size. For old comm, 2449 * we use 64K for both. 2450 */ 2451 softs->buf_dma_attr.dma_attr_count_max = 2452 softs->buf_dma_attr.dma_attr_maxxfer - 1; 2453 2454 /* Setup FIB operations */ 2455 if (softs->flags & AAC_FLAGS_RAW_IO) 2456 softs->aac_cmd_fib = aac_cmd_fib_rawio; 2457 else if (softs->flags & AAC_FLAGS_SG_64BIT) 2458 softs->aac_cmd_fib = aac_cmd_fib_brw64; 2459 else 2460 softs->aac_cmd_fib = aac_cmd_fib_brw; 2461 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? 
\ 2462 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32; 2463 2464 /* 64-bit LBA needs descriptor format sense data */ 2465 softs->slen = sizeof (struct scsi_arq_status); 2466 if ((softs->flags & AAC_FLAGS_LBA_64BIT) && 2467 softs->slen < AAC_ARQ64_LENGTH) 2468 softs->slen = AAC_ARQ64_LENGTH; 2469 2470 AACDB_PRINT(softs, CE_NOTE, 2471 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d", 2472 softs->aac_max_fibs, softs->aac_max_fib_size, 2473 softs->aac_max_sectors, softs->aac_sg_tablesize); 2474 2475 return (AACOK); 2476 } 2477 2478 static void 2479 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0, 2480 struct FsaRev *fsarev1) 2481 { 2482 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 2483 2484 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash); 2485 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type); 2486 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor); 2487 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major); 2488 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber); 2489 } 2490 2491 /* 2492 * The following function comes from Adaptec: 2493 * 2494 * Query adapter information and supplement adapter information 2495 */ 2496 static int 2497 aac_get_adapter_info(struct aac_softstate *softs, 2498 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr) 2499 { 2500 struct aac_cmd *acp = &softs->sync_ac; 2501 ddi_acc_handle_t acc; 2502 struct aac_fib *fibp; 2503 struct aac_adapter_info *ainfp; 2504 struct aac_supplement_adapter_info *sinfp; 2505 int rval; 2506 2507 (void) aac_sync_fib_slot_bind(softs, acp); 2508 acc = acp->slotp->fib_acc_handle; 2509 fibp = acp->slotp->fibp; 2510 2511 ddi_put8(acc, &fibp->data[0], 0); 2512 if (aac_sync_fib(softs, RequestAdapterInfo, 2513 AAC_FIB_SIZEOF(struct aac_adapter_info)) != AACOK) { 2514 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed"); 2515 rval = AACERR; 2516 goto finish; 2517 } 2518 ainfp = (struct aac_adapter_info *)fibp->data; 2519 if (ainfr) { 2520 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2521 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase); 2522 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture); 2523 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant); 2524 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed); 2525 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem); 2526 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem); 2527 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem); 2528 aac_fsa_rev(softs, &ainfp->KernelRevision, 2529 &ainfr->KernelRevision); 2530 aac_fsa_rev(softs, &ainfp->MonitorRevision, 2531 &ainfr->MonitorRevision); 2532 aac_fsa_rev(softs, &ainfp->HardwareRevision, 2533 &ainfr->HardwareRevision); 2534 aac_fsa_rev(softs, &ainfp->BIOSRevision, 2535 &ainfr->BIOSRevision); 2536 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled); 2537 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask); 2538 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber); 2539 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform); 2540 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions); 2541 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant); 2542 } 2543 if (sinfr) { 2544 if (!(softs->support_opt & 2545 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) { 2546 AACDB_PRINT(softs, CE_WARN, 2547 "SupplementAdapterInfo not supported"); 2548 rval = AACERR; 2549 goto finish; 2550 } 2551 ddi_put8(acc, &fibp->data[0], 0); 2552 if (aac_sync_fib(softs, RequestSupplementAdapterInfo, 2553 AAC_FIB_SIZEOF(struct aac_supplement_adapter_info)) 2554 != AACOK) { 2555 AACDB_PRINT(softs, CE_WARN, 2556 
"RequestSupplementAdapterInfo failed"); 2557 rval = AACERR; 2558 goto finish; 2559 } 2560 sinfp = (struct aac_supplement_adapter_info *)fibp->data; 2561 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1); 2562 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2); 2563 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize); 2564 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId); 2565 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts); 2566 AAC_GET_FIELD32(acc, sinfr, sinfp, Version); 2567 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits); 2568 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber); 2569 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3); 2570 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12); 2571 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts); 2572 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo, 2573 sizeof (struct vpd_info)); 2574 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision, 2575 &sinfr->FlashFirmwareRevision); 2576 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions); 2577 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision, 2578 &sinfr->FlashFirmwareBootRevision); 2579 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo, 2580 MFG_PCBA_SERIAL_NUMBER_WIDTH); 2581 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0], 2582 MFG_WWN_WIDTH); 2583 AAC_GET_FIELD32(acc, sinfr, sinfp, SupportedOptions2); 2584 AAC_GET_FIELD32(acc, sinfr, sinfp, ExpansionFlag); 2585 if (sinfr->ExpansionFlag == 1) { 2586 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits3); 2587 AAC_GET_FIELD32(acc, sinfr, sinfp, 2588 SupportedPerformanceMode); 2589 AAC_REP_GET_FIELD32(acc, sinfr, sinfp, 2590 ReservedGrowth[0], 80); 2591 } 2592 } 2593 rval = AACOK; 2594 finish: 2595 aac_sync_fib_slot_release(softs, acp); 2596 return (rval); 2597 } 2598 2599 static int 2600 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max, 2601 uint32_t *tgt_max) 2602 { 2603 struct aac_cmd *acp = &softs->sync_ac; 2604 ddi_acc_handle_t acc; 2605 struct aac_fib *fibp; 2606 struct aac_ctcfg *c_cmd; 2607 struct aac_ctcfg_resp *c_resp; 2608 uint32_t scsi_method_id; 2609 struct aac_bus_info *cmd; 2610 struct aac_bus_info_response *resp; 2611 int rval; 2612 2613 (void) aac_sync_fib_slot_bind(softs, acp); 2614 acc = acp->slotp->fib_acc_handle; 2615 fibp = acp->slotp->fibp; 2616 2617 /* Detect MethodId */ 2618 c_cmd = (struct aac_ctcfg *)&fibp->data[0]; 2619 ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig); 2620 ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD); 2621 ddi_put32(acc, &c_cmd->param, 0); 2622 rval = aac_sync_fib(softs, ContainerCommand, 2623 AAC_FIB_SIZEOF(struct aac_ctcfg)); 2624 c_resp = (struct aac_ctcfg_resp *)&fibp->data[0]; 2625 if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) { 2626 AACDB_PRINT(softs, CE_WARN, 2627 "VM_ContainerConfig command fail"); 2628 rval = AACERR; 2629 goto finish; 2630 } 2631 scsi_method_id = ddi_get32(acc, &c_resp->param); 2632 2633 /* Detect phys. bus count and max. target id first */ 2634 cmd = (struct aac_bus_info *)&fibp->data[0]; 2635 ddi_put32(acc, &cmd->Command, VM_Ioctl); 2636 ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */ 2637 ddi_put32(acc, &cmd->MethodId, scsi_method_id); 2638 ddi_put32(acc, &cmd->ObjectId, 0); 2639 ddi_put32(acc, &cmd->CtlCmd, GetBusInfo); 2640 /* 2641 * For VM_Ioctl, the firmware uses the Header.Size filled from the 2642 * driver as the size to be returned. Therefore the driver has to use 2643 * sizeof (struct aac_bus_info_response) because it is greater than 2644 * sizeof (struct aac_bus_info). 
2645 */ 2646 rval = aac_sync_fib(softs, ContainerCommand, 2647 AAC_FIB_SIZEOF(struct aac_bus_info_response)); 2648 resp = (struct aac_bus_info_response *)cmd; 2649 2650 /* Scan all coordinates with INQUIRY */ 2651 if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) { 2652 AACDB_PRINT(softs, CE_WARN, "GetBusInfo command failed"); 2653 rval = AACERR; 2654 goto finish; 2655 } 2656 *bus_max = ddi_get32(acc, &resp->BusCount); 2657 *tgt_max = ddi_get32(acc, &resp->TargetsPerBus); 2658 2659 finish: 2660 aac_sync_fib_slot_release(softs, acp); 2661 return (rval); 2662 } 2663 2664 /* 2665 * The following function comes from Adaptec: 2666 * 2667 * Routine to be called during initialization of communications with 2668 * the adapter to handle possible adapter configuration issues. When 2669 * the adapter first boots up, it examines attached drives, etc., and 2670 * potentially comes up with a new or revised configuration (relative to 2671 * what's stored in its NVRAM). Additionally it may discover problems 2672 * that make the current physical configuration unworkable (currently 2673 * applicable only to cluster configuration issues). 2674 * 2675 * If there are no configuration issues or the issues are considered 2676 * trivial by the adapter, it will set its configuration status to 2677 * "FSACT_CONTINUE" and execute the "commit configuration" action 2678 * automatically on its own. 2679 * 2680 * However, if there are non-trivial issues, the adapter will set its 2681 * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT" 2682 * and wait for some agent on the host to issue the "\ContainerCommand 2683 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the 2684 * adapter to commit the new/updated configuration and enable 2685 * uninhibited operation. The host agent should first issue the 2686 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB 2687 * command to obtain information about config issues detected by 2688 * the adapter. 2689 * 2690 * Normally the adapter's PC BIOS will execute on the host following 2691 * adapter poweron and reset and will be responsible for querying the 2692 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG 2693 * command if appropriate. 2694 * 2695 * However, with the introduction of IOP reset support, the adapter may 2696 * boot up without the benefit of the adapter's PC BIOS host agent. 2697 * This routine is intended to take care of these issues in situations 2698 * where BIOS doesn't execute following adapter poweron or reset. The 2699 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so 2700 * there is no harm in doing this when it's already been done.
2701 */ 2702 static int 2703 aac_handle_adapter_config_issues(struct aac_softstate *softs) 2704 { 2705 struct aac_cmd *acp = &softs->sync_ac; 2706 ddi_acc_handle_t acc; 2707 struct aac_fib *fibp; 2708 struct aac_Container *cmd; 2709 struct aac_Container_resp *resp; 2710 struct aac_cf_status_header *cfg_sts_hdr; 2711 uint32_t resp_status; 2712 uint32_t ct_status; 2713 uint32_t cfg_stat_action; 2714 int rval; 2715 2716 (void) aac_sync_fib_slot_bind(softs, acp); 2717 acc = acp->slotp->fib_acc_handle; 2718 fibp = acp->slotp->fibp; 2719 2720 /* Get adapter config status */ 2721 cmd = (struct aac_Container *)&fibp->data[0]; 2722 2723 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2724 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2725 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS); 2726 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE], 2727 sizeof (struct aac_cf_status_header)); 2728 rval = aac_sync_fib(softs, ContainerCommand, 2729 AAC_FIB_SIZEOF(struct aac_Container)); 2730 resp = (struct aac_Container_resp *)cmd; 2731 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data; 2732 2733 resp_status = ddi_get32(acc, &resp->Status); 2734 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2735 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) { 2736 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action); 2737 2738 /* Commit configuration if it's reasonable to do so. */ 2739 if (cfg_stat_action <= CFACT_PAUSE) { 2740 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 2741 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 2742 ddi_put32(acc, &cmd->CTCommand.command, 2743 CT_COMMIT_CONFIG); 2744 rval = aac_sync_fib(softs, ContainerCommand, 2745 AAC_FIB_SIZEOF(struct aac_Container)); 2746 2747 resp_status = ddi_get32(acc, &resp->Status); 2748 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]); 2749 if ((rval == AACOK) && (resp_status == 0) && 2750 (ct_status == CT_OK)) 2751 /* Successful completion */ 2752 rval = AACMPE_OK; 2753 else 2754 /* Auto-commit aborted due to error(s). */ 2755 rval = AACMPE_COMMIT_CONFIG; 2756 } else { 2757 /* 2758 * Auto-commit aborted due to adapter indicating 2759 * configuration issue(s) too dangerous to auto-commit. 2760 */ 2761 rval = AACMPE_CONFIG_STATUS; 2762 } 2763 } else { 2764 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted"); 2765 rval = AACMPE_CONFIG_STATUS; 2766 } 2767 2768 aac_sync_fib_slot_release(softs, acp); 2769 return (rval); 2770 } 2771 2772 /* 2773 * Hardware initialization and resource allocation 2774 */ 2775 static int 2776 aac_common_attach(struct aac_softstate *softs) 2777 { 2778 uint32_t status; 2779 int i; 2780 struct aac_supplement_adapter_info sinf; 2781 2782 DBCALLED(softs, 1); 2783 2784 /* 2785 * Do a little check here to make sure there aren't any outstanding 2786 * FIBs in the message queue. At this point there should not be and 2787 * if there are they are probably left over from another instance of 2788 * the driver like when the system crashes and the crash dump driver 2789 * gets loaded. 2790 */ 2791 while (AAC_OUTB_GET(softs) != 0xfffffffful) 2792 ; 2793 2794 /* 2795 * Wait the card to complete booting up before do anything that 2796 * attempts to communicate with it. 
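* The wait below polls the firmware status register through
* AAC_BUSYWAIT() for up to AAC_FWUP_TIMEOUT seconds, until the
* AAC_KERNEL_UP_AND_RUNNING bit is reported.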
2797 */ 2798 status = AAC_FWSTATUS_GET(softs); 2799 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC) 2800 goto error; 2801 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */ 2802 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i); 2803 if (i == 0) { 2804 cmn_err(CE_CONT, "?Fatal error: controller not ready"); 2805 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2806 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2807 goto error; 2808 } 2809 2810 /* Read and set card supported options and settings */ 2811 if (aac_check_firmware(softs) == AACERR) { 2812 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2813 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2814 goto error; 2815 } 2816 2817 /* Add interrupt handlers */ 2818 if (aac_register_intrs(softs) == AACERR) { 2819 cmn_err(CE_CONT, 2820 "?Fatal error: interrupts register failed"); 2821 goto error; 2822 } 2823 2824 /* Setup communication space with the card */ 2825 if (softs->comm_space_dma_handle == NULL) { 2826 if (aac_alloc_comm_space(softs) != AACOK) 2827 goto error; 2828 } 2829 if (aac_setup_comm_space(softs) != AACOK) { 2830 cmn_err(CE_CONT, "?Setup communication space failed"); 2831 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2832 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2833 goto error; 2834 } 2835 2836 #ifdef DEBUG 2837 if (aac_get_fw_debug_buffer(softs) != AACOK) 2838 cmn_err(CE_CONT, "?firmware UART trace not supported"); 2839 #endif 2840 2841 /* Allocate slots */ 2842 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) { 2843 cmn_err(CE_CONT, "?Fatal error: slots allocate failed"); 2844 goto error; 2845 } 2846 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots); 2847 2848 /* Allocate FIBs */ 2849 if (softs->total_fibs < softs->total_slots) { 2850 aac_alloc_fibs(softs); 2851 if (softs->total_fibs == 0) 2852 goto error; 2853 AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated", 2854 softs->total_fibs); 2855 } 2856 2857 AAC_STATUS_CLR(softs, ~0); /* Clear out all interrupts */ 2858 AAC_ENABLE_INTR(softs); /* Enable the interrupts we can handle */ 2859 2860 if (aac_get_adapter_info(softs, NULL, &sinf) == AACOK) { 2861 softs->feature_bits = sinf.FeatureBits; 2862 softs->support_opt2 = sinf.SupportedOptions2; 2863 2864 /* Get adapter names */ 2865 if (CARD_IS_UNKNOWN(softs->card)) { 2866 char *p, *p0, *p1; 2867 2868 /* 2869 * Now find the controller name in supp_adapter_info-> 2870 * AdapterTypeText. Use the first word as the vendor 2871 * and the other words as the product name. 
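* For illustration only (the string actually comes from the firmware),
* an AdapterTypeText of "Adaptec 2200S" would be split by the loop
* below into a vendor of "Adaptec" and a product name of "2200S".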
2872 */ 2873 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = " 2874 "\"%s\"", sinf.AdapterTypeText); 2875 p = sinf.AdapterTypeText; 2876 p0 = p1 = NULL; 2877 /* Skip leading spaces */ 2878 while (*p && (*p == ' ' || *p == '\t')) 2879 p++; 2880 p0 = p; 2881 while (*p && (*p != ' ' && *p != '\t')) 2882 p++; 2883 /* Remove middle spaces */ 2884 while (*p && (*p == ' ' || *p == '\t')) 2885 *p++ = 0; 2886 p1 = p; 2887 /* Remove trailing spaces */ 2888 p = p1 + strlen(p1) - 1; 2889 while (p > p1 && (*p == ' ' || *p == '\t')) 2890 *p-- = 0; 2891 if (*p0 && *p1) { 2892 (void) strncpy(softs->vendor_name, p0, 2893 AAC_VENDOR_LEN); 2894 (void) strncpy(softs->product_name, p1, 2895 AAC_PRODUCT_LEN); 2896 } else { 2897 cmn_err(CE_WARN, 2898 "?adapter name mis-formatted\n"); 2899 if (*p0) 2900 (void) strncpy(softs->product_name, 2901 p0, AAC_PRODUCT_LEN); 2902 } 2903 } 2904 } else { 2905 cmn_err(CE_CONT, "?Query adapter information failed"); 2906 } 2907 2908 2909 cmn_err(CE_NOTE, 2910 "!aac driver %d.%02d.%02d-%d, found card: " \ 2911 "%s %s(pci0x%x.%x.%x.%x) at 0x%x", 2912 AAC_DRIVER_MAJOR_VERSION, 2913 AAC_DRIVER_MINOR_VERSION, 2914 AAC_DRIVER_BUGFIX_LEVEL, 2915 AAC_DRIVER_BUILD, 2916 softs->vendor_name, softs->product_name, 2917 softs->vendid, softs->devid, softs->subvendid, softs->subsysid, 2918 softs->pci_mem_base_paddr); 2919 2920 /* Perform acceptance of adapter-detected config changes if possible */ 2921 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) { 2922 cmn_err(CE_CONT, "?Handle adapter config issues failed"); 2923 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE); 2924 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2925 goto error; 2926 } 2927 2928 /* Setup containers (logical devices) */ 2929 if (aac_probe_containers(softs) != AACOK) { 2930 cmn_err(CE_CONT, "?Fatal error: get container info error"); 2931 goto error; 2932 } 2933 2934 /* Check for JBOD support. Disabled by default */ 2935 char *data; 2936 if (softs->feature_bits & AAC_FEATURE_SUPPORTED_JBOD) { 2937 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 2938 0, "jbod-enable", &data) == DDI_SUCCESS)) { 2939 if (strcmp(data, "yes") == 0) { 2940 AACDB_PRINT(softs, CE_NOTE, 2941 "Enable JBOD access"); 2942 softs->flags |= AAC_FLAGS_JBOD; 2943 } 2944 ddi_prop_free(data); 2945 } 2946 } 2947 2948 /* Setup phys.
devices */ 2949 if (softs->flags & (AAC_FLAGS_NONDASD | AAC_FLAGS_JBOD)) { 2950 uint32_t bus_max, tgt_max; 2951 uint32_t bus, tgt; 2952 int index; 2953 2954 if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) { 2955 cmn_err(CE_CONT, "?Fatal error: get bus info error"); 2956 goto error; 2957 } 2958 AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d", 2959 bus_max, tgt_max); 2960 if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) { 2961 if (softs->state & AAC_STATE_RESET) { 2962 cmn_err(CE_WARN, 2963 "?Fatal error: bus map changed"); 2964 goto error; 2965 } 2966 softs->bus_max = bus_max; 2967 softs->tgt_max = tgt_max; 2968 if (softs->nondasds) { 2969 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 2970 sizeof (struct aac_nondasd)); 2971 } 2972 softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \ 2973 sizeof (struct aac_nondasd), KM_SLEEP); 2974 2975 index = 0; 2976 for (bus = 0; bus < softs->bus_max; bus++) { 2977 for (tgt = 0; tgt < softs->tgt_max; tgt++) { 2978 struct aac_nondasd *dvp = 2979 &softs->nondasds[index++]; 2980 dvp->dev.type = AAC_DEV_PD; 2981 dvp->bus = bus; 2982 dvp->tid = tgt; 2983 } 2984 } 2985 } 2986 } 2987 2988 /* Check dma & acc handles allocated in attach */ 2989 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) { 2990 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2991 goto error; 2992 } 2993 2994 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 2995 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 2996 goto error; 2997 } 2998 2999 for (i = 0; i < softs->total_slots; i++) { 3000 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) != 3001 DDI_SUCCESS) { 3002 ddi_fm_service_impact(softs->devinfo_p, 3003 DDI_SERVICE_LOST); 3004 goto error; 3005 } 3006 } 3007 3008 return (AACOK); 3009 error: 3010 if (softs->state & AAC_STATE_RESET) 3011 return (AACERR); 3012 if (softs->nondasds) { 3013 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 3014 sizeof (struct aac_nondasd)); 3015 softs->nondasds = NULL; 3016 } 3017 if (softs->total_fibs > 0) 3018 aac_destroy_fibs(softs); 3019 if (softs->total_slots > 0) 3020 aac_destroy_slots(softs); 3021 if (softs->comm_space_dma_handle) 3022 aac_free_comm_space(softs); 3023 return (AACERR); 3024 } 3025 3026 /* 3027 * Hardware shutdown and resource release 3028 */ 3029 static void 3030 aac_common_detach(struct aac_softstate *softs) 3031 { 3032 DBCALLED(softs, 1); 3033 3034 aac_unregister_intrs(softs); 3035 3036 mutex_enter(&softs->io_lock); 3037 (void) aac_shutdown(softs); 3038 3039 if (softs->nondasds) { 3040 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \ 3041 sizeof (struct aac_nondasd)); 3042 softs->nondasds = NULL; 3043 } 3044 aac_destroy_fibs(softs); 3045 aac_destroy_slots(softs); 3046 aac_free_comm_space(softs); 3047 mutex_exit(&softs->io_lock); 3048 } 3049 3050 /* 3051 * Send a synchronous command to the controller and wait for a result. 3052 * Indicate if the controller completed the command with an error status. 
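* The handshake is: fill the command and its four arguments into the
* mailbox, clear any stale AAC_DB_SYNC_COMMAND status, ring the
* AAC_DB_SYNC_COMMAND doorbell to signal the adapter, busy-wait up to
* AAC_IMMEDIATE_TIMEOUT seconds for the adapter to raise
* AAC_DB_SYNC_COMMAND in the status register, and finally read the
* command status back from mailbox word 0. A typical caller is the
* AAC_MONKER_GETINFO query in aac_check_firmware(), e.g.
* aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status).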
3053 */ 3054 int 3055 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd, 3056 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, 3057 uint32_t *statusp) 3058 { 3059 int timeout; 3060 uint32_t status; 3061 3062 if (statusp != NULL) 3063 *statusp = SRB_STATUS_SUCCESS; 3064 3065 /* Fill in mailbox */ 3066 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3); 3067 3068 /* Ensure the sync command doorbell flag is cleared */ 3069 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 3070 3071 /* Then set it to signal the adapter */ 3072 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND); 3073 3074 /* Spin waiting for the command to complete */ 3075 timeout = AAC_IMMEDIATE_TIMEOUT * 1000; 3076 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout); 3077 if (!timeout) { 3078 AACDB_PRINT(softs, CE_WARN, 3079 "Sync command timed out after %d seconds (0x%x)!", 3080 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs)); 3081 return (AACERR); 3082 } 3083 3084 /* Clear the completion flag */ 3085 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND); 3086 3087 /* Get the command status */ 3088 status = AAC_MAILBOX_GET(softs, 0); 3089 if (statusp != NULL) 3090 *statusp = status; 3091 if (status != SRB_STATUS_SUCCESS) { 3092 AACDB_PRINT(softs, CE_WARN, 3093 "Sync command fail: status = 0x%x", status); 3094 return (AACERR); 3095 } 3096 3097 return (AACOK); 3098 } 3099 3100 /* 3101 * Send a synchronous FIB to the adapter and wait for its completion 3102 */ 3103 static int 3104 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize) 3105 { 3106 struct aac_cmd *acp = &softs->sync_ac; 3107 3108 acp->flags = AAC_CMD_SYNC | AAC_CMD_IN_SYNC_SLOT; 3109 if (softs->state & AAC_STATE_INTR) 3110 acp->flags |= AAC_CMD_NO_CB; 3111 else 3112 acp->flags |= AAC_CMD_NO_INTR; 3113 3114 acp->ac_comp = aac_sync_complete; 3115 acp->timeout = AAC_SYNC_TIMEOUT; 3116 acp->fib_size = fibsize; 3117 3118 /* 3119 * Only need to setup sync fib header, caller should have init 3120 * fib data 3121 */ 3122 aac_cmd_fib_header(softs, acp, cmd); 3123 3124 (void) ddi_dma_sync(acp->slotp->fib_dma_handle, 0, fibsize, 3125 DDI_DMA_SYNC_FORDEV); 3126 3127 aac_start_io(softs, acp); 3128 3129 if (softs->state & AAC_STATE_INTR) 3130 return (aac_do_sync_io(softs, acp)); 3131 else 3132 return (aac_do_poll_io(softs, acp)); 3133 } 3134 3135 static void 3136 aac_cmd_initq(struct aac_cmd_queue *q) 3137 { 3138 q->q_head = NULL; 3139 q->q_tail = (struct aac_cmd *)&q->q_head; 3140 } 3141 3142 /* 3143 * Remove a cmd from the head of q 3144 */ 3145 static struct aac_cmd * 3146 aac_cmd_dequeue(struct aac_cmd_queue *q) 3147 { 3148 struct aac_cmd *acp; 3149 3150 _NOTE(ASSUMING_PROTECTED(*q)) 3151 3152 if ((acp = q->q_head) != NULL) { 3153 if ((q->q_head = acp->next) != NULL) 3154 acp->next = NULL; 3155 else 3156 q->q_tail = (struct aac_cmd *)&q->q_head; 3157 acp->prev = NULL; 3158 } 3159 return (acp); 3160 } 3161 3162 /* 3163 * Add a cmd to the tail of q 3164 */ 3165 static void 3166 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp) 3167 { 3168 ASSERT(acp->next == NULL); 3169 acp->prev = q->q_tail; 3170 q->q_tail->next = acp; 3171 q->q_tail = acp; 3172 } 3173 3174 /* 3175 * Remove the cmd ac from q 3176 */ 3177 static void 3178 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp) 3179 { 3180 if (acp->prev) { 3181 if ((acp->prev->next = acp->next) != NULL) { 3182 acp->next->prev = acp->prev; 3183 acp->next = NULL; 3184 } else { 3185 q->q_tail = acp->prev; 3186 } 3187 acp->prev = NULL; 3188 } 3189 /* ac is not in the queue */ 3190 } 
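/*
 * The two routines below implement the host side of the shared-memory
 * FIB queues kept in comm space: each queue has a producer and a
 * consumer index in qtablep->qt_qindex[], index and entry accesses are
 * wrapped in ddi_dma_sync() calls so that host and adapter see a
 * consistent view, and the adapter is notified through the doorbell
 * given by aac_qinfo[queue].notify when it needs to know (a new entry
 * was posted, or a full queue drained).
 *
 * As a rough usage sketch (taken from the AIF handling in the
 * old-interface interrupt path above), acknowledging an
 * adapter-initiated FIB looks like:
 *
 *	if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
 *	    addr, size) == AACERR)
 *		cmn_err(CE_NOTE, "!AIF ack failed");
 */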
3191 3192 /* 3193 * Atomically insert an entry into the nominated queue, returns 0 on success or 3194 * AACERR if the queue is full. 3195 * 3196 * Note: it would be more efficient to defer notifying the controller in 3197 * the case where we may be inserting several entries in rapid succession, 3198 * but implementing this usefully may be difficult (it would involve a 3199 * separate queue/notify interface). 3200 */ 3201 static int 3202 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr, 3203 uint32_t fib_size) 3204 { 3205 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3206 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3207 uint32_t pi, ci; 3208 3209 DBCALLED(softs, 2); 3210 3211 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q); 3212 3213 /* Get the producer/consumer indices */ 3214 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3215 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3216 DDI_DMA_SYNC_FORCPU); 3217 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3218 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3219 return (AACERR); 3220 } 3221 3222 pi = ddi_get32(acc, 3223 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3224 ci = ddi_get32(acc, 3225 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3226 3227 /* 3228 * Wrap the queue first before we check the queue to see 3229 * if it is full 3230 */ 3231 if (pi >= aac_qinfo[queue].size) 3232 pi = 0; 3233 3234 /* XXX queue full */ 3235 if ((pi + 1) == ci) 3236 return (AACERR); 3237 3238 /* Fill in queue entry */ 3239 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size); 3240 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr); 3241 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3242 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3243 DDI_DMA_SYNC_FORDEV); 3244 3245 /* Update producer index */ 3246 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX], 3247 pi + 1); 3248 (void) ddi_dma_sync(dma, 3249 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \ 3250 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3251 DDI_DMA_SYNC_FORDEV); 3252 3253 if (aac_qinfo[queue].notify != 0) 3254 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3255 return (AACOK); 3256 } 3257 3258 /* 3259 * Atomically remove one entry from the nominated queue, returns 0 on 3260 * success or AACERR if the queue is empty. 
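* As a side effect, when removing this entry takes the queue out of
* the full state, the adapter is notified (the "unfull" case below) so
* that it can resume posting entries.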
3261 */ 3262 static int 3263 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp) 3264 { 3265 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3266 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3267 uint32_t pi, ci; 3268 int unfull = 0; 3269 3270 DBCALLED(softs, 2); 3271 3272 ASSERT(idxp); 3273 3274 /* Get the producer/consumer indices */ 3275 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \ 3276 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2, 3277 DDI_DMA_SYNC_FORCPU); 3278 pi = ddi_get32(acc, 3279 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]); 3280 ci = ddi_get32(acc, 3281 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]); 3282 3283 /* Check for queue empty */ 3284 if (ci == pi) 3285 return (AACERR); 3286 3287 if (pi >= aac_qinfo[queue].size) 3288 pi = 0; 3289 3290 /* Check for queue full */ 3291 if (ci == pi + 1) 3292 unfull = 1; 3293 3294 /* 3295 * The controller does not wrap the queue, 3296 * so we have to do it by ourselves 3297 */ 3298 if (ci >= aac_qinfo[queue].size) 3299 ci = 0; 3300 3301 /* Fetch the entry */ 3302 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \ 3303 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry), 3304 DDI_DMA_SYNC_FORCPU); 3305 if (aac_check_dma_handle(dma) != DDI_SUCCESS) { 3306 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 3307 return (AACERR); 3308 } 3309 3310 switch (queue) { 3311 case AAC_HOST_NORM_RESP_Q: 3312 case AAC_HOST_HIGH_RESP_Q: 3313 *idxp = ddi_get32(acc, 3314 &(softs->qentries[queue] + ci)->aq_fib_addr); 3315 break; 3316 3317 case AAC_HOST_NORM_CMD_Q: 3318 case AAC_HOST_HIGH_CMD_Q: 3319 *idxp = ddi_get32(acc, 3320 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE; 3321 break; 3322 3323 default: 3324 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()"); 3325 return (AACERR); 3326 } 3327 3328 /* Update consumer index */ 3329 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX], 3330 ci + 1); 3331 (void) ddi_dma_sync(dma, 3332 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \ 3333 (uintptr_t)softs->comm_space, sizeof (uint32_t), 3334 DDI_DMA_SYNC_FORDEV); 3335 3336 if (unfull && aac_qinfo[queue].notify != 0) 3337 AAC_NOTIFY(softs, aac_qinfo[queue].notify); 3338 return (AACOK); 3339 } 3340 3341 static struct aac_mntinforesp * 3342 aac_get_mntinfo(struct aac_softstate *softs, int cid) 3343 { 3344 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 3345 struct aac_fib *fibp = softs->sync_ac.slotp->fibp; 3346 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0]; 3347 struct aac_mntinforesp *mir; 3348 3349 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */ 3350 (softs->flags & AAC_FLAGS_LBA_64BIT) ? 
3351 VM_NameServe64 : VM_NameServe); 3352 ddi_put32(acc, &mi->MntType, FT_FILESYS); 3353 ddi_put32(acc, &mi->MntCount, cid); 3354 3355 if (aac_sync_fib(softs, ContainerCommand, 3356 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) { 3357 AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid); 3358 return (NULL); 3359 } 3360 3361 mir = (struct aac_mntinforesp *)&fibp->data[0]; 3362 if (ddi_get32(acc, &mir->Status) == ST_OK) 3363 return (mir); 3364 return (NULL); 3365 } 3366 3367 static int 3368 aac_get_container_count(struct aac_softstate *softs, int *count) 3369 { 3370 ddi_acc_handle_t acc; 3371 struct aac_mntinforesp *mir; 3372 int rval; 3373 3374 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 3375 acc = softs->sync_ac.slotp->fib_acc_handle; 3376 3377 if ((mir = aac_get_mntinfo(softs, 0)) == NULL) { 3378 rval = AACERR; 3379 goto finish; 3380 } 3381 *count = ddi_get32(acc, &mir->MntRespCount); 3382 if (*count > AAC_MAX_LD) { 3383 AACDB_PRINT(softs, CE_CONT, 3384 "container count(%d) > AAC_MAX_LD", *count); 3385 rval = AACERR; 3386 goto finish; 3387 } 3388 rval = AACOK; 3389 3390 finish: 3391 aac_sync_fib_slot_release(softs, &softs->sync_ac); 3392 return (rval); 3393 } 3394 3395 static int 3396 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid) 3397 { 3398 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 3399 struct aac_Container *ct = (struct aac_Container *) \ 3400 &softs->sync_ac.slotp->fibp->data[0]; 3401 3402 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE); 3403 ddi_put32(acc, &ct->Command, VM_ContainerConfig); 3404 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID); 3405 ddi_put32(acc, &ct->CTCommand.param[0], cid); 3406 3407 if (aac_sync_fib(softs, ContainerCommand, 3408 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR) 3409 return (AACERR); 3410 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK) 3411 return (AACERR); 3412 3413 *uid = ddi_get32(acc, &ct->CTCommand.param[1]); 3414 return (AACOK); 3415 } 3416 3417 /* 3418 * Request information of the container cid 3419 */ 3420 static struct aac_mntinforesp * 3421 aac_get_container_info(struct aac_softstate *softs, int cid) 3422 { 3423 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle; 3424 struct aac_mntinforesp *mir; 3425 int rval_uid; 3426 uint32_t uid; 3427 3428 /* Get container UID first so that it will not overwrite mntinfo */ 3429 rval_uid = aac_get_container_uid(softs, cid, &uid); 3430 3431 /* Get container basic info */ 3432 if ((mir = aac_get_mntinfo(softs, cid)) == NULL) { 3433 AACDB_PRINT(softs, CE_CONT, 3434 "query container %d info failed", cid); 3435 return (NULL); 3436 } 3437 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) 3438 return (mir); 3439 if (rval_uid != AACOK) { 3440 AACDB_PRINT(softs, CE_CONT, 3441 "query container %d uid failed", cid); 3442 return (NULL); 3443 } 3444 3445 ddi_put32(acc, &mir->Status, uid); 3446 return (mir); 3447 } 3448 3449 static enum aac_cfg_event 3450 aac_probe_container(struct aac_softstate *softs, uint32_t cid) 3451 { 3452 enum aac_cfg_event event = AAC_CFG_NULL_NOEXIST; 3453 struct aac_container *dvp = &softs->containers[cid]; 3454 struct aac_mntinforesp *mir; 3455 ddi_acc_handle_t acc; 3456 3457 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 3458 acc = softs->sync_ac.slotp->fib_acc_handle; 3459 3460 /* Get container basic info */ 3461 if ((mir = aac_get_container_info(softs, cid)) == NULL) { 3462 /* AAC_CFG_NULL_NOEXIST */ 3463 goto finish; 3464 } 3465 3466 if (ddi_get32(acc, &mir->MntObj.VolType) == 
CT_NONE) { 3467 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3468 AACDB_PRINT(softs, CE_NOTE, 3469 ">>> Container %d deleted", cid); 3470 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3471 event = AAC_CFG_DELETE; 3472 } 3473 /* AAC_CFG_NULL_NOEXIST */ 3474 } else { 3475 uint64_t size; 3476 uint32_t uid; 3477 3478 event = AAC_CFG_NULL_EXIST; 3479 3480 size = AAC_MIR_SIZE(softs, acc, mir); 3481 uid = ddi_get32(acc, &mir->Status); 3482 if (AAC_DEV_IS_VALID(&dvp->dev)) { 3483 if (dvp->uid != uid) { 3484 AACDB_PRINT(softs, CE_WARN, 3485 ">>> Container %u uid changed to %d", 3486 cid, uid); 3487 dvp->uid = uid; 3488 event = AAC_CFG_CHANGE; 3489 } 3490 if (dvp->size != size) { 3491 AACDB_PRINT(softs, CE_NOTE, 3492 ">>> Container %u size changed to %"PRIu64, 3493 cid, size); 3494 dvp->size = size; 3495 event = AAC_CFG_CHANGE; 3496 } 3497 } else { /* Init new container */ 3498 AACDB_PRINT(softs, CE_NOTE, 3499 ">>> Container %d added: " \ 3500 "size=0x%x.%08x, type=%d, name=%s", 3501 cid, 3502 ddi_get32(acc, &mir->MntObj.CapacityHigh), 3503 ddi_get32(acc, &mir->MntObj.Capacity), 3504 ddi_get32(acc, &mir->MntObj.VolType), 3505 mir->MntObj.FileSystemName); 3506 dvp->dev.flags |= AAC_DFLAG_VALID; 3507 dvp->dev.type = AAC_DEV_LD; 3508 3509 dvp->cid = cid; 3510 dvp->uid = uid; 3511 dvp->size = size; 3512 dvp->locked = 0; 3513 dvp->deleted = 0; 3514 3515 event = AAC_CFG_ADD; 3516 } 3517 } 3518 3519 finish: 3520 aac_sync_fib_slot_release(softs, &softs->sync_ac); 3521 return (event); 3522 } 3523 3524 /* 3525 * Do a rescan of all the possible containers and update the container list 3526 * with newly online/offline containers, and prepare for autoconfiguration. 3527 */ 3528 static int 3529 aac_probe_containers(struct aac_softstate *softs) 3530 { 3531 int i, count, total; 3532 3533 /* Loop over possible containers */ 3534 count = softs->container_count; 3535 if (aac_get_container_count(softs, &count) == AACERR) 3536 return (AACERR); 3537 3538 for (i = total = 0; i < count; i++) { 3539 enum aac_cfg_event event = aac_probe_container(softs, i); 3540 if ((event != AAC_CFG_NULL_NOEXIST) && 3541 (event != AAC_CFG_NULL_EXIST)) { 3542 (void) aac_handle_dr(softs, i, -1, event); 3543 total++; 3544 } 3545 } 3546 3547 if (count < softs->container_count) { 3548 struct aac_container *dvp; 3549 3550 for (dvp = &softs->containers[count]; 3551 dvp < &softs->containers[softs->container_count]; dvp++) { 3552 if (!AAC_DEV_IS_VALID(&dvp->dev)) 3553 continue; 3554 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted", 3555 dvp->cid); 3556 dvp->dev.flags &= ~AAC_DFLAG_VALID; 3557 (void) aac_handle_dr(softs, dvp->cid, -1, 3558 AAC_CFG_DELETE); 3559 } 3560 } 3561 3562 softs->container_count = count; 3563 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total); 3564 return (AACOK); 3565 } 3566 3567 static int 3568 aac_probe_jbod(struct aac_softstate *softs, int tgt, int event) 3569 { 3570 ASSERT(AAC_MAX_LD <= tgt); 3571 ASSERT(tgt < AAC_MAX_DEV(softs)); 3572 struct aac_device *dvp; 3573 dvp = AAC_DEV(softs, tgt); 3574 3575 switch (event) { 3576 case AAC_CFG_ADD: 3577 AACDB_PRINT(softs, CE_NOTE, 3578 ">>> Jbod %d added", tgt - AAC_MAX_LD); 3579 dvp->flags |= AAC_DFLAG_VALID; 3580 dvp->type = AAC_DEV_PD; 3581 break; 3582 case AAC_CFG_DELETE: 3583 AACDB_PRINT(softs, CE_NOTE, 3584 ">>> Jbod %d deleted", tgt - AAC_MAX_LD); 3585 dvp->flags &= ~AAC_DFLAG_VALID; 3586 break; 3587 default: 3588 return (AACERR); 3589 } 3590 (void) aac_handle_dr(softs, tgt, 0, event); 3591 return (AACOK); 3592 } 3593 3594 static int 3595 aac_alloc_comm_space(struct 
aac_softstate *softs) 3596 { 3597 size_t rlen; 3598 ddi_dma_cookie_t cookie; 3599 uint_t cookien; 3600 3601 /* Allocate DMA for comm. space */ 3602 if (ddi_dma_alloc_handle( 3603 softs->devinfo_p, 3604 &softs->addr_dma_attr, 3605 DDI_DMA_SLEEP, 3606 NULL, 3607 &softs->comm_space_dma_handle) != DDI_SUCCESS) { 3608 AACDB_PRINT(softs, CE_WARN, 3609 "Cannot alloc dma handle for communication area"); 3610 goto error; 3611 } 3612 if (ddi_dma_mem_alloc( 3613 softs->comm_space_dma_handle, 3614 sizeof (struct aac_comm_space), 3615 &softs->acc_attr, 3616 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3617 DDI_DMA_SLEEP, 3618 NULL, 3619 (caddr_t *)&softs->comm_space, 3620 &rlen, 3621 &softs->comm_space_acc_handle) != DDI_SUCCESS) { 3622 AACDB_PRINT(softs, CE_WARN, 3623 "Cannot alloc mem for communication area"); 3624 goto error; 3625 } 3626 if (ddi_dma_addr_bind_handle( 3627 softs->comm_space_dma_handle, 3628 NULL, 3629 (caddr_t)softs->comm_space, 3630 sizeof (struct aac_comm_space), 3631 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3632 DDI_DMA_SLEEP, 3633 NULL, 3634 &cookie, 3635 &cookien) != DDI_DMA_MAPPED) { 3636 AACDB_PRINT(softs, CE_WARN, 3637 "DMA bind failed for communication area"); 3638 goto error; 3639 } 3640 softs->comm_space_phyaddr = cookie.dmac_address; 3641 3642 return (AACOK); 3643 error: 3644 if (softs->comm_space_acc_handle) { 3645 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3646 softs->comm_space_acc_handle = NULL; 3647 } 3648 if (softs->comm_space_dma_handle) { 3649 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3650 softs->comm_space_dma_handle = NULL; 3651 } 3652 return (AACERR); 3653 } 3654 3655 static void 3656 aac_free_comm_space(struct aac_softstate *softs) 3657 { 3658 3659 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle); 3660 ddi_dma_mem_free(&softs->comm_space_acc_handle); 3661 softs->comm_space_acc_handle = NULL; 3662 ddi_dma_free_handle(&softs->comm_space_dma_handle); 3663 softs->comm_space_dma_handle = NULL; 3664 softs->comm_space_phyaddr = NULL; 3665 } 3666 3667 /* 3668 * Initialize the data structures that are required for the communication 3669 * interface to operate 3670 */ 3671 static int 3672 aac_setup_comm_space(struct aac_softstate *softs) 3673 { 3674 ddi_dma_handle_t dma = softs->comm_space_dma_handle; 3675 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 3676 uint32_t comm_space_phyaddr; 3677 struct aac_adapter_init *initp; 3678 int qoffset; 3679 3680 comm_space_phyaddr = softs->comm_space_phyaddr; 3681 3682 /* Setup adapter init struct */ 3683 initp = &softs->comm_space->init_data; 3684 bzero(initp, sizeof (struct aac_adapter_init)); 3685 3686 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION); 3687 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time()); 3688 3689 /* Setup new/old comm. 
specific data */ 3690 if (softs->flags & AAC_FLAGS_RAW_IO) { 3691 uint32_t init_flags = 0; 3692 3693 if (softs->flags & AAC_FLAGS_NEW_COMM) 3694 init_flags |= AAC_INIT_FLAGS_NEW_COMM_SUPPORTED; 3695 /* AAC_SUPPORTED_POWER_MANAGEMENT */ 3696 init_flags |= AAC_INIT_FLAGS_DRIVER_SUPPORTS_PM; 3697 init_flags |= AAC_INIT_FLAGS_DRIVER_USES_UTC_TIME; 3698 3699 ddi_put32(acc, &initp->InitStructRevision, 3700 AAC_INIT_STRUCT_REVISION_4); 3701 ddi_put32(acc, &initp->InitFlags, init_flags); 3702 /* Setup the preferred settings */ 3703 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs); 3704 ddi_put32(acc, &initp->MaxIoSize, 3705 (softs->aac_max_sectors << 9)); 3706 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size); 3707 } else { 3708 /* 3709 * Tells the adapter about the physical location of various 3710 * important shared data structures 3711 */ 3712 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress, 3713 comm_space_phyaddr + \ 3714 offsetof(struct aac_comm_space, adapter_fibs)); 3715 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0); 3716 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE); 3717 ddi_put32(acc, &initp->AdapterFibsSize, 3718 AAC_ADAPTER_FIBS * AAC_FIB_SIZE); 3719 ddi_put32(acc, &initp->PrintfBufferAddress, 3720 comm_space_phyaddr + \ 3721 offsetof(struct aac_comm_space, adapter_print_buf)); 3722 ddi_put32(acc, &initp->PrintfBufferSize, 3723 AAC_ADAPTER_PRINT_BUFSIZE); 3724 ddi_put32(acc, &initp->MiniPortRevision, 3725 AAC_INIT_STRUCT_MINIPORT_REVISION); 3726 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN); 3727 3728 qoffset = (comm_space_phyaddr + \ 3729 offsetof(struct aac_comm_space, qtable)) % \ 3730 AAC_QUEUE_ALIGN; 3731 if (qoffset) 3732 qoffset = AAC_QUEUE_ALIGN - qoffset; 3733 softs->qtablep = (struct aac_queue_table *) \ 3734 ((char *)&softs->comm_space->qtable + qoffset); 3735 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \ 3736 offsetof(struct aac_comm_space, qtable) + qoffset); 3737 3738 /* Init queue table */ 3739 ddi_put32(acc, &softs->qtablep-> \ 3740 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3741 AAC_HOST_NORM_CMD_ENTRIES); 3742 ddi_put32(acc, &softs->qtablep-> \ 3743 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3744 AAC_HOST_NORM_CMD_ENTRIES); 3745 ddi_put32(acc, &softs->qtablep-> \ 3746 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3747 AAC_HOST_HIGH_CMD_ENTRIES); 3748 ddi_put32(acc, &softs->qtablep-> \ 3749 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3750 AAC_HOST_HIGH_CMD_ENTRIES); 3751 ddi_put32(acc, &softs->qtablep-> \ 3752 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX], 3753 AAC_ADAP_NORM_CMD_ENTRIES); 3754 ddi_put32(acc, &softs->qtablep-> \ 3755 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX], 3756 AAC_ADAP_NORM_CMD_ENTRIES); 3757 ddi_put32(acc, &softs->qtablep-> \ 3758 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX], 3759 AAC_ADAP_HIGH_CMD_ENTRIES); 3760 ddi_put32(acc, &softs->qtablep-> \ 3761 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX], 3762 AAC_ADAP_HIGH_CMD_ENTRIES); 3763 ddi_put32(acc, &softs->qtablep-> \ 3764 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3765 AAC_HOST_NORM_RESP_ENTRIES); 3766 ddi_put32(acc, &softs->qtablep-> \ 3767 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3768 AAC_HOST_NORM_RESP_ENTRIES); 3769 ddi_put32(acc, &softs->qtablep-> \ 3770 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3771 AAC_HOST_HIGH_RESP_ENTRIES); 3772 ddi_put32(acc, &softs->qtablep-> \ 3773 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3774 
AAC_HOST_HIGH_RESP_ENTRIES); 3775 ddi_put32(acc, &softs->qtablep-> \ 3776 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX], 3777 AAC_ADAP_NORM_RESP_ENTRIES); 3778 ddi_put32(acc, &softs->qtablep-> \ 3779 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX], 3780 AAC_ADAP_NORM_RESP_ENTRIES); 3781 ddi_put32(acc, &softs->qtablep-> \ 3782 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX], 3783 AAC_ADAP_HIGH_RESP_ENTRIES); 3784 ddi_put32(acc, &softs->qtablep-> \ 3785 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX], 3786 AAC_ADAP_HIGH_RESP_ENTRIES); 3787 3788 /* Init queue entries */ 3789 softs->qentries[AAC_HOST_NORM_CMD_Q] = 3790 &softs->qtablep->qt_HostNormCmdQueue[0]; 3791 softs->qentries[AAC_HOST_HIGH_CMD_Q] = 3792 &softs->qtablep->qt_HostHighCmdQueue[0]; 3793 softs->qentries[AAC_ADAP_NORM_CMD_Q] = 3794 &softs->qtablep->qt_AdapNormCmdQueue[0]; 3795 softs->qentries[AAC_ADAP_HIGH_CMD_Q] = 3796 &softs->qtablep->qt_AdapHighCmdQueue[0]; 3797 softs->qentries[AAC_HOST_NORM_RESP_Q] = 3798 &softs->qtablep->qt_HostNormRespQueue[0]; 3799 softs->qentries[AAC_HOST_HIGH_RESP_Q] = 3800 &softs->qtablep->qt_HostHighRespQueue[0]; 3801 softs->qentries[AAC_ADAP_NORM_RESP_Q] = 3802 &softs->qtablep->qt_AdapNormRespQueue[0]; 3803 softs->qentries[AAC_ADAP_HIGH_RESP_Q] = 3804 &softs->qtablep->qt_AdapHighRespQueue[0]; 3805 } 3806 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV); 3807 3808 /* Send init structure to the card */ 3809 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT, 3810 comm_space_phyaddr + \ 3811 offsetof(struct aac_comm_space, init_data), 3812 0, 0, 0, NULL) == AACERR) { 3813 AACDB_PRINT(softs, CE_WARN, 3814 "Cannot send init structure to adapter"); 3815 return (AACERR); 3816 } 3817 3818 return (AACOK); 3819 } 3820 3821 static uchar_t * 3822 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf) 3823 { 3824 (void) memset(buf, ' ', AAC_VENDOR_LEN); 3825 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name)); 3826 return (buf + AAC_VENDOR_LEN); 3827 } 3828 3829 static uchar_t * 3830 aac_product_id(struct aac_softstate *softs, uchar_t *buf) 3831 { 3832 (void) memset(buf, ' ', AAC_PRODUCT_LEN); 3833 bcopy(softs->product_name, buf, strlen(softs->product_name)); 3834 return (buf + AAC_PRODUCT_LEN); 3835 } 3836 3837 /* 3838 * Construct unit serial number from container uid 3839 */ 3840 static uchar_t * 3841 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf) 3842 { 3843 int i, d; 3844 uint32_t uid; 3845 3846 ASSERT(tgt >= 0 && tgt < AAC_MAX_LD); 3847 3848 uid = softs->containers[tgt].uid; 3849 for (i = 7; i >= 0; i--) { 3850 d = uid & 0xf; 3851 buf[i] = d > 9 ? 
'A' + (d - 0xa) : '0' + d; 3852 uid >>= 4; 3853 } 3854 return (buf + 8); 3855 } 3856 3857 /* 3858 * SPC-3 7.5 INQUIRY command implementation 3859 */ 3860 static void 3861 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt, 3862 union scsi_cdb *cdbp, struct buf *bp) 3863 { 3864 int tgt = pkt->pkt_address.a_target; 3865 char *b_addr = NULL; 3866 uchar_t page = cdbp->cdb_opaque[2]; 3867 3868 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) { 3869 /* Command Support Data is not supported */ 3870 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0); 3871 return; 3872 } 3873 3874 if (bp && bp->b_un.b_addr && bp->b_bcount) { 3875 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 3876 bp_mapin(bp); 3877 b_addr = bp->b_un.b_addr; 3878 } 3879 3880 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) { 3881 uchar_t *vpdp = (uchar_t *)b_addr; 3882 uchar_t *idp, *sp; 3883 3884 /* SPC-3 8.4 Vital product data parameters */ 3885 switch (page) { 3886 case 0x00: 3887 /* Supported VPD pages */ 3888 if (vpdp == NULL || 3889 bp->b_bcount < (AAC_VPD_PAGE_DATA + 3)) 3890 return; 3891 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3892 vpdp[AAC_VPD_PAGE_CODE] = 0x00; 3893 vpdp[AAC_VPD_PAGE_LENGTH] = 3; 3894 3895 vpdp[AAC_VPD_PAGE_DATA] = 0x00; 3896 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80; 3897 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83; 3898 3899 pkt->pkt_state |= STATE_XFERRED_DATA; 3900 break; 3901 3902 case 0x80: 3903 /* Unit serial number page */ 3904 if (vpdp == NULL || 3905 bp->b_bcount < (AAC_VPD_PAGE_DATA + 8)) 3906 return; 3907 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3908 vpdp[AAC_VPD_PAGE_CODE] = 0x80; 3909 vpdp[AAC_VPD_PAGE_LENGTH] = 8; 3910 3911 sp = &vpdp[AAC_VPD_PAGE_DATA]; 3912 (void) aac_lun_serialno(softs, tgt, sp); 3913 3914 pkt->pkt_state |= STATE_XFERRED_DATA; 3915 break; 3916 3917 case 0x83: 3918 /* Device identification page */ 3919 if (vpdp == NULL || 3920 bp->b_bcount < (AAC_VPD_PAGE_DATA + 32)) 3921 return; 3922 bzero(vpdp, AAC_VPD_PAGE_LENGTH); 3923 vpdp[AAC_VPD_PAGE_CODE] = 0x83; 3924 3925 idp = &vpdp[AAC_VPD_PAGE_DATA]; 3926 bzero(idp, AAC_VPD_ID_LENGTH); 3927 idp[AAC_VPD_ID_CODESET] = 0x02; 3928 idp[AAC_VPD_ID_TYPE] = 0x01; 3929 3930 /* 3931 * SPC-3 Table 111 - Identifier type 3932 * One recommanded method of constructing the remainder 3933 * of identifier field is to concatenate the product 3934 * identification field from the standard INQUIRY data 3935 * field and the product serial number field from the 3936 * unit serial number page. 
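 * That is what the code below does: the identifier is the ASCII
 * concatenation of the vendor id, the product id and the 8-digit hex
 * serial number derived from the container uid (see
 * aac_lun_serialno() above).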
3937 */ 3938 sp = &idp[AAC_VPD_ID_DATA]; 3939 sp = aac_vendor_id(softs, sp); 3940 sp = aac_product_id(softs, sp); 3941 sp = aac_lun_serialno(softs, tgt, sp); 3942 idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \ 3943 (uintptr_t)&idp[AAC_VPD_ID_DATA]; 3944 3945 vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \ 3946 (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA]; 3947 pkt->pkt_state |= STATE_XFERRED_DATA; 3948 break; 3949 3950 default: 3951 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3952 0x24, 0x00, 0); 3953 break; 3954 } 3955 } else { 3956 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr; 3957 size_t len = sizeof (struct scsi_inquiry); 3958 3959 if (page != 0) { 3960 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 3961 0x24, 0x00, 0); 3962 return; 3963 } 3964 if (inqp == NULL || bp->b_bcount < len) 3965 return; 3966 3967 bzero(inqp, len); 3968 inqp->inq_len = AAC_ADDITIONAL_LEN; 3969 inqp->inq_ansi = AAC_ANSI_VER; 3970 inqp->inq_rdf = AAC_RESP_DATA_FORMAT; 3971 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid); 3972 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid); 3973 bcopy("V1.0", inqp->inq_revision, 4); 3974 inqp->inq_cmdque = 1; /* enable tagged-queuing */ 3975 /* 3976 * For "sd-max-xfer-size" property which may impact performance 3977 * when IO threads increase. 3978 */ 3979 inqp->inq_wbus32 = 1; 3980 3981 pkt->pkt_state |= STATE_XFERRED_DATA; 3982 } 3983 } 3984 3985 /* 3986 * SPC-3 7.10 MODE SENSE command implementation 3987 */ 3988 static void 3989 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt, 3990 union scsi_cdb *cdbp, struct buf *bp, int capacity) 3991 { 3992 uchar_t pagecode; 3993 struct mode_header *headerp; 3994 struct mode_header_g1 *g1_headerp; 3995 unsigned int ncyl; 3996 caddr_t sense_data; 3997 caddr_t next_page; 3998 size_t sdata_size; 3999 size_t pages_size; 4000 int unsupport_page = 0; 4001 4002 ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE || 4003 cdbp->scc_cmd == SCMD_MODE_SENSE_G1); 4004 4005 if (!(bp && bp->b_un.b_addr && bp->b_bcount)) 4006 return; 4007 4008 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 4009 bp_mapin(bp); 4010 pkt->pkt_state |= STATE_XFERRED_DATA; 4011 pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F; 4012 4013 /* calculate the size of needed buffer */ 4014 if (cdbp->scc_cmd == SCMD_MODE_SENSE) 4015 sdata_size = MODE_HEADER_LENGTH; 4016 else /* must be SCMD_MODE_SENSE_G1 */ 4017 sdata_size = MODE_HEADER_LENGTH_G1; 4018 4019 pages_size = 0; 4020 switch (pagecode) { 4021 case SD_MODE_SENSE_PAGE3_CODE: 4022 pages_size += sizeof (struct mode_format); 4023 break; 4024 4025 case SD_MODE_SENSE_PAGE4_CODE: 4026 pages_size += sizeof (struct mode_geometry); 4027 break; 4028 4029 case MODEPAGE_CTRL_MODE: 4030 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 4031 pages_size += sizeof (struct mode_control_scsi3); 4032 } else { 4033 unsupport_page = 1; 4034 } 4035 break; 4036 4037 case MODEPAGE_ALLPAGES: 4038 if (softs->flags & AAC_FLAGS_LBA_64BIT) { 4039 pages_size += sizeof (struct mode_format) + 4040 sizeof (struct mode_geometry) + 4041 sizeof (struct mode_control_scsi3); 4042 } else { 4043 pages_size += sizeof (struct mode_format) + 4044 sizeof (struct mode_geometry); 4045 } 4046 break; 4047 4048 default: 4049 /* unsupported pages */ 4050 unsupport_page = 1; 4051 } 4052 4053 /* allocate buffer to fill the send data */ 4054 sdata_size += pages_size; 4055 sense_data = kmem_zalloc(sdata_size, KM_SLEEP); 4056 4057 if (cdbp->scc_cmd == SCMD_MODE_SENSE) { 4058 headerp = (struct mode_header *)sense_data; 4059 headerp->length = MODE_HEADER_LENGTH + pages_size - 4060 sizeof 
(headerp->length); 4061 headerp->bdesc_length = 0; 4062 next_page = sense_data + sizeof (struct mode_header); 4063 } else { 4064 g1_headerp = (void *)sense_data; 4065 g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size - 4066 sizeof (g1_headerp->length)); 4067 g1_headerp->bdesc_length = 0; 4068 next_page = sense_data + sizeof (struct mode_header_g1); 4069 } 4070 4071 if (unsupport_page) 4072 goto finish; 4073 4074 if (pagecode == SD_MODE_SENSE_PAGE3_CODE || 4075 pagecode == MODEPAGE_ALLPAGES) { 4076 /* SBC-3 7.1.3.3 Format device page */ 4077 struct mode_format *page3p; 4078 4079 page3p = (void *)next_page; 4080 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE; 4081 page3p->mode_page.length = sizeof (struct mode_format); 4082 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE); 4083 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK); 4084 4085 next_page += sizeof (struct mode_format); 4086 } 4087 4088 if (pagecode == SD_MODE_SENSE_PAGE4_CODE || 4089 pagecode == MODEPAGE_ALLPAGES) { 4090 /* SBC-3 7.1.3.8 Rigid disk device geometry page */ 4091 struct mode_geometry *page4p; 4092 4093 page4p = (void *)next_page; 4094 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE; 4095 page4p->mode_page.length = sizeof (struct mode_geometry); 4096 page4p->heads = AAC_NUMBER_OF_HEADS; 4097 page4p->rpm = BE_16(AAC_ROTATION_SPEED); 4098 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK); 4099 page4p->cyl_lb = ncyl & 0xff; 4100 page4p->cyl_mb = (ncyl >> 8) & 0xff; 4101 page4p->cyl_ub = (ncyl >> 16) & 0xff; 4102 4103 next_page += sizeof (struct mode_geometry); 4104 } 4105 4106 if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) && 4107 softs->flags & AAC_FLAGS_LBA_64BIT) { 4108 /* 64-bit LBA need large sense data */ 4109 struct mode_control_scsi3 *mctl; 4110 4111 mctl = (void *)next_page; 4112 mctl->mode_page.code = MODEPAGE_CTRL_MODE; 4113 mctl->mode_page.length = 4114 sizeof (struct mode_control_scsi3) - 4115 sizeof (struct mode_page); 4116 mctl->d_sense = 1; 4117 } 4118 4119 finish: 4120 /* copyout the valid data. */ 4121 bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount)); 4122 kmem_free(sense_data, sdata_size); 4123 } 4124 4125 static int 4126 aac_name_node(dev_info_t *dip, char *name, int len) 4127 { 4128 int tgt, lun; 4129 4130 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4131 DDI_PROP_DONTPASS, "target", -1); 4132 if (tgt == -1) 4133 return (DDI_FAILURE); 4134 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4135 DDI_PROP_DONTPASS, "lun", -1); 4136 if (lun == -1) 4137 return (DDI_FAILURE); 4138 4139 (void) snprintf(name, len, "%x,%x", tgt, lun); 4140 return (DDI_SUCCESS); 4141 } 4142 4143 /*ARGSUSED*/ 4144 static int 4145 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 4146 scsi_hba_tran_t *tran, struct scsi_device *sd) 4147 { 4148 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran); 4149 #if defined(DEBUG) || defined(__lock_lint) 4150 int ctl = ddi_get_instance(softs->devinfo_p); 4151 #endif 4152 uint16_t tgt = sd->sd_address.a_target; 4153 uint8_t lun = sd->sd_address.a_lun; 4154 struct aac_device *dvp; 4155 4156 DBCALLED(softs, 2); 4157 4158 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 4159 /* 4160 * If no persistent node exist, we don't allow .conf node 4161 * to be created. 
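		 * The only exception is a .conf node that matches an already
		 * configured child but cannot be merged into it by
		 * ndi_merge_node() below.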
4162 	 */ 4163 		if (aac_find_child(softs, tgt, lun) != NULL) { 4164 			if (ndi_merge_node(tgt_dip, aac_name_node) != 4165 			    DDI_SUCCESS) 4166 				/* Create this .conf node */ 4167 				return (DDI_SUCCESS); 4168 		} 4169 		return (DDI_FAILURE); 4170 	} 4171 4172 	/* 4173 	 * Only support containers/physical devices that have been 4174 	 * detected and are valid 4175 	 */ 4176 	mutex_enter(&softs->io_lock); 4177 	if (tgt >= AAC_MAX_DEV(softs)) { 4178 		AACDB_PRINT_TRAN(softs, 4179 		    "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun); 4180 		mutex_exit(&softs->io_lock); 4181 		return (DDI_FAILURE); 4182 	} 4183 4184 	if (tgt < AAC_MAX_LD) { 4185 		dvp = (struct aac_device *)&softs->containers[tgt]; 4186 		if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) { 4187 			AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d", 4188 			    ctl, tgt, lun); 4189 			mutex_exit(&softs->io_lock); 4190 			return (DDI_FAILURE); 4191 		} 4192 		/* 4193 		 * Save the tgt_dip for the given target if one doesn't exist 4194 		 * already. Dips for non-existent tgts will be cleared in 4195 		 * tgt_free. 4196 		 */ 4197 		if (softs->containers[tgt].dev.dip == NULL && 4198 		    strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) 4199 			softs->containers[tgt].dev.dip = tgt_dip; 4200 	} else { 4201 		dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)]; 4202 		/* 4203 		 * Save the tgt_dip for the given target if one doesn't exist 4204 		 * already. Dips for non-existent tgts will be cleared in 4205 		 * tgt_free. 4206 		 */ 4207 4208 		if (softs->nondasds[AAC_PD(tgt)].dev.dip == NULL && 4209 		    strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) 4210 			softs->nondasds[AAC_PD(tgt)].dev.dip = tgt_dip; 4211 	} 4212 4213 	if (softs->flags & AAC_FLAGS_BRKUP) { 4214 		if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip, 4215 		    "buf_break", 1) != DDI_PROP_SUCCESS) { 4216 			cmn_err(CE_CONT, "unable to create " 4217 			    "property for t%dL%d (buf_break)", tgt, lun); 4218 		} 4219 	} 4220 4221 	AACDB_PRINT(softs, CE_NOTE, 4222 	    "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun, 4223 	    (dvp->type == AAC_DEV_PD) ? "pd" : "ld"); 4224 	mutex_exit(&softs->io_lock); 4225 	return (DDI_SUCCESS); 4226 } 4227 4228 static void 4229 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 4230     scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 4231 { 4232 #ifndef __lock_lint 4233 	_NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran)) 4234 #endif 4235 4236 	struct aac_softstate *softs = SD2AAC(sd); 4237 	int tgt = sd->sd_address.a_target; 4238 4239 	mutex_enter(&softs->io_lock); 4240 	if (tgt < AAC_MAX_LD) { 4241 		if (softs->containers[tgt].dev.dip == tgt_dip) 4242 			softs->containers[tgt].dev.dip = NULL; 4243 	} else { 4244 		if (softs->nondasds[AAC_PD(tgt)].dev.dip == tgt_dip) 4245 			softs->nondasds[AAC_PD(tgt)].dev.dip = NULL; 4246 		softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID; 4247 	} 4248 	mutex_exit(&softs->io_lock); 4249 } 4250 4251 /* 4252  * Check if the firmware is Up And Running. If it is in the Kernel Panic 4253  * state, (BlinkLED code + 1) is returned.
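 * The state is read from the adapter's AAC_OMR0 register.
 * Return values: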
4254 * 0 -- firmware up and running 4255 * -1 -- firmware dead 4256 * >0 -- firmware kernel panic 4257 */ 4258 static int 4259 aac_check_adapter_health(struct aac_softstate *softs) 4260 { 4261 int rval; 4262 4263 rval = PCI_MEM_GET32(softs, AAC_OMR0); 4264 4265 if (rval & AAC_KERNEL_UP_AND_RUNNING) { 4266 rval = 0; 4267 } else if (rval & AAC_KERNEL_PANIC) { 4268 cmn_err(CE_WARN, "firmware panic"); 4269 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */ 4270 } else { 4271 cmn_err(CE_WARN, "firmware dead"); 4272 rval = -1; 4273 } 4274 return (rval); 4275 } 4276 4277 static void 4278 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp, 4279 uchar_t reason) 4280 { 4281 acp->flags |= AAC_CMD_ABORT; 4282 4283 if (acp->pkt) { 4284 if (acp->slotp) { /* outstanding cmd */ 4285 acp->pkt->pkt_state |= STATE_GOT_STATUS; 4286 } 4287 4288 switch (reason) { 4289 case CMD_TIMEOUT: 4290 AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p", 4291 acp); 4292 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT, 4293 STAT_TIMEOUT | STAT_BUS_RESET); 4294 break; 4295 case CMD_RESET: 4296 /* aac support only RESET_ALL */ 4297 AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp); 4298 aac_set_pkt_reason(softs, acp, CMD_RESET, 4299 STAT_BUS_RESET); 4300 break; 4301 case CMD_ABORTED: 4302 AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p", 4303 acp); 4304 aac_set_pkt_reason(softs, acp, CMD_ABORTED, 4305 STAT_ABORTED); 4306 break; 4307 } 4308 } 4309 aac_end_io(softs, acp); 4310 } 4311 4312 /* 4313 * Abort all the pending commands of type iocmd or just the command pkt 4314 * corresponding to pkt 4315 */ 4316 static void 4317 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt, 4318 int reason) 4319 { 4320 struct aac_cmd *ac_arg, *acp; 4321 int i; 4322 4323 if (pkt == NULL) { 4324 ac_arg = NULL; 4325 } else { 4326 ac_arg = PKT2AC(pkt); 4327 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ? 4328 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC; 4329 } 4330 4331 /* 4332 * a) outstanding commands on the controller 4333 * Note: should abort outstanding commands only after one 4334 * IOP reset has been done. 4335 */ 4336 if (iocmd & AAC_IOCMD_OUTSTANDING) { 4337 struct aac_cmd *acp; 4338 4339 for (i = 0; i < AAC_MAX_LD; i++) { 4340 if (AAC_DEV_IS_VALID(&softs->containers[i].dev)) 4341 softs->containers[i].reset = 1; 4342 } 4343 while ((acp = softs->q_busy.q_head) != NULL) 4344 aac_abort_iocmd(softs, acp, reason); 4345 } 4346 4347 /* b) commands in the waiting queues */ 4348 for (i = 0; i < AAC_CMDQ_NUM; i++) { 4349 if (iocmd & (1 << i)) { 4350 if (ac_arg) { 4351 aac_abort_iocmd(softs, ac_arg, reason); 4352 } else { 4353 while ((acp = softs->q_wait[i].q_head) != NULL) 4354 aac_abort_iocmd(softs, acp, reason); 4355 } 4356 } 4357 } 4358 } 4359 4360 /* 4361 * The draining thread is shared among quiesce threads. It terminates 4362 * when the adapter is quiesced or stopped by aac_stop_drain(). 4363 */ 4364 static void 4365 aac_check_drain(void *arg) 4366 { 4367 struct aac_softstate *softs = arg; 4368 4369 mutex_enter(&softs->io_lock); 4370 if (softs->ndrains) { 4371 softs->drain_timeid = 0; 4372 /* 4373 * If both ASYNC and SYNC bus throttle are held, 4374 * wake up threads only when both are drained out. 
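		 * Otherwise the timeout is re-armed and the check is repeated
		 * after another AAC_QUIESCE_TICK seconds.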
4375 		 */ 4376 		if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 || 4377 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) && 4378 		    (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 || 4379 		    softs->bus_ncmds[AAC_CMDQ_SYNC] == 0)) 4380 			cv_broadcast(&softs->drain_cv); 4381 		else 4382 			softs->drain_timeid = timeout(aac_check_drain, softs, 4383 			    AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4384 	} 4385 	mutex_exit(&softs->io_lock); 4386 } 4387 4388 /* 4389  * If the outstanding cmds are not being drained yet, start draining 4390  * them. Otherwise only update ndrains. 4391  */ 4392 static void 4393 aac_start_drain(struct aac_softstate *softs) 4394 { 4395 	if (softs->ndrains == 0) { 4396 		ASSERT(softs->drain_timeid == 0); 4397 		softs->drain_timeid = timeout(aac_check_drain, softs, 4398 		    AAC_QUIESCE_TICK * drv_usectohz(1000000)); 4399 	} 4400 	softs->ndrains++; 4401 } 4402 4403 /* 4404  * Stop the draining thread when no other threads use it any longer. 4405  * Side effect: io_lock may be released in the middle. 4406  */ 4407 static void 4408 aac_stop_drain(struct aac_softstate *softs) 4409 { 4410 	softs->ndrains--; 4411 	if (softs->ndrains == 0) { 4412 		if (softs->drain_timeid != 0) { 4413 			timeout_id_t tid = softs->drain_timeid; 4414 4415 			softs->drain_timeid = 0; 4416 			mutex_exit(&softs->io_lock); 4417 			(void) untimeout(tid); 4418 			mutex_enter(&softs->io_lock); 4419 		} 4420 	} 4421 } 4422 4423 /* 4424  * The following function comes from Adaptec: 4425  * 4426  * Once an IOP reset is done, the driver basically has to re-initialize the 4427  * card as if coming up from a cold boot, and it is responsible for any IO 4428  * that was outstanding to the adapter at the time of the IOP RESET. Prepare 4429  * for IOP RESET by keeping the init code modular with the ability to call it 4430  * from multiple places. 4431  */ 4432 static int 4433 aac_reset_adapter(struct aac_softstate *softs) 4434 { 4435 	int health; 4436 	uint32_t status; 4437 	int rval = AAC_IOP_RESET_FAILED; 4438 4439 	DBCALLED(softs, 1); 4440 4441 	ASSERT(softs->state & AAC_STATE_RESET); 4442 4443 	ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0); 4444 	/* Disable interrupt */ 4445 	AAC_DISABLE_INTR(softs); 4446 4447 	health = aac_check_adapter_health(softs); 4448 	if (health == -1) { 4449 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 4450 		goto finish; 4451 	} 4452 	if (health == 0) /* flush drives if possible */ 4453 		(void) aac_shutdown(softs); 4454 4455 	/* Execute IOP reset */ 4456 	if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0, 4457 	    &status)) != AACOK) { 4458 		ddi_acc_handle_t acc; 4459 		struct aac_fib *fibp; 4460 		struct aac_pause_command *pc; 4461 4462 		if ((status & 0xf) == 0xf) { 4463 			uint32_t wait_count; 4464 4465 			/* 4466 			 * Sunrise Lake has dual cores and we must drag the 4467 			 * other core with us to reset simultaneously. There 4468 			 * are 2 bits in the Inbound Reset Control and Status 4469 			 * Register (offset 0x38) of the Sunrise Lake to reset 4470 			 * the chip without clearing out the PCI configuration 4471 			 * info (COMMAND & BARS).
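			 * Writing AAC_IRCSR_CORES_RST to that register below
			 * resets both cores at once while the PCI config of
			 * the adapter is preserved.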
4472 */ 4473 PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST); 4474 4475 /* 4476 * We need to wait for 5 seconds before accessing the MU 4477 * again 10000 * 100us = 1000,000us = 1000ms = 1s 4478 */ 4479 wait_count = 5 * 10000; 4480 while (wait_count) { 4481 drv_usecwait(100); /* delay 100 microseconds */ 4482 wait_count--; 4483 } 4484 } else { 4485 if (status == SRB_STATUS_INVALID_REQUEST) 4486 cmn_err(CE_WARN, "!IOP_RESET not supported"); 4487 else /* probably timeout */ 4488 cmn_err(CE_WARN, "!IOP_RESET failed"); 4489 4490 /* Unwind aac_shutdown() */ 4491 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 4492 acc = softs->sync_ac.slotp->fib_acc_handle; 4493 4494 fibp = softs->sync_ac.slotp->fibp; 4495 pc = (struct aac_pause_command *)&fibp->data[0]; 4496 4497 bzero(pc, sizeof (*pc)); 4498 ddi_put32(acc, &pc->Command, VM_ContainerConfig); 4499 ddi_put32(acc, &pc->Type, CT_PAUSE_IO); 4500 ddi_put32(acc, &pc->Timeout, 1); 4501 ddi_put32(acc, &pc->Min, 1); 4502 ddi_put32(acc, &pc->NoRescan, 1); 4503 4504 (void) aac_sync_fib(softs, ContainerCommand, 4505 AAC_FIB_SIZEOF(struct aac_pause_command)); 4506 aac_sync_fib_slot_release(softs, &softs->sync_ac); 4507 4508 if (aac_check_adapter_health(softs) != 0) 4509 ddi_fm_service_impact(softs->devinfo_p, 4510 DDI_SERVICE_LOST); 4511 else 4512 /* 4513 * IOP reset not supported or IOP not reseted 4514 */ 4515 rval = AAC_IOP_RESET_ABNORMAL; 4516 goto finish; 4517 } 4518 } 4519 4520 /* 4521 * Re-read and renegotiate the FIB parameters, as one of the actions 4522 * that can result from an IOP reset is the running of a new firmware 4523 * image. 4524 */ 4525 if (aac_common_attach(softs) != AACOK) 4526 goto finish; 4527 4528 rval = AAC_IOP_RESET_SUCCEED; 4529 4530 finish: 4531 AAC_ENABLE_INTR(softs); 4532 return (rval); 4533 } 4534 4535 static void 4536 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q, 4537 int throttle) 4538 { 4539 /* 4540 * If the bus is draining/quiesced, no changes to the throttles 4541 * are allowed. All throttles should have been set to 0. 4542 */ 4543 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains) 4544 return; 4545 dvp->throttle[q] = throttle; 4546 } 4547 4548 static void 4549 aac_hold_bus(struct aac_softstate *softs, int iocmds) 4550 { 4551 int i, q; 4552 4553 /* Hold bus by holding every device on the bus */ 4554 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4555 if (iocmds & (1 << q)) { 4556 softs->bus_throttle[q] = 0; 4557 for (i = 0; i < AAC_MAX_LD; i++) 4558 aac_set_throttle(softs, 4559 &softs->containers[i].dev, q, 0); 4560 for (i = 0; i < AAC_MAX_PD(softs); i++) 4561 aac_set_throttle(softs, 4562 &softs->nondasds[i].dev, q, 0); 4563 } 4564 } 4565 } 4566 4567 static void 4568 aac_unhold_bus(struct aac_softstate *softs, int iocmds) 4569 { 4570 int i, q, max_throttle; 4571 4572 for (q = 0; q < AAC_CMDQ_NUM; q++) { 4573 if (iocmds & (1 << q)) { 4574 /* 4575 * Should not unhold AAC_IOCMD_ASYNC bus, if it has been 4576 * quiesced or being drained by possibly some quiesce 4577 * threads. 
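			 * The ASYNC queue is unheld again by
			 * aac_do_unquiesce(), or by aac_do_quiesce() itself if
			 * the quiesce gets interrupted.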
4578 */ 4579 if (q == AAC_CMDQ_ASYNC && ((softs->state & 4580 AAC_STATE_QUIESCED) || softs->ndrains)) 4581 continue; 4582 if (q == AAC_CMDQ_ASYNC) 4583 max_throttle = softs->total_slots - 4584 AAC_MGT_SLOT_NUM; 4585 else 4586 max_throttle = softs->total_slots - 1; 4587 softs->bus_throttle[q] = max_throttle; 4588 for (i = 0; i < AAC_MAX_LD; i++) 4589 aac_set_throttle(softs, 4590 &softs->containers[i].dev, 4591 q, max_throttle); 4592 for (i = 0; i < AAC_MAX_PD(softs); i++) 4593 aac_set_throttle(softs, &softs->nondasds[i].dev, 4594 q, max_throttle); 4595 } 4596 } 4597 } 4598 4599 static int 4600 aac_do_reset(struct aac_softstate *softs) 4601 { 4602 int health; 4603 int rval; 4604 4605 softs->state |= AAC_STATE_RESET; 4606 health = aac_check_adapter_health(softs); 4607 4608 /* 4609 * Hold off new io commands and wait all outstanding io 4610 * commands to complete. 4611 */ 4612 if (health == 0) { 4613 int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC]; 4614 int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC]; 4615 4616 if (sync_cmds == 0 && async_cmds == 0) { 4617 rval = AAC_IOP_RESET_SUCCEED; 4618 goto finish; 4619 } 4620 /* 4621 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds 4622 * to complete the outstanding io commands 4623 */ 4624 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10; 4625 int (*intr_handler)(struct aac_softstate *); 4626 4627 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4628 /* 4629 * Poll the adapter by ourselves in case interrupt is disabled 4630 * and to avoid releasing the io_lock. 4631 */ 4632 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 4633 aac_process_intr_new : aac_process_intr_old; 4634 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] || 4635 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) { 4636 drv_usecwait(100); 4637 (void) intr_handler(softs); 4638 timeout--; 4639 } 4640 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC); 4641 4642 if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 && 4643 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) { 4644 /* Cmds drained out */ 4645 rval = AAC_IOP_RESET_SUCCEED; 4646 goto finish; 4647 } else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds || 4648 softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) { 4649 /* Cmds not drained out, adapter overloaded */ 4650 rval = AAC_IOP_RESET_ABNORMAL; 4651 goto finish; 4652 } 4653 } 4654 4655 /* 4656 * If a longer waiting time still can't drain any outstanding io 4657 * commands, do IOP reset. 
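	 * If even the IOP reset fails, the adapter is marked dead and no
	 * further commands will be accepted.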
4658 */ 4659 if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED) 4660 softs->state |= AAC_STATE_DEAD; 4661 4662 finish: 4663 softs->state &= ~AAC_STATE_RESET; 4664 return (rval); 4665 } 4666 4667 static int 4668 aac_tran_reset(struct scsi_address *ap, int level) 4669 { 4670 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4671 int rval; 4672 4673 DBCALLED(softs, 1); 4674 4675 if (level != RESET_ALL) { 4676 cmn_err(CE_NOTE, "!reset target/lun not supported"); 4677 return (0); 4678 } 4679 4680 mutex_enter(&softs->io_lock); 4681 switch (rval = aac_do_reset(softs)) { 4682 case AAC_IOP_RESET_SUCCEED: 4683 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC, 4684 NULL, CMD_RESET); 4685 aac_start_waiting_io(softs); 4686 break; 4687 case AAC_IOP_RESET_FAILED: 4688 /* Abort IOCTL cmds when adapter is dead */ 4689 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET); 4690 break; 4691 case AAC_IOP_RESET_ABNORMAL: 4692 aac_start_waiting_io(softs); 4693 } 4694 mutex_exit(&softs->io_lock); 4695 4696 aac_drain_comp_q(softs); 4697 return (rval == 0); 4698 } 4699 4700 static int 4701 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 4702 { 4703 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4704 4705 DBCALLED(softs, 1); 4706 4707 mutex_enter(&softs->io_lock); 4708 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED); 4709 mutex_exit(&softs->io_lock); 4710 4711 aac_drain_comp_q(softs); 4712 return (1); 4713 } 4714 4715 void 4716 aac_free_dmamap(struct aac_cmd *acp) 4717 { 4718 /* Free dma mapping */ 4719 if (acp->flags & AAC_CMD_DMA_VALID) { 4720 ASSERT(acp->buf_dma_handle); 4721 (void) ddi_dma_unbind_handle(acp->buf_dma_handle); 4722 acp->flags &= ~AAC_CMD_DMA_VALID; 4723 } 4724 4725 if (acp->abp != NULL) { /* free non-aligned buf DMA */ 4726 ASSERT(acp->buf_dma_handle); 4727 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp) 4728 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr, 4729 (uint8_t *)acp->abp, acp->bp->b_bcount, 4730 DDI_DEV_AUTOINCR); 4731 ddi_dma_mem_free(&acp->abh); 4732 acp->abp = NULL; 4733 } 4734 4735 if (acp->buf_dma_handle) { 4736 ddi_dma_free_handle(&acp->buf_dma_handle); 4737 acp->buf_dma_handle = NULL; 4738 } 4739 } 4740 4741 static void 4742 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 4743 { 4744 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported", 4745 ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd); 4746 aac_free_dmamap(acp); 4747 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0); 4748 aac_soft_callback(softs, acp); 4749 } 4750 4751 /* 4752 * Handle command to logical device 4753 */ 4754 static int 4755 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp) 4756 { 4757 struct aac_container *dvp; 4758 struct scsi_pkt *pkt; 4759 union scsi_cdb *cdbp; 4760 struct buf *bp; 4761 int rval; 4762 4763 dvp = (struct aac_container *)acp->dvp; 4764 pkt = acp->pkt; 4765 cdbp = (void *)pkt->pkt_cdbp; 4766 bp = acp->bp; 4767 4768 switch (cdbp->scc_cmd) { 4769 case SCMD_INQUIRY: /* inquiry */ 4770 aac_free_dmamap(acp); 4771 aac_inquiry(softs, pkt, cdbp, bp); 4772 aac_soft_callback(softs, acp); 4773 rval = TRAN_ACCEPT; 4774 break; 4775 4776 case SCMD_READ_CAPACITY: /* read capacity */ 4777 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4778 struct scsi_capacity cap; 4779 uint64_t last_lba; 4780 4781 /* check 64-bit LBA */ 4782 last_lba = dvp->size - 1; 4783 if (last_lba > 0xffffffffull) { 4784 cap.capacity = 0xfffffffful; 4785 } else { 4786 cap.capacity = BE_32(last_lba); 
4787 } 4788 cap.lbasize = BE_32(AAC_SECTOR_SIZE); 4789 4790 aac_free_dmamap(acp); 4791 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4792 bp_mapin(bp); 4793 bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8)); 4794 pkt->pkt_state |= STATE_XFERRED_DATA; 4795 } 4796 aac_soft_callback(softs, acp); 4797 rval = TRAN_ACCEPT; 4798 break; 4799 4800 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */ 4801 /* Check if containers need 64-bit LBA support */ 4802 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) { 4803 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4804 struct scsi_capacity_16 cap16; 4805 int cap_len = sizeof (struct scsi_capacity_16); 4806 4807 bzero(&cap16, cap_len); 4808 cap16.sc_capacity = BE_64(dvp->size - 1); 4809 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE); 4810 4811 aac_free_dmamap(acp); 4812 if (bp->b_flags & (B_PHYS | B_PAGEIO)) 4813 bp_mapin(bp); 4814 bcopy(&cap16, bp->b_un.b_addr, 4815 min(bp->b_bcount, cap_len)); 4816 pkt->pkt_state |= STATE_XFERRED_DATA; 4817 } 4818 aac_soft_callback(softs, acp); 4819 } else { 4820 aac_unknown_scmd(softs, acp); 4821 } 4822 rval = TRAN_ACCEPT; 4823 break; 4824 4825 case SCMD_READ_G4: /* read_16 */ 4826 case SCMD_WRITE_G4: /* write_16 */ 4827 if (softs->flags & AAC_FLAGS_RAW_IO) { 4828 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */ 4829 acp->blkno = ((uint64_t) \ 4830 GETG4ADDR(cdbp) << 32) | \ 4831 (uint32_t)GETG4ADDRTL(cdbp); 4832 goto do_io; 4833 } 4834 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported"); 4835 aac_unknown_scmd(softs, acp); 4836 rval = TRAN_ACCEPT; 4837 break; 4838 4839 case SCMD_READ: /* read_6 */ 4840 case SCMD_WRITE: /* write_6 */ 4841 acp->blkno = GETG0ADDR(cdbp); 4842 goto do_io; 4843 4844 case SCMD_READ_G5: /* read_12 */ 4845 case SCMD_WRITE_G5: /* write_12 */ 4846 acp->blkno = GETG5ADDR(cdbp); 4847 goto do_io; 4848 4849 case SCMD_READ_G1: /* read_10 */ 4850 case SCMD_WRITE_G1: /* write_10 */ 4851 acp->blkno = (uint32_t)GETG1ADDR(cdbp); 4852 do_io: 4853 if (acp->flags & AAC_CMD_DMA_VALID) { 4854 uint64_t cnt_size = dvp->size; 4855 4856 /* 4857 * If LBA > array size AND rawio, the 4858 * adapter may hang. So check it before 4859 * sending. 4860 * NOTE: (blkno + blkcnt) may overflow 4861 */ 4862 if ((acp->blkno < cnt_size) && 4863 ((acp->blkno + acp->bcount / 4864 AAC_BLK_SIZE) <= cnt_size)) { 4865 rval = aac_do_io(softs, acp); 4866 } else { 4867 /* 4868 * Request exceeds the capacity of disk, 4869 * set error block number to last LBA 4870 * + 1. 
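				 * ASC/ASCQ 0x21/0x00 (LOGICAL BLOCK ADDRESS
				 * OUT OF RANGE) is reported and cnt_size is
				 * passed back as the error block number.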
4871 */ 4872 aac_set_arq_data(pkt, 4873 KEY_ILLEGAL_REQUEST, 0x21, 4874 0x00, cnt_size); 4875 aac_soft_callback(softs, acp); 4876 rval = TRAN_ACCEPT; 4877 } 4878 } else if (acp->bcount == 0) { 4879 /* For 0 length IO, just return ok */ 4880 aac_soft_callback(softs, acp); 4881 rval = TRAN_ACCEPT; 4882 } else { 4883 rval = TRAN_BADPKT; 4884 } 4885 break; 4886 4887 case SCMD_MODE_SENSE: /* mode_sense_6 */ 4888 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */ 4889 int capacity; 4890 4891 aac_free_dmamap(acp); 4892 if (dvp->size > 0xffffffffull) 4893 capacity = 0xfffffffful; /* 64-bit LBA */ 4894 else 4895 capacity = dvp->size; 4896 aac_mode_sense(softs, pkt, cdbp, bp, capacity); 4897 aac_soft_callback(softs, acp); 4898 rval = TRAN_ACCEPT; 4899 break; 4900 } 4901 4902 case SCMD_START_STOP: 4903 if (softs->support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) { 4904 acp->aac_cmd_fib = aac_cmd_fib_startstop; 4905 acp->ac_comp = aac_startstop_complete; 4906 rval = aac_do_io(softs, acp); 4907 break; 4908 } 4909 /* FALLTHRU */ 4910 case SCMD_TEST_UNIT_READY: 4911 case SCMD_REQUEST_SENSE: 4912 case SCMD_FORMAT: 4913 aac_free_dmamap(acp); 4914 if (bp && bp->b_un.b_addr && bp->b_bcount) { 4915 if (acp->flags & AAC_CMD_BUF_READ) { 4916 if (bp->b_flags & (B_PHYS|B_PAGEIO)) 4917 bp_mapin(bp); 4918 bzero(bp->b_un.b_addr, bp->b_bcount); 4919 } 4920 pkt->pkt_state |= STATE_XFERRED_DATA; 4921 } 4922 aac_soft_callback(softs, acp); 4923 rval = TRAN_ACCEPT; 4924 break; 4925 4926 case SCMD_SYNCHRONIZE_CACHE: 4927 acp->flags |= AAC_CMD_NTAG; 4928 acp->aac_cmd_fib = aac_cmd_fib_sync; 4929 acp->ac_comp = aac_synccache_complete; 4930 rval = aac_do_io(softs, acp); 4931 break; 4932 4933 case SCMD_DOORLOCK: 4934 aac_free_dmamap(acp); 4935 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0; 4936 aac_soft_callback(softs, acp); 4937 rval = TRAN_ACCEPT; 4938 break; 4939 4940 default: /* unknown command */ 4941 aac_unknown_scmd(softs, acp); 4942 rval = TRAN_ACCEPT; 4943 break; 4944 } 4945 4946 return (rval); 4947 } 4948 4949 static int 4950 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) 4951 { 4952 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 4953 struct aac_cmd *acp = PKT2AC(pkt); 4954 struct aac_device *dvp = acp->dvp; 4955 int rval; 4956 4957 DBCALLED(softs, 2); 4958 4959 /* 4960 * Reinitialize some fields of ac and pkt; the packet may 4961 * have been resubmitted 4962 */ 4963 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \ 4964 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID; 4965 acp->timeout = acp->pkt->pkt_time; 4966 if (pkt->pkt_flags & FLAG_NOINTR) 4967 acp->flags |= AAC_CMD_NO_INTR; 4968 #ifdef DEBUG 4969 acp->fib_flags = AACDB_FLAGS_FIB_SCMD; 4970 #endif 4971 pkt->pkt_reason = CMD_CMPLT; 4972 pkt->pkt_state = 0; 4973 pkt->pkt_statistics = 0; 4974 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */ 4975 4976 if (acp->flags & AAC_CMD_DMA_VALID) { 4977 pkt->pkt_resid = acp->bcount; 4978 /* Consistent packets need to be sync'ed first */ 4979 if ((acp->flags & AAC_CMD_CONSISTENT) && 4980 (acp->flags & AAC_CMD_BUF_WRITE)) 4981 if (aac_dma_sync_ac(acp) != AACOK) { 4982 ddi_fm_service_impact(softs->devinfo_p, 4983 DDI_SERVICE_UNAFFECTED); 4984 return (TRAN_BADPKT); 4985 } 4986 } else { 4987 pkt->pkt_resid = 0; 4988 } 4989 4990 mutex_enter(&softs->io_lock); 4991 AACDB_PRINT_SCMD(softs, acp); 4992 if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) && 4993 !(softs->state & AAC_STATE_DEAD)) { 4994 if (dvp->type == AAC_DEV_LD) { 4995 if (ap->a_lun == 0) 4996 rval = 
aac_tran_start_ld(softs, acp); 4997 else 4998 goto error; 4999 } else { 5000 rval = aac_do_io(softs, acp); 5001 } 5002 } else { 5003 error: 5004 #ifdef DEBUG 5005 if (!(softs->state & AAC_STATE_DEAD)) { 5006 AACDB_PRINT_TRAN(softs, 5007 "Cannot send cmd to target t%dL%d: %s", 5008 ap->a_target, ap->a_lun, 5009 "target invalid"); 5010 } else { 5011 AACDB_PRINT(softs, CE_WARN, 5012 "Cannot send cmd to target t%dL%d: %s", 5013 ap->a_target, ap->a_lun, 5014 "adapter dead"); 5015 } 5016 #endif 5017 rval = TRAN_FATAL_ERROR; 5018 } 5019 mutex_exit(&softs->io_lock); 5020 return (rval); 5021 } 5022 5023 static int 5024 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom) 5025 { 5026 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5027 struct aac_device *dvp; 5028 int rval; 5029 5030 DBCALLED(softs, 2); 5031 5032 /* We don't allow inquiring about capabilities for other targets */ 5033 if (cap == NULL || whom == 0) { 5034 AACDB_PRINT(softs, CE_WARN, 5035 "GetCap> %s not supported: whom=%d", cap, whom); 5036 return (-1); 5037 } 5038 5039 mutex_enter(&softs->io_lock); 5040 dvp = AAC_DEV(softs, ap->a_target); 5041 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 5042 mutex_exit(&softs->io_lock); 5043 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap", 5044 ap->a_target, ap->a_lun); 5045 return (-1); 5046 } 5047 5048 switch (scsi_hba_lookup_capstr(cap)) { 5049 case SCSI_CAP_ARQ: /* auto request sense */ 5050 rval = 1; 5051 break; 5052 case SCSI_CAP_UNTAGGED_QING: 5053 case SCSI_CAP_TAGGED_QING: 5054 rval = 1; 5055 break; 5056 case SCSI_CAP_DMA_MAX: 5057 rval = softs->dma_max; 5058 break; 5059 default: 5060 rval = -1; 5061 break; 5062 } 5063 mutex_exit(&softs->io_lock); 5064 5065 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d", 5066 cap, ap->a_target, ap->a_lun, rval); 5067 return (rval); 5068 } 5069 5070 /*ARGSUSED*/ 5071 static int 5072 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 5073 { 5074 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5075 struct aac_device *dvp; 5076 int rval; 5077 5078 DBCALLED(softs, 2); 5079 5080 /* We don't allow inquiring about capabilities for other targets */ 5081 if (cap == NULL || whom == 0) { 5082 AACDB_PRINT(softs, CE_WARN, 5083 "SetCap> %s not supported: whom=%d", cap, whom); 5084 return (-1); 5085 } 5086 5087 mutex_enter(&softs->io_lock); 5088 dvp = AAC_DEV(softs, ap->a_target); 5089 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) { 5090 mutex_exit(&softs->io_lock); 5091 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap", 5092 ap->a_target, ap->a_lun); 5093 return (-1); 5094 } 5095 5096 switch (scsi_hba_lookup_capstr(cap)) { 5097 case SCSI_CAP_ARQ: 5098 /* Force auto request sense */ 5099 rval = (value == 1) ? 1 : 0; 5100 break; 5101 case SCSI_CAP_UNTAGGED_QING: 5102 case SCSI_CAP_TAGGED_QING: 5103 rval = (value == 1) ? 
1 : 0; 5104 break; 5105 default: 5106 rval = -1; 5107 break; 5108 } 5109 mutex_exit(&softs->io_lock); 5110 5111 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d", 5112 cap, ap->a_target, ap->a_lun, value, rval); 5113 return (rval); 5114 } 5115 5116 static void 5117 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 5118 { 5119 struct aac_cmd *acp = PKT2AC(pkt); 5120 5121 DBCALLED(NULL, 2); 5122 5123 if (acp->sgt) { 5124 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 5125 acp->left_cookien); 5126 } 5127 aac_free_dmamap(acp); 5128 ASSERT(acp->slotp == NULL); 5129 scsi_hba_pkt_free(ap, pkt); 5130 } 5131 5132 int 5133 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp, 5134 struct buf *bp, int flags, int (*cb)(), caddr_t arg) 5135 { 5136 int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP; 5137 uint_t oldcookiec; 5138 int bioerr; 5139 int rval; 5140 5141 oldcookiec = acp->left_cookien; 5142 5143 /* Move window to build s/g map */ 5144 if (acp->total_nwin > 0) { 5145 if (++acp->cur_win < acp->total_nwin) { 5146 off_t off; 5147 size_t len; 5148 5149 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win, 5150 &off, &len, &acp->cookie, &acp->left_cookien); 5151 if (rval == DDI_SUCCESS) 5152 goto get_dma_cookies; 5153 AACDB_PRINT(softs, CE_WARN, 5154 "ddi_dma_getwin() fail %d", rval); 5155 return (AACERR); 5156 } 5157 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer"); 5158 return (AACERR); 5159 } 5160 5161 /* We need to transfer data, so we alloc DMA resources for this pkt */ 5162 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) { 5163 uint_t dma_flags = 0; 5164 struct aac_sge *sge; 5165 5166 /* 5167 * We will still use this point to fake some 5168 * infomation in tran_start 5169 */ 5170 acp->bp = bp; 5171 5172 /* Set dma flags */ 5173 if (BUF_IS_READ(bp)) { 5174 dma_flags |= DDI_DMA_READ; 5175 acp->flags |= AAC_CMD_BUF_READ; 5176 } else { 5177 dma_flags |= DDI_DMA_WRITE; 5178 acp->flags |= AAC_CMD_BUF_WRITE; 5179 } 5180 if (flags & PKT_CONSISTENT) 5181 dma_flags |= DDI_DMA_CONSISTENT; 5182 if (flags & PKT_DMA_PARTIAL) 5183 dma_flags |= DDI_DMA_PARTIAL; 5184 5185 /* Alloc buf dma handle */ 5186 if (!acp->buf_dma_handle) { 5187 rval = ddi_dma_alloc_handle(softs->devinfo_p, 5188 &softs->buf_dma_attr, cb, arg, 5189 &acp->buf_dma_handle); 5190 if (rval != DDI_SUCCESS) { 5191 AACDB_PRINT(softs, CE_WARN, 5192 "Can't allocate DMA handle, errno=%d", 5193 rval); 5194 goto error_out; 5195 } 5196 } 5197 5198 /* Bind buf */ 5199 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) { 5200 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle, 5201 bp, dma_flags, cb, arg, &acp->cookie, 5202 &acp->left_cookien); 5203 } else { 5204 size_t bufsz; 5205 5206 AACDB_PRINT_TRAN(softs, 5207 "non-aligned buffer: addr=0x%p, cnt=%lu", 5208 (void *)bp->b_un.b_addr, bp->b_bcount); 5209 if (bp->b_flags & (B_PAGEIO|B_PHYS)) 5210 bp_mapin(bp); 5211 5212 rval = ddi_dma_mem_alloc(acp->buf_dma_handle, 5213 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN), 5214 &softs->acc_attr, DDI_DMA_STREAMING, 5215 cb, arg, &acp->abp, &bufsz, &acp->abh); 5216 5217 if (rval != DDI_SUCCESS) { 5218 AACDB_PRINT(softs, CE_NOTE, 5219 "Cannot alloc DMA to non-aligned buf"); 5220 bioerr = 0; 5221 goto error_out; 5222 } 5223 5224 if (acp->flags & AAC_CMD_BUF_WRITE) 5225 ddi_rep_put8(acp->abh, 5226 (uint8_t *)bp->b_un.b_addr, 5227 (uint8_t *)acp->abp, bp->b_bcount, 5228 DDI_DEV_AUTOINCR); 5229 5230 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle, 5231 NULL, acp->abp, bufsz, dma_flags, cb, 
arg, 5232 &acp->cookie, &acp->left_cookien); 5233 } 5234 5235 switch (rval) { 5236 case DDI_DMA_PARTIAL_MAP: 5237 if (ddi_dma_numwin(acp->buf_dma_handle, 5238 &acp->total_nwin) == DDI_FAILURE) { 5239 AACDB_PRINT(softs, CE_WARN, 5240 "Cannot get number of DMA windows"); 5241 bioerr = 0; 5242 goto error_out; 5243 } 5244 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 5245 acp->left_cookien); 5246 acp->cur_win = 0; 5247 break; 5248 5249 case DDI_DMA_MAPPED: 5250 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)", 5251 acp->left_cookien); 5252 acp->cur_win = 0; 5253 acp->total_nwin = 1; 5254 break; 5255 5256 case DDI_DMA_NORESOURCES: 5257 bioerr = 0; 5258 AACDB_PRINT(softs, CE_WARN, 5259 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES"); 5260 goto error_out; 5261 case DDI_DMA_BADATTR: 5262 case DDI_DMA_NOMAPPING: 5263 bioerr = EFAULT; 5264 AACDB_PRINT(softs, CE_WARN, 5265 "Cannot bind buf for DMA: DDI_DMA_NOMAPPING"); 5266 goto error_out; 5267 case DDI_DMA_TOOBIG: 5268 bioerr = EINVAL; 5269 AACDB_PRINT(softs, CE_WARN, 5270 "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)", 5271 bp->b_bcount); 5272 goto error_out; 5273 default: 5274 bioerr = EINVAL; 5275 AACDB_PRINT(softs, CE_WARN, 5276 "Cannot bind buf for DMA: %d", rval); 5277 goto error_out; 5278 } 5279 acp->flags |= AAC_CMD_DMA_VALID; 5280 5281 get_dma_cookies: 5282 ASSERT(acp->left_cookien > 0); 5283 if (acp->left_cookien > softs->aac_sg_tablesize) { 5284 AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d", 5285 acp->left_cookien); 5286 bioerr = EINVAL; 5287 goto error_out; 5288 } 5289 if (oldcookiec != acp->left_cookien && acp->sgt != NULL) { 5290 kmem_free(acp->sgt, sizeof (struct aac_sge) * \ 5291 oldcookiec); 5292 acp->sgt = NULL; 5293 } 5294 if (acp->sgt == NULL) { 5295 acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \ 5296 acp->left_cookien, kf); 5297 if (acp->sgt == NULL) { 5298 AACDB_PRINT(softs, CE_WARN, 5299 "sgt kmem_alloc fail"); 5300 bioerr = ENOMEM; 5301 goto error_out; 5302 } 5303 } 5304 5305 sge = &acp->sgt[0]; 5306 sge->bcount = acp->cookie.dmac_size; 5307 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 5308 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 5309 acp->bcount = acp->cookie.dmac_size; 5310 for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) { 5311 ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie); 5312 sge->bcount = acp->cookie.dmac_size; 5313 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress); 5314 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress); 5315 acp->bcount += acp->cookie.dmac_size; 5316 } 5317 5318 /* 5319 * Note: The old DMA engine do not correctly handle 5320 * dma_attr_maxxfer attribute. So we have to ensure 5321 * it by ourself. 5322 */ 5323 if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) { 5324 AACDB_PRINT(softs, CE_NOTE, 5325 "large xfer size received %d\n", acp->bcount); 5326 bioerr = EINVAL; 5327 goto error_out; 5328 } 5329 5330 acp->total_xfer += acp->bcount; 5331 5332 if (acp->pkt) { 5333 /* Return remaining byte count */ 5334 if (acp->total_xfer <= bp->b_bcount) { 5335 acp->pkt->pkt_resid = bp->b_bcount - \ 5336 acp->total_xfer; 5337 } else { 5338 /* 5339 * Allocated DMA size is greater than the buf 5340 * size of bp. This is caused by devices like 5341 * tape. we have extra bytes allocated, but 5342 * the packet residual has to stay correct. 
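				 * Clamping pkt_resid to 0 here keeps the
				 * residual reported to the target driver from
				 * going negative in that case.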
5343 */ 5344 acp->pkt->pkt_resid = 0; 5345 } 5346 AACDB_PRINT_TRAN(softs, 5347 "bp=0x%p, xfered=%d/%d, resid=%d", 5348 (void *)bp->b_un.b_addr, (int)acp->total_xfer, 5349 (int)bp->b_bcount, (int)acp->pkt->pkt_resid); 5350 } 5351 } 5352 return (AACOK); 5353 5354 error_out: 5355 bioerror(bp, bioerr); 5356 return (AACERR); 5357 } 5358 5359 static struct scsi_pkt * 5360 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, 5361 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags, 5362 int (*callback)(), caddr_t arg) 5363 { 5364 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran); 5365 struct aac_cmd *acp, *new_acp; 5366 5367 DBCALLED(softs, 2); 5368 5369 /* Allocate pkt */ 5370 if (pkt == NULL) { 5371 int slen; 5372 5373 /* Force auto request sense */ 5374 slen = (statuslen > softs->slen) ? statuslen : softs->slen; 5375 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen, 5376 slen, tgtlen, sizeof (struct aac_cmd), callback, arg); 5377 if (pkt == NULL) { 5378 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed"); 5379 return (NULL); 5380 } 5381 acp = new_acp = PKT2AC(pkt); 5382 acp->pkt = pkt; 5383 acp->cmdlen = cmdlen; 5384 5385 if (ap->a_target < AAC_MAX_LD) { 5386 acp->dvp = &softs->containers[ap->a_target].dev; 5387 acp->aac_cmd_fib = softs->aac_cmd_fib; 5388 acp->ac_comp = aac_ld_complete; 5389 } else { 5390 _NOTE(ASSUMING_PROTECTED(softs->nondasds)) 5391 5392 acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev; 5393 acp->aac_cmd_fib = softs->aac_cmd_fib_scsi; 5394 acp->ac_comp = aac_pd_complete; 5395 } 5396 } else { 5397 acp = PKT2AC(pkt); 5398 new_acp = NULL; 5399 } 5400 5401 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK) 5402 return (pkt); 5403 5404 if (new_acp) 5405 aac_tran_destroy_pkt(ap, pkt); 5406 return (NULL); 5407 } 5408 5409 /* 5410 * tran_sync_pkt(9E) - explicit DMA synchronization 5411 */ 5412 /*ARGSUSED*/ 5413 static void 5414 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 5415 { 5416 struct aac_cmd *acp = PKT2AC(pkt); 5417 5418 DBCALLED(NULL, 2); 5419 5420 if (aac_dma_sync_ac(acp) != AACOK) 5421 ddi_fm_service_impact( 5422 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p, 5423 DDI_SERVICE_UNAFFECTED); 5424 } 5425 5426 /* 5427 * tran_dmafree(9E) - deallocate DMA resources allocated for command 5428 */ 5429 /*ARGSUSED*/ 5430 static void 5431 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 5432 { 5433 struct aac_cmd *acp = PKT2AC(pkt); 5434 5435 DBCALLED(NULL, 2); 5436 5437 aac_free_dmamap(acp); 5438 } 5439 5440 static int 5441 aac_do_quiesce(struct aac_softstate *softs) 5442 { 5443 aac_hold_bus(softs, AAC_IOCMD_ASYNC); 5444 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) { 5445 aac_start_drain(softs); 5446 do { 5447 if (cv_wait_sig(&softs->drain_cv, 5448 &softs->io_lock) == 0) { 5449 /* Quiesce has been interrupted */ 5450 aac_stop_drain(softs); 5451 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5452 aac_start_waiting_io(softs); 5453 return (AACERR); 5454 } 5455 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]); 5456 aac_stop_drain(softs); 5457 } 5458 5459 softs->state |= AAC_STATE_QUIESCED; 5460 return (AACOK); 5461 } 5462 5463 static int 5464 aac_tran_quiesce(dev_info_t *dip) 5465 { 5466 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5467 int rval; 5468 5469 DBCALLED(softs, 1); 5470 5471 mutex_enter(&softs->io_lock); 5472 if (aac_do_quiesce(softs) == AACOK) 5473 rval = 0; 5474 else 5475 rval = 1; 5476 mutex_exit(&softs->io_lock); 5477 return (rval); 5478 } 5479 5480 static int 5481 aac_do_unquiesce(struct 
aac_softstate *softs) 5482 { 5483 softs->state &= ~AAC_STATE_QUIESCED; 5484 aac_unhold_bus(softs, AAC_IOCMD_ASYNC); 5485 5486 aac_start_waiting_io(softs); 5487 return (AACOK); 5488 } 5489 5490 static int 5491 aac_tran_unquiesce(dev_info_t *dip) 5492 { 5493 struct aac_softstate *softs = AAC_DIP2SOFTS(dip); 5494 int rval; 5495 5496 DBCALLED(softs, 1); 5497 5498 mutex_enter(&softs->io_lock); 5499 if (aac_do_unquiesce(softs) == AACOK) 5500 rval = 0; 5501 else 5502 rval = 1; 5503 mutex_exit(&softs->io_lock); 5504 return (rval); 5505 } 5506 5507 static int 5508 aac_hba_setup(struct aac_softstate *softs) 5509 { 5510 scsi_hba_tran_t *hba_tran; 5511 int rval; 5512 5513 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP); 5514 if (hba_tran == NULL) 5515 return (AACERR); 5516 hba_tran->tran_hba_private = softs; 5517 hba_tran->tran_tgt_init = aac_tran_tgt_init; 5518 hba_tran->tran_tgt_free = aac_tran_tgt_free; 5519 hba_tran->tran_tgt_probe = scsi_hba_probe; 5520 hba_tran->tran_start = aac_tran_start; 5521 hba_tran->tran_getcap = aac_tran_getcap; 5522 hba_tran->tran_setcap = aac_tran_setcap; 5523 hba_tran->tran_init_pkt = aac_tran_init_pkt; 5524 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt; 5525 hba_tran->tran_reset = aac_tran_reset; 5526 hba_tran->tran_abort = aac_tran_abort; 5527 hba_tran->tran_sync_pkt = aac_tran_sync_pkt; 5528 hba_tran->tran_dmafree = aac_tran_dmafree; 5529 hba_tran->tran_quiesce = aac_tran_quiesce; 5530 hba_tran->tran_unquiesce = aac_tran_unquiesce; 5531 hba_tran->tran_bus_config = aac_tran_bus_config; 5532 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr, 5533 hba_tran, 0); 5534 if (rval != DDI_SUCCESS) { 5535 scsi_hba_tran_free(hba_tran); 5536 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed"); 5537 return (AACERR); 5538 } 5539 5540 softs->hba_tran = hba_tran; 5541 return (AACOK); 5542 } 5543 5544 /* 5545 * FIB setup operations 5546 */ 5547 5548 /* 5549 * Init FIB header 5550 */ 5551 static void 5552 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_cmd *acp, 5553 uint16_t cmd) 5554 { 5555 struct aac_slot *slotp = acp->slotp; 5556 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5557 struct aac_fib *fibp = slotp->fibp; 5558 uint32_t xfer_state; 5559 5560 xfer_state = 5561 AAC_FIBSTATE_HOSTOWNED | 5562 AAC_FIBSTATE_INITIALISED | 5563 AAC_FIBSTATE_EMPTY | 5564 AAC_FIBSTATE_FAST_RESPONSE | /* enable fast io */ 5565 AAC_FIBSTATE_FROMHOST | 5566 AAC_FIBSTATE_REXPECTED | 5567 AAC_FIBSTATE_NORM; 5568 5569 if (!(acp->flags & AAC_CMD_SYNC)) 5570 xfer_state |= AAC_FIBSTATE_ASYNC; 5571 5572 ddi_put32(acc, &fibp->Header.XferState, xfer_state); 5573 ddi_put16(acc, &fibp->Header.Command, cmd); 5574 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB); 5575 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */ 5576 ddi_put16(acc, &fibp->Header.Size, acp->fib_size); 5577 ddi_put16(acc, &fibp->Header.SenderSize, softs->aac_max_fib_size); 5578 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2)); 5579 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5580 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */ 5581 } 5582 5583 /* 5584 * Init FIB for raw IO command 5585 */ 5586 static void 5587 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp) 5588 { 5589 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5590 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0]; 5591 struct aac_sg_entryraw *sgp; 5592 struct aac_sge *sge; 5593 5594 /* Calculate FIB size */ 5595 
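	/*
	 * struct aac_raw_io already embeds the first SG entry, so only
	 * (left_cookien - 1) additional aac_sg_entryraw entries are counted
	 * in the FIB size below.
	 */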
acp->fib_size = sizeof (struct aac_fib_header) + \ 5596 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \ 5597 sizeof (struct aac_sg_entryraw); 5598 5599 aac_cmd_fib_header(softs, acp, RawIo); 5600 5601 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0); 5602 ddi_put16(acc, &io->BpTotal, 0); 5603 ddi_put16(acc, &io->BpComplete, 0); 5604 5605 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno)); 5606 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno)); 5607 ddi_put16(acc, &io->ContainerId, 5608 ((struct aac_container *)acp->dvp)->cid); 5609 5610 /* Fill SG table */ 5611 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien); 5612 ddi_put32(acc, &io->ByteCount, acp->bcount); 5613 5614 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0]; 5615 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5616 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5617 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5618 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5619 sgp->Next = 0; 5620 sgp->Prev = 0; 5621 sgp->Flags = 0; 5622 } 5623 } 5624 5625 /* Init FIB for 64-bit block IO command */ 5626 static void 5627 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp) 5628 { 5629 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5630 struct aac_blockread64 *br = (struct aac_blockread64 *) \ 5631 &acp->slotp->fibp->data[0]; 5632 struct aac_sg_entry64 *sgp; 5633 struct aac_sge *sge; 5634 5635 acp->fib_size = sizeof (struct aac_fib_header) + \ 5636 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \ 5637 sizeof (struct aac_sg_entry64); 5638 5639 aac_cmd_fib_header(softs, acp, ContainerCommand64); 5640 5641 /* 5642 * The definitions for aac_blockread64 and aac_blockwrite64 5643 * are the same. 5644 */ 5645 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5646 ddi_put16(acc, &br->ContainerId, 5647 ((struct aac_container *)acp->dvp)->cid); 5648 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ? 
5649 VM_CtHostRead64 : VM_CtHostWrite64); 5650 ddi_put16(acc, &br->Pad, 0); 5651 ddi_put16(acc, &br->Flags, 0); 5652 5653 /* Fill SG table */ 5654 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien); 5655 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE); 5656 5657 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0]; 5658 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5659 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5660 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5661 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5662 } 5663 } 5664 5665 /* Init FIB for block IO command */ 5666 static void 5667 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp) 5668 { 5669 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5670 struct aac_blockread *br = (struct aac_blockread *) \ 5671 &acp->slotp->fibp->data[0]; 5672 struct aac_sg_entry *sgp; 5673 struct aac_sge *sge = &acp->sgt[0]; 5674 5675 if (acp->flags & AAC_CMD_BUF_READ) { 5676 acp->fib_size = sizeof (struct aac_fib_header) + \ 5677 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \ 5678 sizeof (struct aac_sg_entry); 5679 5680 ddi_put32(acc, &br->Command, VM_CtBlockRead); 5681 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien); 5682 sgp = &br->SgMap.SgEntry[0]; 5683 } else { 5684 struct aac_blockwrite *bw = (struct aac_blockwrite *)br; 5685 5686 acp->fib_size = sizeof (struct aac_fib_header) + \ 5687 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \ 5688 sizeof (struct aac_sg_entry); 5689 5690 ddi_put32(acc, &bw->Command, VM_CtBlockWrite); 5691 ddi_put32(acc, &bw->Stable, CUNSTABLE); 5692 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien); 5693 sgp = &bw->SgMap.SgEntry[0]; 5694 } 5695 aac_cmd_fib_header(softs, acp, ContainerCommand); 5696 5697 /* 5698 * aac_blockread and aac_blockwrite have the similar 5699 * structure head, so use br for bw here 5700 */ 5701 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno); 5702 ddi_put32(acc, &br->ContainerId, 5703 ((struct aac_container *)acp->dvp)->cid); 5704 ddi_put32(acc, &br->ByteCount, acp->bcount); 5705 5706 /* Fill SG table */ 5707 for (sge = &acp->sgt[0]; 5708 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5709 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5710 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5711 } 5712 } 5713 5714 /*ARGSUSED*/ 5715 void 5716 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp) 5717 { 5718 struct aac_slot *slotp = acp->slotp; 5719 struct aac_fib *fibp = slotp->fibp; 5720 ddi_acc_handle_t acc = slotp->fib_acc_handle; 5721 5722 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp, 5723 acp->fib_size, /* only copy data of needed length */ 5724 DDI_DEV_AUTOINCR); 5725 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr); 5726 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2); 5727 } 5728 5729 static void 5730 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp) 5731 { 5732 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5733 struct aac_synchronize_command *sync = 5734 (struct aac_synchronize_command *)&acp->slotp->fibp->data[0]; 5735 5736 acp->fib_size = AAC_FIB_SIZEOF(struct aac_synchronize_command); 5737 5738 aac_cmd_fib_header(softs, acp, ContainerCommand); 5739 ddi_put32(acc, &sync->Command, VM_ContainerConfig); 5740 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE); 5741 ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid); 5742 ddi_put32(acc, &sync->Count, 5743 sizeof 
(((struct aac_synchronize_reply *)0)->Data)); 5744 } 5745 5746 /* 5747 * Start/Stop unit (Power Management) 5748 */ 5749 static void 5750 aac_cmd_fib_startstop(struct aac_softstate *softs, struct aac_cmd *acp) 5751 { 5752 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5753 struct aac_Container *cmd = 5754 (struct aac_Container *)&acp->slotp->fibp->data[0]; 5755 union scsi_cdb *cdbp = (void *)acp->pkt->pkt_cdbp; 5756 5757 acp->fib_size = AAC_FIB_SIZEOF(struct aac_Container); 5758 5759 aac_cmd_fib_header(softs, acp, ContainerCommand); 5760 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE); 5761 ddi_put32(acc, &cmd->Command, VM_ContainerConfig); 5762 ddi_put32(acc, &cmd->CTCommand.command, CT_PM_DRIVER_SUPPORT); 5763 ddi_put32(acc, &cmd->CTCommand.param[0], cdbp->cdb_opaque[4] & 1 ? \ 5764 AAC_PM_DRIVERSUP_START_UNIT : AAC_PM_DRIVERSUP_STOP_UNIT); 5765 ddi_put32(acc, &cmd->CTCommand.param[1], 5766 ((struct aac_container *)acp->dvp)->cid); 5767 ddi_put32(acc, &cmd->CTCommand.param[2], cdbp->cdb_opaque[1] & 1); 5768 } 5769 5770 /* 5771 * Init FIB for pass-through SCMD 5772 */ 5773 static void 5774 aac_cmd_fib_srb(struct aac_cmd *acp) 5775 { 5776 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5777 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5778 uint8_t *cdb; 5779 5780 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi); 5781 ddi_put32(acc, &srb->retry_limit, 0); 5782 ddi_put32(acc, &srb->cdb_size, acp->cmdlen); 5783 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */ 5784 if (acp->fibp == NULL) { 5785 if (acp->flags & AAC_CMD_BUF_READ) 5786 ddi_put32(acc, &srb->flags, SRB_DataIn); 5787 else if (acp->flags & AAC_CMD_BUF_WRITE) 5788 ddi_put32(acc, &srb->flags, SRB_DataOut); 5789 ddi_put32(acc, &srb->channel, 5790 ((struct aac_nondasd *)acp->dvp)->bus); 5791 ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid); 5792 ddi_put32(acc, &srb->lun, 0); 5793 cdb = acp->pkt->pkt_cdbp; 5794 } else { 5795 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0]; 5796 5797 ddi_put32(acc, &srb->flags, srb0->flags); 5798 ddi_put32(acc, &srb->channel, srb0->channel); 5799 ddi_put32(acc, &srb->id, srb0->id); 5800 ddi_put32(acc, &srb->lun, srb0->lun); 5801 cdb = srb0->cdb; 5802 } 5803 ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR); 5804 } 5805 5806 static void 5807 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp) 5808 { 5809 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5810 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5811 struct aac_sg_entry *sgp; 5812 struct aac_sge *sge; 5813 5814 acp->fib_size = sizeof (struct aac_fib_header) + \ 5815 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5816 acp->left_cookien * sizeof (struct aac_sg_entry); 5817 5818 /* Fill FIB and SRB headers, and copy cdb */ 5819 aac_cmd_fib_header(softs, acp, ScsiPortCommand); 5820 aac_cmd_fib_srb(acp); 5821 5822 /* Fill SG table */ 5823 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5824 ddi_put32(acc, &srb->count, acp->bcount); 5825 5826 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0]; 5827 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5828 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32); 5829 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5830 } 5831 } 5832 5833 static void 5834 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp) 5835 { 5836 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle; 5837 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0]; 5838 struct aac_sg_entry64 
*sgp; 5839 struct aac_sge *sge; 5840 5841 acp->fib_size = sizeof (struct aac_fib_header) + \ 5842 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \ 5843 acp->left_cookien * sizeof (struct aac_sg_entry64); 5844 5845 /* Fill FIB and SRB headers, and copy cdb */ 5846 aac_cmd_fib_header(softs, acp, ScsiPortCommandU64); 5847 aac_cmd_fib_srb(acp); 5848 5849 /* Fill SG table */ 5850 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien); 5851 ddi_put32(acc, &srb->count, acp->bcount); 5852 5853 for (sge = &acp->sgt[0], 5854 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0]; 5855 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) { 5856 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo); 5857 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi); 5858 ddi_put32(acc, &sgp->SgByteCount, sge->bcount); 5859 } 5860 } 5861 5862 static int 5863 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp) 5864 { 5865 struct aac_slot *slotp; 5866 5867 if (slotp = aac_get_slot(softs)) { 5868 acp->slotp = slotp; 5869 slotp->acp = acp; 5870 acp->aac_cmd_fib(softs, acp); 5871 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, 5872 DDI_DMA_SYNC_FORDEV); 5873 return (AACOK); 5874 } 5875 return (AACERR); 5876 } 5877 5878 static int 5879 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp) 5880 { 5881 struct aac_device *dvp = acp->dvp; 5882 int q = AAC_CMDQ(acp); 5883 5884 if (softs->bus_ncmds[q] < softs->bus_throttle[q]) { 5885 if (dvp) { 5886 if (dvp->ncmds[q] < dvp->throttle[q]) { 5887 if (!(acp->flags & AAC_CMD_NTAG) || 5888 dvp->ncmds[q] == 0) { 5889 return (aac_cmd_slot_bind(softs, acp)); 5890 } 5891 ASSERT(q == AAC_CMDQ_ASYNC); 5892 aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC, 5893 AAC_THROTTLE_DRAIN); 5894 } 5895 } else { 5896 return (aac_cmd_slot_bind(softs, acp)); 5897 } 5898 } 5899 return (AACERR); 5900 } 5901 5902 static int 5903 aac_sync_fib_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp) 5904 { 5905 struct aac_slot *slotp; 5906 5907 while (softs->sync_ac.slotp) 5908 cv_wait(&softs->sync_fib_cv, &softs->io_lock); 5909 5910 if (slotp = aac_get_slot(softs)) { 5911 ASSERT(acp->slotp == NULL); 5912 5913 acp->slotp = slotp; 5914 slotp->acp = acp; 5915 return (AACOK); 5916 } 5917 return (AACERR); 5918 } 5919 5920 static void 5921 aac_sync_fib_slot_release(struct aac_softstate *softs, struct aac_cmd *acp) 5922 { 5923 ASSERT(acp->slotp); 5924 5925 aac_release_slot(softs, acp->slotp); 5926 acp->slotp->acp = NULL; 5927 acp->slotp = NULL; 5928 5929 cv_signal(&softs->sync_fib_cv); 5930 } 5931 5932 static void 5933 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp) 5934 { 5935 struct aac_slot *slotp = acp->slotp; 5936 int q = AAC_CMDQ(acp); 5937 int rval; 5938 5939 /* Set ac and pkt */ 5940 if (acp->pkt) { /* ac from ioctl has no pkt */ 5941 acp->pkt->pkt_state |= 5942 STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD; 5943 } 5944 if (acp->timeout) /* 0 indicates no timeout */ 5945 acp->timeout += aac_timebase + aac_tick; 5946 5947 if (acp->dvp) 5948 acp->dvp->ncmds[q]++; 5949 softs->bus_ncmds[q]++; 5950 aac_cmd_enqueue(&softs->q_busy, acp); 5951 5952 AACDB_PRINT_FIB(softs, slotp); 5953 5954 if (softs->flags & AAC_FLAGS_NEW_COMM) { 5955 rval = aac_send_command(softs, slotp); 5956 } else { 5957 /* 5958 * If fib can not be enqueued, the adapter is in an abnormal 5959 * state, there will be no interrupt to us. 
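 * In that case the error path below backs the command out by hand:
 * STATE_SENT_CMD is cleared, the packet reason is set to CMD_INCOMPLETE,
 * aac_end_io() is called, and, for commands that expect a callback, a
 * soft interrupt is triggered so completion processing still happens
 * without a hardware interrupt.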
5960 */ 5961 rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q, 5962 slotp->fib_phyaddr, acp->fib_size); 5963 } 5964 5965 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) 5966 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED); 5967 5968 /* 5969 * NOTE: We send command only when slots availabe, so should never 5970 * reach here. 5971 */ 5972 if (rval != AACOK) { 5973 AACDB_PRINT(softs, CE_NOTE, "SCMD send failed"); 5974 if (acp->pkt) { 5975 acp->pkt->pkt_state &= ~STATE_SENT_CMD; 5976 aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0); 5977 } 5978 aac_end_io(softs, acp); 5979 if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB))) 5980 ddi_trigger_softintr(softs->softint_id); 5981 } 5982 } 5983 5984 static void 5985 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q) 5986 { 5987 struct aac_cmd *acp, *next_acp; 5988 5989 /* Serve as many waiting io's as possible */ 5990 for (acp = q->q_head; acp; acp = next_acp) { 5991 next_acp = acp->next; 5992 if (aac_bind_io(softs, acp) == AACOK) { 5993 aac_cmd_delete(q, acp); 5994 aac_start_io(softs, acp); 5995 } 5996 if (softs->free_io_slot_head == NULL) 5997 break; 5998 } 5999 } 6000 6001 static void 6002 aac_start_waiting_io(struct aac_softstate *softs) 6003 { 6004 /* 6005 * Sync FIB io is served before async FIB io so that io requests 6006 * sent by interactive userland commands get responded asap. 6007 */ 6008 if (softs->q_wait[AAC_CMDQ_SYNC].q_head) 6009 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]); 6010 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head) 6011 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]); 6012 } 6013 6014 static void 6015 aac_drain_comp_q(struct aac_softstate *softs) 6016 { 6017 struct aac_cmd *acp; 6018 struct scsi_pkt *pkt; 6019 6020 /*CONSTCOND*/ 6021 while (1) { 6022 mutex_enter(&softs->q_comp_mutex); 6023 acp = aac_cmd_dequeue(&softs->q_comp); 6024 mutex_exit(&softs->q_comp_mutex); 6025 if (acp != NULL) { 6026 ASSERT(acp->pkt != NULL); 6027 pkt = acp->pkt; 6028 6029 if (pkt->pkt_reason == CMD_CMPLT) { 6030 /* 6031 * Consistent packets need to be sync'ed first 6032 */ 6033 if ((acp->flags & AAC_CMD_CONSISTENT) && 6034 (acp->flags & AAC_CMD_BUF_READ)) { 6035 if (aac_dma_sync_ac(acp) != AACOK) { 6036 ddi_fm_service_impact( 6037 softs->devinfo_p, 6038 DDI_SERVICE_UNAFFECTED); 6039 pkt->pkt_reason = CMD_TRAN_ERR; 6040 pkt->pkt_statistics = 0; 6041 } 6042 } 6043 if ((aac_check_acc_handle(softs-> \ 6044 comm_space_acc_handle) != DDI_SUCCESS) || 6045 (aac_check_acc_handle(softs-> \ 6046 pci_mem_handle) != DDI_SUCCESS)) { 6047 ddi_fm_service_impact(softs->devinfo_p, 6048 DDI_SERVICE_UNAFFECTED); 6049 ddi_fm_acc_err_clear(softs-> \ 6050 pci_mem_handle, DDI_FME_VER0); 6051 pkt->pkt_reason = CMD_TRAN_ERR; 6052 pkt->pkt_statistics = 0; 6053 } 6054 if (aac_check_dma_handle(softs-> \ 6055 comm_space_dma_handle) != DDI_SUCCESS) { 6056 ddi_fm_service_impact(softs->devinfo_p, 6057 DDI_SERVICE_UNAFFECTED); 6058 pkt->pkt_reason = CMD_TRAN_ERR; 6059 pkt->pkt_statistics = 0; 6060 } 6061 } 6062 scsi_hba_pkt_comp(pkt); 6063 } else { 6064 break; 6065 } 6066 } 6067 } 6068 6069 static int 6070 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp) 6071 { 6072 size_t rlen; 6073 ddi_dma_cookie_t cookie; 6074 uint_t cookien; 6075 6076 /* Allocate FIB dma resource */ 6077 if (ddi_dma_alloc_handle( 6078 softs->devinfo_p, 6079 &softs->addr_dma_attr, 6080 DDI_DMA_SLEEP, 6081 NULL, 6082 &slotp->fib_dma_handle) != DDI_SUCCESS) { 6083 AACDB_PRINT(softs, CE_WARN, 6084 "Cannot alloc dma handle for 
slot fib area"); 6085 goto error; 6086 } 6087 if (ddi_dma_mem_alloc( 6088 slotp->fib_dma_handle, 6089 softs->aac_max_fib_size, 6090 &softs->acc_attr, 6091 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 6092 DDI_DMA_SLEEP, 6093 NULL, 6094 (caddr_t *)&slotp->fibp, 6095 &rlen, 6096 &slotp->fib_acc_handle) != DDI_SUCCESS) { 6097 AACDB_PRINT(softs, CE_WARN, 6098 "Cannot alloc mem for slot fib area"); 6099 goto error; 6100 } 6101 if (ddi_dma_addr_bind_handle( 6102 slotp->fib_dma_handle, 6103 NULL, 6104 (caddr_t)slotp->fibp, 6105 softs->aac_max_fib_size, 6106 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 6107 DDI_DMA_SLEEP, 6108 NULL, 6109 &cookie, 6110 &cookien) != DDI_DMA_MAPPED) { 6111 AACDB_PRINT(softs, CE_WARN, 6112 "dma bind failed for slot fib area"); 6113 goto error; 6114 } 6115 6116 /* Check dma handles allocated in fib attach */ 6117 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) { 6118 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 6119 goto error; 6120 } 6121 6122 /* Check acc handles allocated in fib attach */ 6123 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) { 6124 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST); 6125 goto error; 6126 } 6127 6128 slotp->fib_phyaddr = cookie.dmac_laddress; 6129 return (AACOK); 6130 6131 error: 6132 if (slotp->fib_acc_handle) { 6133 ddi_dma_mem_free(&slotp->fib_acc_handle); 6134 slotp->fib_acc_handle = NULL; 6135 } 6136 if (slotp->fib_dma_handle) { 6137 ddi_dma_free_handle(&slotp->fib_dma_handle); 6138 slotp->fib_dma_handle = NULL; 6139 } 6140 return (AACERR); 6141 } 6142 6143 static void 6144 aac_free_fib(struct aac_slot *slotp) 6145 { 6146 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle); 6147 ddi_dma_mem_free(&slotp->fib_acc_handle); 6148 slotp->fib_acc_handle = NULL; 6149 ddi_dma_free_handle(&slotp->fib_dma_handle); 6150 slotp->fib_dma_handle = NULL; 6151 slotp->fib_phyaddr = 0; 6152 } 6153 6154 static void 6155 aac_alloc_fibs(struct aac_softstate *softs) 6156 { 6157 int i; 6158 struct aac_slot *slotp; 6159 6160 for (i = 0; i < softs->total_slots && 6161 softs->total_fibs < softs->total_slots; i++) { 6162 slotp = &(softs->io_slot[i]); 6163 if (slotp->fib_phyaddr) 6164 continue; 6165 if (aac_alloc_fib(softs, slotp) != AACOK) 6166 break; 6167 6168 /* Insert the slot to the free slot list */ 6169 aac_release_slot(softs, slotp); 6170 softs->total_fibs++; 6171 } 6172 } 6173 6174 static void 6175 aac_destroy_fibs(struct aac_softstate *softs) 6176 { 6177 struct aac_slot *slotp; 6178 6179 while ((slotp = softs->free_io_slot_head) != NULL) { 6180 ASSERT(slotp->fib_phyaddr); 6181 softs->free_io_slot_head = slotp->next; 6182 aac_free_fib(slotp); 6183 ASSERT(slotp->index == (slotp - softs->io_slot)); 6184 softs->total_fibs--; 6185 } 6186 ASSERT(softs->total_fibs == 0); 6187 } 6188 6189 static int 6190 aac_create_slots(struct aac_softstate *softs) 6191 { 6192 int i; 6193 6194 softs->total_slots = softs->aac_max_fibs; 6195 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \ 6196 softs->total_slots, KM_SLEEP); 6197 if (softs->io_slot == NULL) { 6198 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot"); 6199 return (AACERR); 6200 } 6201 for (i = 0; i < softs->total_slots; i++) 6202 softs->io_slot[i].index = i; 6203 softs->free_io_slot_head = NULL; 6204 softs->total_fibs = 0; 6205 return (AACOK); 6206 } 6207 6208 static void 6209 aac_destroy_slots(struct aac_softstate *softs) 6210 { 6211 ASSERT(softs->free_io_slot_head == NULL); 6212 6213 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \ 6214 softs->total_slots); 
6215 softs->io_slot = NULL; 6216 softs->total_slots = 0; 6217 } 6218 6219 struct aac_slot * 6220 aac_get_slot(struct aac_softstate *softs) 6221 { 6222 struct aac_slot *slotp; 6223 6224 if ((slotp = softs->free_io_slot_head) != NULL) { 6225 softs->free_io_slot_head = slotp->next; 6226 slotp->next = NULL; 6227 } 6228 return (slotp); 6229 } 6230 6231 static void 6232 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp) 6233 { 6234 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots)); 6235 ASSERT(slotp == &softs->io_slot[slotp->index]); 6236 6237 slotp->acp = NULL; 6238 slotp->next = softs->free_io_slot_head; 6239 softs->free_io_slot_head = slotp; 6240 } 6241 6242 int 6243 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp) 6244 { 6245 if (aac_bind_io(softs, acp) == AACOK) 6246 aac_start_io(softs, acp); 6247 else 6248 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp); 6249 6250 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) 6251 return (TRAN_ACCEPT); 6252 /* 6253 * Because sync FIB is always 512 bytes and used for critical 6254 * functions, async FIB is used for poll IO. 6255 */ 6256 if (acp->flags & AAC_CMD_NO_INTR) { 6257 if (aac_do_poll_io(softs, acp) == AACOK) 6258 return (TRAN_ACCEPT); 6259 } else { 6260 if (aac_do_sync_io(softs, acp) == AACOK) 6261 return (TRAN_ACCEPT); 6262 } 6263 return (TRAN_BADPKT); 6264 } 6265 6266 static int 6267 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp) 6268 { 6269 int (*intr_handler)(struct aac_softstate *); 6270 6271 /* 6272 * Interrupt is disabled, we have to poll the adapter by ourselves. 6273 */ 6274 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ? 6275 aac_process_intr_new : aac_process_intr_old; 6276 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) { 6277 int i = AAC_POLL_TIME * 1000; 6278 6279 AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i); 6280 if (i == 0) 6281 aac_cmd_timeout(softs, acp); 6282 } 6283 6284 ddi_trigger_softintr(softs->softint_id); 6285 6286 if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR)) 6287 return (AACOK); 6288 return (AACERR); 6289 } 6290 6291 static int 6292 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp) 6293 { 6294 ASSERT(softs && acp); 6295 6296 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) 6297 cv_wait(&softs->event, &softs->io_lock); 6298 6299 if (acp->flags & AAC_CMD_CMPLT) 6300 return (AACOK); 6301 return (AACERR); 6302 } 6303 6304 static int 6305 aac_dma_sync_ac(struct aac_cmd *acp) 6306 { 6307 if (acp->buf_dma_handle) { 6308 if (acp->flags & AAC_CMD_BUF_WRITE) { 6309 if (acp->abp != NULL) 6310 ddi_rep_put8(acp->abh, 6311 (uint8_t *)acp->bp->b_un.b_addr, 6312 (uint8_t *)acp->abp, acp->bp->b_bcount, 6313 DDI_DEV_AUTOINCR); 6314 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 6315 DDI_DMA_SYNC_FORDEV); 6316 } else { 6317 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0, 6318 DDI_DMA_SYNC_FORCPU); 6319 if (aac_check_dma_handle(acp->buf_dma_handle) != 6320 DDI_SUCCESS) 6321 return (AACERR); 6322 if (acp->abp != NULL) 6323 ddi_rep_get8(acp->abh, 6324 (uint8_t *)acp->bp->b_un.b_addr, 6325 (uint8_t *)acp->abp, acp->bp->b_bcount, 6326 DDI_DEV_AUTOINCR); 6327 } 6328 } 6329 return (AACOK); 6330 } 6331 6332 /* 6333 * Copy AIF from adapter to the empty AIF slot and inform AIF threads 6334 */ 6335 static void 6336 aac_save_aif(struct aac_softstate *softs, ddi_acc_handle_t acc, 6337 struct aac_fib *fibp0, int fib_size0) 6338 { 6339 struct aac_fib *fibp; /* FIB in AIF queue */ 6340 int fib_size; 6341 
uint16_t fib_command; 6342 int current, next; 6343 6344 /* Ignore non AIF messages */ 6345 fib_command = ddi_get16(acc, &fibp0->Header.Command); 6346 if (fib_command != AifRequest) { 6347 cmn_err(CE_WARN, "!Unknown command from controller"); 6348 return; 6349 } 6350 6351 mutex_enter(&softs->aifq_mutex); 6352 6353 /* Save AIF */ 6354 fibp = &softs->aifq[softs->aifq_idx].d; 6355 fib_size = (fib_size0 > AAC_FIB_SIZE) ? AAC_FIB_SIZE : fib_size0; 6356 ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0, fib_size, 6357 DDI_DEV_AUTOINCR); 6358 6359 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) { 6360 ddi_fm_service_impact(softs->devinfo_p, 6361 DDI_SERVICE_UNAFFECTED); 6362 mutex_exit(&softs->aifq_mutex); 6363 return; 6364 } 6365 6366 AACDB_PRINT_AIF(softs, (struct aac_aif_command *)&fibp->data[0]); 6367 6368 /* Modify AIF contexts */ 6369 current = softs->aifq_idx; 6370 next = (current + 1) % AAC_AIFQ_LENGTH; 6371 if (next == 0) { 6372 struct aac_fib_context *ctx_p; 6373 6374 softs->aifq_wrap = 1; 6375 for (ctx_p = softs->fibctx_p; ctx_p; ctx_p = ctx_p->next) { 6376 if (next == ctx_p->ctx_idx) { 6377 ctx_p->ctx_flags |= AAC_CTXFLAG_FILLED; 6378 } else if (current == ctx_p->ctx_idx && 6379 (ctx_p->ctx_flags & AAC_CTXFLAG_FILLED)) { 6380 ctx_p->ctx_idx = next; 6381 ctx_p->ctx_overrun++; 6382 } 6383 } 6384 } 6385 softs->aifq_idx = next; 6386 6387 /* Wakeup AIF threads */ 6388 cv_broadcast(&softs->aifq_cv); 6389 mutex_exit(&softs->aifq_mutex); 6390 6391 /* Wakeup event thread to handle aif */ 6392 aac_event_disp(softs, AAC_EVENT_AIF); 6393 } 6394 6395 static int 6396 aac_return_aif_common(struct aac_softstate *softs, struct aac_fib_context *ctx, 6397 struct aac_fib **fibpp) 6398 { 6399 int current; 6400 6401 current = ctx->ctx_idx; 6402 if (current == softs->aifq_idx && 6403 !(ctx->ctx_flags & AAC_CTXFLAG_FILLED)) 6404 return (EAGAIN); /* Empty */ 6405 6406 *fibpp = &softs->aifq[current].d; 6407 6408 ctx->ctx_flags &= ~AAC_CTXFLAG_FILLED; 6409 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; 6410 return (0); 6411 } 6412 6413 int 6414 aac_return_aif(struct aac_softstate *softs, struct aac_fib_context *ctx, 6415 struct aac_fib **fibpp) 6416 { 6417 int rval; 6418 6419 mutex_enter(&softs->aifq_mutex); 6420 rval = aac_return_aif_common(softs, ctx, fibpp); 6421 mutex_exit(&softs->aifq_mutex); 6422 return (rval); 6423 } 6424 6425 int 6426 aac_return_aif_wait(struct aac_softstate *softs, struct aac_fib_context *ctx, 6427 struct aac_fib **fibpp) 6428 { 6429 int rval; 6430 6431 mutex_enter(&softs->aifq_mutex); 6432 rval = aac_return_aif_common(softs, ctx, fibpp); 6433 if (rval == EAGAIN) { 6434 AACDB_PRINT(softs, CE_NOTE, "Waiting for AIF"); 6435 rval = cv_wait_sig(&softs->aifq_cv, &softs->aifq_mutex); 6436 } 6437 mutex_exit(&softs->aifq_mutex); 6438 return ((rval > 0) ? 0 : EINTR); 6439 } 6440 6441 /* 6442 * The following function comes from Adaptec: 6443 * 6444 * When driver sees a particular event that means containers are changed, it 6445 * will rescan containers. However a change may not be complete until some 6446 * other event is received. For example, creating or deleting an array will 6447 * incur as many as six AifEnConfigChange events which would generate six 6448 * container rescans. To diminish rescans, driver set a flag to wait for 6449 * another particular event. When sees that events come in, it will do rescan. 
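 *
 * For illustration, a hypothetical array creation might arrive as a burst
 * such as
 *
 *	AifEnAddContainer, AifEnConfigChange, ..., AifEnConfigChange
 *
 * aac_handle_aif() below only records devcfg_wait_on = AifEnConfigChange
 * on the first event; each following event is compared against
 * devcfg_wait_on, and the rescan (aac_probe_containers()) runs once, when
 * the awaited event type finally shows up.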
6450 */ 6451 static int 6452 aac_handle_aif(struct aac_softstate *softs, struct aac_aif_command *aif) 6453 { 6454 ddi_acc_handle_t acc = softs->comm_space_acc_handle; 6455 int en_type; 6456 int devcfg_needed; 6457 int cid; 6458 uint32_t bus_id, tgt_id; 6459 enum aac_cfg_event event = AAC_CFG_NULL_EXIST; 6460 6461 devcfg_needed = 0; 6462 en_type = LE_32((uint32_t)aif->data.EN.type); 6463 6464 switch (LE_32((uint32_t)aif->command)) { 6465 case AifCmdDriverNotify: { 6466 cid = LE_32(aif->data.EN.data.ECC.container[0]); 6467 6468 switch (en_type) { 6469 case AifDenMorphComplete: 6470 case AifDenVolumeExtendComplete: 6471 if (AAC_DEV_IS_VALID(&softs->containers[cid].dev)) 6472 softs->devcfg_wait_on = AifEnConfigChange; 6473 break; 6474 } 6475 if (softs->devcfg_wait_on == en_type) 6476 devcfg_needed = 1; 6477 break; 6478 } 6479 6480 case AifCmdEventNotify: 6481 cid = LE_32(aif->data.EN.data.ECC.container[0]); 6482 switch (en_type) { 6483 case AifEnAddContainer: 6484 case AifEnDeleteContainer: 6485 softs->devcfg_wait_on = AifEnConfigChange; 6486 break; 6487 case AifEnContainerChange: 6488 if (!softs->devcfg_wait_on) 6489 softs->devcfg_wait_on = AifEnConfigChange; 6490 break; 6491 case AifEnContainerEvent: 6492 if (ddi_get32(acc, &aif-> \ 6493 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE) 6494 devcfg_needed = 1; 6495 break; 6496 case AifEnAddJBOD: 6497 if (!(softs->flags & AAC_FLAGS_JBOD)) 6498 return (AACERR); 6499 event = AAC_CFG_ADD; 6500 bus_id = (cid >> 24) & 0xf; 6501 tgt_id = cid & 0xffff; 6502 break; 6503 case AifEnDeleteJBOD: 6504 if (!(softs->flags & AAC_FLAGS_JBOD)) 6505 return (AACERR); 6506 event = AAC_CFG_DELETE; 6507 bus_id = (cid >> 24) & 0xf; 6508 tgt_id = cid & 0xffff; 6509 break; 6510 } 6511 if (softs->devcfg_wait_on == en_type) 6512 devcfg_needed = 1; 6513 break; 6514 6515 case AifCmdJobProgress: 6516 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) { 6517 int pr_status; 6518 uint32_t pr_ftick, pr_ctick; 6519 6520 pr_status = LE_32((uint32_t)aif->data.PR[0].status); 6521 pr_ctick = LE_32(aif->data.PR[0].currentTick); 6522 pr_ftick = LE_32(aif->data.PR[0].finalTick); 6523 6524 if ((pr_ctick == pr_ftick) || 6525 (pr_status == AifJobStsSuccess)) 6526 softs->devcfg_wait_on = AifEnContainerChange; 6527 else if ((pr_ctick == 0) && 6528 (pr_status == AifJobStsRunning)) 6529 softs->devcfg_wait_on = AifEnContainerChange; 6530 } 6531 break; 6532 } 6533 6534 if (devcfg_needed) { 6535 softs->devcfg_wait_on = 0; 6536 (void) aac_probe_containers(softs); 6537 } 6538 6539 if (event != AAC_CFG_NULL_EXIST) { 6540 ASSERT(en_type == AifEnAddJBOD || en_type == AifEnDeleteJBOD); 6541 (void) aac_probe_jbod(softs, 6542 AAC_P2VTGT(softs, bus_id, tgt_id), event); 6543 } 6544 return (AACOK); 6545 } 6546 6547 6548 /* 6549 * Check and handle AIF events 6550 */ 6551 static void 6552 aac_aif_event(struct aac_softstate *softs) 6553 { 6554 struct aac_fib *fibp; 6555 6556 /*CONSTCOND*/ 6557 while (1) { 6558 if (aac_return_aif(softs, &softs->aifctx, &fibp) != 0) 6559 break; /* No more AIFs to handle, end loop */ 6560 6561 /* AIF overrun, array create/delete may missed. 
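The overrun counter is set in aac_save_aif() when the circular aifq laps a context that has not caught up, i.e. some AIFs were overwritten before this loop could see them.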
*/ 6562 if (softs->aifctx.ctx_overrun) { 6563 softs->aifctx.ctx_overrun = 0; 6564 } 6565 6566 /* AIF received, handle it */ 6567 struct aac_aif_command *aifp = 6568 (struct aac_aif_command *)&fibp->data[0]; 6569 uint32_t aif_command = LE_32((uint32_t)aifp->command); 6570 6571 if (aif_command == AifCmdDriverNotify || 6572 aif_command == AifCmdEventNotify || 6573 aif_command == AifCmdJobProgress) 6574 (void) aac_handle_aif(softs, aifp); 6575 } 6576 } 6577 6578 /* 6579 * Timeout recovery 6580 */ 6581 /*ARGSUSED*/ 6582 static void 6583 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp) 6584 { 6585 #ifdef DEBUG 6586 acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT; 6587 AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp); 6588 AACDB_PRINT_FIB(softs, acp->slotp); 6589 #endif 6590 6591 /* 6592 * Besides the firmware being in an unhealthy state, an overloaded 6593 * adapter may also incur pkt timeout. 6594 * There is a chance for an adapter with a slower IOP to take 6595 * longer than 60 seconds to process the commands, such as when 6596 * the adapter is doing a build on a RAID-5 while being required 6597 * to perform IOs. So longer completion times should be 6598 * tolerated. 6599 */ 6600 switch (aac_do_reset(softs)) { 6601 case AAC_IOP_RESET_SUCCEED: 6602 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET); 6603 aac_start_waiting_io(softs); 6604 break; 6605 case AAC_IOP_RESET_FAILED: 6606 /* Abort all waiting cmds when adapter is dead */ 6607 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT); 6608 break; 6609 case AAC_IOP_RESET_ABNORMAL: 6610 aac_start_waiting_io(softs); 6611 } 6612 } 6613 6614 /* 6615 * The following function comes from Adaptec: 6616 * 6617 * Time sync. command added to synchronize time with firmware every 30 6618 * minutes (required for correct AIF timestamps etc.) 6619 */ 6620 static void 6621 aac_sync_tick(struct aac_softstate *softs) 6622 { 6623 ddi_acc_handle_t acc; 6624 int rval; 6625 6626 mutex_enter(&softs->time_mutex); 6627 ASSERT(softs->time_sync <= softs->timebase); 6628 softs->time_sync = 0; 6629 mutex_exit(&softs->time_mutex); 6630 6631 /* Time sync. with firmware every AAC_SYNC_TICK */ 6632 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac); 6633 acc = softs->sync_ac.slotp->fib_acc_handle; 6634 6635 ddi_put32(acc, (void *)&softs->sync_ac.slotp->fibp->data[0], 6636 ddi_get_time()); 6637 rval = aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t)); 6638 aac_sync_fib_slot_release(softs, &softs->sync_ac); 6639 6640 mutex_enter(&softs->time_mutex); 6641 softs->time_sync = softs->timebase; 6642 if (rval != AACOK) 6643 /* retry shortly */ 6644 softs->time_sync += aac_tick << 1; 6645 else 6646 softs->time_sync += AAC_SYNC_TICK; 6647 mutex_exit(&softs->time_mutex); 6648 } 6649 6650 /* 6651 * Timeout checking and handling 6652 */ 6653 static void 6654 aac_daemon(struct aac_softstate *softs) 6655 { 6656 int time_out; /* set if timeout happened */ 6657 int time_adjust; 6658 uint32_t softs_timebase; 6659 6660 mutex_enter(&softs->time_mutex); 6661 ASSERT(softs->time_out <= softs->timebase); 6662 softs->time_out = 0; 6663 softs_timebase = softs->timebase; 6664 mutex_exit(&softs->time_mutex); 6665 6666 /* Check slots for timeout pkts */ 6667 time_adjust = 0; 6668 do { 6669 struct aac_cmd *acp; 6670 6671 time_out = 0; 6672 for (acp = softs->q_busy.q_head; acp; acp = acp->next) { 6673 if (acp->timeout == 0) 6674 continue; 6675 6676 /* 6677 * If timeout happened, update outstanding cmds 6678 * to be checked later again.
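 * Once a timeout has been handled, time_adjust is added to the deadline
 * of every remaining outstanding command on the next pass, so commands
 * that were stalled behind the recovery get extra time instead of being
 * flagged as timed out against the pre-reset timebase.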
6679 */ 6680 if (time_adjust) { 6681 acp->timeout += time_adjust; 6682 continue; 6683 } 6684 6685 if (acp->timeout <= softs_timebase) { 6686 aac_cmd_timeout(softs, acp); 6687 time_out = 1; 6688 time_adjust = aac_tick * drv_usectohz(1000000); 6689 break; /* timeout happened */ 6690 } else { 6691 break; /* no timeout */ 6692 } 6693 } 6694 } while (time_out); 6695 6696 mutex_enter(&softs->time_mutex); 6697 softs->time_out = softs->timebase + aac_tick; 6698 mutex_exit(&softs->time_mutex); 6699 } 6700 6701 /* 6702 * The event thread handles various tasks serially for the other parts of 6703 * the driver, so that they can run fast. 6704 */ 6705 static void 6706 aac_event_thread(struct aac_softstate *softs) 6707 { 6708 int run = 1; 6709 6710 DBCALLED(softs, 1); 6711 6712 mutex_enter(&softs->ev_lock); 6713 while (run) { 6714 int events; 6715 6716 if ((events = softs->events) == 0) { 6717 cv_wait(&softs->event_disp_cv, &softs->ev_lock); 6718 events = softs->events; 6719 } 6720 softs->events = 0; 6721 mutex_exit(&softs->ev_lock); 6722 6723 mutex_enter(&softs->io_lock); 6724 if ((softs->state & AAC_STATE_RUN) && 6725 (softs->state & AAC_STATE_DEAD) == 0) { 6726 if (events & AAC_EVENT_TIMEOUT) 6727 aac_daemon(softs); 6728 if (events & AAC_EVENT_SYNCTICK) 6729 aac_sync_tick(softs); 6730 if (events & AAC_EVENT_AIF) 6731 aac_aif_event(softs); 6732 } else { 6733 run = 0; 6734 } 6735 mutex_exit(&softs->io_lock); 6736 6737 mutex_enter(&softs->ev_lock); 6738 } 6739 6740 cv_signal(&softs->event_wait_cv); 6741 mutex_exit(&softs->ev_lock); 6742 } 6743 6744 /* 6745 * Internal timer. It is only responsible for time counting and reporting 6746 * time-related events. Event handling is done by aac_event_thread(), so that 6747 * the timer itself can be as precise as possible.
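 *
 * The resulting flow is roughly:
 *
 *	aac_timer() counts softs->timebase and calls aac_event_disp() with
 *	AAC_EVENT_TIMEOUT and/or AAC_EVENT_SYNCTICK as they come due;
 *	aac_event_thread(), woken through event_disp_cv, then runs
 *	aac_daemon(), aac_sync_tick() or aac_aif_event() for the posted
 *	events (AAC_EVENT_AIF itself is dispatched from aac_save_aif()).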
6748 */ 6749 static void 6750 aac_timer(void *arg) 6751 { 6752 struct aac_softstate *softs = arg; 6753 int events = 0; 6754 6755 mutex_enter(&softs->time_mutex); 6756 6757 /* If timer is being stopped, exit */ 6758 if (softs->timeout_id) { 6759 softs->timeout_id = timeout(aac_timer, (void *)softs, 6760 (aac_tick * drv_usectohz(1000000))); 6761 } else { 6762 mutex_exit(&softs->time_mutex); 6763 return; 6764 } 6765 6766 /* Time counting */ 6767 softs->timebase += aac_tick; 6768 6769 /* Check time related events */ 6770 if (softs->time_out && softs->time_out <= softs->timebase) 6771 events |= AAC_EVENT_TIMEOUT; 6772 if (softs->time_sync && softs->time_sync <= softs->timebase) 6773 events |= AAC_EVENT_SYNCTICK; 6774 6775 mutex_exit(&softs->time_mutex); 6776 6777 if (events) 6778 aac_event_disp(softs, events); 6779 } 6780 6781 /* 6782 * Dispatch events to daemon thread for handling 6783 */ 6784 static void 6785 aac_event_disp(struct aac_softstate *softs, int events) 6786 { 6787 mutex_enter(&softs->ev_lock); 6788 softs->events |= events; 6789 cv_broadcast(&softs->event_disp_cv); 6790 mutex_exit(&softs->ev_lock); 6791 } 6792 6793 /* 6794 * Architecture dependent functions 6795 */ 6796 static int 6797 aac_rx_get_fwstatus(struct aac_softstate *softs) 6798 { 6799 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6800 } 6801 6802 static int 6803 aac_rx_get_mailbox(struct aac_softstate *softs, int mb) 6804 { 6805 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4)); 6806 } 6807 6808 static void 6809 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6810 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6811 { 6812 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd); 6813 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0); 6814 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1); 6815 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2); 6816 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3); 6817 } 6818 6819 static int 6820 aac_rkt_get_fwstatus(struct aac_softstate *softs) 6821 { 6822 return (PCI_MEM_GET32(softs, AAC_OMR0)); 6823 } 6824 6825 static int 6826 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb) 6827 { 6828 return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb *4)); 6829 } 6830 6831 static void 6832 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd, 6833 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3) 6834 { 6835 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd); 6836 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0); 6837 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1); 6838 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2); 6839 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3); 6840 } 6841 6842 /* 6843 * cb_ops functions 6844 */ 6845 static int 6846 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred) 6847 { 6848 struct aac_softstate *softs; 6849 int minor0, minor; 6850 int instance; 6851 6852 DBCALLED(NULL, 2); 6853 6854 if (otyp != OTYP_BLK && otyp != OTYP_CHR) 6855 return (EINVAL); 6856 6857 minor0 = getminor(*devp); 6858 minor = AAC_SCSA_MINOR(minor0); 6859 6860 if (AAC_IS_SCSA_NODE(minor)) 6861 return (scsi_hba_open(devp, flag, otyp, cred)); 6862 6863 instance = MINOR2INST(minor0); 6864 if (instance >= AAC_MAX_ADAPTERS) 6865 return (ENXIO); 6866 6867 softs = ddi_get_soft_state(aac_softstatep, instance); 6868 if (softs == NULL) 6869 return (ENXIO); 6870 6871 return (0); 6872 } 6873 6874 /*ARGSUSED*/ 6875 static int 6876 aac_close(dev_t dev, int flag, int otyp, cred_t *cred) 6877 { 6878 int minor0, minor; 6879 int instance; 6880 6881 DBCALLED(NULL, 2); 6882 6883 if (otyp != 
OTYP_BLK && otyp != OTYP_CHR) 6884 return (EINVAL); 6885 6886 minor0 = getminor(dev); 6887 minor = AAC_SCSA_MINOR(minor0); 6888 6889 if (AAC_IS_SCSA_NODE(minor)) 6890 return (scsi_hba_close(dev, flag, otyp, cred)); 6891 6892 instance = MINOR2INST(minor0); 6893 if (instance >= AAC_MAX_ADAPTERS) 6894 return (ENXIO); 6895 6896 return (0); 6897 } 6898 6899 static int 6900 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, 6901 int *rval_p) 6902 { 6903 struct aac_softstate *softs; 6904 int minor0, minor; 6905 int instance; 6906 6907 DBCALLED(NULL, 2); 6908 6909 if (drv_priv(cred_p) != 0) 6910 return (EPERM); 6911 6912 minor0 = getminor(dev); 6913 minor = AAC_SCSA_MINOR(minor0); 6914 6915 if (AAC_IS_SCSA_NODE(minor)) 6916 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p)); 6917 6918 instance = MINOR2INST(minor0); 6919 if (instance < AAC_MAX_ADAPTERS) { 6920 softs = ddi_get_soft_state(aac_softstatep, instance); 6921 return (aac_do_ioctl(softs, dev, cmd, arg, flag)); 6922 } 6923 return (ENXIO); 6924 } 6925 6926 /* 6927 * The IO fault service error handling callback function 6928 */ 6929 /*ARGSUSED*/ 6930 static int 6931 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 6932 { 6933 /* 6934 * as the driver can always deal with an error in any dma or 6935 * access handle, we can just return the fme_status value. 6936 */ 6937 pci_ereport_post(dip, err, NULL); 6938 return (err->fme_status); 6939 } 6940 6941 /* 6942 * aac_fm_init - initialize fma capabilities and register with IO 6943 * fault services. 6944 */ 6945 static void 6946 aac_fm_init(struct aac_softstate *softs) 6947 { 6948 /* 6949 * Need to change iblock to priority for new MSI intr 6950 */ 6951 ddi_iblock_cookie_t fm_ibc; 6952 6953 softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p, 6954 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 6955 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 6956 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 6957 6958 /* Only register with IO Fault Services if we have some capability */ 6959 if (softs->fm_capabilities) { 6960 /* Adjust access and dma attributes for FMA */ 6961 softs->reg_attr.devacc_attr_access = DDI_FLAGERR_ACC; 6962 softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6963 softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 6964 6965 /* 6966 * Register capabilities with IO Fault Services. 6967 * fm_capabilities will be updated to indicate 6968 * capabilities actually supported (not requested.) 6969 */ 6970 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc); 6971 6972 /* 6973 * Initialize pci ereport capabilities if ereport 6974 * capable (should always be.) 6975 */ 6976 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 6977 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6978 pci_ereport_setup(softs->devinfo_p); 6979 } 6980 6981 /* 6982 * Register error callback if error callback capable. 6983 */ 6984 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 6985 ddi_fm_handler_register(softs->devinfo_p, 6986 aac_fm_error_cb, (void *) softs); 6987 } 6988 } 6989 } 6990 6991 /* 6992 * aac_fm_fini - Releases fma capabilities and un-registers with IO 6993 * fault services. 6994 */ 6995 static void 6996 aac_fm_fini(struct aac_softstate *softs) 6997 { 6998 /* Only unregister FMA capabilities if registered */ 6999 if (softs->fm_capabilities) { 7000 /* 7001 * Un-register error callback if error callback capable. 
7002 */ 7003 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 7004 ddi_fm_handler_unregister(softs->devinfo_p); 7005 } 7006 7007 /* 7008 * Release any resources allocated by pci_ereport_setup() 7009 */ 7010 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) || 7011 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) { 7012 pci_ereport_teardown(softs->devinfo_p); 7013 } 7014 7015 /* Unregister from IO Fault Services */ 7016 ddi_fm_fini(softs->devinfo_p); 7017 7018 /* Adjust access and dma attributes for FMA */ 7019 softs->reg_attr.devacc_attr_access = DDI_DEFAULT_ACC; 7020 softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 7021 softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 7022 } 7023 } 7024 7025 int 7026 aac_check_acc_handle(ddi_acc_handle_t handle) 7027 { 7028 ddi_fm_error_t de; 7029 7030 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 7031 return (de.fme_status); 7032 } 7033 7034 int 7035 aac_check_dma_handle(ddi_dma_handle_t handle) 7036 { 7037 ddi_fm_error_t de; 7038 7039 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 7040 return (de.fme_status); 7041 } 7042 7043 void 7044 aac_fm_ereport(struct aac_softstate *softs, char *detail) 7045 { 7046 uint64_t ena; 7047 char buf[FM_MAX_CLASS]; 7048 7049 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 7050 ena = fm_ena_generate(0, FM_ENA_FMT1); 7051 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) { 7052 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP, 7053 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 7054 } 7055 } 7056 7057 /* 7058 * Autoconfiguration support 7059 */ 7060 static int 7061 aac_parse_devname(char *devnm, int *tgt, int *lun) 7062 { 7063 char devbuf[SCSI_MAXNAMELEN]; 7064 char *addr; 7065 char *p, *tp, *lp; 7066 long num; 7067 7068 /* Parse dev name and address */ 7069 (void) strcpy(devbuf, devnm); 7070 addr = ""; 7071 for (p = devbuf; *p != '\0'; p++) { 7072 if (*p == '@') { 7073 addr = p + 1; 7074 *p = '\0'; 7075 } else if (*p == ':') { 7076 *p = '\0'; 7077 break; 7078 } 7079 } 7080 7081 /* Parse taget and lun */ 7082 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 7083 if (*p == ',') { 7084 lp = p + 1; 7085 *p = '\0'; 7086 break; 7087 } 7088 } 7089 if (tgt && tp) { 7090 if (ddi_strtol(tp, NULL, 0x10, &num)) 7091 return (AACERR); 7092 *tgt = (int)num; 7093 } 7094 if (lun && lp) { 7095 if (ddi_strtol(lp, NULL, 0x10, &num)) 7096 return (AACERR); 7097 *lun = (int)num; 7098 } 7099 return (AACOK); 7100 } 7101 7102 static dev_info_t * 7103 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun) 7104 { 7105 dev_info_t *child = NULL; 7106 char addr[SCSI_MAXNAMELEN]; 7107 char tmp[MAXNAMELEN]; 7108 7109 if (tgt < AAC_MAX_LD) { 7110 if (lun == 0) { 7111 struct aac_device *dvp = &softs->containers[tgt].dev; 7112 7113 child = dvp->dip; 7114 } 7115 } else { 7116 (void) sprintf(addr, "%x,%x", tgt, lun); 7117 for (child = ddi_get_child(softs->devinfo_p); 7118 child; child = ddi_get_next_sibling(child)) { 7119 /* We don't care about non-persistent node */ 7120 if (ndi_dev_is_persistent_node(child) == 0) 7121 continue; 7122 7123 if (aac_name_node(child, tmp, MAXNAMELEN) != 7124 DDI_SUCCESS) 7125 continue; 7126 if (strcmp(addr, tmp) == 0) 7127 break; 7128 } 7129 } 7130 return (child); 7131 } 7132 7133 static int 7134 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd, 7135 dev_info_t **dipp) 7136 { 7137 char *nodename = NULL; 7138 char **compatible = NULL; 7139 int ncompatible = 0; 7140 char *childname; 7141 dev_info_t *ldip = NULL; 7142 int tgt = 
sd->sd_address.a_target; 7143 int lun = sd->sd_address.a_lun; 7144 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 7145 int rval; 7146 7147 DBCALLED(softs, 2); 7148 7149 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 7150 NULL, &nodename, &compatible, &ncompatible); 7151 if (nodename == NULL) { 7152 AACDB_PRINT(softs, CE_WARN, 7153 "found no compatible driver for t%dL%d", tgt, lun); 7154 rval = NDI_FAILURE; 7155 goto finish; 7156 } 7157 childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename; 7158 7159 /* Create dev node */ 7160 rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID, 7161 &ldip); 7162 if (rval == NDI_SUCCESS) { 7163 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) 7164 != DDI_PROP_SUCCESS) { 7165 AACDB_PRINT(softs, CE_WARN, "unable to create " 7166 "property for t%dL%d (target)", tgt, lun); 7167 rval = NDI_FAILURE; 7168 goto finish; 7169 } 7170 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) 7171 != DDI_PROP_SUCCESS) { 7172 AACDB_PRINT(softs, CE_WARN, "unable to create " 7173 "property for t%dL%d (lun)", tgt, lun); 7174 rval = NDI_FAILURE; 7175 goto finish; 7176 } 7177 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 7178 "compatible", compatible, ncompatible) 7179 != DDI_PROP_SUCCESS) { 7180 AACDB_PRINT(softs, CE_WARN, "unable to create " 7181 "property for t%dL%d (compatible)", tgt, lun); 7182 rval = NDI_FAILURE; 7183 goto finish; 7184 } 7185 7186 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 7187 if (rval != NDI_SUCCESS) { 7188 AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d", 7189 tgt, lun); 7190 ndi_prop_remove_all(ldip); 7191 (void) ndi_devi_free(ldip); 7192 } 7193 } 7194 finish: 7195 if (dipp) 7196 *dipp = ldip; 7197 7198 scsi_hba_nodename_compatible_free(nodename, compatible); 7199 return (rval); 7200 } 7201 7202 /*ARGSUSED*/ 7203 static int 7204 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd) 7205 { 7206 int tgt = sd->sd_address.a_target; 7207 int lun = sd->sd_address.a_lun; 7208 7209 DBCALLED(softs, 2); 7210 7211 if (tgt < AAC_MAX_LD) { 7212 enum aac_cfg_event event; 7213 7214 if (lun == 0) { 7215 mutex_enter(&softs->io_lock); 7216 event = aac_probe_container(softs, tgt); 7217 mutex_exit(&softs->io_lock); 7218 if ((event != AAC_CFG_NULL_NOEXIST) && 7219 (event != AAC_CFG_DELETE)) { 7220 if (scsi_hba_probe(sd, NULL) == 7221 SCSIPROBE_EXISTS) 7222 return (NDI_SUCCESS); 7223 } 7224 } 7225 return (NDI_FAILURE); 7226 } else { 7227 int dtype; 7228 int qual; /* device qualifier */ 7229 7230 if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS) 7231 return (NDI_FAILURE); 7232 7233 dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 7234 qual = dtype >> 5; 7235 7236 AACDB_PRINT(softs, CE_NOTE, 7237 "Phys.
device found: tgt %d dtype %d: %s", 7238 tgt, dtype, sd->sd_inq->inq_vid); 7239 7240 /* Only non-DASD and JBOD mode DASD are allowed exposed */ 7241 if (dtype == DTYPE_RODIRECT /* CDROM */ || 7242 dtype == DTYPE_SEQUENTIAL /* TAPE */ || 7243 dtype == DTYPE_ESI /* SES */) { 7244 if (!(softs->flags & AAC_FLAGS_NONDASD)) 7245 return (NDI_FAILURE); 7246 AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt); 7247 7248 } else if (dtype == DTYPE_DIRECT) { 7249 if (!(softs->flags & AAC_FLAGS_JBOD) || qual != 0) 7250 return (NDI_FAILURE); 7251 AACDB_PRINT(softs, CE_NOTE, "JBOD DASD %d found", tgt); 7252 } 7253 7254 mutex_enter(&softs->io_lock); 7255 softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID; 7256 mutex_exit(&softs->io_lock); 7257 return (NDI_SUCCESS); 7258 } 7259 } 7260 7261 static int 7262 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun, 7263 dev_info_t **ldip) 7264 { 7265 struct scsi_device sd; 7266 dev_info_t *child; 7267 int rval; 7268 7269 DBCALLED(softs, 2); 7270 7271 if ((child = aac_find_child(softs, tgt, lun)) != NULL) { 7272 if (ldip) 7273 *ldip = child; 7274 return (NDI_SUCCESS); 7275 } 7276 7277 bzero(&sd, sizeof (struct scsi_device)); 7278 sd.sd_address.a_hba_tran = softs->hba_tran; 7279 sd.sd_address.a_target = (uint16_t)tgt; 7280 sd.sd_address.a_lun = (uint8_t)lun; 7281 if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS) 7282 rval = aac_config_child(softs, &sd, ldip); 7283 /* scsi_unprobe is blank now. Free buffer manually */ 7284 if (sd.sd_inq) { 7285 kmem_free(sd.sd_inq, SUN_INQSIZE); 7286 sd.sd_inq = (struct scsi_inquiry *)NULL; 7287 } 7288 return (rval); 7289 } 7290 7291 static int 7292 aac_config_tgt(struct aac_softstate *softs, int tgt) 7293 { 7294 struct scsi_address ap; 7295 struct buf *bp = NULL; 7296 int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE; 7297 int list_len = 0; 7298 int lun_total = 0; 7299 dev_info_t *ldip; 7300 int i; 7301 7302 ap.a_hba_tran = softs->hba_tran; 7303 ap.a_target = (uint16_t)tgt; 7304 ap.a_lun = 0; 7305 7306 for (i = 0; i < 2; i++) { 7307 struct scsi_pkt *pkt; 7308 uchar_t *cdb; 7309 uchar_t *p; 7310 uint32_t data; 7311 7312 if (bp == NULL) { 7313 if ((bp = scsi_alloc_consistent_buf(&ap, NULL, 7314 buf_len, B_READ, NULL_FUNC, NULL)) == NULL) 7315 return (AACERR); 7316 } 7317 if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5, 7318 sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT, 7319 NULL, NULL)) == NULL) { 7320 scsi_free_consistent_buf(bp); 7321 return (AACERR); 7322 } 7323 cdb = pkt->pkt_cdbp; 7324 bzero(cdb, CDB_GROUP5); 7325 cdb[0] = SCMD_REPORT_LUNS; 7326 7327 /* Convert buffer len from local to LE_32 */ 7328 data = buf_len; 7329 for (p = &cdb[9]; p > &cdb[5]; p--) { 7330 *p = data & 0xff; 7331 data >>= 8; 7332 } 7333 7334 if (scsi_poll(pkt) < 0 || 7335 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) { 7336 scsi_destroy_pkt(pkt); 7337 break; 7338 } 7339 7340 /* Convert list_len from LE_32 to local */ 7341 for (p = (uchar_t *)bp->b_un.b_addr; 7342 p < (uchar_t *)bp->b_un.b_addr + 4; p++) { 7343 data <<= 8; 7344 data |= *p; 7345 } 7346 list_len = data; 7347 if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) { 7348 scsi_free_consistent_buf(bp); 7349 bp = NULL; 7350 buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE; 7351 } 7352 scsi_destroy_pkt(pkt); 7353 } 7354 if (i >= 2) { 7355 uint8_t *buf = (uint8_t *)(bp->b_un.b_addr + 7356 AAC_SCSI_RPTLUNS_HEAD_SIZE); 7357 7358 for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) { 7359 uint16_t lun; 7360 7361 /* Determine report luns 
addressing type */ 7362 switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) { 7363 /* 7364 * Vendors in the field have been found to be 7365 * concatenating bus/target/lun to equal the 7366 * complete lun value instead of switching to 7367 * flat space addressing 7368 */ 7369 case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL: 7370 case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT: 7371 case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE: 7372 lun = ((buf[0] & 0x3f) << 8) | buf[1]; 7373 if (lun > UINT8_MAX) { 7374 AACDB_PRINT(softs, CE_WARN, 7375 "abnormal lun number: %d", lun); 7376 break; 7377 } 7378 if (aac_config_lun(softs, tgt, lun, &ldip) == 7379 NDI_SUCCESS) 7380 lun_total++; 7381 break; 7382 } 7383 7384 buf += AAC_SCSI_RPTLUNS_ADDR_SIZE; 7385 } 7386 } else { 7387 /* The target may not support SCMD_REPORT_LUNS. */ 7388 if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS) 7389 lun_total++; 7390 } 7391 scsi_free_consistent_buf(bp); 7392 return (lun_total); 7393 } 7394 7395 static void 7396 aac_devcfg(struct aac_softstate *softs, int tgt, int en) 7397 { 7398 struct aac_device *dvp; 7399 7400 mutex_enter(&softs->io_lock); 7401 dvp = AAC_DEV(softs, tgt); 7402 if (en) 7403 dvp->flags |= AAC_DFLAG_CONFIGURING; 7404 else 7405 dvp->flags &= ~AAC_DFLAG_CONFIGURING; 7406 mutex_exit(&softs->io_lock); 7407 } 7408 7409 static int 7410 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op, 7411 void *arg, dev_info_t **childp) 7412 { 7413 struct aac_softstate *softs; 7414 int circ = 0; 7415 int rval; 7416 7417 if ((softs = ddi_get_soft_state(aac_softstatep, 7418 ddi_get_instance(parent))) == NULL) 7419 return (NDI_FAILURE); 7420 7421 /* Commands for bus config should be blocked while the bus is quiesced */ 7422 mutex_enter(&softs->io_lock); 7423 if (softs->state & AAC_STATE_QUIESCED) { 7424 AACDB_PRINT(softs, CE_NOTE, 7425 "bus_config aborted because bus is quiesced"); 7426 mutex_exit(&softs->io_lock); 7427 return (NDI_FAILURE); 7428 } 7429 mutex_exit(&softs->io_lock); 7430 7431 DBCALLED(softs, 1); 7432 7433 /* Hold the nexus across the bus_config */ 7434 ndi_devi_enter(parent, &circ); 7435 switch (op) { 7436 case BUS_CONFIG_ONE: { 7437 int tgt, lun; 7438 7439 if (aac_parse_devname(arg, &tgt, &lun) != AACOK) { 7440 rval = NDI_FAILURE; 7441 break; 7442 } 7443 if (tgt >= AAC_MAX_LD) { 7444 if (tgt >= AAC_MAX_DEV(softs)) { 7445 rval = NDI_FAILURE; 7446 break; 7447 } 7448 } 7449 7450 AAC_DEVCFG_BEGIN(softs, tgt); 7451 rval = aac_config_lun(softs, tgt, lun, childp); 7452 AAC_DEVCFG_END(softs, tgt); 7453 break; 7454 } 7455 7456 case BUS_CONFIG_DRIVER: 7457 case BUS_CONFIG_ALL: { 7458 uint32_t bus, tgt; 7459 int index, total; 7460 7461 for (tgt = 0; tgt < AAC_MAX_LD; tgt++) { 7462 AAC_DEVCFG_BEGIN(softs, tgt); 7463 (void) aac_config_lun(softs, tgt, 0, NULL); 7464 AAC_DEVCFG_END(softs, tgt); 7465 } 7466 7467 /* Config the non-DASD devices connected to the card */ 7468 total = 0; 7469 index = AAC_MAX_LD; 7470 for (bus = 0; bus < softs->bus_max; bus++) { 7471 AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus); 7472 for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) { 7473 AAC_DEVCFG_BEGIN(softs, index); 7474 if (aac_config_tgt(softs, index)) 7475 total++; 7476 AAC_DEVCFG_END(softs, index); 7477 } 7478 } 7479 AACDB_PRINT(softs, CE_CONT, 7480 "?Total %d phys.
device(s) found", total); 7481 rval = NDI_SUCCESS; 7482 break; 7483 } 7484 } 7485 7486 if (rval == NDI_SUCCESS) 7487 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 7488 ndi_devi_exit(parent, circ); 7489 return (rval); 7490 } 7491 7492 /*ARGSUSED*/ 7493 static int 7494 aac_handle_dr(struct aac_softstate *softs, int tgt, int lun, int event) 7495 { 7496 struct aac_device *dvp; 7497 dev_info_t *dip; 7498 int valid; 7499 int circ1 = 0; 7500 7501 DBCALLED(softs, 1); 7502 7503 /* Hold the nexus across the bus_config */ 7504 dvp = AAC_DEV(softs, tgt); 7505 valid = AAC_DEV_IS_VALID(dvp); 7506 dip = dvp->dip; 7507 if (!(softs->state & AAC_STATE_RUN)) 7508 return (AACERR); 7509 mutex_exit(&softs->io_lock); 7510 7511 switch (event) { 7512 case AAC_CFG_ADD: 7513 case AAC_CFG_DELETE: 7514 /* Device onlined */ 7515 if (dip == NULL && valid) { 7516 ndi_devi_enter(softs->devinfo_p, &circ1); 7517 (void) aac_config_lun(softs, tgt, 0, NULL); 7518 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined", 7519 softs->instance, tgt, lun); 7520 ndi_devi_exit(softs->devinfo_p, circ1); 7521 } 7522 /* Device offlined */ 7523 if (dip && !valid) { 7524 mutex_enter(&softs->io_lock); 7525 (void) aac_do_reset(softs); 7526 mutex_exit(&softs->io_lock); 7527 7528 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 7529 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined", 7530 softs->instance, tgt, lun); 7531 } 7532 break; 7533 } 7534 7535 mutex_enter(&softs->io_lock); 7536 return (AACOK); 7537 } 7538 7539 #ifdef DEBUG 7540 7541 /* -------------------------debug aid functions-------------------------- */ 7542 7543 #define AAC_FIB_CMD_KEY_STRINGS \ 7544 TestCommandResponse, "TestCommandResponse", \ 7545 TestAdapterCommand, "TestAdapterCommand", \ 7546 LastTestCommand, "LastTestCommand", \ 7547 ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \ 7548 ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \ 7549 ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \ 7550 ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \ 7551 ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \ 7552 ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \ 7553 ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \ 7554 ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \ 7555 InterfaceShutdown, "InterfaceShutdown", \ 7556 DmaCommandFib, "DmaCommandFib", \ 7557 StartProfile, "StartProfile", \ 7558 TermProfile, "TermProfile", \ 7559 SpeedTest, "SpeedTest", \ 7560 TakeABreakPt, "TakeABreakPt", \ 7561 RequestPerfData, "RequestPerfData", \ 7562 SetInterruptDefTimer, "SetInterruptDefTimer", \ 7563 SetInterruptDefCount, "SetInterruptDefCount", \ 7564 GetInterruptDefStatus, "GetInterruptDefStatus", \ 7565 LastCommCommand, "LastCommCommand", \ 7566 NuFileSystem, "NuFileSystem", \ 7567 UFS, "UFS", \ 7568 HostFileSystem, "HostFileSystem", \ 7569 LastFileSystemCommand, "LastFileSystemCommand", \ 7570 ContainerCommand, "ContainerCommand", \ 7571 ContainerCommand64, "ContainerCommand64", \ 7572 ClusterCommand, "ClusterCommand", \ 7573 ScsiPortCommand, "ScsiPortCommand", \ 7574 ScsiPortCommandU64, "ScsiPortCommandU64", \ 7575 AifRequest, "AifRequest", \ 7576 CheckRevision, "CheckRevision", \ 7577 FsaHostShutdown, "FsaHostShutdown", \ 7578 RequestAdapterInfo, "RequestAdapterInfo", \ 7579 IsAdapterPaused, "IsAdapterPaused", \ 7580 SendHostTime, "SendHostTime", \ 7581 LastMiscCommand, "LastMiscCommand" 7582 7583 #define AAC_CTVM_SUBCMD_KEY_STRINGS \ 7584 VM_Null, "VM_Null", \ 7585 VM_NameServe, "VM_NameServe", \ 7586 VM_ContainerConfig, 
"VM_ContainerConfig", \ 7587 VM_Ioctl, "VM_Ioctl", \ 7588 VM_FilesystemIoctl, "VM_FilesystemIoctl", \ 7589 VM_CloseAll, "VM_CloseAll", \ 7590 VM_CtBlockRead, "VM_CtBlockRead", \ 7591 VM_CtBlockWrite, "VM_CtBlockWrite", \ 7592 VM_SliceBlockRead, "VM_SliceBlockRead", \ 7593 VM_SliceBlockWrite, "VM_SliceBlockWrite", \ 7594 VM_DriveBlockRead, "VM_DriveBlockRead", \ 7595 VM_DriveBlockWrite, "VM_DriveBlockWrite", \ 7596 VM_EnclosureMgt, "VM_EnclosureMgt", \ 7597 VM_Unused, "VM_Unused", \ 7598 VM_CtBlockVerify, "VM_CtBlockVerify", \ 7599 VM_CtPerf, "VM_CtPerf", \ 7600 VM_CtBlockRead64, "VM_CtBlockRead64", \ 7601 VM_CtBlockWrite64, "VM_CtBlockWrite64", \ 7602 VM_CtBlockVerify64, "VM_CtBlockVerify64", \ 7603 VM_CtHostRead64, "VM_CtHostRead64", \ 7604 VM_CtHostWrite64, "VM_CtHostWrite64", \ 7605 VM_NameServe64, "VM_NameServe64" 7606 7607 #define AAC_CT_SUBCMD_KEY_STRINGS \ 7608 CT_Null, "CT_Null", \ 7609 CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \ 7610 CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \ 7611 CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \ 7612 CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \ 7613 CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \ 7614 CT_WRITE_MBR, "CT_WRITE_MBR", \ 7615 CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \ 7616 CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \ 7617 CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \ 7618 CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \ 7619 CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \ 7620 CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \ 7621 CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \ 7622 CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \ 7623 CT_READ_MBR, "CT_READ_MBR", \ 7624 CT_READ_PARTITION, "CT_READ_PARTITION", \ 7625 CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \ 7626 CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \ 7627 CT_SLICE_SIZE, "CT_SLICE_SIZE", \ 7628 CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \ 7629 CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \ 7630 CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \ 7631 CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \ 7632 CT_UNMIRROR, "CT_UNMIRROR", \ 7633 CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \ 7634 CT_GEN_MIRROR, "CT_GEN_MIRROR", \ 7635 CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \ 7636 CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \ 7637 CT_MOVE2, "CT_MOVE2", \ 7638 CT_SPLIT, "CT_SPLIT", \ 7639 CT_SPLIT2, "CT_SPLIT2", \ 7640 CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \ 7641 CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \ 7642 CT_RECONFIG, "CT_RECONFIG", \ 7643 CT_BREAK2, "CT_BREAK2", \ 7644 CT_BREAK, "CT_BREAK", \ 7645 CT_MERGE2, "CT_MERGE2", \ 7646 CT_MERGE, "CT_MERGE", \ 7647 CT_FORCE_ERROR, "CT_FORCE_ERROR", \ 7648 CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \ 7649 CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \ 7650 CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \ 7651 CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \ 7652 CT_VOLUME_ADD, "CT_VOLUME_ADD", \ 7653 CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \ 7654 CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \ 7655 CT_COPY_STATUS, "CT_COPY_STATUS", \ 7656 CT_COPY, "CT_COPY", \ 7657 CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \ 7658 CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \ 7659 CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \ 7660 CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \ 7661 CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \ 7662 CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \ 7663 CT_SET, "CT_SET", \ 7664 CT_GET, "CT_GET", \ 7665 CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \ 7666 CT_GET_DELAY, "CT_GET_DELAY", \ 7667 CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \ 7668 CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", 
\ 7669 CT_SCRUB, "CT_SCRUB", \ 7670 CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \ 7671 CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \ 7672 CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \ 7673 CT_PAUSE_IO, "CT_PAUSE_IO", \ 7674 CT_RELEASE_IO, "CT_RELEASE_IO", \ 7675 CT_SCRUB2, "CT_SCRUB2", \ 7676 CT_MCHECK, "CT_MCHECK", \ 7677 CT_CORRUPT, "CT_CORRUPT", \ 7678 CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \ 7679 CT_PROMOTE, "CT_PROMOTE", \ 7680 CT_SET_DEAD, "CT_SET_DEAD", \ 7681 CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \ 7682 CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \ 7683 CT_GET_PARAM, "CT_GET_PARAM", \ 7684 CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \ 7685 CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \ 7686 CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \ 7687 CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \ 7688 CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \ 7689 CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \ 7690 CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \ 7691 CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \ 7692 CT_STOP_DATA, "CT_STOP_DATA", \ 7693 CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \ 7694 CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \ 7695 CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \ 7696 CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \ 7697 CT_GET_TIME, "CT_GET_TIME", \ 7698 CT_READ_DATA, "CT_READ_DATA", \ 7699 CT_CTR, "CT_CTR", \ 7700 CT_CTL, "CT_CTL", \ 7701 CT_DRAINIO, "CT_DRAINIO", \ 7702 CT_RELEASEIO, "CT_RELEASEIO", \ 7703 CT_GET_NVRAM, "CT_GET_NVRAM", \ 7704 CT_GET_MEMORY, "CT_GET_MEMORY", \ 7705 CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \ 7706 CT_ADD_LEVEL, "CT_ADD_LEVEL", \ 7707 CT_NV_ZERO, "CT_NV_ZERO", \ 7708 CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \ 7709 CT_THROTTLE_ON, "CT_THROTTLE_ON", \ 7710 CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \ 7711 CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \ 7712 CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \ 7713 CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \ 7714 CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \ 7715 CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \ 7716 CT_MONITOR, "CT_MONITOR", \ 7717 CT_GEN_MORPH, "CT_GEN_MORPH", \ 7718 CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \ 7719 CT_CACHE_SET, "CT_CACHE_SET", \ 7720 CT_CACHE_STAT, "CT_CACHE_STAT", \ 7721 CT_TRACE_START, "CT_TRACE_START", \ 7722 CT_TRACE_STOP, "CT_TRACE_STOP", \ 7723 CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \ 7724 CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \ 7725 CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \ 7726 CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \ 7727 CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \ 7728 CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \ 7729 CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \ 7730 CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \ 7731 CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \ 7732 CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \ 7733 CT_STOP_DUMPS, "CT_STOP_DUMPS", \ 7734 CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \ 7735 CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \ 7736 CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \ 7737 CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \ 7738 CT_READ_NAME, "CT_READ_NAME", \ 7739 CT_WRITE_NAME, "CT_WRITE_NAME", \ 7740 CT_TOSS_CACHE, "CT_TOSS_CACHE", \ 7741 CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \ 7742 CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \ 7743 CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \ 7744 CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \ 7745 CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \ 7746 CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \ 7747 CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \ 7748 CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \ 7749 
CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \ 7750 CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \ 7751 CT_FLUSH, "CT_FLUSH", \ 7752 CT_REBUILD, "CT_REBUILD", \ 7753 CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \ 7754 CT_RESTART, "CT_RESTART", \ 7755 CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \ 7756 CT_TRACE_FLAG, "CT_TRACE_FLAG", \ 7757 CT_RESTART_MORPH, "CT_RESTART_MORPH", \ 7758 CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \ 7759 CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \ 7760 CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \ 7761 CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \ 7762 CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \ 7763 CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \ 7764 CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \ 7765 CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \ 7766 CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \ 7767 CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \ 7768 CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \ 7769 CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \ 7770 CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \ 7771 CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \ 7772 CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \ 7773 CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \ 7774 CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \ 7775 CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \ 7776 CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \ 7777 CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \ 7778 CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \ 7779 CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \ 7780 CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \ 7781 CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \ 7782 CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \ 7783 CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \ 7784 CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \ 7785 CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \ 7786 CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \ 7787 CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \ 7788 CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \ 7789 CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \ 7790 CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \ 7791 CT_IS_CONTAINER_MEATADATA_STANDARD, \ 7792 "CT_IS_CONTAINER_MEATADATA_STANDARD", \ 7793 CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \ 7794 CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \ 7795 CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \ 7796 CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \ 7797 CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \ 7798 CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \ 7799 CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \ 7800 CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \ 7801 CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \ 7802 CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \ 7803 CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \ 7804 CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \ 7805 CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \ 7806 CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \ 7807 CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \ 7808 CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \ 7809 CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \ 7810 CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \ 7811 CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE" 7812 7813 #define AAC_CL_SUBCMD_KEY_STRINGS \ 7814 CL_NULL, "CL_NULL", \ 7815 DS_INIT, "DS_INIT", \ 7816 DS_RESCAN, "DS_RESCAN", \ 7817 DS_CREATE, "DS_CREATE", \ 7818 DS_DELETE, "DS_DELETE", \ 7819 DS_ADD_DISK, "DS_ADD_DISK", \ 7820 DS_REMOVE_DISK, "DS_REMOVE_DISK", \ 7821 DS_MOVE_DISK, "DS_MOVE_DISK", \ 7822 DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \ 7823 
DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \ 7824 DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \ 7825 DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \ 7826 DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \ 7827 DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \ 7828 DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \ 7829 DS_GET_DRIVES, "DS_GET_DRIVES", \ 7830 DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \ 7831 DS_ONLINE, "DS_ONLINE", \ 7832 DS_OFFLINE, "DS_OFFLINE", \ 7833 DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \ 7834 DS_FSAPRINT, "DS_FSAPRINT", \ 7835 CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \ 7836 CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \ 7837 CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \ 7838 CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \ 7839 CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \ 7840 CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \ 7841 CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \ 7842 CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \ 7843 CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \ 7844 CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \ 7845 CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \ 7846 CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \ 7847 CC_GET_BUSINFO, "CC_GET_BUSINFO", \ 7848 CC_GET_PORTINFO, "CC_GET_PORTINFO", \ 7849 CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \ 7850 CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \ 7851 CQ_QUORUM_OP, "CQ_QUORUM_OP" 7852 7853 #define AAC_AIF_SUBCMD_KEY_STRINGS \ 7854 AifCmdEventNotify, "AifCmdEventNotify", \ 7855 AifCmdJobProgress, "AifCmdJobProgress", \ 7856 AifCmdAPIReport, "AifCmdAPIReport", \ 7857 AifCmdDriverNotify, "AifCmdDriverNotify", \ 7858 AifReqJobList, "AifReqJobList", \ 7859 AifReqJobsForCtr, "AifReqJobsForCtr", \ 7860 AifReqJobsForScsi, "AifReqJobsForScsi", \ 7861 AifReqJobReport, "AifReqJobReport", \ 7862 AifReqTerminateJob, "AifReqTerminateJob", \ 7863 AifReqSuspendJob, "AifReqSuspendJob", \ 7864 AifReqResumeJob, "AifReqResumeJob", \ 7865 AifReqSendAPIReport, "AifReqSendAPIReport", \ 7866 AifReqAPIJobStart, "AifReqAPIJobStart", \ 7867 AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \ 7868 AifReqAPIJobFinish, "AifReqAPIJobFinish" 7869 7870 #define AAC_IOCTL_SUBCMD_KEY_STRINGS \ 7871 Reserved_IOCTL, "Reserved_IOCTL", \ 7872 GetDeviceHandle, "GetDeviceHandle", \ 7873 BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \ 7874 DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \ 7875 RescanBus, "RescanBus", \ 7876 GetDeviceProbeInfo, "GetDeviceProbeInfo", \ 7877 GetDeviceCapacity, "GetDeviceCapacity", \ 7878 GetContainerProbeInfo, "GetContainerProbeInfo", \ 7879 GetRequestedMemorySize, "GetRequestedMemorySize", \ 7880 GetBusInfo, "GetBusInfo", \ 7881 GetVendorSpecific, "GetVendorSpecific", \ 7882 EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \ 7883 EnhancedGetBusInfo, "EnhancedGetBusInfo", \ 7884 SetupExtendedCounters, "SetupExtendedCounters", \ 7885 GetPerformanceCounters, "GetPerformanceCounters", \ 7886 ResetPerformanceCounters, "ResetPerformanceCounters", \ 7887 ReadModePage, "ReadModePage", \ 7888 WriteModePage, "WriteModePage", \ 7889 ReadDriveParameter, "ReadDriveParameter", \ 7890 WriteDriveParameter, "WriteDriveParameter", \ 7891 ResetAdapter, "ResetAdapter", \ 7892 ResetBus, "ResetBus", \ 7893 ResetBusDevice, "ResetBusDevice", \ 7894 ExecuteSrb, "ExecuteSrb", \ 7895 Create_IO_Task, "Create_IO_Task", \ 7896 Delete_IO_Task, "Delete_IO_Task", \ 7897 Get_IO_Task_Info, "Get_IO_Task_Info", \ 7898 Check_Task_Progress, "Check_Task_Progress", \ 
7899 InjectError, "InjectError", \ 7900 GetDeviceDefectCounts, "GetDeviceDefectCounts", \ 7901 GetDeviceDefectInfo, "GetDeviceDefectInfo", \ 7902 GetDeviceStatus, "GetDeviceStatus", \ 7903 ClearDeviceStatus, "ClearDeviceStatus", \ 7904 DiskSpinControl, "DiskSpinControl", \ 7905 DiskSmartControl, "DiskSmartControl", \ 7906 WriteSame, "WriteSame", \ 7907 ReadWriteLong, "ReadWriteLong", \ 7908 FormatUnit, "FormatUnit", \ 7909 TargetDeviceControl, "TargetDeviceControl", \ 7910 TargetChannelControl, "TargetChannelControl", \ 7911 FlashNewCode, "FlashNewCode", \ 7912 DiskCheck, "DiskCheck", \ 7913 RequestSense, "RequestSense", \ 7914 DiskPERControl, "DiskPERControl", \ 7915 Read10, "Read10", \ 7916 Write10, "Write10" 7917
7918 #define AAC_AIFEN_KEY_STRINGS \ 7919 AifEnGeneric, "Generic", \ 7920 AifEnTaskComplete, "TaskComplete", \ 7921 AifEnConfigChange, "Config change", \ 7922 AifEnContainerChange, "Container change", \ 7923 AifEnDeviceFailure, "device failed", \ 7924 AifEnMirrorFailover, "Mirror failover", \ 7925 AifEnContainerEvent, "container event", \ 7926 AifEnFileSystemChange, "File system changed", \ 7927 AifEnConfigPause, "Container pause event", \ 7928 AifEnConfigResume, "Container resume event", \ 7929 AifEnFailoverChange, "Failover space assignment changed", \ 7930 AifEnRAID5RebuildDone, "RAID5 rebuild finished", \ 7931 AifEnEnclosureManagement, "Enclosure management event", \ 7932 AifEnBatteryEvent, "battery event", \ 7933 AifEnAddContainer, "Add container", \ 7934 AifEnDeleteContainer, "Delete container", \ 7935 AifEnSMARTEvent, "SMART Event", \ 7936 AifEnBatteryNeedsRecond, "battery needs reconditioning", \ 7937 AifEnClusterEvent, "cluster event", \ 7938 AifEnDiskSetEvent, "disk set event occurred", \ 7939 AifDenMorphComplete, "morph operation completed", \ 7940 AifDenVolumeExtendComplete, "VolumeExtendComplete" 7941
7942 struct aac_key_strings { 7943 int key; 7944 char *message; 7945 }; 7946 7947 extern struct scsi_key_strings scsi_cmds[]; 7948 7949 static struct aac_key_strings aac_fib_cmds[] = { 7950 AAC_FIB_CMD_KEY_STRINGS, 7951 -1, NULL 7952 }; 7953 7954 static struct aac_key_strings aac_ctvm_subcmds[] = { 7955 AAC_CTVM_SUBCMD_KEY_STRINGS, 7956 -1, NULL 7957 }; 7958 7959 static struct aac_key_strings aac_ct_subcmds[] = { 7960 AAC_CT_SUBCMD_KEY_STRINGS, 7961 -1, NULL 7962 }; 7963 7964 static struct aac_key_strings aac_cl_subcmds[] = { 7965 AAC_CL_SUBCMD_KEY_STRINGS, 7966 -1, NULL 7967 }; 7968 7969 static struct aac_key_strings aac_aif_subcmds[] = { 7970 AAC_AIF_SUBCMD_KEY_STRINGS, 7971 -1, NULL 7972 }; 7973 7974 static struct aac_key_strings aac_ioctl_subcmds[] = { 7975 AAC_IOCTL_SUBCMD_KEY_STRINGS, 7976 -1, NULL 7977 }; 7978 7979 static struct aac_key_strings aac_aifens[] = { 7980 AAC_AIFEN_KEY_STRINGS, 7981 -1, NULL 7982 }; 7983
7984 /* 7985 * The following function comes from Adaptec: 7986 * 7987 * Get the firmware print buffer parameters from the firmware and, 7988 * if the command was successful, map in the address. 
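 *
 * Summary of the mechanism implemented below: the synchronous
 * AAC_MONKER_GETDRVPROP mailbox command returns the print buffer's
 * physical address (low word in mailbox 1, high word in mailbox 2),
 * its total size (mailbox 3) and its header size (mailbox 4).  The
 * buffer is used only when it lies inside the PCI memory range that
 * is already mapped, i.e. the high address word is zero and
 * offset + size fits within map_size; otherwise the routine
 * returns AACERR.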
7989 */ 7990 static int 7991 aac_get_fw_debug_buffer(struct aac_softstate *softs) 7992 { 7993 if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP, 7994 0, 0, 0, 0, NULL) == AACOK) { 7995 uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1); 7996 uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2); 7997 uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3); 7998 uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4); 7999 8000 if (mondrv_buf_size) { 8001 uint32_t offset = mondrv_buf_paddrl - \ 8002 softs->pci_mem_base_paddr; 8003 8004 /* 8005 * See if the address is already mapped in, and 8006 * if so set it up from the base address 8007 */ 8008 if ((mondrv_buf_paddrh == 0) && 8009 (offset + mondrv_buf_size < softs->map_size)) { 8010 mutex_enter(&aac_prt_mutex); 8011 softs->debug_buf_offset = offset; 8012 softs->debug_header_size = mondrv_hdr_size; 8013 softs->debug_buf_size = mondrv_buf_size; 8014 softs->debug_fw_flags = 0; 8015 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 8016 mutex_exit(&aac_prt_mutex); 8017 8018 return (AACOK); 8019 } 8020 } 8021 } 8022 return (AACERR); 8023 } 8024 8025 int 8026 aac_dbflag_on(struct aac_softstate *softs, int flag) 8027 { 8028 int debug_flags = softs ? softs->debug_flags : aac_debug_flags; 8029 8030 return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \ 8031 AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag)); 8032 } 8033 8034 static void 8035 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader) 8036 { 8037 if (noheader) { 8038 if (sl) { 8039 aac_fmt[0] = sl; 8040 cmn_err(lev, aac_fmt, aac_prt_buf); 8041 } else { 8042 cmn_err(lev, &aac_fmt[1], aac_prt_buf); 8043 } 8044 } else { 8045 if (sl) { 8046 aac_fmt_header[0] = sl; 8047 cmn_err(lev, aac_fmt_header, 8048 softs->vendor_name, softs->instance, 8049 aac_prt_buf); 8050 } else { 8051 cmn_err(lev, &aac_fmt_header[1], 8052 softs->vendor_name, softs->instance, 8053 aac_prt_buf); 8054 } 8055 } 8056 } 8057 8058 /* 8059 * The following function comes from Adaptec: 8060 * 8061 * Format and print out the data passed in to UART or console 8062 * as specified by debug flags. 8063 */ 8064 void 8065 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...) 8066 { 8067 va_list args; 8068 char sl; /* system log character */ 8069 8070 mutex_enter(&aac_prt_mutex); 8071 /* Set up parameters and call sprintf function to format the data */ 8072 if (strchr("^!?", fmt[0]) == NULL) { 8073 sl = 0; 8074 } else { 8075 sl = fmt[0]; 8076 fmt++; 8077 } 8078 va_start(args, fmt); 8079 (void) vsprintf(aac_prt_buf, fmt, args); 8080 va_end(args); 8081 8082 /* Make sure the softs structure has been passed in for this section */ 8083 if (softs) { 8084 if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) && 8085 /* If we are set up for a Firmware print */ 8086 (softs->debug_buf_size)) { 8087 uint32_t count, i; 8088 8089 /* Make sure the string size is within boundaries */ 8090 count = strlen(aac_prt_buf); 8091 if (count > softs->debug_buf_size) 8092 count = (uint16_t)softs->debug_buf_size; 8093 8094 /* 8095 * Wait for no more than AAC_PRINT_TIMEOUT for the 8096 * previous message length to clear (the handshake). 8097 */ 8098 for (i = 0; i < AAC_PRINT_TIMEOUT; i++) { 8099 if (!PCI_MEM_GET32(softs, 8100 softs->debug_buf_offset + \ 8101 AAC_FW_DBG_STRLEN_OFFSET)) 8102 break; 8103 8104 drv_usecwait(1000); 8105 } 8106 8107 /* 8108 * If the length is clear, copy over the message, the 8109 * flags, and the length. Make sure the length is the 8110 * last because that is the signal for the Firmware to 8111 * pick it up. 
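 *
 * Buffer layout used below: the message text is copied to
 * debug_buf_offset + debug_header_size, the flags word to
 * debug_buf_offset + AAC_FW_DBG_FLAGS_OFFSET, and the string length
 * to debug_buf_offset + AAC_FW_DBG_STRLEN_OFFSET.  The firmware
 * indicates that it has consumed a message by clearing the length
 * word, which is what the wait loop above polls for.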
8112 */ 8113 if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \ 8114 AAC_FW_DBG_STRLEN_OFFSET)) { 8115 PCI_MEM_REP_PUT8(softs, 8116 softs->debug_buf_offset + \ 8117 softs->debug_header_size, 8118 aac_prt_buf, count); 8119 PCI_MEM_PUT32(softs, 8120 softs->debug_buf_offset + \ 8121 AAC_FW_DBG_FLAGS_OFFSET, 8122 softs->debug_fw_flags); 8123 PCI_MEM_PUT32(softs, 8124 softs->debug_buf_offset + \ 8125 AAC_FW_DBG_STRLEN_OFFSET, count); 8126 } else { 8127 cmn_err(CE_WARN, "UART output fail"); 8128 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT; 8129 } 8130 } 8131 8132 /* 8133 * If the Kernel Debug Print flag is set, send it off 8134 * to the Kernel Debugger 8135 */ 8136 if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT) 8137 aac_cmn_err(softs, lev, sl, 8138 (softs->debug_flags & AACDB_FLAGS_NO_HEADERS)); 8139 } else { 8140 /* Driver not initialized yet, no firmware or header output */ 8141 if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT) 8142 aac_cmn_err(softs, lev, sl, 1); 8143 } 8144 mutex_exit(&aac_prt_mutex); 8145 } 8146 8147 /* 8148 * Translate command number to description string 8149 */ 8150 static char * 8151 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist) 8152 { 8153 int i; 8154 8155 for (i = 0; cmdlist[i].key != -1; i++) { 8156 if (cmd == cmdlist[i].key) 8157 return (cmdlist[i].message); 8158 } 8159 return (NULL); 8160 } 8161 8162 static void 8163 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp) 8164 { 8165 struct scsi_pkt *pkt = acp->pkt; 8166 struct scsi_address *ap = &pkt->pkt_address; 8167 int is_pd = 0; 8168 int ctl = ddi_get_instance(softs->devinfo_p); 8169 int tgt = ap->a_target; 8170 int lun = ap->a_lun; 8171 union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp; 8172 uchar_t cmd = cdbp->scc_cmd; 8173 char *desc; 8174 8175 if (tgt >= AAC_MAX_LD) { 8176 is_pd = 1; 8177 ctl = ((struct aac_nondasd *)acp->dvp)->bus; 8178 tgt = ((struct aac_nondasd *)acp->dvp)->tid; 8179 lun = 0; 8180 } 8181 8182 if ((desc = aac_cmd_name(cmd, 8183 (struct aac_key_strings *)scsi_cmds)) == NULL) { 8184 aac_printf(softs, CE_NOTE, 8185 "SCMD> Unknown(0x%2x) --> c%dt%dL%d %s", 8186 cmd, ctl, tgt, lun, is_pd ? "(pd)" : ""); 8187 return; 8188 } 8189 8190 switch (cmd) { 8191 case SCMD_READ: 8192 case SCMD_WRITE: 8193 aac_printf(softs, CE_NOTE, 8194 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 8195 desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp), 8196 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 8197 ctl, tgt, lun, is_pd ? "(pd)" : ""); 8198 break; 8199 case SCMD_READ_G1: 8200 case SCMD_WRITE_G1: 8201 aac_printf(softs, CE_NOTE, 8202 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 8203 desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp), 8204 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 8205 ctl, tgt, lun, is_pd ? "(pd)" : ""); 8206 break; 8207 case SCMD_READ_G4: 8208 case SCMD_WRITE_G4: 8209 aac_printf(softs, CE_NOTE, 8210 "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s", 8211 desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp), 8212 GETG4COUNT(cdbp), 8213 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 8214 ctl, tgt, lun, is_pd ? "(pd)" : ""); 8215 break; 8216 case SCMD_READ_G5: 8217 case SCMD_WRITE_G5: 8218 aac_printf(softs, CE_NOTE, 8219 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s", 8220 desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp), 8221 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr", 8222 ctl, tgt, lun, is_pd ? "(pd)" : ""); 8223 break; 8224 default: 8225 aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s", 8226 desc, ctl, tgt, lun, is_pd ? 
"(pd)" : ""); 8227 } 8228 } 8229 8230 void 8231 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp) 8232 { 8233 struct aac_cmd *acp = slotp->acp; 8234 struct aac_fib *fibp = slotp->fibp; 8235 ddi_acc_handle_t acc = slotp->fib_acc_handle; 8236 uint16_t fib_size; 8237 uint32_t fib_cmd, sub_cmd; 8238 char *cmdstr, *subcmdstr; 8239 char *caller; 8240 int i; 8241 8242 if (acp) { 8243 if (!(softs->debug_fib_flags & acp->fib_flags)) 8244 return; 8245 if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD) 8246 caller = "SCMD"; 8247 else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL) 8248 caller = "IOCTL"; 8249 else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB) 8250 caller = "SRB"; 8251 else 8252 return; 8253 } else { 8254 if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC)) 8255 return; 8256 caller = "SYNC"; 8257 } 8258 8259 fib_cmd = ddi_get16(acc, &fibp->Header.Command); 8260 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds); 8261 sub_cmd = (uint32_t)-1; 8262 subcmdstr = NULL; 8263 8264 /* Print FIB header */ 8265 if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) { 8266 aac_printf(softs, CE_NOTE, "FIB> from %s", caller); 8267 aac_printf(softs, CE_NOTE, " XferState %d", 8268 ddi_get32(acc, &fibp->Header.XferState)); 8269 aac_printf(softs, CE_NOTE, " Command %d", 8270 ddi_get16(acc, &fibp->Header.Command)); 8271 aac_printf(softs, CE_NOTE, " StructType %d", 8272 ddi_get8(acc, &fibp->Header.StructType)); 8273 aac_printf(softs, CE_NOTE, " Flags 0x%x", 8274 ddi_get8(acc, &fibp->Header.Flags)); 8275 aac_printf(softs, CE_NOTE, " Size %d", 8276 ddi_get16(acc, &fibp->Header.Size)); 8277 aac_printf(softs, CE_NOTE, " SenderSize %d", 8278 ddi_get16(acc, &fibp->Header.SenderSize)); 8279 aac_printf(softs, CE_NOTE, " SenderAddr 0x%x", 8280 ddi_get32(acc, &fibp->Header.SenderFibAddress)); 8281 aac_printf(softs, CE_NOTE, " RcvrAddr 0x%x", 8282 ddi_get32(acc, &fibp->Header.ReceiverFibAddress)); 8283 aac_printf(softs, CE_NOTE, " SenderData 0x%x", 8284 ddi_get32(acc, &fibp->Header.SenderData)); 8285 } 8286 8287 /* Print FIB data */ 8288 switch (fib_cmd) { 8289 case ContainerCommand: 8290 sub_cmd = ddi_get32(acc, 8291 (void *)&(((uint32_t *)(void *)&fibp->data[0])[0])); 8292 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds); 8293 if (subcmdstr == NULL) 8294 break; 8295 8296 switch (sub_cmd) { 8297 case VM_ContainerConfig: { 8298 struct aac_Container *pContainer = 8299 (struct aac_Container *)fibp->data; 8300 8301 fib_cmd = sub_cmd; 8302 cmdstr = subcmdstr; 8303 sub_cmd = (uint32_t)-1; 8304 subcmdstr = NULL; 8305 8306 sub_cmd = ddi_get32(acc, 8307 &pContainer->CTCommand.command); 8308 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds); 8309 if (subcmdstr == NULL) 8310 break; 8311 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)", 8312 subcmdstr, 8313 ddi_get32(acc, &pContainer->CTCommand.param[0]), 8314 ddi_get32(acc, &pContainer->CTCommand.param[1]), 8315 ddi_get32(acc, &pContainer->CTCommand.param[2])); 8316 return; 8317 } 8318 8319 case VM_Ioctl: 8320 fib_cmd = sub_cmd; 8321 cmdstr = subcmdstr; 8322 sub_cmd = (uint32_t)-1; 8323 subcmdstr = NULL; 8324 8325 sub_cmd = ddi_get32(acc, 8326 (void *)&(((uint32_t *)(void *)&fibp->data[0])[4])); 8327 subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds); 8328 break; 8329 8330 case VM_CtBlockRead: 8331 case VM_CtBlockWrite: { 8332 struct aac_blockread *br = 8333 (struct aac_blockread *)fibp->data; 8334 struct aac_sg_table *sg = &br->SgMap; 8335 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 8336 8337 aac_printf(softs, CE_NOTE, 8338 "FIB> %s Container %d 0x%x/%d", 
subcmdstr, 8339 ddi_get32(acc, &br->ContainerId), 8340 ddi_get32(acc, &br->BlockNumber), 8341 ddi_get32(acc, &br->ByteCount)); 8342 for (i = 0; i < sgcount; i++) 8343 aac_printf(softs, CE_NOTE, 8344 " %d: 0x%08x/%d", i, 8345 ddi_get32(acc, &sg->SgEntry[i].SgAddress), 8346 ddi_get32(acc, &sg->SgEntry[i]. \ 8347 SgByteCount)); 8348 return; 8349 } 8350 } 8351 break; 8352 8353 case ContainerCommand64: { 8354 struct aac_blockread64 *br = 8355 (struct aac_blockread64 *)fibp->data; 8356 struct aac_sg_table64 *sg = &br->SgMap64; 8357 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 8358 uint64_t sgaddr; 8359 8360 sub_cmd = br->Command; 8361 subcmdstr = NULL; 8362 if (sub_cmd == VM_CtHostRead64) 8363 subcmdstr = "VM_CtHostRead64"; 8364 else if (sub_cmd == VM_CtHostWrite64) 8365 subcmdstr = "VM_CtHostWrite64"; 8366 else 8367 break; 8368 8369 aac_printf(softs, CE_NOTE, 8370 "FIB> %s Container %d 0x%x/%d", subcmdstr, 8371 ddi_get16(acc, &br->ContainerId), 8372 ddi_get32(acc, &br->BlockNumber), 8373 ddi_get16(acc, &br->SectorCount)); 8374 for (i = 0; i < sgcount; i++) { 8375 sgaddr = ddi_get64(acc, 8376 &sg->SgEntry64[i].SgAddress); 8377 aac_printf(softs, CE_NOTE, 8378 " %d: 0x%08x.%08x/%d", i, 8379 AAC_MS32(sgaddr), AAC_LS32(sgaddr), 8380 ddi_get32(acc, &sg->SgEntry64[i]. \ 8381 SgByteCount)); 8382 } 8383 return; 8384 } 8385 8386 case RawIo: { 8387 struct aac_raw_io *io = (struct aac_raw_io *)fibp->data; 8388 struct aac_sg_tableraw *sg = &io->SgMapRaw; 8389 uint32_t sgcount = ddi_get32(acc, &sg->SgCount); 8390 uint64_t sgaddr; 8391 8392 aac_printf(softs, CE_NOTE, 8393 "FIB> RawIo Container %d 0x%llx/%d 0x%x", 8394 ddi_get16(acc, &io->ContainerId), 8395 ddi_get64(acc, &io->BlockNumber), 8396 ddi_get32(acc, &io->ByteCount), 8397 ddi_get16(acc, &io->Flags)); 8398 for (i = 0; i < sgcount; i++) { 8399 sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress); 8400 aac_printf(softs, CE_NOTE, " %d: 0x%08x.%08x/%d", i, 8401 AAC_MS32(sgaddr), AAC_LS32(sgaddr), 8402 ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount)); 8403 } 8404 return; 8405 } 8406 8407 case ClusterCommand: 8408 sub_cmd = ddi_get32(acc, 8409 (void *)&(((uint32_t *)(void *)fibp->data)[0])); 8410 subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds); 8411 break; 8412 8413 case AifRequest: 8414 sub_cmd = ddi_get32(acc, 8415 (void *)&(((uint32_t *)(void *)fibp->data)[0])); 8416 subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds); 8417 break; 8418 8419 default: 8420 break; 8421 } 8422 8423 fib_size = ddi_get16(acc, &(fibp->Header.Size)); 8424 if (subcmdstr) 8425 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 8426 subcmdstr, fib_size); 8427 else if (cmdstr && sub_cmd == (uint32_t)-1) 8428 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d", 8429 cmdstr, fib_size); 8430 else if (cmdstr) 8431 aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d", 8432 cmdstr, sub_cmd, fib_size); 8433 else 8434 aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d", 8435 fib_cmd, fib_size); 8436 } 8437 8438 static void 8439 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif) 8440 { 8441 int aif_command; 8442 uint32_t aif_seqnumber; 8443 int aif_en_type; 8444 char *str; 8445 8446 aif_command = LE_32(aif->command); 8447 aif_seqnumber = LE_32(aif->seqNumber); 8448 aif_en_type = LE_32(aif->data.EN.type); 8449 8450 switch (aif_command) { 8451 case AifCmdEventNotify: 8452 str = aac_cmd_name(aif_en_type, aac_aifens); 8453 if (str) 8454 aac_printf(softs, CE_NOTE, "AIF! %s", str); 8455 else 8456 aac_printf(softs, CE_NOTE, "AIF! 
Unknown(0x%x)", 8457 aif_en_type); 8458 break; 8459 8460 case AifCmdJobProgress: 8461 switch (LE_32(aif->data.PR[0].status)) { 8462 case AifJobStsSuccess: 8463 str = "success"; break; 8464 case AifJobStsFinished: 8465 str = "finished"; break; 8466 case AifJobStsAborted: 8467 str = "aborted"; break; 8468 case AifJobStsFailed: 8469 str = "failed"; break; 8470 case AifJobStsSuspended: 8471 str = "suspended"; break; 8472 case AifJobStsRunning: 8473 str = "running"; break; 8474 default: 8475 str = "unknown"; break; 8476 } 8477 aac_printf(softs, CE_NOTE, 8478 "AIF! JobProgress (%d) - %s (%d, %d)", 8479 aif_seqnumber, str, 8480 LE_32(aif->data.PR[0].currentTick), 8481 LE_32(aif->data.PR[0].finalTick)); 8482 break; 8483 8484 case AifCmdAPIReport: 8485 aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)", 8486 aif_seqnumber); 8487 break; 8488 8489 case AifCmdDriverNotify: 8490 aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)", 8491 aif_seqnumber); 8492 break; 8493 8494 default: 8495 aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)", 8496 aif_command, aif_seqnumber); 8497 break; 8498 } 8499 } 8500 8501 #endif /* DEBUG */ 8502