1 /* 2 * Copyright (c) 2014, LSI Corp. 3 * All rights reserved. 4 * Author: Marian Choy 5 * Support: freebsdraid@lsi.com 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of the <ORGANIZATION> nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 * 34 * The views and conclusions contained in the software and documentation 35 * are those of the authors and should not be interpreted as representing 36 * official policies,either expressed or implied, of the FreeBSD Project. 
 *
 * Send feedback to: <megaraidfbsd@lsi.com>
 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
 * ATTN: MegaRaid FreeBSD
 *
 * $FreeBSD: head/sys/dev/mrsas/mrsas.c 265555 2014-05-07 16:16:49Z ambrisko $
 */

#include <dev/raid/mrsas/mrsas.h>
#include <dev/raid/mrsas/mrsas_ioctl.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>

#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/device.h>
#include <sys/spinlock2.h>


/*
 * Function prototypes
 */
static d_open_t mrsas_open;
static d_close_t mrsas_close;
static d_read_t mrsas_read;
static d_write_t mrsas_write;
static d_ioctl_t mrsas_ioctl;

static struct mrsas_ident *mrsas_find_ident(device_t);
static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
static void mrsas_flush_cache(struct mrsas_softc *sc);
static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
static void mrsas_ocr_thread(void *arg);
static int mrsas_get_map_info(struct mrsas_softc *sc);
static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
static int mrsas_sync_map_info(struct mrsas_softc *sc);
static int mrsas_get_pd_list(struct mrsas_softc *sc);
static int mrsas_get_ld_list(struct mrsas_softc *sc);
static int mrsas_setup_irq(struct mrsas_softc *sc);
static int mrsas_alloc_mem(struct mrsas_softc *sc);
static int mrsas_init_fw(struct mrsas_softc *sc);
static int mrsas_setup_raidmap(struct mrsas_softc *sc);
static int mrsas_complete_cmd(struct mrsas_softc *sc);
static int mrsas_clear_intr(struct mrsas_softc *sc);
static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
    struct mrsas_ctrl_info *ctrl_info);
static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd_to_abort);
u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *mfi_cmd);
int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
int mrsas_init_adapter(struct mrsas_softc *sc);
int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
int mrsas_ioc_init(struct mrsas_softc *sc);
int mrsas_bus_scan(struct mrsas_softc *sc);
int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int mrsas_reset_ctrl(struct mrsas_softc *sc);
int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd);
int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
    int size);
void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void mrsas_disable_intr(struct mrsas_softc *sc);
void mrsas_enable_intr(struct mrsas_softc *sc);
void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
void mrsas_free_mem(struct mrsas_softc *sc);
void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
void mrsas_isr(void *arg);
void mrsas_teardown_intr(struct mrsas_softc *sc);
void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
void mrsas_kill_hba (struct mrsas_softc *sc);
void mrsas_aen_handler(struct mrsas_softc *sc);
void mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value);
void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);
void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd, u_int8_t status);
void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
    u_int8_t extStatus);
struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd);

/* Helpers implemented in the companion CAM/fusion source files. */
extern int mrsas_cam_attach(struct mrsas_softc *sc);
extern void mrsas_cam_detach(struct mrsas_softc *sc);
extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
extern int mrsas_passthru(struct mrsas_softc *sc, void *arg);
extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
extern void mrsas_xpt_release(struct mrsas_softc *sc);
extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
    u_int16_t index);
extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");


/**
 * PCI device struct and table
 *
 */
typedef struct mrsas_ident {
    uint16_t vendor;     /* PCI vendor ID (0x1000 = LSI) */
    uint16_t device;     /* PCI device ID */
    uint16_t subvendor;  /* subsystem vendor, 0xffff = wildcard */
    uint16_t subdevice;  /* subsystem device, 0xffff = wildcard */
    const char *desc;    /* human-readable controller description */
} MRSAS_CTLR_ID;

/* Supported controllers; terminated by an all-zero sentinel entry. */
MRSAS_CTLR_ID device_table[] = {
    {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
    {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
    {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
    {0, 0, 0, 0, NULL}
};

/**
 * Character device entry points
 *
 */
static struct dev_ops mrsas_ops = {
    { "mrsas", 0, D_MPSAFE },
    .d_open = mrsas_open,
    .d_close = mrsas_close,
    .d_read = mrsas_read,
    .d_write = mrsas_write,
    .d_ioctl = mrsas_ioctl,
};

MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");

/* Tunable: when set, Thunderbolt cards are left to the mfi(4) driver. */
static int mrsas_mfi_enable = 0;
TUNABLE_INT("hw.mrsas.mfi_enable", &mrsas_mfi_enable);

/* Tunable: use MSI interrupts when available (default on). */
static int mrsas_msi_enable = 1;
TUNABLE_INT("hw.mrsas.msi.enable", &mrsas_msi_enable);

/**
 * In the cdevsw routines, we find our softc by using the si_drv1 member
 * of struct cdev.  We set this variable to point to our softc in our
 * attach routine when we create the /dev entry.
 *
 * The open/close/read/write entry points below are intentionally no-ops;
 * all real work is done through mrsas_ioctl().
 */
int
mrsas_open(struct dev_open_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct mrsas_softc *sc;

    sc = dev->si_drv1;
    return (0);
}

int
mrsas_close(struct dev_close_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct mrsas_softc *sc;

    sc = dev->si_drv1;
    return (0);
}

int
mrsas_read(struct dev_read_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct mrsas_softc *sc;

    sc = dev->si_drv1;
    return (0);
}
int
mrsas_write(struct dev_write_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct mrsas_softc *sc;

    sc = dev->si_drv1;
    return (0);
}

/**
 * Register Read/Write Functions
 *
 * Thin wrappers around bus_space so the rest of the driver never touches
 * the bus tag/handle directly.
 */
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value)
{
    bus_space_tag_t bus_tag = sc->bus_tag;
    bus_space_handle_t bus_handle = sc->bus_handle;

    bus_space_write_4(bus_tag, bus_handle, offset, value);
}

u_int32_t
mrsas_read_reg(struct mrsas_softc *sc, int offset)
{
    bus_space_tag_t bus_tag = sc->bus_tag;
    bus_space_handle_t bus_handle = sc->bus_handle;

    return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
}


/**
 * Interrupt Disable/Enable/Clear Functions
 *
 */
void mrsas_disable_intr(struct mrsas_softc *sc)
{
    u_int32_t mask = 0xFFFFFFFF;
    u_int32_t status;

    /* Mask all bits in the outbound interrupt mask register. */
    mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
    /* Dummy read to force pci flush */
    status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

void mrsas_enable_intr(struct mrsas_softc *sc)
{
    u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
    u_int32_t status;

    /* Clear any stale status, then unmask only the fusion reply interrupt.
     * Each write is followed by a read-back to flush the PCI posting
     * buffer. */
    mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
    status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

    mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
    status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

/*
 * Returns 1 when the interrupt belongs to this adapter (FW state change
 * or a reply interrupt), 0 when it is not ours.
 */
static int mrsas_clear_intr(struct mrsas_softc *sc)
{
    u_int32_t status, fw_status, fw_state;

    /* Read received interrupt */
    status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

    /* If FW state change interrupt is received, write to it again to clear */
    if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
        fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
            outbound_scratch_pad));
        fw_state = fw_status & MFI_STATE_MASK;
        if (fw_state == MFI_STATE_FAULT) {
            device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
            /* Kick the online-controller-reset thread so it can recover. */
            if(sc->ocr_thread_active)
                wakeup(&sc->ocr_chan);
        }
        mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
        /* Dummy read to force pci flush of the write above. */
        mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
        return(1);
    }

    /* Not our interrupt, so just return */
    if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
        return(0);

    /* We got a reply interrupt */
    return(1);
}

/**
 * PCI Support Functions
 *
 */

/*
 * Match the device against device_table[]; 0xffff subsystem IDs act as
 * wildcards.  Returns the matching entry or NULL.
 */
static struct mrsas_ident * mrsas_find_ident(device_t dev)
{
    struct mrsas_ident *pci_device;

    for (pci_device=device_table; pci_device->vendor != 0; pci_device++)
    {
        if ((pci_device->vendor == pci_get_vendor(dev)) &&
            (pci_device->device == pci_get_device(dev)) &&
            ((pci_device->subvendor == pci_get_subvendor(dev)) ||
            (pci_device->subvendor == 0xffff)) &&
            ((pci_device->subdevice == pci_get_subdevice(dev)) ||
            (pci_device->subdevice == 0xffff)))
            return (pci_device);
    }
    return (NULL);
}

static int mrsas_probe(device_t dev)
{
    /* Print the version banner only once, for the first controller. */
    static u_int8_t first_ctrl = 1;
    struct mrsas_ident *id;

    if ((id = mrsas_find_ident(dev)) != NULL) {
        /* give priority to mfi(4) if tunable set */
        TUNABLE_INT_FETCH("hw.mrsas.mfi_enable", &mrsas_mfi_enable);
        if ((id->device == MRSAS_TBOLT) && mrsas_mfi_enable) {
            return (ENXIO);
        } else {
            if (first_ctrl) {
                kprintf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n",
                    MRSAS_VERSION);
                first_ctrl = 0;
            }
            device_set_desc(dev, id->desc);
            return (BUS_PROBE_DEFAULT);
        }
    }
    return (ENXIO);
}

/**
 * mrsas_setup_sysctl: setup sysctl values for mrsas
 * input: Adapter instance soft state
 *
 * Setup sysctl entries for mrsas driver.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
    struct sysctl_ctx_list *sysctl_ctx = NULL;
    struct sysctl_oid *sysctl_tree = NULL;
    char tmpstr[80], tmpstr2[80];

    /*
     * Setup the sysctl variable so the user can change the debug level
     * on the fly.
     */
    ksnprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
        device_get_unit(sc->mrsas_dev));
    ksnprintf(tmpstr2, sizeof(tmpstr2), "mrsas%d", device_get_unit(sc->mrsas_dev));

#if 0
    sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
    if (sysctl_ctx != NULL)
        sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

    if (sysctl_tree == NULL) {
#endif
        /* Create our own hw.mrsasN tree (device sysctl path disabled). */
        sysctl_ctx_init(&sc->sysctl_ctx);
        sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
            SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, tmpstr2,
            CTLFLAG_RD, 0, tmpstr);
        if (sc->sysctl_tree == NULL)
            return;
        sysctl_ctx = &sc->sysctl_ctx;
        sysctl_tree = sc->sysctl_tree;
#if 0
    }
#endif
    SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
        "Disable the use of OCR");

    SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
        strlen(MRSAS_VERSION), "driver version");

    SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "reset_count", CTLFLAG_RD,
        &sc->reset_count, 0, "number of ocr from start of the day");

    SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "fw_outstanding", CTLFLAG_RD,
        &sc->fw_outstanding, 0, "FW outstanding commands");

    SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
        &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

    SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
        "Driver debug level");

    SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
        0, "Driver IO timeout value in milliseconds.");

    SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
        &sc->mrsas_fw_fault_check_delay,
        0, "FW fault check thread delay in seconds. <default is 1 sec>");

    SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "reset_in_progress", CTLFLAG_RD,
        &sc->reset_in_progress, 0, "ocr in progress status");

}

/**
 * mrsas_get_tunables: get tunable parameters.
 * input: Adapter instance soft state
 *
 * Get tunable parameters. This will help to debug driver at boot time.
 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
    char tmpstr[80];

    /* XXX default to some debugging for now */
    sc->mrsas_debug = MRSAS_FAULT;
    sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
    sc->mrsas_fw_fault_check_delay = 1;
    sc->reset_count = 0;
    sc->reset_in_progress = 0;

    /*
     * Grab the global variables.
     */
    TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

    /* Grab the unit-instance variables */
    ksnprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
        device_get_unit(sc->mrsas_dev));
    TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}

/**
 * mrsas_alloc_evt_log_info_cmd: Allocates memory to get event log information.
 * Used to get sequence number at driver load time.
 * input: Adapter soft state
 *
 * Allocates DMAable memory for the event log info internal command.
 */
int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
{
    int el_info_size;

    /* Allocate get event log info command */
    el_info_size = sizeof(struct mrsas_evt_log_info);
    /* NOTE(review): on the failure paths below, resources created by the
     * earlier steps are not released here; mrsas_free_evt_log_info_cmd()
     * is expected to clean up — confirm callers invoke it on error. */
    if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
            1, 0,                       // alignment, boundary
            BUS_SPACE_MAXADDR_32BIT,    // lowaddr
            BUS_SPACE_MAXADDR,          // highaddr
            NULL, NULL,                 // filter, filterarg
            el_info_size,               // maxsize
            1,                          // nsegments
            el_info_size,               // maxsegsize
            BUS_DMA_ALLOCNOW,           // flags
            &sc->el_info_tag)) {
        device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
        return (ENOMEM);
    }
    if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
            BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
        device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
        return (ENOMEM);
    }
    if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
            sc->el_info_mem, el_info_size, mrsas_addr_cb,
            &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
        device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
        return (ENOMEM);
    }

    memset(sc->el_info_mem, 0, el_info_size);
    return (0);
}

/**
 * mrsas_free_evt_log_info_cmd: Free memory for event log info command
 * input: Adapter soft state
 *
 * Deallocates memory for the event log info internal command.
 * Safe to call after a partial mrsas_alloc_evt_log_info_cmd(): each
 * resource is checked before it is released.
 */
void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
    if (sc->el_info_phys_addr)
        bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
    if (sc->el_info_mem != NULL)
        bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
    if (sc->el_info_tag != NULL)
        bus_dma_tag_destroy(sc->el_info_tag);
}

/**
 * mrsas_get_seq_num: Get latest event sequence number
 * @sc: Adapter soft state
 * @eli: Firmware event log sequence number information.
 *
 * Firmware maintains a log of all events in a non-volatile area.
 * The driver gets the sequence number using the DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
 *
 * Returns 0 on success, -ENOMEM when no MFI command or DMA buffer is
 * available.
 */

static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
    struct mrsas_mfi_cmd *cmd;
    struct mrsas_dcmd_frame *dcmd;

    cmd = mrsas_get_mfi_cmd(sc);

    if (!cmd) {
        device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
        return -ENOMEM;
    }

    dcmd = &cmd->frame->dcmd;

    if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
        device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
        mrsas_release_mfi_cmd(cmd);
        return -ENOMEM;
    }

    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

    /* Build a blocking, read-direction DCMD with a single SGE pointing
     * at the DMA buffer allocated above. */
    dcmd->cmd = MFI_CMD_DCMD;
    dcmd->cmd_status = 0x0;
    dcmd->sge_count = 1;
    dcmd->flags = MFI_FRAME_DIR_READ;
    dcmd->timeout = 0;
    dcmd->pad_0 = 0;
    dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
    dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
    dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
    dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

    mrsas_issue_blocked_cmd(sc, cmd);

    /*
     * Copy the data back into callers buffer
     */
    memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
    mrsas_free_evt_log_info_cmd(sc);
    mrsas_release_mfi_cmd(cmd);

    return 0;
}


/**
 * mrsas_register_aen: Register for asynchronous event notification
 * @sc: Adapter soft state
 * @seq_num: Starting sequence number
 * @class_locale: Class of the event
 * This function subscribes for events beyond the @seq_num
 * and type @class_locale.
 *
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
    int ret_val;
    struct mrsas_mfi_cmd *cmd;
    struct mrsas_dcmd_frame *dcmd;
    union mrsas_evt_class_locale curr_aen;
    union mrsas_evt_class_locale prev_aen;

    /*
     * If there is an AEN pending already (aen_cmd), check if the
     * class_locale of that pending AEN is inclusive of the new
     * AEN request we currently have.  If it is, then we don't have
     * to do anything.  In other words, whichever events the current
     * AEN request is subscribing to, have already been subscribed
     * to.
     * If the old_cmd is _not_ inclusive, then we have to abort
     * that command, form a class_locale that is a superset of both
     * old and current and re-issue to the FW.
     */

    curr_aen.word = class_locale_word;

    if (sc->aen_cmd) {

        prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

        /*
         * A class whose enum value is smaller is inclusive of all
         * higher values.  If a PROGRESS (= -1) was previously
         * registered, then a new registration requests for higher
         * classes need not be sent to FW.  They are automatically
         * included.
         * Locale numbers don't have such hierarchy.  They are bitmap
         * values.
         */
        if ((prev_aen.members.class <= curr_aen.members.class) &&
            !((prev_aen.members.locale & curr_aen.members.locale) ^
            curr_aen.members.locale)) {
            /*
             * Previously issued event registration includes
             * current request.  Nothing to do.
             */
            return 0;
        } else {
            curr_aen.members.locale |= prev_aen.members.locale;

            if (prev_aen.members.class < curr_aen.members.class)
                curr_aen.members.class = prev_aen.members.class;

            sc->aen_cmd->abort_aen = 1;
            ret_val = mrsas_issue_blocked_abort_cmd(sc,
                sc->aen_cmd);

            if (ret_val) {
                kprintf("mrsas: Failed to abort "
                    "previous AEN command\n");
                return ret_val;
            }
        }
    }

    cmd = mrsas_get_mfi_cmd(sc);

    if (!cmd)
        return -ENOMEM;

    dcmd = &cmd->frame->dcmd;

    memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

    /*
     * Prepare DCMD for aen registration
     */
    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

    dcmd->cmd = MFI_CMD_DCMD;
    dcmd->cmd_status = 0x0;
    dcmd->sge_count = 1;
    dcmd->flags = MFI_FRAME_DIR_READ;
    dcmd->timeout = 0;
    dcmd->pad_0 = 0;
    dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
    dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
    dcmd->mbox.w[0] = seq_num;
    sc->last_seq_num = seq_num;
    dcmd->mbox.w[1] = curr_aen.word;
    dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr;
    dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

    /* NOTE(review): this bails out if another AEN registration raced in
     * after the abort above; it assumes the abort completion path cleared
     * sc->aen_cmd — confirm against mrsas_complete_abort/aen handling. */
    if (sc->aen_cmd != NULL) {
        mrsas_release_mfi_cmd(cmd);
        return 0;
    }

    /*
     * Store reference to the cmd used to register for AEN.  When an
     * application wants us to register for AEN, we have to abort this
     * cmd and re-register with a new EVENT LOCALE supplied by that app.
     */
    sc->aen_cmd = cmd;

    /*
     * Issue the aen registration frame
     */
    if (mrsas_issue_dcmd(sc, cmd)){
        device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
        return(1);
    }

    return 0;
}
/**
 * mrsas_start_aen - Subscribes to AEN during driver load time
 * @instance: Adapter soft state
 *
 * Returns 0 on success, -1 when the sequence number could not be read,
 * otherwise the mrsas_register_aen() result.
 */
static int mrsas_start_aen(struct mrsas_softc *sc)
{
    struct mrsas_evt_log_info eli;
    union mrsas_evt_class_locale class_locale;


    /* Get the latest sequence number from FW */

    memset(&eli, 0, sizeof(eli));

    if (mrsas_get_seq_num(sc, &eli))
        return -1;

    /* Register AEN with FW for latest sequence number plus 1 */
    class_locale.members.reserved = 0;
    class_locale.members.locale = MR_EVT_LOCALE_ALL;
    class_locale.members.class = MR_EVT_CLASS_DEBUG;

    return mrsas_register_aen(sc, eli.newest_seq_num + 1,
        class_locale.word);
}

/**
 * mrsas_attach: PCI entry point
 * input: device struct pointer
 *
 * Performs setup of PCI and registers, initializes mutexes and
 * linked lists, registers interrupts and CAM, and initializes
 * the adapter/controller to its proper state.
 */
static int mrsas_attach(device_t dev)
{
    struct mrsas_softc *sc = device_get_softc(dev);
    uint32_t cmd, bar, error;

    /* Look up our softc and initialize its fields. */
    sc->mrsas_dev = dev;
    sc->device_id = pci_get_device(dev);

    mrsas_get_tunables(sc);

    /*
     * Set up PCI and registers
     */
    cmd = pci_read_config(dev, PCIR_COMMAND, 2);
    if ( (cmd & PCIM_CMD_PORTEN) == 0) {
        return (ENXIO);
    }
    /* Force the busmaster enable bit on. */
    cmd |= PCIM_CMD_BUSMASTEREN;
    pci_write_config(dev, PCIR_COMMAND, cmd, 2);

    //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4);
    bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);

    sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */
    if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
        &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
        == NULL) {
        device_printf(dev, "Cannot allocate PCI registers\n");
        goto attach_fail;
    }
    sc->bus_tag = rman_get_bustag(sc->reg_res);
    sc->bus_handle = rman_get_bushandle(sc->reg_res);

    /* Initialize mutexes */
    lockinit(&sc->sim_lock, "mrsas_sim_lock", 0, LK_CANRECURSE);
    lockinit(&sc->pci_lock, "mrsas_pci_lock", 0, LK_CANRECURSE);
    lockinit(&sc->io_lock, "mrsas_io_lock", 0, LK_CANRECURSE);
    lockinit(&sc->aen_lock, "mrsas_aen_lock", 0, LK_CANRECURSE);
    spin_init(&sc->ioctl_lock, "mrsasioctl");
    lockinit(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", 0,
        LK_CANRECURSE);
    lockinit(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", 0,
        LK_CANRECURSE);
    lockinit(&sc->raidmap_lock, "mrsas_raidmap_lock", 0, LK_CANRECURSE);

    /* Initialize linked lists */
    TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
    TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

    atomic_set(&sc->fw_outstanding,0);

    sc->io_cmds_highwater = 0;

    /* Create a /dev entry for this device. */
    sc->mrsas_cdev = make_dev(&mrsas_ops, device_get_unit(dev), UID_ROOT,
        GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
        device_get_unit(dev));
    if (sc->mrsas_cdev)
        sc->mrsas_cdev->si_drv1 = sc;

    sc->adprecovery = MRSAS_HBA_OPERATIONAL;
    sc->UnevenSpanSupport = 0;

    /* Initialize Firmware */
    if (mrsas_init_fw(sc) != SUCCESS) {
        goto attach_fail_fw;
    }

    /* Register SCSI mid-layer */
    if ((mrsas_cam_attach(sc) != SUCCESS)) {
        goto attach_fail_cam;
    }

    /* Register IRQs */
    if (mrsas_setup_irq(sc) != SUCCESS) {
        goto attach_fail_irq;
    }

    /* Enable Interrupts */
    mrsas_enable_intr(sc);

    /* Start the online-controller-reset (OCR) watchdog thread. */
    error = kthread_create(mrsas_ocr_thread, sc, &sc->ocr_thread, "mrsas_ocr%d",
        device_get_unit(sc->mrsas_dev));
    if (error) {
        kprintf("Error %d starting rescan thread\n", error);
        goto attach_fail_irq;
    }

    mrsas_setup_sysctl(sc);

    /* Initiate AEN (Asynchronous Event Notification) */

    if (mrsas_start_aen(sc)) {
        kprintf("Error: start aen failed\n");
        goto fail_start_aen;
    }

    return (0);

    /* Error unwinding: fall through the labels in reverse setup order. */
fail_start_aen:
attach_fail_irq:
    mrsas_teardown_intr(sc);
attach_fail_cam:
    mrsas_cam_detach(sc);
attach_fail_fw:
//attach_fail_raidmap:
    mrsas_free_mem(sc);
    lockuninit(&sc->sim_lock);
    lockuninit(&sc->aen_lock);
    lockuninit(&sc->pci_lock);
    lockuninit(&sc->io_lock);
    spin_uninit(&sc->ioctl_lock);
    lockuninit(&sc->mpt_cmd_pool_lock);
    lockuninit(&sc->mfi_cmd_pool_lock);
    lockuninit(&sc->raidmap_lock);
attach_fail:
    /* NOTE(review): on the attach_fail path the cdev may not have been
     * created yet (softc is zeroed) — confirm destroy_dev() tolerates
     * that on this platform. */
    destroy_dev(sc->mrsas_cdev);
    if (sc->reg_res){
        bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
            sc->reg_res_id, sc->reg_res);
    }
    return (ENXIO);
}

/**
 * mrsas_detach: De-allocates and teardown resources
 * input: device struct pointer
 *
 * This function is the entry point for device disconnect and detach.
 * It performs memory de-allocations, shutdown of the controller and various
 * teardown and destroy resource functions.
 */
static int mrsas_detach(device_t dev)
{
    struct mrsas_softc *sc;
    int i = 0;

    sc = device_get_softc(dev);
    sc->remove_in_progress = 1;
    /* Wake the OCR thread so it notices remove_in_progress and exits. */
    if(sc->ocr_thread_active)
        wakeup(&sc->ocr_chan);
    /* Wait for any in-flight online controller reset to finish,
     * logging a notice every MRSAS_RESET_NOTICE_INTERVAL seconds. */
    while(sc->reset_in_progress){
        i++;
        if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
            mrsas_dprint(sc, MRSAS_INFO,
                "[%2d]waiting for ocr to be finished\n",i);
        }
        tsleep(mrsas_detach, 0, "mr_shutdown", hz);
    }
    i = 0;
    /* Wait for the OCR thread itself to terminate. */
    while(sc->ocr_thread_active){
        i++;
        if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
            mrsas_dprint(sc, MRSAS_INFO,
                "[%2d]waiting for "
                "mrsas_ocr thread to quit ocr %d\n",i,
                sc->ocr_thread_active);
        }
        tsleep(mrsas_detach, 0, "mr_shutdown", hz);
    }
    /* Quiesce and shut down the controller before freeing resources. */
    mrsas_flush_cache(sc);
    mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
    mrsas_disable_intr(sc);
    mrsas_cam_detach(sc);
    mrsas_teardown_intr(sc);
    mrsas_free_mem(sc);
    lockuninit(&sc->sim_lock);
    lockuninit(&sc->aen_lock);
    lockuninit(&sc->pci_lock);
    lockuninit(&sc->io_lock);
    spin_uninit(&sc->ioctl_lock);
    lockuninit(&sc->mpt_cmd_pool_lock);
    lockuninit(&sc->mfi_cmd_pool_lock);
    lockuninit(&sc->raidmap_lock);
    if (sc->reg_res){
        bus_release_resource(sc->mrsas_dev,
            SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
    }
    destroy_dev(sc->mrsas_cdev);
    if (sc->sysctl_tree != NULL)
        sysctl_ctx_free(&sc->sysctl_ctx);
    return (0);
}

/**
 * mrsas_free_mem: Frees allocated memory
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() to free previously allocated
 * memory.
 */
void mrsas_free_mem(struct mrsas_softc *sc)
{
    int i;
    u_int32_t max_cmd;
    struct mrsas_mfi_cmd *mfi_cmd;
    struct mrsas_mpt_cmd *mpt_cmd;

    /*
     * Free RAID map memory (two maps: current and pending)
     */
    for (i=0; i < 2; i++)
    {
        if (sc->raidmap_phys_addr[i])
            bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
        if (sc->raidmap_mem[i] != NULL)
            bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
        if (sc->raidmap_tag[i] != NULL)
            bus_dma_tag_destroy(sc->raidmap_tag[i]);
    }

    /*
     * Free version buffer memory
     */
    if (sc->verbuf_phys_addr)
        bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
    if (sc->verbuf_mem != NULL)
        bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
    if (sc->verbuf_tag != NULL)
        bus_dma_tag_destroy(sc->verbuf_tag);


    /*
     * Free sense buffer memory
     */
    if (sc->sense_phys_addr)
        bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
    if (sc->sense_mem != NULL)
        bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
    if (sc->sense_tag != NULL)
        bus_dma_tag_destroy(sc->sense_tag);

    /*
     * Free chain frame memory
     */
    if (sc->chain_frame_phys_addr)
        bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
    if (sc->chain_frame_mem != NULL)
        bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
    if (sc->chain_frame_tag != NULL)
        bus_dma_tag_destroy(sc->chain_frame_tag);

    /*
     * Free IO Request memory
     */
    if (sc->io_request_phys_addr)
        bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
    if (sc->io_request_mem != NULL)
        bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
    if (sc->io_request_tag != NULL)
        bus_dma_tag_destroy(sc->io_request_tag);

    /*
     * Free Reply Descriptor memory
     */
    if (sc->reply_desc_phys_addr)
        bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
    if (sc->reply_desc_mem != NULL)
        bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
    if (sc->reply_desc_tag != NULL)
        bus_dma_tag_destroy(sc->reply_desc_tag);

    /*
     * Free event detail memory
     */
    if (sc->evt_detail_phys_addr)
        bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
    if (sc->evt_detail_mem != NULL)
        bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
    if (sc->evt_detail_tag != NULL)
        bus_dma_tag_destroy(sc->evt_detail_tag);

    /*
     * Free MFI frames (DMA memory); the command structures themselves
     * are released further below.
     */
    if (sc->mfi_cmd_list) {
        for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
            mfi_cmd = sc->mfi_cmd_list[i];
            mrsas_free_frame(sc, mfi_cmd);
        }
    }
    if (sc->mficmd_frame_tag != NULL)
        bus_dma_tag_destroy(sc->mficmd_frame_tag);

    /*
     * Free MPT internal command list
     */
    max_cmd = sc->max_fw_cmds;
    if (sc->mpt_cmd_list) {
        for (i = 0; i < max_cmd; i++) {
            mpt_cmd = sc->mpt_cmd_list[i];
            bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
            kfree(sc->mpt_cmd_list[i], M_MRSAS);
        }
        kfree(sc->mpt_cmd_list, M_MRSAS);
        sc->mpt_cmd_list = NULL;
    }

    /*
     * Free MFI internal command list
     */

    if (sc->mfi_cmd_list) {
        for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
            kfree(sc->mfi_cmd_list[i], M_MRSAS);
        }
        kfree(sc->mfi_cmd_list, M_MRSAS);
        sc->mfi_cmd_list = NULL;
    }

    /*
     * Free request descriptor memory
     */
    kfree(sc->req_desc, M_MRSAS);
    sc->req_desc = NULL;

    /*
     * Destroy parent tag
     */
    if (sc->mrsas_parent_tag != NULL)
        bus_dma_tag_destroy(sc->mrsas_parent_tag);
}

/**
 * mrsas_teardown_intr: Teardown interrupt
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() to teardown
and release
 * bus interrupt resource.
 */
void mrsas_teardown_intr(struct mrsas_softc *sc)
{
	/* Detach the handler before releasing the underlying IRQ resource. */
	if (sc->intr_handle)
		bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle);
	if (sc->mrsas_irq != NULL)
		bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq);
	/* Give back the MSI vector if one was allocated by pci_alloc_1intr(). */
	if (sc->irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(sc->mrsas_dev);
	sc->intr_handle = NULL;
}

/**
 * mrsas_suspend: Suspend entry point
 * input: Device struct pointer
 *
 * This function is the entry point for system suspend from the OS.
 * NOTE(review): currently a stub — no controller quiesce is performed.
 */
static int mrsas_suspend(device_t dev)
{
	struct mrsas_softc *sc;

	sc = device_get_softc(dev);
	return (0);
}

/**
 * mrsas_resume: Resume entry point
 * input: Device struct pointer
 *
 * This function is the entry point for system resume from the OS.
 * NOTE(review): currently a stub — no controller re-initialization is done.
 */
static int mrsas_resume(device_t dev)
{
	struct mrsas_softc *sc;

	sc = device_get_softc(dev);
	return (0);
}

/**
 * mrsas_ioctl: IOCtl commands entry point.
 *
 * This function is the entry point for IOCtls from the OS. It calls the
 * appropriate function for processing depending on the command received.
1115 */ 1116 static int 1117 mrsas_ioctl(struct dev_ioctl_args *ap) 1118 { 1119 cdev_t dev = ap->a_head.a_dev; 1120 u_long cmd = ap->a_cmd; 1121 caddr_t arg = ap->a_data; 1122 struct mrsas_softc *sc; 1123 int ret = 0, i = 0; 1124 1125 sc = (struct mrsas_softc *)(dev->si_drv1); 1126 1127 if (sc->remove_in_progress) { 1128 mrsas_dprint(sc, MRSAS_INFO, 1129 "Driver remove or shutdown called.\n"); 1130 return ENOENT; 1131 } 1132 1133 spin_lock(&sc->ioctl_lock); 1134 if (!sc->reset_in_progress) { 1135 spin_unlock(&sc->ioctl_lock); 1136 goto do_ioctl; 1137 } 1138 1139 /* Release ioclt_lock, and wait for OCR 1140 * to be finished */ 1141 spin_unlock(&sc->ioctl_lock); 1142 while(sc->reset_in_progress){ 1143 i++; 1144 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1145 mrsas_dprint(sc, MRSAS_INFO, 1146 "[%2d]waiting for " 1147 "OCR to be finished %d\n",i, 1148 sc->ocr_thread_active); 1149 } 1150 tsleep(mrsas_ioctl, 0, "mr_ioctl", hz); 1151 } 1152 1153 do_ioctl: 1154 switch (cmd) { 1155 case MRSAS_IOC_FIRMWARE_PASS_THROUGH: 1156 ret = mrsas_passthru(sc, (void *)arg); 1157 break; 1158 case MRSAS_IOC_SCAN_BUS: 1159 ret = mrsas_bus_scan(sc); 1160 break; 1161 } 1162 1163 return (ret); 1164 } 1165 1166 /** 1167 * mrsas_setup_irq: Set up interrupt. 1168 * input: Adapter instance soft state 1169 * 1170 * This function sets up interrupts as a bus resource, with flags indicating 1171 * resource permitting contemporaneous sharing and for resource to activate 1172 * atomically. 
1173 */ 1174 static int mrsas_setup_irq(struct mrsas_softc *sc) 1175 { 1176 u_int irq_flags; 1177 1178 sc->irq_id = 0; 1179 sc->irq_type = pci_alloc_1intr(sc->mrsas_dev, mrsas_msi_enable, 1180 &sc->irq_id, &irq_flags); 1181 1182 sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ, 1183 &sc->irq_id, irq_flags); 1184 if (sc->mrsas_irq == NULL){ 1185 device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n"); 1186 return (FAIL); 1187 } 1188 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE, 1189 mrsas_isr, sc, &sc->intr_handle, NULL)) { 1190 device_printf(sc->mrsas_dev, "Cannot set up interrupt\n"); 1191 return (FAIL); 1192 } 1193 1194 return (0); 1195 } 1196 1197 /* 1198 * mrsas_isr: ISR entry point 1199 * input: argument pointer 1200 * 1201 * This function is the interrupt service routine entry point. There 1202 * are two types of interrupts, state change interrupt and response 1203 * interrupt. If an interrupt is not ours, we just return. 1204 */ 1205 void mrsas_isr(void *arg) 1206 { 1207 struct mrsas_softc *sc = (struct mrsas_softc *)arg; 1208 int status; 1209 1210 /* Clear FW state change interrupt */ 1211 status = mrsas_clear_intr(sc); 1212 1213 /* Not our interrupt */ 1214 if (!status) 1215 return; 1216 1217 /* If we are resetting, bail */ 1218 if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) { 1219 kprintf(" Entered into ISR when OCR is going active. \n"); 1220 mrsas_clear_intr(sc); 1221 return; 1222 } 1223 /* Process for reply request and clear response interrupt */ 1224 if (mrsas_complete_cmd(sc) != SUCCESS) 1225 mrsas_clear_intr(sc); 1226 1227 return; 1228 } 1229 1230 /* 1231 * mrsas_complete_cmd: Process reply request 1232 * input: Adapter instance soft state 1233 * 1234 * This function is called from mrsas_isr() to process reply request and 1235 * clear response interrupt. Processing of the reply request entails 1236 * walking through the reply descriptor array for the command request 1237 * pended from Firmware. 
We look at the Function field to determine
 * the command type and perform the appropriate action. Before we
 * return, we clear the response interrupt.
 */
static int mrsas_complete_cmd(struct mrsas_softc *sc)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t arm, reply_descript_type;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id;
	int threshold_reply_count = 0;


	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/* Resume walking the reply ring where the last pass stopped. */
	desc = sc->reply_desc_mem;
	desc += sc->last_reply_idx;

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/*
	 * Find our reply descriptor for the command and process.
	 * An all-ones descriptor means the slot has not been written
	 * by firmware yet (slots are reset to ~0 below after use).
	 */
	while((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
	{
		/* SMID is 1-based; index into mpt_cmd_list is 0-based. */
		smid = reply_desc->SMID;
		cmd_mpt = sc->mpt_cmd_list[smid -1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function)
		{
		case MPI2_FUNCTION_SCSI_IO_REQUEST :  /*Fast Path IO.*/
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* Undo the RAID1 load-balance accounting for this arm. */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
				atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* FALLTHROUGH: fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
			mrsas_cmd_done(sc, cmd_mpt);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			atomic_dec(&sc->fw_outstanding);
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			cmd_mpt->flags = 0;
			mrsas_release_mpt_cmd(cmd_mpt);
			break;
		}

		sc->last_reply_idx++;
		if (sc->last_reply_idx >= sc->reply_q_depth)
			sc->last_reply_idx = 0;

		desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor (ring wrap handled above) */
		if (!sc->last_reply_idx)
			desc = sc->reply_desc_mem;
		else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply count
		 * and still there are more replies in reply queue pending to be
		 * completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
				sc->last_reply_idx);
			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),sc->last_reply_idx);

	return(0);
}

/*
 * mrsas_map_mpt_cmd_status: Map FW completion status to CAM status.
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
 * It checks the command status and maps the appropriate CAM status for the CCB.
 */
void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	switch (status) {
	case MFI_STAT_OK:
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, cmd->sense, 18);
			cmd->ccb_ptr->csio.sense_len = 18;
			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		if (cmd->ccb_ptr->ccb_h.target_lun)
			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		/*send status to CAM layer to retry sending command without
		 * decrementing retry counter*/
		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		cmd->ccb_ptr->csio.scsi_status = status;
	}
	return;
}

/*
 * mrsas_alloc_mem: Allocate DMAable memory.
 * input: Adapter instance soft state
 *
 * This function creates the parent DMA tag and allocates DMAable memory.
 * DMA tag describes constraints of DMA mapping. Memory allocated is mapped
 * into Kernel virtual address. Callback argument is physical memory address.
 *
 * NOTE(review): on a mid-function ENOMEM the already-created tags/buffers
 * are not unwound here — presumably the caller's failure path invokes
 * mrsas_free_mem(); verify against the attach path.
 */
static int mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
	    chain_frame_size, evt_detail_size;

	/*
	 * Allocate parent DMA tag
	 */
	if (bus_dma_tag_create(NULL,   /* parent */
		1,         /* alignment */
		0,         /* boundary */
		BUS_SPACE_MAXADDR, /* lowaddr */
		BUS_SPACE_MAXADDR, /* highaddr */
		NULL, NULL,        /* filter, filterarg */
		MRSAS_MAX_IO_SIZE,/* maxsize */
		MRSAS_MAX_SGL,     /* nsegments */
		MRSAS_MAX_IO_SIZE,/* maxsegsize */
		0,                 /* flags */
		&sc->mrsas_parent_tag  /* tag */
		)) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return(ENOMEM);
	}

	/*
	 * Allocate for version buffer
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH*(sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
		1, 0,                   // algnmnt, boundary
		BUS_SPACE_MAXADDR_32BIT,// lowaddr
		BUS_SPACE_MAXADDR,      // highaddr
		NULL, NULL,             // filter, filterarg
		verbuf_size,            // maxsize
		1,                      // msegments
		verbuf_size,            // maxsegsize
		BUS_DMA_ALLOCNOW,       // flags
		&sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
		BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
		verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)){
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return(ENOMEM);
	}

	/*
	 * Allocate IO Request Frames
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
		16, 0,                  // algnmnt, boundary
		BUS_SPACE_MAXADDR_32BIT,// lowaddr
		BUS_SPACE_MAXADDR,      // highaddr
		NULL, NULL,             // filter, filterarg
		io_req_size,            // maxsize
		1,                      // msegments
		io_req_size,            // maxsegsize
		BUS_DMA_ALLOCNOW,       // flags
		&sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
		BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
		sc->io_request_mem, io_req_size, mrsas_addr_cb,
		&sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate Chain Frames
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
		4, 0,                   // algnmnt, boundary
		BUS_SPACE_MAXADDR_32BIT,// lowaddr
		BUS_SPACE_MAXADDR,      // highaddr
		NULL, NULL,             // filter, filterarg
		chain_frame_size,       // maxsize
		1,                      // msegments
		chain_frame_size,       // maxsegsize
		BUS_DMA_ALLOCNOW,       // flags
		&sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
		BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
		sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
		&sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate Reply Descriptor Array
	 */
	reply_desc_size = sc->reply_alloc_sz;
	if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
		16, 0,                  // algnmnt, boundary
		BUS_SPACE_MAXADDR_32BIT,// lowaddr
		BUS_SPACE_MAXADDR,      // highaddr
		NULL, NULL,             // filter, filterarg
		reply_desc_size,        // maxsize
		1,                      // msegments
		reply_desc_size,        // maxsegsize
		BUS_DMA_ALLOCNOW,       // flags
		&sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
		BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
		sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
		&sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate Sense Buffer Array.  Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
		64, 0,                  // algnmnt, boundary
		BUS_SPACE_MAXADDR_32BIT,// lowaddr
		BUS_SPACE_MAXADDR,      // highaddr
		NULL, NULL,             // filter, filterarg
		sense_size,             // maxsize
		1,                      // nsegments
		sense_size,             // maxsegsize
		BUS_DMA_ALLOCNOW,       // flags
		&sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
		BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
		sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
		BUS_DMA_NOWAIT)){
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
		1, 0,                   // algnmnt, boundary
		BUS_SPACE_MAXADDR_32BIT,// lowaddr
		BUS_SPACE_MAXADDR,      // highaddr
		NULL, NULL,             // filter, filterarg
		evt_detail_size,        // maxsize
		1,                      // msegments
		evt_detail_size,        // maxsegsize
		BUS_DMA_ALLOCNOW,       // flags
		&sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
		BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
		sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
		&sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}


	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,   // parent
		1,                  // alignment
		0,                  // boundary
		BUS_SPACE_MAXADDR,  // lowaddr
		BUS_SPACE_MAXADDR,  // highaddr
		NULL, NULL,         // filter, filterarg
		MRSAS_MAX_IO_SIZE,  // maxsize
		MRSAS_MAX_SGL,      // nsegments
		MRSAS_MAX_IO_SIZE,  // maxsegsize
		BUS_DMA_ALLOCNOW,   // flags
		&sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return(ENOMEM);
	}

	return(0);
}

/*
 * mrsas_addr_cb: Callback function of bus_dmamap_load()
 * input: callback argument,
 *        machine dependent type that describes DMA segments,
 *        number of segments,
 *        error code.
 *
 * This function is for the driver to receive mapping information resultant
 * of the bus_dmamap_load(). Only the first segment's physical address is
 * recorded (all tags above use a single segment).
 */
void
mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

/*
 * mrsas_setup_raidmap: Set up RAID map.
 * input: Adapter instance soft state
 *
 * Allocate DMA memory for the RAID maps and perform setup.
1655 */ 1656 static int mrsas_setup_raidmap(struct mrsas_softc *sc) 1657 { 1658 sc->map_sz = sizeof(MR_FW_RAID_MAP) + 1659 (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1)); 1660 1661 for (int i=0; i < 2; i++) 1662 { 1663 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent 1664 4, 0, // algnmnt, boundary 1665 BUS_SPACE_MAXADDR_32BIT,// lowaddr 1666 BUS_SPACE_MAXADDR, // highaddr 1667 NULL, NULL, // filter, filterarg 1668 sc->map_sz, // maxsize 1669 1, // nsegments 1670 sc->map_sz, // maxsegsize 1671 BUS_DMA_ALLOCNOW, // flags 1672 &sc->raidmap_tag[i])) { 1673 device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n"); 1674 return (ENOMEM); 1675 } 1676 if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i], 1677 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) { 1678 device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n"); 1679 return (ENOMEM); 1680 } 1681 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i], 1682 sc->raidmap_mem[i], sc->map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i], 1683 BUS_DMA_NOWAIT)){ 1684 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); 1685 return (ENOMEM); 1686 } 1687 if (!sc->raidmap_mem[i]) { 1688 device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n"); 1689 return (ENOMEM); 1690 } 1691 } 1692 1693 if (!mrsas_get_map_info(sc)) 1694 mrsas_sync_map_info(sc); 1695 1696 return (0); 1697 } 1698 1699 /** 1700 * mrsas_init_fw: Initialize Firmware 1701 * input: Adapter soft state 1702 * 1703 * Calls transition_to_ready() to make sure Firmware is in operational 1704 * state and calls mrsas_init_adapter() to send IOC_INIT command to 1705 * Firmware. It issues internal commands to get the controller info 1706 * after the IOC_INIT command response is received by Firmware. 1707 * Note: code relating to get_pdlist, get_ld_list and max_sectors 1708 * are currently not being used, it is left here as placeholder. 
 */
static int mrsas_init_fw(struct mrsas_softc *sc)
{
	u_int32_t max_sectors_1;
	u_int32_t max_sectors_2;
	u_int32_t tmp_sectors;
	struct mrsas_ctrl_info *ctrl_info;

	int ret, ocr = 0;


	/* Make sure Firmware is ready */
	ret = mrsas_transition_to_ready(sc, ocr);
	if (ret != SUCCESS) {
		return(ret);
	}

	/* Get operational params, sge flags, send init cmd to ctlr */
	if (mrsas_init_adapter(sc) != SUCCESS){
		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
		return(1);
	}

	/* Allocate internal commands for pass-thru */
	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
		return(1);
	}

	if (mrsas_setup_raidmap(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
		return(1);
	}

	/* For pass-thru, get PD/LD list and controller info */
	memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
	mrsas_get_pd_list(sc);

	/* 0xff marks an LD slot as unused before the list is fetched. */
	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
	mrsas_get_ld_list(sc);

	//memset(sc->log_to_span, 0, MRSAS_MAX_LD * sizeof(LD_SPAN_INFO));

	/* May be NULL on allocation failure; every use below is guarded. */
	ctrl_info = kmalloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);

	/*
	 * Compute the max allowed sectors per IO: The controller info has two
	 * limits on max sectors. Driver should use the minimum of these two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmwares ( < FW ver 30) didn't report information
	 * to calculate max_sectors_1. So the number ended up as zero always.
	 */
	tmp_sectors = 0;
	if (ctrl_info && !mrsas_get_ctrl_info(sc, ctrl_info)) {
		max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
			ctrl_info->max_strips_per_io;
		max_sectors_2 = ctrl_info->max_request_size;
		tmp_sectors = min(max_sectors_1 , max_sectors_2);
		sc->disableOnlineCtrlReset =
			ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
		sc->UnevenSpanSupport =
			ctrl_info->adapterOperations2.supportUnevenSpans;
		if(sc->UnevenSpanSupport) {
			device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n",
				sc->UnevenSpanSupport);
			/* Fast-path IO only if the RAID map validates. */
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 1;
			else
				sc->fast_path_io = 0;

		}
	}
	/* Driver-side cap derived from SGE capacity (512-byte sectors). */
	sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;

	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
		sc->max_sectors_per_req = tmp_sectors;

	if (ctrl_info)
		kfree(ctrl_info, M_MRSAS);

	return(0);
}

/**
 * mrsas_init_adapter: Initializes the adapter/controller
 * input: Adapter soft state
 *
 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
 * ROC/controller. The FW register is read to determine the number of
 * commands that is supported. All memory allocations for IO is based on
 * max_cmd. Appropriate calculations are performed in this function.
 */
int mrsas_init_adapter(struct mrsas_softc *sc)
{
	uint32_t status;
	u_int32_t max_cmd;
	int ret;

	/* Read FW status register */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

	/* Get operational params from status register */
	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

	/* Decrement the max supported by 1, to correlate with FW */
	sc->max_fw_cmds = sc->max_fw_cmds-1;
	max_cmd = sc->max_fw_cmds;

	/*
	 * Determine allocation size of command frames.
	 * Reply queue depth is 2*max_cmd+1 rounded up to a multiple of 16.
	 */
	sc->reply_q_depth = ((max_cmd *2 +1 +15)/16*16);
	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
	sc->chain_frames_alloc_sz = 1024 * max_cmd;
	/* SGEs that fit in the main frame after the fixed request header. */
	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
		offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;

	sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

	/* Used for pass thru MFI frame (DCMD) */
	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;

	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
		sizeof(MPI2_SGE_IO_UNION))/16;

	sc->last_reply_idx = 0;

	ret = mrsas_alloc_mem(sc);
	if (ret != SUCCESS)
		return(ret);

	ret = mrsas_alloc_mpt_cmds(sc);
	if (ret != SUCCESS)
		return(ret);

	/* Hand the queue/frame layout computed above to firmware. */
	ret = mrsas_ioc_init(sc);
	if (ret != SUCCESS)
		return(ret);


	return(0);
}

/**
 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
 * input: Adapter soft state
 *
 * Allocates for the IOC Init cmd to FW to initialize the
ROC/controller.
 */
int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/*
	 * Allocate IOC INIT command: 1024 bytes for the MFI init frame
	 * followed by the MPI2 IOC INIT request (see mrsas_ioc_init()).
	 */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
		1, 0,                   // algnmnt, boundary
		BUS_SPACE_MAXADDR_32BIT,// lowaddr
		BUS_SPACE_MAXADDR,      // highaddr
		NULL, NULL,             // filter, filterarg
		ioc_init_size,          // maxsize
		1,                      // msegments
		ioc_init_size,          // maxsegsize
		BUS_DMA_ALLOCNOW,       // flags
		&sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
		BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
		sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
		&sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}

	return (0);
}

/**
 * mrsas_free_ioc_cmd: Frees memory of the IOC Init command
 * input: Adapter soft state
 *
 * Deallocates memory of the IOC Init cmd.
 */
void mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
	if (sc->ioc_init_phys_mem)
		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
	if (sc->ioc_init_mem != NULL)
		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
	if (sc->ioc_init_tag != NULL)
		bus_dma_tag_destroy(sc->ioc_init_tag);
}

/**
 * mrsas_ioc_init: Sends IOC Init command to FW
 * input: Adapter soft state
 *
 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
 */
int mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return(1);
	}

	/* MPI2 IOC INIT request lives 1024 bytes into the IOC init buffer. */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;

	/* MFI init frame at offset 0 wraps the MPI2 request above. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	/* 0xFF = "pending"; firmware overwrites this on completion. */
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	if (sc->verbuf_mem) {
		ksnprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
			MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}

	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
		(MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response. While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i=0; i < (max_wait * 1000); i++){
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}

	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
			"IOC INIT response received from FW.\n");
	else
	{
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	mrsas_free_ioc_cmd(sc);
	return (retcode);
}

/**
 * mrsas_alloc_mpt_cmds: Allocates the command packets
 * input: Adapter instance soft state
 *
 * This function allocates the internal commands for IOs. Each command that is
 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
 * An array is allocated with mrsas_mpt_cmd context. The free commands are
 * maintained in a linked list (cmd pool). SMID value range is from 1 to
 * max_fw_cmds.
2008 */ 2009 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc) 2010 { 2011 int i, j; 2012 u_int32_t max_cmd; 2013 struct mrsas_mpt_cmd *cmd; 2014 pMpi2ReplyDescriptorsUnion_t reply_desc; 2015 u_int32_t offset, chain_offset, sense_offset; 2016 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys; 2017 u_int8_t *io_req_base, *chain_frame_base, *sense_base; 2018 2019 max_cmd = sc->max_fw_cmds; 2020 2021 sc->req_desc = kmalloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT); 2022 if (!sc->req_desc) { 2023 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n"); 2024 return(ENOMEM); 2025 } 2026 memset(sc->req_desc, 0, sc->request_alloc_sz); 2027 2028 /* 2029 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the 2030 * dynamic array first and then allocate individual commands. 2031 */ 2032 sc->mpt_cmd_list = kmalloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT); 2033 if (!sc->mpt_cmd_list) { 2034 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n"); 2035 return(ENOMEM); 2036 } 2037 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd); 2038 for (i = 0; i < max_cmd; i++) { 2039 sc->mpt_cmd_list[i] = kmalloc(sizeof(struct mrsas_mpt_cmd), 2040 M_MRSAS, M_NOWAIT); 2041 if (!sc->mpt_cmd_list[i]) { 2042 for (j = 0; j < i; j++) 2043 kfree(sc->mpt_cmd_list[j],M_MRSAS); 2044 kfree(sc->mpt_cmd_list, M_MRSAS); 2045 sc->mpt_cmd_list = NULL; 2046 return(ENOMEM); 2047 } 2048 } 2049 2050 io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 2051 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 2052 chain_frame_base = (u_int8_t*)sc->chain_frame_mem; 2053 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr; 2054 sense_base = (u_int8_t*)sc->sense_mem; 2055 sense_base_phys = (bus_addr_t)sc->sense_phys_addr; 2056 for (i = 0; i < max_cmd; i++) { 2057 cmd = sc->mpt_cmd_list[i]; 2058 offset = 
MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; 2059 chain_offset = 1024 * i; 2060 sense_offset = MRSAS_SENSE_LEN * i; 2061 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd)); 2062 cmd->index = i + 1; 2063 cmd->ccb_ptr = NULL; 2064 callout_init(&cmd->cm_callout); 2065 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; 2066 cmd->sc = sc; 2067 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset); 2068 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST)); 2069 cmd->io_request_phys_addr = io_req_base_phys + offset; 2070 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset); 2071 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset; 2072 cmd->sense = sense_base + sense_offset; 2073 cmd->sense_phys_addr = sense_base_phys + sense_offset; 2074 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) { 2075 return(FAIL); 2076 } 2077 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next); 2078 } 2079 2080 /* Initialize reply descriptor array to 0xFFFFFFFF */ 2081 reply_desc = sc->reply_desc_mem; 2082 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) { 2083 reply_desc->Words = MRSAS_ULONG_MAX; 2084 } 2085 return(0); 2086 } 2087 2088 /** 2089 * mrsas_fire_cmd: Sends command to FW 2090 * input: Adapter soft state 2091 * request descriptor address low 2092 * request descriptor address high 2093 * 2094 * This functions fires the command to Firmware by writing to the 2095 * inbound_low_queue_port and inbound_high_queue_port. 
2096 */ 2097 void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, 2098 u_int32_t req_desc_hi) 2099 { 2100 lockmgr(&sc->pci_lock, LK_EXCLUSIVE); 2101 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port), 2102 req_desc_lo); 2103 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port), 2104 req_desc_hi); 2105 lockmgr(&sc->pci_lock, LK_RELEASE); 2106 } 2107 2108 /** 2109 * mrsas_transition_to_ready: Move FW to Ready state 2110 * input: Adapter instance soft state 2111 * 2112 * During the initialization, FW passes can potentially be in any one of 2113 * several possible states. If the FW in operational, waiting-for-handshake 2114 * states, driver must take steps to bring it to ready state. Otherwise, it 2115 * has to wait for the ready state. 2116 */ 2117 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr) 2118 { 2119 int i; 2120 u_int8_t max_wait; 2121 u_int32_t val, fw_state; 2122 u_int32_t cur_state; 2123 u_int32_t abs_state, curr_abs_state; 2124 2125 val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); 2126 fw_state = val & MFI_STATE_MASK; 2127 max_wait = MRSAS_RESET_WAIT_TIME; 2128 2129 if (fw_state != MFI_STATE_READY) 2130 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n"); 2131 2132 while (fw_state != MFI_STATE_READY) { 2133 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); 2134 switch (fw_state) { 2135 case MFI_STATE_FAULT: 2136 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n"); 2137 if (ocr) { 2138 cur_state = MFI_STATE_FAULT; 2139 break; 2140 } 2141 else 2142 return -ENODEV; 2143 case MFI_STATE_WAIT_HANDSHAKE: 2144 /* Set the CLR bit in inbound doorbell */ 2145 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), 2146 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG); 2147 cur_state = MFI_STATE_WAIT_HANDSHAKE; 2148 break; 2149 case MFI_STATE_BOOT_MESSAGE_PENDING: 2150 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), 2151 
MFI_INIT_HOTPLUG); 2152 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 2153 break; 2154 case MFI_STATE_OPERATIONAL: 2155 /* Bring it to READY state; assuming max wait 10 secs */ 2156 mrsas_disable_intr(sc); 2157 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS); 2158 for (i=0; i < max_wait * 1000; i++) { 2159 if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1) 2160 DELAY(1000); 2161 else 2162 break; 2163 } 2164 cur_state = MFI_STATE_OPERATIONAL; 2165 break; 2166 case MFI_STATE_UNDEFINED: 2167 /* This state should not last for more than 2 seconds */ 2168 cur_state = MFI_STATE_UNDEFINED; 2169 break; 2170 case MFI_STATE_BB_INIT: 2171 cur_state = MFI_STATE_BB_INIT; 2172 break; 2173 case MFI_STATE_FW_INIT: 2174 cur_state = MFI_STATE_FW_INIT; 2175 break; 2176 case MFI_STATE_FW_INIT_2: 2177 cur_state = MFI_STATE_FW_INIT_2; 2178 break; 2179 case MFI_STATE_DEVICE_SCAN: 2180 cur_state = MFI_STATE_DEVICE_SCAN; 2181 break; 2182 case MFI_STATE_FLUSH_CACHE: 2183 cur_state = MFI_STATE_FLUSH_CACHE; 2184 break; 2185 default: 2186 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state); 2187 return -ENODEV; 2188 } 2189 2190 /* 2191 * The cur_state should not last for more than max_wait secs 2192 */ 2193 for (i = 0; i < (max_wait * 1000); i++) { 2194 fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2195 outbound_scratch_pad))& MFI_STATE_MASK); 2196 curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2197 outbound_scratch_pad)); 2198 if (abs_state == curr_abs_state) 2199 DELAY(1000); 2200 else 2201 break; 2202 } 2203 2204 /* 2205 * Return error if fw_state hasn't changed after max_wait 2206 */ 2207 if (curr_abs_state == abs_state) { 2208 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed " 2209 "in %d secs\n", fw_state, max_wait); 2210 return -ENODEV; 2211 } 2212 } 2213 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n"); 2214 //device_printf(sc->mrsas_dev, "FW now in Ready state\n");del? 
2215 return 0; 2216 } 2217 2218 /** 2219 * mrsas_get_mfi_cmd: Get a cmd from free command pool 2220 * input: Adapter soft state 2221 * 2222 * This function removes an MFI command from the command list. 2223 */ 2224 struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc) 2225 { 2226 struct mrsas_mfi_cmd *cmd = NULL; 2227 2228 lockmgr(&sc->mfi_cmd_pool_lock, LK_EXCLUSIVE); 2229 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){ 2230 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head); 2231 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next); 2232 } 2233 lockmgr(&sc->mfi_cmd_pool_lock, LK_RELEASE); 2234 2235 return cmd; 2236 } 2237 2238 /** 2239 * mrsas_ocr_thread Thread to handle OCR/Kill Adapter. 2240 * input: Adapter Context. 2241 * 2242 * This function will check FW status register and flag 2243 * do_timeout_reset flag. It will do OCR/Kill adapter if 2244 * FW is in fault state or IO timed out has trigger reset. 2245 */ 2246 static void 2247 mrsas_ocr_thread(void *arg) 2248 { 2249 struct mrsas_softc *sc; 2250 u_int32_t fw_status, fw_state; 2251 2252 sc = (struct mrsas_softc *)arg; 2253 2254 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__); 2255 2256 sc->ocr_thread_active = 1; 2257 lockmgr(&sc->sim_lock, LK_EXCLUSIVE); 2258 for (;;) { 2259 /* Sleep for 1 second and check the queue status*/ 2260 lksleep(&sc->ocr_chan, &sc->sim_lock, 0, 2261 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz); 2262 if (sc->remove_in_progress) { 2263 mrsas_dprint(sc, MRSAS_OCR, 2264 "Exit due to shutdown from %s\n", __func__); 2265 break; 2266 } 2267 fw_status = mrsas_read_reg(sc, 2268 offsetof(mrsas_reg_set, outbound_scratch_pad)); 2269 fw_state = fw_status & MFI_STATE_MASK; 2270 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) { 2271 device_printf(sc->mrsas_dev, "OCR started due to %s!\n", 2272 sc->do_timedout_reset?"IO Timeout": 2273 "FW fault detected"); 2274 spin_lock(&sc->ioctl_lock); 2275 sc->reset_in_progress = 1; 2276 sc->reset_count++; 2277 
spin_unlock(&sc->ioctl_lock); 2278 mrsas_xpt_freeze(sc); 2279 mrsas_reset_ctrl(sc); 2280 mrsas_xpt_release(sc); 2281 sc->reset_in_progress = 0; 2282 sc->do_timedout_reset = 0; 2283 } 2284 } 2285 lockmgr(&sc->sim_lock, LK_RELEASE); 2286 sc->ocr_thread_active = 0; 2287 kthread_exit(); 2288 } 2289 2290 /** 2291 * mrsas_reset_reply_desc Reset Reply descriptor as part of OCR. 2292 * input: Adapter Context. 2293 * 2294 * This function will clear reply descriptor so that post OCR 2295 * driver and FW will lost old history. 2296 */ 2297 void mrsas_reset_reply_desc(struct mrsas_softc *sc) 2298 { 2299 int i; 2300 pMpi2ReplyDescriptorsUnion_t reply_desc; 2301 2302 sc->last_reply_idx = 0; 2303 reply_desc = sc->reply_desc_mem; 2304 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) { 2305 reply_desc->Words = MRSAS_ULONG_MAX; 2306 } 2307 } 2308 2309 /** 2310 * mrsas_reset_ctrl Core function to OCR/Kill adapter. 2311 * input: Adapter Context. 2312 * 2313 * This function will run from thread context so that it can sleep. 2314 * 1. Do not handle OCR if FW is in HW critical error. 2315 * 2. Wait for outstanding command to complete for 180 seconds. 2316 * 3. If #2 does not find any outstanding command Controller is in working 2317 * state, so skip OCR. 2318 * Otherwise, do OCR/kill Adapter based on flag disableOnlineCtrlReset. 2319 * 4. Start of the OCR, return all SCSI command back to CAM layer which has 2320 * ccb_ptr. 2321 * 5. Post OCR, Re-fire Managment command and move Controller to Operation 2322 * state. 
2323 */ 2324 int mrsas_reset_ctrl(struct mrsas_softc *sc) 2325 { 2326 int retval = SUCCESS, i, j, retry = 0; 2327 u_int32_t host_diag, abs_state, status_reg, reset_adapter; 2328 union ccb *ccb; 2329 struct mrsas_mfi_cmd *mfi_cmd; 2330 struct mrsas_mpt_cmd *mpt_cmd; 2331 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2332 2333 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { 2334 device_printf(sc->mrsas_dev, 2335 "mrsas: Hardware critical error, returning FAIL.\n"); 2336 return FAIL; 2337 } 2338 2339 set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 2340 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT; 2341 mrsas_disable_intr(sc); 2342 DELAY(1000 * 1000); 2343 2344 /* First try waiting for commands to complete */ 2345 if (mrsas_wait_for_outstanding(sc)) { 2346 mrsas_dprint(sc, MRSAS_OCR, 2347 "resetting adapter from %s.\n", 2348 __func__); 2349 /* Now return commands back to the CAM layer */ 2350 for (i = 0 ; i < sc->max_fw_cmds; i++) { 2351 mpt_cmd = sc->mpt_cmd_list[i]; 2352 if (mpt_cmd->ccb_ptr) { 2353 ccb = (union ccb *)(mpt_cmd->ccb_ptr); 2354 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 2355 mrsas_cmd_done(sc, mpt_cmd); 2356 atomic_dec(&sc->fw_outstanding); 2357 } 2358 } 2359 2360 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2361 outbound_scratch_pad)); 2362 abs_state = status_reg & MFI_STATE_MASK; 2363 reset_adapter = status_reg & MFI_RESET_ADAPTER; 2364 if (sc->disableOnlineCtrlReset || 2365 (abs_state == MFI_STATE_FAULT && !reset_adapter)) { 2366 /* Reset not supported, kill adapter */ 2367 mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n"); 2368 mrsas_kill_hba(sc); 2369 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR; 2370 retval = FAIL; 2371 goto out; 2372 } 2373 2374 /* Now try to reset the chip */ 2375 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) { 2376 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2377 MPI2_WRSEQ_FLUSH_KEY_VALUE); 2378 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2379 
MPI2_WRSEQ_1ST_KEY_VALUE); 2380 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2381 MPI2_WRSEQ_2ND_KEY_VALUE); 2382 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2383 MPI2_WRSEQ_3RD_KEY_VALUE); 2384 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2385 MPI2_WRSEQ_4TH_KEY_VALUE); 2386 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2387 MPI2_WRSEQ_5TH_KEY_VALUE); 2388 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2389 MPI2_WRSEQ_6TH_KEY_VALUE); 2390 2391 /* Check that the diag write enable (DRWE) bit is on */ 2392 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2393 fusion_host_diag)); 2394 retry = 0; 2395 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 2396 DELAY(100 * 1000); 2397 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2398 fusion_host_diag)); 2399 if (retry++ == 100) { 2400 mrsas_dprint(sc, MRSAS_OCR, 2401 "Host diag unlock failed!\n"); 2402 break; 2403 } 2404 } 2405 if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) 2406 continue; 2407 2408 /* Send chip reset command */ 2409 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag), 2410 host_diag | HOST_DIAG_RESET_ADAPTER); 2411 DELAY(3000 * 1000); 2412 2413 /* Make sure reset adapter bit is cleared */ 2414 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2415 fusion_host_diag)); 2416 retry = 0; 2417 while (host_diag & HOST_DIAG_RESET_ADAPTER) { 2418 DELAY(100 * 1000); 2419 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2420 fusion_host_diag)); 2421 if (retry++ == 1000) { 2422 mrsas_dprint(sc, MRSAS_OCR, 2423 "Diag reset adapter never cleared!\n"); 2424 break; 2425 } 2426 } 2427 if (host_diag & HOST_DIAG_RESET_ADAPTER) 2428 continue; 2429 2430 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2431 outbound_scratch_pad)) & MFI_STATE_MASK; 2432 retry = 0; 2433 2434 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { 2435 DELAY(100 * 1000); 2436 abs_state = mrsas_read_reg(sc, 
offsetof(mrsas_reg_set, 2437 outbound_scratch_pad)) & MFI_STATE_MASK; 2438 } 2439 if (abs_state <= MFI_STATE_FW_INIT) { 2440 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT," 2441 " state = 0x%x\n", abs_state); 2442 continue; 2443 } 2444 2445 /* Wait for FW to become ready */ 2446 if (mrsas_transition_to_ready(sc, 1)) { 2447 mrsas_dprint(sc, MRSAS_OCR, 2448 "mrsas: Failed to transition controller to ready.\n"); 2449 continue; 2450 } 2451 2452 mrsas_reset_reply_desc(sc); 2453 if (mrsas_ioc_init(sc)) { 2454 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n"); 2455 continue; 2456 } 2457 2458 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 2459 mrsas_enable_intr(sc); 2460 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 2461 2462 /* Re-fire management commands */ 2463 for (j = 0 ; j < sc->max_fw_cmds; j++) { 2464 mpt_cmd = sc->mpt_cmd_list[j]; 2465 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { 2466 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx]; 2467 if (mfi_cmd->frame->dcmd.opcode == 2468 MR_DCMD_LD_MAP_GET_INFO) { 2469 mrsas_release_mfi_cmd(mfi_cmd); 2470 mrsas_release_mpt_cmd(mpt_cmd); 2471 } else { 2472 req_desc = mrsas_get_request_desc(sc, 2473 mfi_cmd->cmd_id.context.smid - 1); 2474 mrsas_dprint(sc, MRSAS_OCR, 2475 "Re-fire command DCMD opcode 0x%x index %d\n ", 2476 mfi_cmd->frame->dcmd.opcode, j); 2477 if (!req_desc) 2478 device_printf(sc->mrsas_dev, 2479 "Cannot build MPT cmd.\n"); 2480 else 2481 mrsas_fire_cmd(sc, req_desc->addr.u.low, 2482 req_desc->addr.u.high); 2483 } 2484 } 2485 } 2486 2487 /* Reset load balance info */ 2488 memset(sc->load_balance_info, 0, 2489 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES); 2490 2491 if (!mrsas_get_map_info(sc)) 2492 mrsas_sync_map_info(sc); 2493 2494 /* Adapter reset completed successfully */ 2495 device_printf(sc->mrsas_dev, "Reset successful\n"); 2496 retval = SUCCESS; 2497 goto out; 2498 } 2499 /* Reset failed, kill the adapter */ 2500 device_printf(sc->mrsas_dev, "Reset failed, 
killing adapter.\n"); 2501 mrsas_kill_hba(sc); 2502 retval = FAIL; 2503 } else { 2504 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 2505 mrsas_enable_intr(sc); 2506 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 2507 } 2508 out: 2509 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 2510 mrsas_dprint(sc, MRSAS_OCR, 2511 "Reset Exit with %d.\n", retval); 2512 return retval; 2513 } 2514 2515 /** 2516 * mrsas_kill_hba Kill HBA when OCR is not supported. 2517 * input: Adapter Context. 2518 * 2519 * This function will kill HBA when OCR is not supported. 2520 */ 2521 void mrsas_kill_hba (struct mrsas_softc *sc) 2522 { 2523 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__); 2524 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), 2525 MFI_STOP_ADP); 2526 /* Flush */ 2527 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)); 2528 } 2529 2530 /** 2531 * mrsas_wait_for_outstanding Wait for outstanding commands 2532 * input: Adapter Context. 2533 * 2534 * This function will wait for 180 seconds for outstanding 2535 * commands to be completed. 
2536 */ 2537 int mrsas_wait_for_outstanding(struct mrsas_softc *sc) 2538 { 2539 int i, outstanding, retval = 0; 2540 u_int32_t fw_state; 2541 2542 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) { 2543 if (sc->remove_in_progress) { 2544 mrsas_dprint(sc, MRSAS_OCR, 2545 "Driver remove or shutdown called.\n"); 2546 retval = 1; 2547 goto out; 2548 } 2549 /* Check if firmware is in fault state */ 2550 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2551 outbound_scratch_pad)) & MFI_STATE_MASK; 2552 if (fw_state == MFI_STATE_FAULT) { 2553 mrsas_dprint(sc, MRSAS_OCR, 2554 "Found FW in FAULT state, will reset adapter.\n"); 2555 retval = 1; 2556 goto out; 2557 } 2558 outstanding = atomic_read(&sc->fw_outstanding); 2559 if (!outstanding) 2560 goto out; 2561 2562 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 2563 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d " 2564 "commands to complete\n",i,outstanding); 2565 mrsas_complete_cmd(sc); 2566 } 2567 DELAY(1000 * 1000); 2568 } 2569 2570 if (atomic_read(&sc->fw_outstanding)) { 2571 mrsas_dprint(sc, MRSAS_OCR, 2572 " pending commands remain after waiting," 2573 " will reset adapter.\n"); 2574 retval = 1; 2575 } 2576 out: 2577 return retval; 2578 } 2579 2580 /** 2581 * mrsas_release_mfi_cmd: Return a cmd to free command pool 2582 * input: Command packet for return to free cmd pool 2583 * 2584 * This function returns the MFI command to the command list. 
2585 */ 2586 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd) 2587 { 2588 struct mrsas_softc *sc = cmd->sc; 2589 2590 lockmgr(&sc->mfi_cmd_pool_lock, LK_EXCLUSIVE); 2591 cmd->ccb_ptr = NULL; 2592 cmd->cmd_id.frame_count = 0; 2593 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next); 2594 lockmgr(&sc->mfi_cmd_pool_lock, LK_RELEASE); 2595 2596 return; 2597 } 2598 2599 /** 2600 * mrsas_get_controller_info - Returns FW's controller structure 2601 * input: Adapter soft state 2602 * Controller information structure 2603 * 2604 * Issues an internal command (DCMD) to get the FW's controller structure. 2605 * This information is mainly used to find out the maximum IO transfer per 2606 * command supported by the FW. 2607 */ 2608 static int mrsas_get_ctrl_info(struct mrsas_softc *sc, 2609 struct mrsas_ctrl_info *ctrl_info) 2610 { 2611 int retcode = 0; 2612 struct mrsas_mfi_cmd *cmd; 2613 struct mrsas_dcmd_frame *dcmd; 2614 2615 cmd = mrsas_get_mfi_cmd(sc); 2616 2617 if (!cmd) { 2618 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); 2619 return -ENOMEM; 2620 } 2621 dcmd = &cmd->frame->dcmd; 2622 2623 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) { 2624 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n"); 2625 mrsas_release_mfi_cmd(cmd); 2626 return -ENOMEM; 2627 } 2628 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2629 2630 dcmd->cmd = MFI_CMD_DCMD; 2631 dcmd->cmd_status = 0xFF; 2632 dcmd->sge_count = 1; 2633 dcmd->flags = MFI_FRAME_DIR_READ; 2634 dcmd->timeout = 0; 2635 dcmd->pad_0 = 0; 2636 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info); 2637 dcmd->opcode = MR_DCMD_CTRL_GET_INFO; 2638 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr; 2639 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info); 2640 2641 if (!mrsas_issue_polled(sc, cmd)) 2642 memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); 2643 else 2644 retcode = 1; 2645 2646 mrsas_free_ctlr_info_cmd(sc); 2647 mrsas_release_mfi_cmd(cmd); 2648 
return(retcode); 2649 } 2650 2651 /** 2652 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command 2653 * input: Adapter soft state 2654 * 2655 * Allocates DMAable memory for the controller info internal command. 2656 */ 2657 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc) 2658 { 2659 int ctlr_info_size; 2660 2661 /* Allocate get controller info command */ 2662 ctlr_info_size = sizeof(struct mrsas_ctrl_info); 2663 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent 2664 1, 0, // algnmnt, boundary 2665 BUS_SPACE_MAXADDR_32BIT,// lowaddr 2666 BUS_SPACE_MAXADDR, // highaddr 2667 NULL, NULL, // filter, filterarg 2668 ctlr_info_size, // maxsize 2669 1, // msegments 2670 ctlr_info_size, // maxsegsize 2671 BUS_DMA_ALLOCNOW, // flags 2672 &sc->ctlr_info_tag)) { 2673 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n"); 2674 return (ENOMEM); 2675 } 2676 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem, 2677 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) { 2678 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n"); 2679 return (ENOMEM); 2680 } 2681 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap, 2682 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb, 2683 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) { 2684 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n"); 2685 return (ENOMEM); 2686 } 2687 2688 memset(sc->ctlr_info_mem, 0, ctlr_info_size); 2689 return (0); 2690 } 2691 2692 /** 2693 * mrsas_free_ctlr_info_cmd: Free memory for controller info command 2694 * input: Adapter soft state 2695 * 2696 * Deallocates memory of the get controller info cmd. 
2697 */ 2698 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc) 2699 { 2700 if (sc->ctlr_info_phys_addr) 2701 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap); 2702 if (sc->ctlr_info_mem != NULL) 2703 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap); 2704 if (sc->ctlr_info_tag != NULL) 2705 bus_dma_tag_destroy(sc->ctlr_info_tag); 2706 } 2707 2708 /** 2709 * mrsas_issue_polled: Issues a polling command 2710 * inputs: Adapter soft state 2711 * Command packet to be issued 2712 * 2713 * This function is for posting of internal commands to Firmware. MFI 2714 * requires the cmd_status to be set to 0xFF before posting. The maximun 2715 * wait time of the poll response timer is 180 seconds. 2716 */ 2717 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 2718 { 2719 struct mrsas_header *frame_hdr = &cmd->frame->hdr; 2720 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 2721 int i, retcode = 0; 2722 2723 frame_hdr->cmd_status = 0xFF; 2724 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 2725 2726 /* Issue the frame using inbound queue port */ 2727 if (mrsas_issue_dcmd(sc, cmd)) { 2728 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); 2729 return(1); 2730 } 2731 2732 /* 2733 * Poll response timer to wait for Firmware response. While this 2734 * timer with the DELAY call could block CPU, the time interval for 2735 * this is only 1 millisecond. 
2736 */ 2737 if (frame_hdr->cmd_status == 0xFF) { 2738 for (i=0; i < (max_wait * 1000); i++){ 2739 if (frame_hdr->cmd_status == 0xFF) 2740 DELAY(1000); 2741 else 2742 break; 2743 } 2744 } 2745 if (frame_hdr->cmd_status != 0) 2746 { 2747 if (frame_hdr->cmd_status == 0xFF) 2748 device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait); 2749 else 2750 device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status); 2751 retcode = 1; 2752 } 2753 return(retcode); 2754 } 2755 2756 /** 2757 * mrsas_issue_dcmd - Issues a MFI Pass thru cmd 2758 * input: Adapter soft state 2759 * mfi cmd pointer 2760 * 2761 * This function is called by mrsas_issued_blocked_cmd() and 2762 * mrsas_issued_polled(), to build the MPT command and then fire the 2763 * command to Firmware. 2764 */ 2765 int 2766 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 2767 { 2768 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2769 2770 req_desc = mrsas_build_mpt_cmd(sc, cmd); 2771 if (!req_desc) { 2772 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); 2773 return(1); 2774 } 2775 2776 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); 2777 2778 return(0); 2779 } 2780 2781 /** 2782 * mrsas_build_mpt_cmd - Calls helper function to build Passthru cmd 2783 * input: Adapter soft state 2784 * mfi cmd to build 2785 * 2786 * This function is called by mrsas_issue_cmd() to build the MPT-MFI 2787 * passthru command and prepares the MPT command to send to Firmware. 
2788 */ 2789 MRSAS_REQUEST_DESCRIPTOR_UNION * 2790 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 2791 { 2792 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2793 u_int16_t index; 2794 2795 if (mrsas_build_mptmfi_passthru(sc, cmd)) { 2796 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n"); 2797 return NULL; 2798 } 2799 2800 index = cmd->cmd_id.context.smid; 2801 2802 req_desc = mrsas_get_request_desc(sc, index-1); 2803 if(!req_desc) 2804 return NULL; 2805 2806 req_desc->addr.Words = 0; 2807 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2808 2809 req_desc->SCSIIO.SMID = index; 2810 2811 return(req_desc); 2812 } 2813 2814 /** 2815 * mrsas_build_mptmfi_passthru - Builds a MPT MFI Passthru command 2816 * input: Adapter soft state 2817 * mfi cmd pointer 2818 * 2819 * The MPT command and the io_request are setup as a passthru command. 2820 * The SGE chain address is set to frame_phys_addr of the MFI command. 2821 */ 2822 u_int8_t 2823 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd) 2824 { 2825 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; 2826 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req; 2827 struct mrsas_mpt_cmd *mpt_cmd; 2828 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr; 2829 2830 mpt_cmd = mrsas_get_mpt_cmd(sc); 2831 if (!mpt_cmd) 2832 return(1); 2833 2834 /* Save the smid. To be used for returning the cmd */ 2835 mfi_cmd->cmd_id.context.smid = mpt_cmd->index; 2836 2837 mpt_cmd->sync_cmd_idx = mfi_cmd->index; 2838 2839 /* 2840 * For cmds where the flag is set, store the flag and check 2841 * on completion. For cmds with this flag, don't call 2842 * mrsas_complete_cmd. 
 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
		/*
		 * Invader/Fury controllers: clear the flags of the last SGE
		 * in the main message frame before chaining.
		 */
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}

	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* The chain element points at the MFI frame carrying the MFI command. */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;

	return(0);
}

/**
 * mrsas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
 * input:	Adapter soft state
 *		Command to be issued
 *
 * This function waits on an event for the command to be returned
 * from the ISR.  Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.
 * Used for issuing internal and ioctl commands.
 * Returns 0 on completion, 1 on issue failure or timeout.
 */
int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = 0;

	/*
	 * Initialize cmd_status: ECONNREFUSED is used as the "still pending"
	 * sentinel; the completion path overwrites it.
	 */
	cmd->cmd_status = ECONNREFUSED;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)){
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return(1);
	}

	/*
	 * NOTE(review): this stores the address of the local 'cmd' variable,
	 * but the sleep channel actually used below is &sc->chan itself and
	 * the stored value is never dereferenced — confirm before relying on
	 * sc->chan's contents anywhere.
	 */
	sc->chan = (void*)&cmd;

	/* The following is for debug only... */
	//device_printf(sc->mrsas_dev,"DCMD issued to FW, about to sleep-wait...\n");
	//device_printf(sc->mrsas_dev,"sc->chan = %p\n", sc->chan);

	/* Sleep in 1-second (hz) slices until completion or timeout. */
	while (1) {
		if (cmd->cmd_status == ECONNREFUSED){
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		}
		else
			break;
		total_time++;
		if (total_time >= max_wait) {
			device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
			retcode = 1;
			break;
		}
	}
	return(retcode);
}

/**
 * mrsas_complete_mptmfi_passthru - Completes a command
 * input:	sc: Adapter soft state
 *		cmd: Command to be completed
 *		status: cmd completion status
 *
 * This function is called from mrsas_complete_cmd() after an interrupt
 * is received from Firmware, and io_request->Function is
 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
 */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
	u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH — non-ioctl SCSI cmds share the DCMD path below */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			sc->fast_path_io = 0;
			lockmgr(&sc->raidmap_lock, LK_EXCLUSIVE);
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
				else {
					mrsas_release_mfi_cmd(cmd);
					lockmgr(&sc->raidmap_lock, LK_RELEASE);
					break;
				}
			}
			else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			/* Fast path stays off unless the new map validates. */
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Re-arm a pending map-sync DCMD for the next update. */
			mrsas_sync_map_info(sc);
			lockmgr(&sc->raidmap_lock, LK_RELEASE);
			break;
		}
#if 0 //currently not supporting event handling, so commenting out
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
			cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			mrsas_poll_wait_aen = 0;
		}
#endif
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}

/**
 * mrsas_wakeup - Completes an internal command
 * input:	Adapter soft state
 *		Command to be completed
 *
 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware,
 * a wait timer is started.  This function is called from
 * mrsas_complete_mptmfi_passthru() as it completes the command,
 * to wake up from the command wait.
 */
void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	/*
	 * ECONNREFUSED is the "still pending" sentinel set by
	 * mrsas_issue_blocked_cmd(); normalize it to success here.
	 */
	if (cmd->cmd_status == ECONNREFUSED)
		cmd->cmd_status = 0;

	/* For debug only ... */
	//device_printf(sc->mrsas_dev,"DCMD rec'd for wakeup, sc->chan=%p\n", sc->chan);

	/*
	 * NOTE(review): stores the address of the local 'cmd'; the wakeup
	 * channel actually used is &sc->chan itself, matching the tsleep()
	 * in mrsas_issue_blocked_cmd().
	 */
	sc->chan = (void*)&cmd;
	wakeup_one((void *)&sc->chan);
	return;
}

/**
 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller
 * input:	Adapter soft state
 *		Shutdown/Hibernate opcode
 *
 * This function issues a DCMD internal command to Firmware to initiate
 * shutdown of the controller.  Outstanding AEN and map-update commands
 * are aborted first so no completion arrives after the shutdown.
 */
static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	/* Nothing to do if the adapter is already dead. */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n");
		return;
	}

	if (sc->aen_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);

	if (sc->map_update_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);

	/* Build a zero-length DCMD carrying only the shutdown opcode. */
	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = opcode;

	device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n");

	mrsas_issue_blocked_cmd(sc, cmd);
	mrsas_release_mfi_cmd(cmd);

	return;
}

/**
 * mrsas_flush_cache: Requests FW to flush all its caches
 * input:	Adapter soft state
 *
 * This function issues a DCMD internal command to Firmware to initiate
 * flushing of all caches (controller and disk).
 */
static void mrsas_flush_cache(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n");
		return;
	}

	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
	/* Flush both the controller cache and the on-disk caches. */
	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;

	mrsas_issue_blocked_cmd(sc, cmd);
	mrsas_release_mfi_cmd(cmd);

	return;
}

/**
 * mrsas_get_map_info: Load and validate RAID map
 * input:	Adapter instance soft state
 *
 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo()
 * to load and validate RAID map.  It returns 0 if successful, 1 other-
 * wise.  Fast-path I/O is enabled only when the map validates.
 */
static int mrsas_get_map_info(struct mrsas_softc *sc)
{
	uint8_t retcode = 0;

	sc->fast_path_io = 0;
	if (!mrsas_get_ld_map_info(sc)) {
		retcode = MR_ValidateMapInfo(sc);
		if (retcode == 0) {
			sc->fast_path_io = 1;
			return 0;
		}
	}
	return 1;
}

/**
 * mrsas_get_ld_map_info: Get FW's ld_map structure
 * input:	Adapter instance soft state
 *
 * Issues an internal polled command (DCMD) to read the FW's LD RAID map
 * into the raidmap buffer selected by the current map_id.
3203 */ 3204 static int mrsas_sync_map_info(struct mrsas_softc *sc) 3205 { 3206 int retcode = 0, i; 3207 struct mrsas_mfi_cmd *cmd; 3208 struct mrsas_dcmd_frame *dcmd; 3209 uint32_t size_sync_info, num_lds; 3210 MR_LD_TARGET_SYNC *target_map = NULL; 3211 MR_FW_RAID_MAP_ALL *map; 3212 MR_LD_RAID *raid; 3213 MR_LD_TARGET_SYNC *ld_sync; 3214 bus_addr_t map_phys_addr = 0; 3215 3216 cmd = mrsas_get_mfi_cmd(sc); 3217 if (!cmd) { 3218 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n"); 3219 return 1; 3220 } 3221 3222 map = sc->raidmap_mem[sc->map_id & 1]; 3223 num_lds = map->raidMap.ldCount; 3224 3225 dcmd = &cmd->frame->dcmd; 3226 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds; 3227 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3228 3229 target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1]; 3230 memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL)); 3231 3232 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1]; 3233 3234 ld_sync = (MR_LD_TARGET_SYNC *)target_map; 3235 3236 for (i = 0; i < num_lds; i++, ld_sync++) { 3237 raid = MR_LdRaidGet(i, map); 3238 ld_sync->targetId = MR_GetLDTgtId(i, map); 3239 ld_sync->seqNum = raid->seqNum; 3240 } 3241 3242 dcmd->cmd = MFI_CMD_DCMD; 3243 dcmd->cmd_status = 0xFF; 3244 dcmd->sge_count = 1; 3245 dcmd->flags = MFI_FRAME_DIR_WRITE; 3246 dcmd->timeout = 0; 3247 dcmd->pad_0 = 0; 3248 dcmd->data_xfer_len = sc->map_sz; 3249 dcmd->mbox.b[0] = num_lds; 3250 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG; 3251 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 3252 dcmd->sgl.sge32[0].phys_addr = map_phys_addr; 3253 dcmd->sgl.sge32[0].length = sc->map_sz; 3254 3255 sc->map_update_cmd = cmd; 3256 if (mrsas_issue_dcmd(sc, cmd)) { 3257 device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n"); 3258 return(1); 3259 } 3260 return(retcode); 3261 } 3262 3263 /** 3264 * mrsas_get_pd_list: Returns FW's PD list structure 3265 * input: Adapter soft state 3266 * 3267 * Issues an internal command (DCMD) to get the 
FW's controller PD 3268 * list structure. This information is mainly used to find out about 3269 * system supported by Firmware. 3270 */ 3271 static int mrsas_get_pd_list(struct mrsas_softc *sc) 3272 { 3273 int retcode = 0, pd_index = 0, pd_count=0, pd_list_size; 3274 struct mrsas_mfi_cmd *cmd; 3275 struct mrsas_dcmd_frame *dcmd; 3276 struct MR_PD_LIST *pd_list_mem; 3277 struct MR_PD_ADDRESS *pd_addr; 3278 bus_addr_t pd_list_phys_addr = 0; 3279 struct mrsas_tmp_dcmd *tcmd; 3280 3281 cmd = mrsas_get_mfi_cmd(sc); 3282 if (!cmd) { 3283 device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n"); 3284 return 1; 3285 } 3286 3287 dcmd = &cmd->frame->dcmd; 3288 3289 tcmd = kmalloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 3290 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 3291 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) { 3292 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n"); 3293 mrsas_release_mfi_cmd(cmd); 3294 return(ENOMEM); 3295 } 3296 else { 3297 pd_list_mem = tcmd->tmp_dcmd_mem; 3298 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 3299 } 3300 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3301 3302 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 3303 dcmd->mbox.b[1] = 0; 3304 dcmd->cmd = MFI_CMD_DCMD; 3305 dcmd->cmd_status = 0xFF; 3306 dcmd->sge_count = 1; 3307 dcmd->flags = MFI_FRAME_DIR_READ; 3308 dcmd->timeout = 0; 3309 dcmd->pad_0 = 0; 3310 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 3311 dcmd->opcode = MR_DCMD_PD_LIST_QUERY; 3312 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr; 3313 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 3314 3315 if (!mrsas_issue_polled(sc, cmd)) 3316 retcode = 0; 3317 else 3318 retcode = 1; 3319 3320 /* Get the instance PD list */ 3321 pd_count = MRSAS_MAX_PD; 3322 pd_addr = pd_list_mem->addr; 3323 if (retcode == 0 && pd_list_mem->count < pd_count) { 3324 memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct 
mrsas_pd_list)); 3325 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) { 3326 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId; 3327 sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType; 3328 sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM; 3329 pd_addr++; 3330 } 3331 } 3332 3333 /* Use mutext/spinlock if pd_list component size increase more than 32 bit. */ 3334 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); 3335 mrsas_free_tmp_dcmd(tcmd); 3336 mrsas_release_mfi_cmd(cmd); 3337 kfree(tcmd, M_MRSAS); 3338 return(retcode); 3339 } 3340 3341 /** 3342 * mrsas_get_ld_list: Returns FW's LD list structure 3343 * input: Adapter soft state 3344 * 3345 * Issues an internal command (DCMD) to get the FW's controller PD 3346 * list structure. This information is mainly used to find out about 3347 * supported by the FW. 3348 */ 3349 static int mrsas_get_ld_list(struct mrsas_softc *sc) 3350 { 3351 int ld_list_size, retcode = 0, ld_index = 0, ids = 0; 3352 struct mrsas_mfi_cmd *cmd; 3353 struct mrsas_dcmd_frame *dcmd; 3354 struct MR_LD_LIST *ld_list_mem; 3355 bus_addr_t ld_list_phys_addr = 0; 3356 struct mrsas_tmp_dcmd *tcmd; 3357 3358 cmd = mrsas_get_mfi_cmd(sc); 3359 if (!cmd) { 3360 device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n"); 3361 return 1; 3362 } 3363 3364 dcmd = &cmd->frame->dcmd; 3365 3366 tcmd = kmalloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 3367 ld_list_size = sizeof(struct MR_LD_LIST); 3368 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { 3369 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n"); 3370 mrsas_release_mfi_cmd(cmd); 3371 return(ENOMEM); 3372 } 3373 else { 3374 ld_list_mem = tcmd->tmp_dcmd_mem; 3375 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 3376 } 3377 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3378 3379 dcmd->cmd = MFI_CMD_DCMD; 3380 dcmd->cmd_status = 0xFF; 3381 dcmd->sge_count = 1; 3382 dcmd->flags = 
MFI_FRAME_DIR_READ; 3383 dcmd->timeout = 0; 3384 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); 3385 dcmd->opcode = MR_DCMD_LD_GET_LIST; 3386 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr; 3387 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); 3388 dcmd->pad_0 = 0; 3389 3390 if (!mrsas_issue_polled(sc, cmd)) 3391 retcode = 0; 3392 else 3393 retcode = 1; 3394 3395 /* Get the instance LD list */ 3396 if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))){ 3397 sc->CurLdCount = ld_list_mem->ldCount; 3398 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD); 3399 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) { 3400 if (ld_list_mem->ldList[ld_index].state != 0) { 3401 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 3402 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 3403 } 3404 } 3405 } 3406 3407 mrsas_free_tmp_dcmd(tcmd); 3408 mrsas_release_mfi_cmd(cmd); 3409 kfree(tcmd, M_MRSAS); 3410 return(retcode); 3411 } 3412 3413 /** 3414 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command 3415 * input: Adapter soft state 3416 * Temp command 3417 * Size of alloction 3418 * 3419 * Allocates DMAable memory for a temporary internal command. The allocated 3420 * memory is initialized to all zeros upon successful loading of the dma 3421 * mapped memory. 
3422 */ 3423 int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, 3424 int size) 3425 { 3426 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent 3427 1, 0, // algnmnt, boundary 3428 BUS_SPACE_MAXADDR_32BIT,// lowaddr 3429 BUS_SPACE_MAXADDR, // highaddr 3430 NULL, NULL, // filter, filterarg 3431 size, // maxsize 3432 1, // msegments 3433 size, // maxsegsize 3434 BUS_DMA_ALLOCNOW, // flags 3435 &tcmd->tmp_dcmd_tag)) { 3436 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); 3437 return (ENOMEM); 3438 } 3439 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, 3440 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { 3441 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); 3442 return (ENOMEM); 3443 } 3444 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, 3445 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, 3446 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { 3447 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); 3448 return (ENOMEM); 3449 } 3450 3451 memset(tcmd->tmp_dcmd_mem, 0, size); 3452 return (0); 3453 } 3454 3455 /** 3456 * mrsas_free_tmp_dcmd: Free memory for temporary command 3457 * input: temporary dcmd pointer 3458 * 3459 * Deallocates memory of the temporary command for use in the construction 3460 * of the internal DCMD. 3461 */ 3462 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp) 3463 { 3464 if (tmp->tmp_dcmd_phys_addr) 3465 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap); 3466 if (tmp->tmp_dcmd_mem != NULL) 3467 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap); 3468 if (tmp->tmp_dcmd_tag != NULL) 3469 bus_dma_tag_destroy(tmp->tmp_dcmd_tag); 3470 } 3471 3472 /** 3473 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd 3474 * input: Adapter soft state 3475 * Previously issued cmd to be aborted 3476 * 3477 * This function is used to abort previously issued commands, such as AEN and 3478 * RAID map sync map commands. 
The abort command is sent as a DCMD internal
 * command and subsequently the driver will wait for a return status.  The
 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
 */
static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
	struct mrsas_mfi_cmd *cmd_to_abort)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_abort_frame *abort_fr;
	u_int8_t retcode = 0;
	unsigned long total_time = 0;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
		return(1);
	}

	abort_fr = &cmd->frame->abort;

	/* Prepare and issue the abort frame */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = 0xFF;
	abort_fr->flags = 0;
	abort_fr->abort_context = cmd_to_abort->index;
	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
	abort_fr->abort_mfi_phys_addr_hi = 0;

	cmd->sync_cmd = 1;
	/* 0xFF = "still pending"; cleared by mrsas_complete_abort(). */
	cmd->cmd_status = 0xFF;

	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
		/* Fix: the original leaked the MFI frame on this path. */
		cmd->sync_cmd = 0;
		mrsas_release_mfi_cmd(cmd);
		return(1);
	}

	/* Wait for this cmd to complete */
	sc->chan = (void*)&cmd;
	while (1) {
		if (cmd->cmd_status == 0xFF){
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		}
		else
			break;
		total_time++;
		if (total_time >= max_wait) {
			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
			retcode = 1;
			break;
		}
	}

	cmd->sync_cmd = 0;
	mrsas_release_mfi_cmd(cmd);
	return(retcode);
}

/**
 * mrsas_complete_abort: Completes aborting a command
 * input:	Adapter soft state
 *		Cmd that was issued to abort another cmd
 *
 * The mrsas_issue_blocked_abort_cmd() function waits for the command status
 * to change after sending the command.
This function is called from 3543 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated. 3544 */ 3545 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3546 { 3547 if (cmd->sync_cmd) { 3548 cmd->sync_cmd = 0; 3549 cmd->cmd_status = 0; 3550 sc->chan = (void*)&cmd; 3551 wakeup_one((void *)&sc->chan); 3552 } 3553 return; 3554 } 3555 3556 /** 3557 * mrsas_aen_handler: Callback function for AEN processing from thread context. 3558 * input: Adapter soft state 3559 * 3560 */ 3561 void mrsas_aen_handler(struct mrsas_softc *sc) 3562 { 3563 union mrsas_evt_class_locale class_locale; 3564 int doscan = 0; 3565 u_int32_t seq_num; 3566 int error; 3567 3568 if (!sc) { 3569 device_printf(sc->mrsas_dev, "invalid instance!\n"); 3570 return; 3571 } 3572 3573 if (sc->evt_detail_mem) { 3574 switch (sc->evt_detail_mem->code) { 3575 case MR_EVT_PD_INSERTED: 3576 mrsas_get_pd_list(sc); 3577 mrsas_bus_scan_sim(sc, sc->sim_1); 3578 doscan = 0; 3579 break; 3580 case MR_EVT_PD_REMOVED: 3581 mrsas_get_pd_list(sc); 3582 mrsas_bus_scan_sim(sc, sc->sim_1); 3583 doscan = 0; 3584 break; 3585 case MR_EVT_LD_OFFLINE: 3586 case MR_EVT_CFG_CLEARED: 3587 case MR_EVT_LD_DELETED: 3588 mrsas_bus_scan_sim(sc, sc->sim_0); 3589 doscan = 0; 3590 break; 3591 case MR_EVT_LD_CREATED: 3592 mrsas_get_ld_list(sc); 3593 mrsas_bus_scan_sim(sc, sc->sim_0); 3594 doscan = 0; 3595 break; 3596 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 3597 case MR_EVT_FOREIGN_CFG_IMPORTED: 3598 case MR_EVT_LD_STATE_CHANGE: 3599 doscan = 1; 3600 break; 3601 default: 3602 doscan = 0; 3603 break; 3604 } 3605 } else { 3606 device_printf(sc->mrsas_dev, "invalid evt_detail\n"); 3607 return; 3608 } 3609 if (doscan) { 3610 mrsas_get_pd_list(sc); 3611 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); 3612 mrsas_bus_scan_sim(sc, sc->sim_1); 3613 mrsas_get_ld_list(sc); 3614 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); 3615 mrsas_bus_scan_sim(sc, sc->sim_0); 3616 } 3617 3618 seq_num = 
sc->evt_detail_mem->seq_num + 1; 3619 3620 // Register AEN with FW for latest sequence number plus 1 3621 class_locale.members.reserved = 0; 3622 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3623 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3624 3625 if (sc->aen_cmd != NULL ) 3626 return ; 3627 3628 lockmgr(&sc->aen_lock, LK_EXCLUSIVE); 3629 error = mrsas_register_aen(sc, seq_num, 3630 class_locale.word); 3631 lockmgr(&sc->aen_lock, LK_RELEASE); 3632 3633 if (error) 3634 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error); 3635 3636 } 3637 3638 3639 /** 3640 * mrsas_complete_aen: Completes AEN command 3641 * input: Adapter soft state 3642 * Cmd that was issued to abort another cmd 3643 * 3644 * This function will be called from ISR and will continue 3645 * event processing from thread context by enqueuing task 3646 * in ev_tq (callback function "mrsas_aen_handler"). 3647 */ 3648 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3649 { 3650 /* 3651 * Don't signal app if it is just an aborted previously registered aen 3652 */ 3653 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) { 3654 /* TO DO (?) 
		 */
	}
	else
		cmd->abort_aen = 0;

	sc->aen_cmd = NULL;
	mrsas_release_mfi_cmd(cmd);

	/* Defer actual event handling to the taskqueue thread (mrsas_aen_handler). */
	if (!sc->remove_in_progress)
		taskqueue_enqueue(sc->ev_tq, &sc->ev_task);

	return;
}

/* Device method table: entry points invoked by the newbus framework. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe,		mrsas_probe),
	DEVMETHOD(device_attach,	mrsas_attach),
	DEVMETHOD(device_detach,	mrsas_detach),
	DEVMETHOD(device_suspend,	mrsas_suspend),
	DEVMETHOD(device_resume,	mrsas_resume),
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
	{ 0, 0 }
};

static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;
/* Register the driver on the PCI bus; depends on the CAM transport layer. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, NULL, NULL);
MODULE_VERSION(mrsas, 1);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);