1 /* 2 * Copyright (c) 2014, LSI Corp. 3 * All rights reserved. 4 * Author: Marian Choy 5 * Support: freebsdraid@lsi.com 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of the <ORGANIZATION> nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: head/sys/dev/mrsas/mrsas_cam.c 265555 2014-05-07 16:16:49Z ambrisko $ 35 */ 36 37 #include <dev/raid/mrsas/mrsas.h> 38 39 #include <bus/cam/cam.h> 40 #include <bus/cam/cam_ccb.h> 41 #include <bus/cam/cam_sim.h> 42 #include <bus/cam/cam_xpt_sim.h> 43 #include <bus/cam/cam_debug.h> 44 #include <bus/cam/cam_periph.h> 45 #include <bus/cam/cam_xpt_periph.h> 46 47 #include <bus/cam/scsi/scsi_all.h> 48 #include <bus/cam/scsi/scsi_message.h> 49 #include <sys/taskqueue.h> 50 51 52 /* 53 * Function prototypes 54 */ 55 int mrsas_cam_attach(struct mrsas_softc *sc); 56 //int mrsas_ldio_inq(union ccb *ccb); 57 int mrsas_ldio_inq(struct cam_sim *sim, union ccb *ccb); 58 int mrsas_bus_scan(struct mrsas_softc *sc); 59 int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim); 60 int mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); 61 int mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, 62 union ccb *ccb); 63 int mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, 64 union ccb *ccb, struct cam_sim *sim); 65 int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, 66 union ccb *ccb, u_int32_t device_id, 67 MRSAS_RAID_SCSI_IO_REQUEST *io_request); 68 void mrsas_xpt_freeze(struct mrsas_softc *sc); 69 void mrsas_xpt_release(struct mrsas_softc *sc); 70 void mrsas_cam_detach(struct mrsas_softc *sc); 71 void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd); 72 void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); 73 void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); 74 void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, 75 u_int32_t req_desc_hi); 76 void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request, u_int8_t cdb_len, 77 struct IO_REQUEST_INFO *io_info, union ccb *ccb, 78 MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag, 79 u_int32_t ld_block_size); 80 static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct 
cam_sim *sim); 81 static void mrsas_poll(struct cam_sim *sim); 82 static void mrsas_action(struct cam_sim *sim, union ccb *ccb); 83 static void mrsas_scsiio_timeout(void *data); 84 static void mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, 85 int nseg, int error); 86 static int32_t mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim, 87 union ccb *ccb); 88 static void mrsas_rescan_callback(struct cam_periph *, union ccb *); 89 struct mrsas_mpt_cmd * mrsas_get_mpt_cmd(struct mrsas_softc *sc); 90 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc, 91 u_int16_t index); 92 93 extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map); 94 extern u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map, 95 struct mrsas_softc *sc); 96 extern void mrsas_isr(void *arg); 97 extern void mrsas_aen_handler(struct mrsas_softc *sc); 98 extern u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc, 99 struct IO_REQUEST_INFO *io_info,RAID_CONTEXT *pRAID_Context, 100 MR_FW_RAID_MAP_ALL *map); 101 extern u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, 102 MR_FW_RAID_MAP_ALL *map); 103 extern u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo, 104 struct IO_REQUEST_INFO *io_info); 105 extern u_int8_t megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm, 106 u_int64_t block, u_int32_t count); 107 108 109 /** 110 * mrsas_cam_attach: Main entry to CAM subsystem 111 * input: Adapter instance soft state 112 * 113 * This function is called from mrsas_attach() during initialization 114 * to perform SIM allocations and XPT bus registration. If the kernel 115 * version is 7.4 or earlier, it would also initiate a bus scan. 
 */
int mrsas_cam_attach(struct mrsas_softc *sc)
{
    struct cam_devq *devq;
    int mrsas_cam_depth;

    /* Reserve MRSAS_INTERNAL_CMDS of the FW command pool for driver use. */
    mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS;

    if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
        device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
        return(ENOMEM);
    }

    /*
     * Create SIM for bus 0 and register, also create path
     */
    sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_poll, "mrsas", sc,
        device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
        mrsas_cam_depth, devq);
    if (sc->sim_0 == NULL){
        device_printf(sc->mrsas_dev, "Cannot register SIM\n");
        cam_simq_release(devq);
        return(ENXIO);
    }
    /* Initialize taskqueue for Event Handling */
    TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
    sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
        taskqueue_thread_enqueue, &sc->ev_tq);

    /* Run the task queue with lowest priority */
    taskqueue_start_threads(&sc->ev_tq, 1, 255, -1, "%s taskq",
        device_get_nameunit(sc->mrsas_dev));
    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    if (xpt_bus_register(sc->sim_0, 0) != CAM_SUCCESS)
    {
        cam_sim_free(sc->sim_0);
        cam_simq_release(devq);
        lockmgr(&sc->sim_lock, LK_RELEASE);
        return(ENXIO);
    }
    if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
        CAM_TARGET_WILDCARD,
        CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        xpt_bus_deregister(cam_sim_path(sc->sim_0));
        cam_sim_free(sc->sim_0);
        cam_simq_release(devq);
        lockmgr(&sc->sim_lock, LK_RELEASE);
        return(ENXIO);
    }
    lockmgr(&sc->sim_lock, LK_RELEASE);

    /*
     * Create SIM for bus 1 and register, also create path
     */
    sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_poll, "mrsas", sc,
        device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
        mrsas_cam_depth, devq);
    cam_simq_release(devq);
    if (sc->sim_1 == NULL){
        /*
         * NOTE(review): on this path sim_0/path_0 remain registered;
         * presumably the caller unwinds via mrsas_cam_detach() — confirm.
         */
        device_printf(sc->mrsas_dev,
            "Cannot register SIM\n");
        return(ENXIO);
    }

    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    if (xpt_bus_register(sc->sim_1, 1) != CAM_SUCCESS){
        cam_sim_free(sc->sim_1);
        lockmgr(&sc->sim_lock, LK_RELEASE);
        return(ENXIO);
    }
    if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
        CAM_TARGET_WILDCARD,
        CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        xpt_bus_deregister(cam_sim_path(sc->sim_1));
        cam_sim_free(sc->sim_1);
        lockmgr(&sc->sim_lock, LK_RELEASE);
        return(ENXIO);
    }
    lockmgr(&sc->sim_lock, LK_RELEASE);

#if (__FreeBSD_version <= 704000)
    if (mrsas_bus_scan(sc)){
        device_printf(sc->mrsas_dev, "Error in bus scan.\n");
        return(1);
    }
#endif
    return(0);
}

/**
 * mrsas_cam_detach:  De-allocates and teardown CAM
 * input:             Adapter instance soft state
 *
 * De-registers and frees the paths and SIMs.
 */
void mrsas_cam_detach(struct mrsas_softc *sc)
{
    if (sc->ev_tq != NULL)
        taskqueue_free(sc->ev_tq);
    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    if (sc->path_0)
        xpt_free_path(sc->path_0);
    if (sc->sim_0) {
        xpt_bus_deregister(cam_sim_path(sc->sim_0));
        cam_sim_free(sc->sim_0);
    }
    if (sc->path_1)
        xpt_free_path(sc->path_1);
    if (sc->sim_1) {
        xpt_bus_deregister(cam_sim_path(sc->sim_1));
        cam_sim_free(sc->sim_1);
    }
    lockmgr(&sc->sim_lock, LK_RELEASE);
}

/**
 * mrsas_action:  SIM callback entry point
 * input:         pointer to SIM
 *                pointer to CAM Control Block
 *
 * This function processes CAM subsystem requests. The type of request is
 * stored in ccb->ccb_h.func_code. The preprocessor #ifdef is necessary
 * because ccb->cpi.maxio is not supported for FreeBSD version 7.4 or
 * earlier.
 */
static void mrsas_action(struct cam_sim *sim, union ccb *ccb)
{
    struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
    u_int32_t device_id;

    switch (ccb->ccb_h.func_code) {
        case XPT_SCSI_IO:
        {
            device_id = ccb_h->target_id;

            /*
             * bus 0 is LD, bus 1 is for system-PD
             */
            if (cam_sim_bus(sim) == 1 &&
                sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
                ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
                xpt_done(ccb);
            }
            else {
                if (mrsas_startio(sc, sim, ccb)){
                    ccb->ccb_h.status |= CAM_REQ_INVALID;
                    xpt_done(ccb);
                }
            }
            break;
        }
        case XPT_ABORT:
        {
            /* Aborting individual commands is not supported. */
            ccb->ccb_h.status = CAM_UA_ABORT;
            xpt_done(ccb);
            break;
        }
        case XPT_RESET_BUS:
        {
            xpt_done(ccb);
            break;
        }
        case XPT_GET_TRAN_SETTINGS:
        {
            /* Report fixed parallel-SCSI transport settings. */
            ccb->cts.protocol = PROTO_SCSI;
            ccb->cts.protocol_version = SCSI_REV_2;
            ccb->cts.transport = XPORT_SPI;
            ccb->cts.transport_version = 2;
            ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
            ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
            ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
            ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);
            break;
        }
        case XPT_SET_TRAN_SETTINGS:
        {
            ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
            xpt_done(ccb);
            break;
        }
        case XPT_CALC_GEOMETRY:
        {
            cam_calc_geometry(&ccb->ccg, 1);
            xpt_done(ccb);
            break;
        }
        case XPT_PATH_INQ:
        {
            /* Path inquiry: describe HBA capabilities to CAM. */
            ccb->cpi.version_num = 1;
            ccb->cpi.hba_inquiry = 0;
            ccb->cpi.target_sprt = 0;
            ccb->cpi.hba_misc = 0;
            ccb->cpi.hba_eng_cnt = 0;
            ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
            ccb->cpi.unit_number = cam_sim_unit(sim);
            ccb->cpi.bus_id = cam_sim_bus(sim);
            ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
            ccb->cpi.base_transfer_speed = 150000;

            strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
            strncpy(ccb->cpi.hba_vid, "LSI", HBA_IDLEN);
            strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
            ccb->cpi.transport = XPORT_SPI;
            ccb->cpi.transport_version = 2;
            ccb->cpi.protocol = PROTO_SCSI;
            ccb->cpi.protocol_version = SCSI_REV_2;
            /* bus 0 exposes logical drives, bus 1 system PDs */
            if (ccb->cpi.bus_id == 0)
                ccb->cpi.max_target = MRSAS_MAX_LD-1;
            else
                ccb->cpi.max_target = MRSAS_MAX_PD-1;
            ccb->cpi.maxio = MRSAS_MAX_IO_SIZE;
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);
            break;
        }
        default:
        {
            ccb->ccb_h.status = CAM_REQ_INVALID;
            xpt_done(ccb);
            break;
        }
    }
}

/**
 * mrsas_scsiio_timeout:  Callback function for timed-out I/O
 * input:                 mpt command context
 *
 * This function will execute after the timeout value provided by the ccb
 * header from the CAM layer, if the timer expires. The driver runs this
 * timer for all DCDB and LDIO commands coming from the CAM layer. This is
 * the callback function for I/O timeout and it runs in no-sleep context.
 * It sets do_timedout_reset in the adapter context so that OCR/kill-adapter
 * is executed from the ocr_thread context.
 */
static void
mrsas_scsiio_timeout(void *data)
{
    struct mrsas_mpt_cmd *cmd;
    struct mrsas_softc *sc;

    cmd = (struct mrsas_mpt_cmd *)data;
    sc = cmd->sc;

    if (cmd->ccb_ptr == NULL) {
        kprintf("command timeout with NULL ccb\n");
        return;
    }

    /* Below callout is dummy entry so that it will be
     * cancelled from mrsas_cmd_done(). Now Controller will
     * go to OCR/Kill Adapter based on OCR enable/disable
     * property of Controller from ocr_thread context.
 */
    callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
        mrsas_scsiio_timeout, cmd);
    sc->do_timedout_reset = 1;
    if(sc->ocr_thread_active)
        wakeup(&sc->ocr_chan);
}

/**
 * mrsas_startio:  SCSI IO entry point
 * input:          Adapter instance soft state
 *                 pointer to CAM Control Block
 *
 * This function is the SCSI IO entry point and it initiates IO processing.
 * It copies the IO and depending if the IO is read/write or inquiry, it would
 * call mrsas_build_ldio() or mrsas_build_dcdb(), respectively. It returns
 * 0 if the command is sent to firmware successfully, otherwise it returns 1.
 */
static int32_t mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
                union ccb *ccb)
{
    struct mrsas_mpt_cmd *cmd;
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
    struct ccb_scsiio *csio = &(ccb->csio);
    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

    /* SYNCHRONIZE_CACHE is completed immediately without going to FW. */
    if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE){
        ccb->ccb_h.status = CAM_REQ_CMP;
        xpt_done(ccb);
        return(0);
    }

    ccb_h->status |= CAM_SIM_QUEUED;
    cmd = mrsas_get_mpt_cmd(sc);

    /* Command pool empty: ask CAM to requeue the request. */
    if (!cmd) {
        ccb_h->status |= CAM_REQUEUE_REQ;
        xpt_done(ccb);
        return(0);
    }

    if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if(ccb_h->flags & CAM_DIR_IN)
            cmd->flags |= MRSAS_DIR_IN;
        if(ccb_h->flags & CAM_DIR_OUT)
            cmd->flags |= MRSAS_DIR_OUT;
    }
    else
        cmd->flags = MRSAS_DIR_NONE; /* no data */

    /* For FreeBSD 10.0 and higher */
#if 0 /* XXX (__FreeBSD_version >= 1000000) */
    /*
     * XXX We don't yet support physical addresses here.
     */
    switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
    case CAM_DATA_PADDR:
    case CAM_DATA_SG_PADDR:
        kprintf("%s: physical addresses not supported\n",
            __func__);
        mrsas_release_mpt_cmd(cmd);
        ccb_h->status = CAM_REQ_INVALID;
        ccb_h->status &= ~CAM_SIM_QUEUED;
        goto done;
    case CAM_DATA_SG:
        kprintf("%s: scatter gather is not supported\n",
            __func__);
        mrsas_release_mpt_cmd(cmd);
        ccb_h->status = CAM_REQ_INVALID;
        goto done;
    case CAM_DATA_VADDR:
        if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
            mrsas_release_mpt_cmd(cmd);
            ccb_h->status = CAM_REQ_TOO_BIG;
            goto done;
        }
        cmd->length = csio->dxfer_len;
        if (cmd->length)
            cmd->data = csio->data_ptr;
        break;
    default:
        ccb->ccb_h.status = CAM_REQ_INVALID;
        goto done;
    }
#else
    if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* Virtual data address */
        if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
            if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
                mrsas_release_mpt_cmd(cmd);
                ccb_h->status = CAM_REQ_TOO_BIG;
                goto done;
            }
            cmd->length = csio->dxfer_len;
            if (cmd->length)
                cmd->data = csio->data_ptr;
        }
        else {
            /* S/G lists from CAM are not supported. */
            mrsas_release_mpt_cmd(cmd);
            ccb_h->status = CAM_REQ_INVALID;
            goto done;
        }
    }
    else { /* Data addresses are physical: not supported. */
        mrsas_release_mpt_cmd(cmd);
        ccb_h->status = CAM_REQ_INVALID;
        ccb_h->status &= ~CAM_SIM_QUEUED;
        goto done;
    }
#endif
    /* save ccb ptr */
    cmd->ccb_ptr = ccb;

    /* SMIDs are 1-based; descriptor array is 0-based. */
    req_desc = mrsas_get_request_desc(sc, (cmd->index)-1);
    if (!req_desc) {
        /*
         * NOTE(review): cmd is not released and the CCB is not completed
         * on this path — looks like a leak; confirm against upstream.
         */
        device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
        return (FAIL);
    }
    memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
    cmd->request_desc = req_desc;

    if (ccb_h->flags & CAM_CDB_POINTER)
        bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
    else
        bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
    /* RAID map must not change while the request is being built. */
    lockmgr(&sc->raidmap_lock, LK_EXCLUSIVE);

    if (mrsas_ldio_inq(sim, ccb)) {
        if (mrsas_build_ldio(sc, cmd, ccb)){
            device_printf(sc->mrsas_dev, "Build LDIO failed.\n");
            lockmgr(&sc->raidmap_lock, LK_RELEASE);
            return(1);
        }
    }
    else {
        if (mrsas_build_dcdb(sc, cmd, ccb, sim)) {
            device_printf(sc->mrsas_dev, "Build DCDB failed.\n");
            lockmgr(&sc->raidmap_lock, LK_RELEASE);
            return(1);
        }
    }
    lockmgr(&sc->raidmap_lock, LK_RELEASE);

    if (cmd->flags == MRSAS_DIR_IN) /* from device */
        cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
    else if (cmd->flags == MRSAS_DIR_OUT) /* to device */
        cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;

    cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
    /* SGLOffset0 is in units of 4 bytes per the MPI2 spec. */
    cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/4;
    cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
    cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;

    req_desc = cmd->request_desc;
    req_desc->SCSIIO.SMID = cmd->index;

    /*
     * Start timer for IO timeout. Default timeout value is 90 second.
 */
    callout_reset(&cmd->cm_callout, (sc->mrsas_io_timeout * hz) / 1000,
        mrsas_scsiio_timeout, cmd);
    atomic_inc(&sc->fw_outstanding);

    /* Track the high-water mark of commands outstanding at firmware. */
    if(atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
        sc->io_cmds_highwater++;

    mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
    return(0);

done:
    xpt_done(ccb);
    return(0);
}

/**
 * mrsas_ldio_inq:  Determines if IO is read/write or inquiry
 * input:           pointer to SIM
 *                  pointer to CAM Control Block
 *
 * This function determines if the IO is read/write or inquiry. It returns a
 * 1 if the IO is read/write and 0 if it is inquiry.
 */
int mrsas_ldio_inq(struct cam_sim *sim, union ccb *ccb)
{
    struct ccb_scsiio *csio = &(ccb->csio);

    /* Bus 1 is system PD; never treated as LD read/write. */
    if (cam_sim_bus(sim) == 1)
        return(0);

    switch (csio->cdb_io.cdb_bytes[0]) {
    case READ_10:
    case WRITE_10:
    case READ_12:
    case WRITE_12:
    case READ_6:
    case WRITE_6:
    case READ_16:
    case WRITE_16:
        return 1;
    default:
        return 0;
    }
}

/**
 * mrsas_get_mpt_cmd:  Get a cmd from free command pool
 * input:              Adapter instance soft state
 *
 * This function removes an MPT command from the command free list and
 * initializes it.
579 */ 580 struct mrsas_mpt_cmd* mrsas_get_mpt_cmd(struct mrsas_softc *sc) 581 { 582 struct mrsas_mpt_cmd *cmd = NULL; 583 584 lockmgr(&sc->mpt_cmd_pool_lock, LK_EXCLUSIVE); 585 if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)){ 586 cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head); 587 TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next); 588 } 589 memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); 590 cmd->data = NULL; 591 cmd->length = 0; 592 cmd->flags = 0; 593 cmd->error_code = 0; 594 cmd->load_balance = 0; 595 cmd->ccb_ptr = NULL; 596 lockmgr(&sc->mpt_cmd_pool_lock, LK_RELEASE); 597 598 return cmd; 599 } 600 601 /** 602 * mrsas_release_mpt_cmd: Return a cmd to free command pool 603 * input: Command packet for return to free command pool 604 * 605 * This function returns an MPT command to the free command list. 606 */ 607 void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd) 608 { 609 struct mrsas_softc *sc = cmd->sc; 610 611 lockmgr(&sc->mpt_cmd_pool_lock, LK_EXCLUSIVE); 612 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; 613 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next); 614 lockmgr(&sc->mpt_cmd_pool_lock, LK_RELEASE); 615 616 return; 617 } 618 619 /** 620 * mrsas_get_request_desc: Get request descriptor from array 621 * input: Adapter instance soft state 622 * SMID index 623 * 624 * This function returns a pointer to the request descriptor. 
625 */ 626 MRSAS_REQUEST_DESCRIPTOR_UNION * 627 mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index) 628 { 629 u_int8_t *p; 630 631 if (index >= sc->max_fw_cmds) { 632 device_printf(sc->mrsas_dev, "Invalid SMID (0x%x)request for desc\n", index); 633 return NULL; 634 } 635 p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index; 636 637 return (MRSAS_REQUEST_DESCRIPTOR_UNION *)p; 638 } 639 640 /** 641 * mrsas_build_ldio: Builds an LDIO command 642 * input: Adapter instance soft state 643 * Pointer to command packet 644 * Pointer to CCB 645 * 646 * This function builds the LDIO command packet. It returns 0 if the 647 * command is built successfully, otherwise it returns a 1. 648 */ 649 int mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, 650 union ccb *ccb) 651 { 652 struct ccb_hdr *ccb_h = &(ccb->ccb_h); 653 struct ccb_scsiio *csio = &(ccb->csio); 654 u_int32_t device_id; 655 MRSAS_RAID_SCSI_IO_REQUEST *io_request; 656 657 device_id = ccb_h->target_id; 658 659 io_request = cmd->io_request; 660 io_request->RaidContext.VirtualDiskTgtId = device_id; 661 io_request->RaidContext.status = 0; 662 io_request->RaidContext.exStatus = 0; 663 664 /* just the cdb len, other flags zero, and ORed-in later for FP */ 665 io_request->IoFlags = csio->cdb_len; 666 667 if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS) 668 device_printf(sc->mrsas_dev, "Build ldio or fpio error\n"); 669 670 io_request->DataLength = cmd->length; 671 672 if (mrsas_map_request(sc, cmd) == SUCCESS) { 673 if (cmd->sge_count > MRSAS_MAX_SGL) { 674 device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds" 675 "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge); 676 return (FAIL); 677 } 678 io_request->RaidContext.numSGE = cmd->sge_count; 679 } 680 else { 681 device_printf(sc->mrsas_dev, "Data map/load failed.\n"); 682 return(FAIL); 683 } 684 return(0); 685 } 686 687 /** 688 * mrsas_setup_io: Set up data including Fast Path I/O 689 * input: 
                  Adapter instance soft state
 *                Pointer to command packet
 *                Pointer to CCB
 *
 * Decodes the LBA and transfer length from the CDB, consults the RAID map,
 * and configures the request either as a Fast Path I/O or as a regular
 * LD I/O. It returns 0 if the command is built successfully.
 */
int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, u_int32_t device_id,
    MRSAS_RAID_SCSI_IO_REQUEST *io_request)
{
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
    struct ccb_scsiio *csio = &(ccb->csio);
    struct IO_REQUEST_INFO io_info;
    MR_FW_RAID_MAP_ALL *map_ptr;
    u_int8_t fp_possible;
    u_int32_t start_lba_hi, start_lba_lo, ld_block_size;
    u_int32_t datalength = 0;

    start_lba_lo = 0;
    start_lba_hi = 0;
    fp_possible = 0;

    /*
     * READ_6 (0x08) or WRITE_6 (0x0A) cdb
     */
    if (csio->cdb_len == 6) {
        datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
        start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[1] << 16) |
            ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 8) |
            (u_int32_t) csio->cdb_io.cdb_bytes[3];
        /* 6-byte CDB carries only a 21-bit LBA */
        start_lba_lo &= 0x1FFFFF;
    }
    /*
     * READ_10 (0x28) or WRITE_10 (0x2A) cdb
     */
    else if (csio->cdb_len == 10) {
        datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
            ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
        start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 24) |
            ((u_int32_t) csio->cdb_io.cdb_bytes[3] << 16) |
            (u_int32_t) csio->cdb_io.cdb_bytes[4] << 8 |
            ((u_int32_t) csio->cdb_io.cdb_bytes[5]);
    }
    /*
     * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
     */
    else if (csio->cdb_len == 12) {
        datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
            ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
            ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
            ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
        start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 24) |
            ((u_int32_t) csio->cdb_io.cdb_bytes[3] << 16) |
            (u_int32_t) csio->cdb_io.cdb_bytes[4] << 8 |
            ((u_int32_t) csio->cdb_io.cdb_bytes[5]);
    }
    /*
     * READ_16 (0x88) or WRITE_16 (0x8A) cdb
     */
    else if (csio->cdb_len == 16) {
        datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
            ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
            ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
            ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
        start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[6] << 24) |
            ((u_int32_t) csio->cdb_io.cdb_bytes[7] << 16) |
            (u_int32_t) csio->cdb_io.cdb_bytes[8] << 8 |
            ((u_int32_t) csio->cdb_io.cdb_bytes[9]);
        start_lba_hi = ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 24) |
            ((u_int32_t) csio->cdb_io.cdb_bytes[3] << 16) |
            (u_int32_t) csio->cdb_io.cdb_bytes[4] << 8 |
            ((u_int32_t) csio->cdb_io.cdb_bytes[5]);
    }

    memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
    io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
    io_info.numBlocks = datalength;
    io_info.ldTgtId = device_id;

    switch (ccb_h->flags & CAM_DIR_MASK) {
    case CAM_DIR_IN:
        io_info.isRead = 1;
        break;
    case CAM_DIR_OUT:
        io_info.isRead = 0;
        break;
    case CAM_DIR_NONE:
    default:
        mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
        break;
    }

    /* Use the currently active copy of the RAID map. */
    map_ptr = sc->raidmap_mem[(sc->map_id & 1)];
    ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr, sc);

    if ((MR_TargetIdToLdGet(device_id, map_ptr) >= MAX_LOGICAL_DRIVES) ||
        (!sc->fast_path_io)) {
        io_request->RaidContext.regLockFlags = 0;
        fp_possible = 0;
    }
    else
    {
        /* The RAID context decides whether FP is usable for this I/O. */
        if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext, map_ptr))
            fp_possible = io_info.fpOkForIo;
    }

    if (fp_possible) {
        mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
            start_lba_lo, ld_block_size);
        io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
        cmd->request_desc->SCSIIO.RequestFlags =
            (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
            << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
        /* Invader/Fury need CUDA-type descriptors for fast path. */
        if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
            if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
                cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
            io_request->RaidContext.Type = MPI2_TYPE_CUDA;
            io_request->RaidContext.nseg = 0x1;
            io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
            io_request->RaidContext.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE);
        }
        /* Optionally steer reads to the least-loaded arm. */
        if ((sc->load_balance_info[device_id].loadBalanceFlag) && (io_info.isRead)) {
            io_info.devHandle = mrsas_get_updated_dev_handle(&sc->load_balance_info[device_id],
                &io_info);
            cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
        }
        else
            cmd->load_balance = 0;
        cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
        io_request->DevHandle = io_info.devHandle;
    }
    else {
        /* Not FP IO */
        io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
        cmd->request_desc->SCSIIO.RequestFlags =
            (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
        if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
            if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
                cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
            io_request->RaidContext.Type = MPI2_TYPE_CUDA;
            io_request->RaidContext.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE);
            io_request->RaidContext.nseg = 0x1;
        }
        io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
        io_request->DevHandle = device_id;
    }
    return(0);
}

/**
 * mrsas_build_dcdb:  Builds an DCDB command
 * input:             Adapter instance soft state
 *                    Pointer to command packet
 *                    Pointer to
CCB 844 * 845 * This function builds the DCDB inquiry command. It returns 0 if the 846 * command is built successfully, otherwise it returns a 1. 847 */ 848 int mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, 849 union ccb *ccb, struct cam_sim *sim) 850 { 851 struct ccb_hdr *ccb_h = &(ccb->ccb_h); 852 u_int32_t device_id; 853 MR_FW_RAID_MAP_ALL *map_ptr; 854 MRSAS_RAID_SCSI_IO_REQUEST *io_request; 855 856 io_request = cmd->io_request; 857 device_id = ccb_h->target_id; 858 map_ptr = sc->raidmap_mem[(sc->map_id & 1)]; 859 860 /* Check if this is for system PD */ 861 if (cam_sim_bus(sim) == 1 && 862 sc->pd_list[device_id].driveState == MR_PD_STATE_SYSTEM) { 863 io_request->Function = 0; 864 io_request->DevHandle = map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 865 io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec; 866 io_request->RaidContext.regLockFlags = 0; 867 io_request->RaidContext.regLockRowLBA = 0; 868 io_request->RaidContext.regLockLength = 0; 869 io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << 870 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; 871 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) 872 io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; 873 cmd->request_desc->SCSIIO.RequestFlags = 874 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 875 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 876 cmd->request_desc->SCSIIO.DevHandle = 877 map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 878 } 879 else { 880 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST; 881 io_request->DevHandle = device_id; 882 cmd->request_desc->SCSIIO.RequestFlags = 883 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 884 } 885 886 io_request->RaidContext.VirtualDiskTgtId = device_id; 887 io_request->LUN[1] = ccb_h->target_lun & 0xF; 888 io_request->DataLength = cmd->length; 889 890 if (mrsas_map_request(sc, cmd) == SUCCESS) { 891 if (cmd->sge_count > 
sc->max_num_sge) { 892 device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds" 893 "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge); 894 return (1); 895 } 896 io_request->RaidContext.numSGE = cmd->sge_count; 897 } 898 else { 899 device_printf(sc->mrsas_dev, "Data map/load failed.\n"); 900 return(1); 901 } 902 return(0); 903 } 904 905 /** 906 * mrsas_map_request: Map and load data 907 * input: Adapter instance soft state 908 * Pointer to command packet 909 * 910 * For data from OS, map and load the data buffer into bus space. The 911 * SG list is built in the callback. If the bus dmamap load is not 912 * successful, cmd->error_code will contain the error code and a 1 is 913 * returned. 914 */ 915 int mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd) 916 { 917 u_int32_t retcode = 0; 918 struct cam_sim *sim; 919 int flag = BUS_DMA_NOWAIT; 920 921 sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path); 922 923 if (cmd->data != NULL) { 924 lockmgr(&sc->io_lock, LK_EXCLUSIVE); 925 /* Map data buffer into bus space */ 926 retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data, 927 cmd->length, mrsas_data_load_cb, cmd, flag); 928 lockmgr(&sc->io_lock, LK_RELEASE); 929 if (retcode) 930 device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode); 931 if (retcode == EINPROGRESS) { 932 device_printf(sc->mrsas_dev, "request load in progress\n"); 933 mrsas_freeze_simq(cmd, sim); 934 } 935 } 936 if (cmd->error_code) 937 return(1); 938 return(retcode); 939 } 940 941 /** 942 * mrsas_unmap_request: Unmap and unload data 943 * input: Adapter instance soft state 944 * Pointer to command packet 945 * 946 * This function unmaps and unloads data from OS. 
 */
void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
    if (cmd->data != NULL) {
        if (cmd->flags & MRSAS_DIR_IN)
            bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
        if (cmd->flags & MRSAS_DIR_OUT)
            bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
        lockmgr(&sc->io_lock, LK_EXCLUSIVE);
        bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
        lockmgr(&sc->io_lock, LK_RELEASE);
    }
}

/**
 * mrsas_data_load_cb:  Callback entry point
 * input:               Pointer to command packet as argument
 *                      Pointer to segment
 *                      Number of segments
 *                      Error
 *
 * This is the callback function of the bus dma map load. It builds
 * the SG list.
 */
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
    struct mrsas_softc *sc = cmd->sc;
    MRSAS_RAID_SCSI_IO_REQUEST *io_request;
    pMpi25IeeeSgeChain64_t sgl_ptr;
    int i=0, sg_processed=0;

    if (error)
    {
        cmd->error_code = error;
        device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
        if (error == EFBIG) {
            cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
            return;
        }
        /*
         * NOTE(review): for errors other than EFBIG execution falls
         * through and still builds the SGL; mrsas_map_request()
         * later fails the command via cmd->error_code — confirm
         * this fall-through is intentional.
         */
    }

    if (cmd->flags & MRSAS_DIR_IN)
        bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
            BUS_DMASYNC_PREREAD);
    if (cmd->flags & MRSAS_DIR_OUT)
        bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
            BUS_DMASYNC_PREWRITE);
    /* only the too-large case is checked here, despite the message text */
    if (nseg > sc->max_num_sge) {
        device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
        return;
    }

    io_request = cmd->io_request;
    sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;

    /* Invader/Fury: clear the flags of the last main-frame SGE slot. */
    if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
        pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
        sgl_ptr_end += sc->max_sge_in_main_msg - 1;
        sgl_ptr_end->Flags = 0;
    }

    if (nseg != 0) {
        for (i=0; i < nseg; i++) {
            sgl_ptr->Address = segs[i].ds_addr;
            sgl_ptr->Length = segs[i].ds_len;
            sgl_ptr->Flags = 0;
            if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
                if (i == nseg - 1)
                    sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
            }
            sgl_ptr++;
            sg_processed = i + 1;
            /*
             * Prepare chain element
             */
            if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
                (nseg > sc->max_sge_in_main_msg)) {
                pMpi25IeeeSgeChain64_t sg_chain;
                if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
                    if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
                        != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
                        cmd->io_request->ChainOffset = sc->chain_offset_io_request;
                    else
                        cmd->io_request->ChainOffset = 0;
                } else
                    cmd->io_request->ChainOffset = sc->chain_offset_io_request;
                sg_chain = sgl_ptr;
                if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
                    sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
                else
                    sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
                sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed));
                sg_chain->Address = cmd->chain_frame_phys_addr;
                /* remaining SGEs continue in the chain frame */
                sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
            }
        }
    }
    cmd->sge_count = nseg;
}

/**
 * mrsas_freeze_simq:  Freeze SIM queue
 * input:              Pointer to command packet
 *                     Pointer to SIM
 *
 * This function freezes the sim queue.
1055 */ 1056 static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim) 1057 { 1058 union ccb *ccb = (union ccb *)(cmd->ccb_ptr); 1059 1060 xpt_freeze_simq(sim, 1); 1061 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1062 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 1063 } 1064 1065 void mrsas_xpt_freeze(struct mrsas_softc *sc) { 1066 xpt_freeze_simq(sc->sim_0, 1); 1067 xpt_freeze_simq(sc->sim_1, 1); 1068 } 1069 1070 void mrsas_xpt_release(struct mrsas_softc *sc) { 1071 xpt_release_simq(sc->sim_0, 1); 1072 xpt_release_simq(sc->sim_1, 1); 1073 } 1074 1075 /** 1076 * mrsas_cmd_done: Perform remaining command completion 1077 * input: Adapter instance soft state 1078 * Pointer to command packet 1079 * 1080 * This function calls ummap request and releases the MPT command. 1081 */ 1082 void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd) 1083 { 1084 callout_stop(&cmd->cm_callout); 1085 mrsas_unmap_request(sc, cmd); 1086 lockmgr(&sc->sim_lock, LK_EXCLUSIVE); 1087 xpt_done(cmd->ccb_ptr); 1088 cmd->ccb_ptr = NULL; 1089 lockmgr(&sc->sim_lock, LK_RELEASE); 1090 mrsas_release_mpt_cmd(cmd); 1091 } 1092 1093 /** 1094 * mrsas_poll: Polling entry point 1095 * input: Pointer to SIM 1096 * 1097 * This is currently a stub function. 1098 */ 1099 static void mrsas_poll(struct cam_sim *sim) 1100 { 1101 struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim); 1102 mrsas_isr((void *) sc); 1103 } 1104 1105 static void 1106 mrsas_rescan_callback(struct cam_periph *periph, union ccb *ccb) 1107 { 1108 xpt_free_path(ccb->ccb_h.path); 1109 xpt_free_ccb(ccb); 1110 } 1111 1112 /* 1113 * mrsas_bus_scan: Perform bus scan 1114 * input: Adapter instance soft state 1115 * 1116 * This mrsas_bus_scan function is needed for FreeBSD 7.x. Also, it should 1117 * not be called in FreeBSD 8.x and later versions, where the bus scan is 1118 * automatic. 
1119 */ 1120 int mrsas_bus_scan(struct mrsas_softc *sc) 1121 { 1122 union ccb *ccb_0; 1123 union ccb *ccb_1; 1124 1125 lockmgr(&sc->sim_lock, LK_EXCLUSIVE); 1126 if ((ccb_0 = xpt_alloc_ccb()) == NULL) { 1127 lockmgr(&sc->sim_lock, LK_RELEASE); 1128 return(ENOMEM); 1129 } 1130 1131 if ((ccb_1 = xpt_alloc_ccb()) == NULL) { 1132 xpt_free_ccb(ccb_0); 1133 lockmgr(&sc->sim_lock, LK_RELEASE); 1134 return(ENOMEM); 1135 } 1136 1137 if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0), 1138 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){ 1139 xpt_free_ccb(ccb_0); 1140 xpt_free_ccb(ccb_1); 1141 lockmgr(&sc->sim_lock, LK_RELEASE); 1142 return(EIO); 1143 } 1144 1145 if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1), 1146 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){ 1147 xpt_free_ccb(ccb_0); 1148 xpt_free_ccb(ccb_1); 1149 lockmgr(&sc->sim_lock, LK_RELEASE); 1150 return(EIO); 1151 } 1152 1153 xpt_setup_ccb(&ccb_0->ccb_h, ccb_0->ccb_h.path, 5/*priority (low)*/); 1154 ccb_0->ccb_h.func_code = XPT_SCAN_BUS; 1155 ccb_0->ccb_h.cbfcnp = mrsas_rescan_callback; 1156 ccb_0->crcn.flags = CAM_FLAG_NONE; 1157 xpt_action(ccb_0); /* scan is now in progress */ 1158 1159 xpt_setup_ccb(&ccb_1->ccb_h, ccb_1->ccb_h.path, 5/*priority (low)*/); 1160 ccb_1->ccb_h.func_code = XPT_SCAN_BUS; 1161 ccb_1->ccb_h.cbfcnp = mrsas_rescan_callback; 1162 ccb_1->crcn.flags = CAM_FLAG_NONE; 1163 xpt_action(ccb_1); /* scan is now in progress */ 1164 1165 lockmgr(&sc->sim_lock, LK_RELEASE); 1166 1167 return(0); 1168 } 1169 1170 /* 1171 * mrsas_bus_scan_sim: Perform bus scan per SIM 1172 * input: Adapter instance soft state 1173 * This function will be called from Event handler 1174 * on LD creation/deletion, JBOD on/off. 
1175 */ 1176 int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim) 1177 { 1178 union ccb *ccb; 1179 1180 lockmgr(&sc->sim_lock, LK_EXCLUSIVE); 1181 if ((ccb = xpt_alloc_ccb()) == NULL) { 1182 lockmgr(&sc->sim_lock, LK_RELEASE); 1183 return(ENOMEM); 1184 } 1185 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim), 1186 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){ 1187 xpt_free_ccb(ccb); 1188 lockmgr(&sc->sim_lock, LK_RELEASE); 1189 return(EIO); 1190 } 1191 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/); 1192 ccb->ccb_h.func_code = XPT_SCAN_BUS; 1193 ccb->ccb_h.cbfcnp = mrsas_rescan_callback; 1194 ccb->crcn.flags = CAM_FLAG_NONE; 1195 xpt_action(ccb); /* scan is now in progress */ 1196 1197 lockmgr(&sc->sim_lock, LK_RELEASE); 1198 1199 return(0); 1200 } 1201