1 /* 2 * Copyright (c) 2010, LSI Corp. 3 * All rights reserved. 4 * Author : Manjunath Ranganathaiah 5 * Support: freebsdraid@lsi.com 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of the <ORGANIZATION> nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/dev/tws/tws_cam.c,v 1.3 2007/05/09 04:16:32 mrangana Exp $ 35 */ 36 37 #include <dev/raid/tws/tws.h> 38 #include <dev/raid/tws/tws_services.h> 39 #include <dev/raid/tws/tws_hdm.h> 40 #include <dev/raid/tws/tws_user.h> 41 #include <bus/cam/cam.h> 42 #include <bus/cam/cam_ccb.h> 43 #include <bus/cam/cam_sim.h> 44 #include <bus/cam/cam_xpt_sim.h> 45 #include <bus/cam/cam_debug.h> 46 #include <bus/cam/cam_periph.h> 47 48 #include <bus/cam/scsi/scsi_all.h> 49 #include <bus/cam/scsi/scsi_message.h> 50 51 static int tws_cam_depth=(TWS_MAX_REQS - TWS_RESERVED_REQS); 52 static char tws_sev_str[5][8]={"","ERROR","WARNING","INFO","DEBUG"}; 53 54 static void tws_action(struct cam_sim *sim, union ccb *ccb); 55 static void tws_poll(struct cam_sim *sim); 56 static void tws_bus_scan_cb(struct cam_periph *periph, union ccb *ccb); 57 static void tws_scsi_complete(struct tws_request *req); 58 59 60 61 void tws_unmap_request(struct tws_softc *sc, struct tws_request *req); 62 int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req); 63 int tws_bus_scan(struct tws_softc *sc); 64 int tws_cam_attach(struct tws_softc *sc); 65 void tws_cam_detach(struct tws_softc *sc); 66 void tws_reset(void *arg); 67 68 static void tws_reset_cb(void *arg); 69 static void tws_reinit(void *arg); 70 static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb); 71 static void tws_freeze_simq(struct tws_softc *sc); 72 static void tws_release_simq(struct tws_softc *sc); 73 static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs, 74 int nseg, int error); 75 static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, 76 void *sgl_dest, u_int16_t num_sgl_entries); 77 static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa); 78 static void tws_scsi_err_complete(struct tws_request *req, 79 struct tws_command_header *hdr); 80 static void tws_passthru_err_complete(struct tws_request *req, 81 struct tws_command_header *hdr); 82 83 84 
static void tws_timeout(void *arg); 85 static void tws_intr_attn_aen(struct tws_softc *sc); 86 static void tws_intr_attn_error(struct tws_softc *sc); 87 static void tws_intr_resp(struct tws_softc *sc); 88 void tws_intr(void *arg); 89 void tws_cmd_complete(struct tws_request *req); 90 void tws_aen_complete(struct tws_request *req); 91 int tws_send_scsi_cmd(struct tws_softc *sc, int cmd); 92 void tws_getset_param_complete(struct tws_request *req); 93 int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id, 94 u_int32_t param_size, void *data); 95 int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id, 96 u_int32_t param_size, void *data); 97 98 99 extern struct tws_request *tws_get_request(struct tws_softc *sc, 100 u_int16_t type); 101 extern void *tws_release_request(struct tws_request *req); 102 extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req); 103 extern boolean tws_get_response(struct tws_softc *sc, 104 u_int16_t *req_id, u_int64_t *mfa); 105 extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req, 106 u_int8_t q_type ); 107 extern struct tws_request * tws_q_remove_request(struct tws_softc *sc, 108 struct tws_request *req, u_int8_t q_type ); 109 extern void tws_send_event(struct tws_softc *sc, u_int8_t event); 110 111 extern struct tws_sense * 112 tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa); 113 114 extern void tws_fetch_aen(void *arg); 115 extern void tws_disable_db_intr(struct tws_softc *sc); 116 extern void tws_enable_db_intr(struct tws_softc *sc); 117 extern void tws_passthru_complete(struct tws_request *req); 118 extern void tws_aen_synctime_with_host(struct tws_softc *sc); 119 extern void tws_circular_aenq_insert(struct tws_softc *sc, 120 struct tws_circular_q *cq, struct tws_event_packet *aen); 121 extern int tws_use_32bit_sgls; 122 extern boolean tws_ctlr_reset(struct tws_softc *sc); 123 extern struct tws_request * tws_q_remove_tail(struct 
tws_softc *sc, 124 u_int8_t q_type ); 125 extern void tws_turn_off_interrupts(struct tws_softc *sc); 126 extern void tws_turn_on_interrupts(struct tws_softc *sc); 127 extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc); 128 extern void tws_init_obfl_q(struct tws_softc *sc); 129 extern uint8_t tws_get_state(struct tws_softc *sc); 130 extern void tws_assert_soft_reset(struct tws_softc *sc); 131 extern boolean tws_ctlr_ready(struct tws_softc *sc); 132 extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa); 133 134 135 136 int 137 tws_cam_attach(struct tws_softc *sc) 138 { 139 struct cam_devq *devq; 140 int error; 141 142 TWS_TRACE_DEBUG(sc, "entry", 0, sc); 143 /* Create a device queue for sim */ 144 145 /* 146 * if the user sets cam depth to less than 1 147 * cam may get confused 148 */ 149 if ( tws_cam_depth < 1 ) 150 tws_cam_depth = 1; 151 if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS) ) 152 tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS; 153 154 TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth); 155 156 if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) { 157 tws_log(sc, CAM_SIMQ_ALLOC); 158 return(ENOMEM); 159 } 160 161 /* 162 * Create a SIM entry. Though we can support tws_cam_depth 163 * simultaneous requests, we claim to be able to handle only 164 * (tws_cam_depth), so that we always have reserved requests 165 * packet available to service ioctls and internal commands. 166 */ 167 sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc, 168 device_get_unit(sc->tws_dev), 169 &sc->sim_lock, 170 tws_cam_depth, 1, devq); 171 /* 1, 1, devq); */ 172 cam_simq_release(devq); 173 if (sc->sim == NULL) { 174 tws_log(sc, CAM_SIM_ALLOC); 175 } 176 /* Register the bus. 
*/ 177 lockmgr(&sc->sim_lock, LK_EXCLUSIVE); 178 if (xpt_bus_register(sc->sim, 0) != CAM_SUCCESS) { 179 cam_sim_free(sc->sim); 180 sc->sim = NULL; /* so cam_detach will not try to free it */ 181 lockmgr(&sc->sim_lock, LK_RELEASE); 182 tws_log(sc, TWS_XPT_BUS_REGISTER); 183 return(ENXIO); 184 } 185 if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim), 186 CAM_TARGET_WILDCARD, 187 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 188 xpt_bus_deregister(cam_sim_path(sc->sim)); 189 cam_sim_free(sc->sim); 190 tws_log(sc, TWS_XPT_CREATE_PATH); 191 lockmgr(&sc->sim_lock, LK_RELEASE); 192 return(ENXIO); 193 } 194 if ((error = tws_bus_scan(sc))) { 195 tws_log(sc, TWS_BUS_SCAN_REQ); 196 lockmgr(&sc->sim_lock, LK_RELEASE); 197 return(error); 198 } 199 lockmgr(&sc->sim_lock, LK_RELEASE); 200 201 return(0); 202 } 203 204 void 205 tws_cam_detach(struct tws_softc *sc) 206 { 207 TWS_TRACE_DEBUG(sc, "entry", 0, 0); 208 lockmgr(&sc->sim_lock, LK_EXCLUSIVE); 209 if (sc->path) 210 xpt_free_path(sc->path); 211 if (sc->sim) { 212 xpt_bus_deregister(cam_sim_path(sc->sim)); 213 cam_sim_free(sc->sim); 214 } 215 lockmgr(&sc->sim_lock, LK_RELEASE); 216 } 217 218 int 219 tws_bus_scan(struct tws_softc *sc) 220 { 221 struct cam_path *path; 222 union ccb *ccb; 223 224 TWS_TRACE_DEBUG(sc, "entry", sc, 0); 225 KASSERT(sc->sim, ("sim not allocated")); 226 KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0); 227 228 ccb = sc->scan_ccb; 229 230 bzero(ccb, sizeof(union ccb)); 231 if (xpt_create_path(&path, xpt_periph, cam_sim_path(sc->sim), 232 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 233 kfree(ccb, M_TEMP); 234 /* lockmgr(&sc->sim_lock, LK_RELEASE); */ 235 return(EIO); 236 } 237 xpt_setup_ccb(&ccb->ccb_h, path, 5); 238 ccb->ccb_h.func_code = XPT_SCAN_BUS; 239 ccb->ccb_h.cbfcnp = tws_bus_scan_cb; 240 ccb->crcn.flags = CAM_FLAG_NONE; 241 xpt_action(ccb); 242 243 return(0); 244 } 245 246 static void 247 tws_bus_scan_cb(struct cam_periph *periph, union ccb *ccb) 248 { 249 struct tws_softc *sc = 
periph->softc; 250 251 /* calling trace results in non-sleepable lock head panic 252 using printf to debug */ 253 254 if (ccb->ccb_h.status != CAM_REQ_CMP) { 255 kprintf("cam_scan failure\n"); 256 257 lockmgr(&sc->gen_lock, LK_EXCLUSIVE); 258 tws_send_event(sc, TWS_SCAN_FAILURE); 259 lockmgr(&sc->gen_lock, LK_RELEASE); 260 } 261 262 xpt_free_path(ccb->ccb_h.path); 263 } 264 265 static void 266 tws_action(struct cam_sim *sim, union ccb *ccb) 267 { 268 struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim); 269 270 switch( ccb->ccb_h.func_code ) { 271 case XPT_SCSI_IO: 272 { 273 if ( tws_execute_scsi(sc, ccb) ) 274 TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0); 275 break; 276 } 277 case XPT_ABORT: 278 { 279 TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0); 280 ccb->ccb_h.status = CAM_UA_ABORT; 281 xpt_done(ccb); 282 break; 283 } 284 case XPT_RESET_BUS: 285 { 286 TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb); 287 break; 288 } 289 case XPT_SET_TRAN_SETTINGS: 290 { 291 TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb); 292 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 293 xpt_done(ccb); 294 295 break; 296 } 297 case XPT_GET_TRAN_SETTINGS: 298 { 299 TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb); 300 301 ccb->cts.protocol = PROTO_SCSI; 302 ccb->cts.protocol_version = SCSI_REV_2; 303 ccb->cts.transport = XPORT_SPI; 304 ccb->cts.transport_version = 2; 305 306 ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC; 307 ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB; 308 ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; 309 ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; 310 ccb->ccb_h.status = CAM_REQ_CMP; 311 xpt_done(ccb); 312 313 break; 314 } 315 case XPT_CALC_GEOMETRY: 316 { 317 TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb, 318 ccb->ccg.block_size); 319 cam_calc_geometry(&ccb->ccg, 1/* extended */); 320 xpt_done(ccb); 321 322 break; 323 } 324 case XPT_PATH_INQ: 325 { 326 TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb); 327 
ccb->cpi.version_num = 1; 328 ccb->cpi.hba_inquiry = 0; 329 ccb->cpi.target_sprt = 0; 330 ccb->cpi.hba_misc = 0; 331 ccb->cpi.hba_eng_cnt = 0; 332 ccb->cpi.max_target = TWS_MAX_NUM_UNITS; 333 ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1; 334 ccb->cpi.unit_number = cam_sim_unit(sim); 335 ccb->cpi.bus_id = cam_sim_bus(sim); 336 ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID; 337 ccb->cpi.base_transfer_speed = 300000; 338 strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN); 339 strncpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN); 340 strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN); 341 ccb->cpi.transport = XPORT_SPI; 342 ccb->cpi.transport_version = 2; 343 ccb->cpi.protocol = PROTO_SCSI; 344 ccb->cpi.protocol_version = SCSI_REV_2; 345 ccb->ccb_h.status = CAM_REQ_CMP; 346 xpt_done(ccb); 347 348 break; 349 } 350 default: 351 TWS_TRACE_DEBUG(sc, "default", sim, ccb); 352 ccb->ccb_h.status = CAM_REQ_INVALID; 353 xpt_done(ccb); 354 break; 355 } 356 } 357 358 static void 359 tws_scsi_complete(struct tws_request *req) 360 { 361 struct tws_softc *sc = req->sc; 362 363 lockmgr(&sc->q_lock, LK_EXCLUSIVE); 364 tws_q_remove_request(sc, req, TWS_BUSY_Q); 365 lockmgr(&sc->q_lock, LK_RELEASE); 366 367 callout_stop(&req->ccb_ptr->ccb_h.timeout_ch); 368 tws_unmap_request(req->sc, req); 369 370 371 lockmgr(&sc->sim_lock, LK_EXCLUSIVE); 372 req->ccb_ptr->ccb_h.status = CAM_REQ_CMP; 373 xpt_done(req->ccb_ptr); 374 lockmgr(&sc->sim_lock, LK_RELEASE); 375 376 lockmgr(&sc->q_lock, LK_EXCLUSIVE); 377 tws_q_insert_tail(sc, req, TWS_FREE_Q); 378 lockmgr(&sc->q_lock, LK_RELEASE); 379 380 } 381 382 void 383 tws_getset_param_complete(struct tws_request *req) 384 { 385 struct tws_softc *sc = req->sc; 386 387 TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id); 388 389 callout_stop(&req->thandle); 390 tws_unmap_request(sc, req); 391 392 kfree(req->data, M_TWS); 393 394 lockmgr(&sc->gen_lock, LK_EXCLUSIVE); 395 req->state = TWS_REQ_STATE_FREE; 396 lockmgr(&sc->gen_lock, LK_RELEASE); 397 398 } 399 
400 void 401 tws_aen_complete(struct tws_request *req) 402 { 403 struct tws_softc *sc = req->sc; 404 struct tws_command_header *sense; 405 struct tws_event_packet event; 406 u_int16_t aen_code=0; 407 408 TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id); 409 410 callout_stop(&req->thandle); 411 tws_unmap_request(sc, req); 412 413 sense = (struct tws_command_header *)req->data; 414 415 TWS_TRACE_DEBUG(sc,"sense code, key",sense->sense_data[0], 416 sense->sense_data[2]); 417 TWS_TRACE_DEBUG(sc,"sense rid, seve",sense->header_desc.request_id, 418 sense->status_block.res__severity); 419 TWS_TRACE_DEBUG(sc,"sense srcnum, error",sense->status_block.srcnum, 420 sense->status_block.error); 421 TWS_TRACE_DEBUG(sc,"sense shdr, ssense",sense->header_desc.size_header, 422 sense->header_desc.size_sense); 423 424 aen_code = sense->status_block.error; 425 426 switch ( aen_code ) { 427 case TWS_AEN_SYNC_TIME_WITH_HOST : 428 tws_aen_synctime_with_host(sc); 429 break; 430 case TWS_AEN_QUEUE_EMPTY : 431 break; 432 default : 433 bzero(&event, sizeof(struct tws_event_packet)); 434 event.sequence_id = sc->seq_id; 435 event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME; 436 event.aen_code = sense->status_block.error; 437 event.severity = sense->status_block.res__severity & 0x7; 438 event.event_src = TWS_SRC_CTRL_EVENT; 439 strcpy(event.severity_str, tws_sev_str[event.severity]); 440 event.retrieved = TWS_AEN_NOT_RETRIEVED; 441 442 bcopy(sense->err_specific_desc, event.parameter_data, 443 TWS_ERROR_SPECIFIC_DESC_LEN); 444 event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0'; 445 event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1; 446 447 if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) { 448 event.parameter_len += ((u_int8_t)strlen(event.parameter_data + 449 event.parameter_len) + 1); 450 } 451 452 device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n", 453 event.severity_str, 454 event.event_src, 455 event.aen_code, 456 event.parameter_data + 457 
(strlen(event.parameter_data) + 1), 458 event.parameter_data); 459 460 lockmgr(&sc->gen_lock, LK_EXCLUSIVE); 461 tws_circular_aenq_insert(sc, &sc->aen_q, &event); 462 sc->seq_id++; 463 lockmgr(&sc->gen_lock, LK_RELEASE); 464 break; 465 466 } 467 468 kfree(req->data, M_TWS); 469 470 lockmgr(&sc->gen_lock, LK_EXCLUSIVE); 471 req->state = TWS_REQ_STATE_FREE; 472 lockmgr(&sc->gen_lock, LK_RELEASE); 473 474 if ( aen_code != TWS_AEN_QUEUE_EMPTY ) { 475 /* timeout(tws_fetch_aen, sc, 1);*/ 476 sc->stats.num_aens++; 477 tws_fetch_aen((void *)sc); 478 } 479 480 } 481 482 void 483 tws_cmd_complete(struct tws_request *req) 484 { 485 struct tws_softc *sc = req->sc; 486 487 callout_stop(&req->ccb_ptr->ccb_h.timeout_ch); 488 tws_unmap_request(sc, req); 489 490 } 491 492 static void 493 tws_err_complete(struct tws_softc *sc, u_int64_t mfa) 494 { 495 496 struct tws_command_header *hdr; 497 struct tws_sense *sen; 498 struct tws_request *req; 499 u_int16_t req_id; 500 u_int32_t reg, status; 501 502 if ( !mfa ) { 503 TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa); 504 return; 505 } else { 506 /* lookup the sense */ 507 sen = tws_find_sense_from_mfa(sc, mfa); 508 if ( sen == NULL ) { 509 TWS_TRACE_DEBUG(sc, "found null req", 0, mfa); 510 return; 511 } 512 hdr = sen->hdr; 513 TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr); 514 req_id = hdr->header_desc.request_id; 515 req = &sc->reqs[req_id]; 516 TWS_TRACE_DEBUG(sc, "req, id", req, req_id); 517 if ( req->error_code != TWS_REQ_SUBMIT_SUCCESS ) 518 TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code); 519 } 520 521 switch (req->type) { 522 case TWS_PASSTHRU_REQ : 523 tws_passthru_err_complete(req, hdr); 524 break; 525 case TWS_GETSET_PARAM_REQ : 526 tws_getset_param_complete(req); 527 break; 528 case TWS_SCSI_IO_REQ : 529 tws_scsi_err_complete(req, hdr); 530 break; 531 532 } 533 534 lockmgr(&sc->io_lock, LK_EXCLUSIVE); 535 hdr->header_desc.size_header = 128; 536 reg = (u_int32_t)( mfa>>32); 537 tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4); 538 
reg = (u_int32_t)(mfa); 539 tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4); 540 541 status = tws_read_reg(sc, TWS_I2O0_STATUS, 4); 542 if ( status & TWS_BIT13 ) { 543 TWS_TRACE_DEBUG(sc, "OBFL Overrun", status, TWS_I2O0_STATUS); 544 sc->obfl_q_overrun = true; 545 sen->posted = false; 546 } 547 lockmgr(&sc->io_lock, LK_RELEASE); 548 549 } 550 551 static void 552 tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr) 553 { 554 u_int8_t *sense_data; 555 struct tws_softc *sc = req->sc; 556 union ccb *ccb = req->ccb_ptr; 557 558 TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error, 559 req->cmd_pkt->cmd.pkt_a.status); 560 if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED || 561 hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) { 562 563 if ( ccb->ccb_h.target_lun ) { 564 TWS_TRACE_DEBUG(sc, "invalid lun error",0,0); 565 ccb->ccb_h.status |= CAM_LUN_INVALID; 566 } else { 567 TWS_TRACE_DEBUG(sc, "invalid target error",0,0); 568 ccb->ccb_h.status |= CAM_TID_INVALID; 569 } 570 571 } else { 572 TWS_TRACE_DEBUG(sc, "scsi status error",0,0); 573 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 574 if (((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) && 575 (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED))) { 576 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; 577 TWS_TRACE_DEBUG(sc, "page mode not supported",0,0); 578 } 579 } 580 581 /* if there were no error simply mark complete error */ 582 if (ccb->ccb_h.status == 0) 583 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 584 585 sense_data = (u_int8_t *)&ccb->csio.sense_data; 586 if (sense_data) { 587 memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH ); 588 ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH; 589 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 590 } 591 ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status; 592 593 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 594 lockmgr(&sc->sim_lock, LK_EXCLUSIVE); 595 xpt_done(ccb); 596 lockmgr(&sc->sim_lock, LK_RELEASE); 597 598 
callout_stop(&req->ccb_ptr->ccb_h.timeout_ch); 599 tws_unmap_request(req->sc, req); 600 lockmgr(&sc->q_lock, LK_EXCLUSIVE); 601 tws_q_remove_request(sc, req, TWS_BUSY_Q); 602 tws_q_insert_tail(sc, req, TWS_FREE_Q); 603 lockmgr(&sc->q_lock, LK_RELEASE); 604 605 } 606 607 static void 608 tws_passthru_err_complete(struct tws_request *req, 609 struct tws_command_header *hdr) 610 { 611 612 TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id); 613 req->error_code = hdr->status_block.error; 614 memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header)); 615 tws_passthru_complete(req); 616 } 617 618 static void 619 tws_drain_busy_queue(struct tws_softc *sc) 620 { 621 622 struct tws_request *req; 623 TWS_TRACE_DEBUG(sc, "entry", 0, 0); 624 625 lockmgr(&sc->q_lock, LK_EXCLUSIVE); 626 req = tws_q_remove_tail(sc, TWS_BUSY_Q); 627 lockmgr(&sc->q_lock, LK_RELEASE); 628 while ( req ) { 629 callout_stop(&req->ccb_ptr->ccb_h.timeout_ch); 630 tws_unmap_request(req->sc, req); 631 632 TWS_TRACE_DEBUG(sc, "drained", 0, req->request_id); 633 634 lockmgr(&sc->sim_lock, LK_EXCLUSIVE); 635 req->ccb_ptr->ccb_h.status = CAM_REQUEUE_REQ; 636 xpt_done(req->ccb_ptr); 637 lockmgr(&sc->sim_lock, LK_RELEASE); 638 639 lockmgr(&sc->q_lock, LK_EXCLUSIVE); 640 tws_q_insert_tail(sc, req, TWS_FREE_Q); 641 req = tws_q_remove_tail(sc, TWS_BUSY_Q); 642 lockmgr(&sc->q_lock, LK_RELEASE); 643 } 644 645 } 646 647 static void 648 tws_drain_reserved_reqs(struct tws_softc *sc) 649 { 650 651 struct tws_request *r; 652 653 r = &sc->reqs[1]; 654 if ( r->state != TWS_REQ_STATE_FREE ) { 655 TWS_TRACE_DEBUG(sc, "drained aen req", 0, 0); 656 callout_stop(&r->thandle); 657 tws_unmap_request(sc, r); 658 kfree(r->data, M_TWS); 659 lockmgr(&sc->gen_lock, LK_EXCLUSIVE); 660 r->state = TWS_REQ_STATE_FREE; 661 lockmgr(&sc->gen_lock, LK_RELEASE); 662 } 663 r = &sc->reqs[2]; 664 if ( r->state != TWS_REQ_STATE_FREE ) { 665 TWS_TRACE_DEBUG(sc, "drained passthru req", 0, 0); 666 r->error_code = TWS_REQ_REQUEUE; 667 
tws_passthru_complete(r); 668 } 669 r = &sc->reqs[3]; 670 if ( r->state != TWS_REQ_STATE_FREE ) { 671 TWS_TRACE_DEBUG(sc, "drained set param req", 0, 0); 672 tws_getset_param_complete(r); 673 } 674 675 } 676 677 static void 678 tws_drain_response_queue(struct tws_softc *sc) 679 { 680 tws_intr_resp(sc); 681 } 682 683 684 static int32_t 685 tws_execute_scsi(struct tws_softc *sc, union ccb *ccb) 686 { 687 struct tws_command_packet *cmd_pkt; 688 struct tws_request *req; 689 struct ccb_hdr *ccb_h = &(ccb->ccb_h); 690 struct ccb_scsiio *csio = &(ccb->csio); 691 int error; 692 u_int16_t lun; 693 694 KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0); 695 if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) { 696 TWS_TRACE_DEBUG(sc, "traget id too big", ccb_h->target_id, ccb_h->target_lun); 697 ccb_h->status |= CAM_TID_INVALID; 698 xpt_done(ccb); 699 return(0); 700 } 701 if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) { 702 TWS_TRACE_DEBUG(sc, "target lun 2 big", ccb_h->target_id, ccb_h->target_lun); 703 ccb_h->status |= CAM_LUN_INVALID; 704 xpt_done(ccb); 705 return(0); 706 } 707 708 if(ccb_h->flags & CAM_CDB_PHYS) { 709 TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun); 710 ccb_h->status = CAM_REQ_CMP_ERR; 711 xpt_done(ccb); 712 return(0); 713 } 714 715 /* 716 * We are going to work on this request. Mark it as enqueued (though 717 * we don't actually queue it...) 
718 */ 719 ccb_h->status |= CAM_SIM_QUEUED; 720 721 req = tws_get_request(sc, TWS_SCSI_IO_REQ); 722 if ( !req ) { 723 TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun); 724 /* tws_freeze_simq(sc); */ 725 ccb_h->status |= CAM_REQUEUE_REQ; 726 xpt_done(ccb); 727 return(0); 728 } 729 730 if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 731 if(ccb_h->flags & CAM_DIR_IN) 732 req->flags = TWS_DIR_IN; 733 else 734 req->flags = TWS_DIR_OUT; 735 } else { 736 req->flags = TWS_DIR_NONE; /* no data */ 737 } 738 739 req->type = TWS_SCSI_IO_REQ; 740 req->cb = tws_scsi_complete; 741 742 cmd_pkt = req->cmd_pkt; 743 /* cmd_pkt->hdr.header_desc.size_header = 128; */ 744 cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI; 745 cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id; 746 cmd_pkt->cmd.pkt_a.status = 0; 747 cmd_pkt->cmd.pkt_a.sgl_offset = 16; 748 749 /* lower nibble */ 750 lun = ccb_h->target_lun & 0XF; 751 lun = lun << 12; 752 cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id; 753 /* upper nibble */ 754 lun = ccb_h->target_lun & 0XF0; 755 lun = lun << 8; 756 cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun; 757 758 #ifdef TWS_DEBUG 759 if ( csio->cdb_len > 16 ) 760 TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len); 761 #endif 762 763 if(ccb_h->flags & CAM_CDB_POINTER) 764 bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len); 765 else 766 bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len); 767 768 if (!(ccb_h->flags & CAM_DATA_PHYS)) { 769 /* Virtual data addresses. Need to convert them... 
*/ 770 if (!(ccb_h->flags & CAM_SCATTER_VALID)) { 771 if (csio->dxfer_len > TWS_MAX_IO_SIZE) { 772 TWS_TRACE(sc, "I/O is big", csio->dxfer_len, 0); 773 tws_release_request(req); 774 ccb_h->status = CAM_REQ_TOO_BIG; 775 xpt_done(ccb); 776 return(0); 777 } 778 779 req->length = csio->dxfer_len; 780 if (req->length) { 781 req->data = csio->data_ptr; 782 /* there is 1 sgl_entrie */ 783 /* cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= 1; */ 784 } 785 } else { 786 TWS_TRACE_DEBUG(sc, "got sglist", ccb_h->target_id, ccb_h->target_lun); 787 tws_release_request(req); 788 ccb_h->status = CAM_REQ_CMP_ERR; 789 xpt_done(ccb); 790 return(0); 791 } 792 } else { 793 /* Data addresses are physical. */ 794 TWS_TRACE_DEBUG(sc, "Phy data addr", ccb_h->target_id, ccb_h->target_lun); 795 tws_release_request(req); 796 ccb_h->status = CAM_REQ_CMP_ERR; 797 ccb_h->status |= CAM_RELEASE_SIMQ; 798 ccb_h->status &= ~CAM_SIM_QUEUED; 799 xpt_done(ccb); 800 return(0); 801 } 802 /* save ccb ptr */ 803 req->ccb_ptr = ccb; 804 /* 805 * tws_map_load_data_callback will fill in the SGL, 806 * and submit the I/O. 
807 */ 808 sc->stats.scsi_ios++; 809 callout_reset(&ccb_h->timeout_ch, (ccb_h->timeout * hz)/1000, tws_timeout, 810 req); 811 error = tws_map_request(sc, req); 812 return(error); 813 } 814 815 816 int 817 tws_send_scsi_cmd(struct tws_softc *sc, int cmd) 818 { 819 820 struct tws_request *req; 821 struct tws_command_packet *cmd_pkt; 822 int error; 823 824 TWS_TRACE_DEBUG(sc, "entry",sc, cmd); 825 req = tws_get_request(sc, TWS_AEN_FETCH_REQ); 826 827 if ( req == NULL ) 828 return(ENOMEM); 829 830 req->type = TWS_AEN_FETCH_REQ; 831 req->cb = tws_aen_complete; 832 833 cmd_pkt = req->cmd_pkt; 834 cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI; 835 cmd_pkt->cmd.pkt_a.status = 0; 836 cmd_pkt->cmd.pkt_a.unit = 0; 837 cmd_pkt->cmd.pkt_a.sgl_offset = 16; 838 cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id; 839 840 cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd; 841 cmd_pkt->cmd.pkt_a.cdb[4] = 128; 842 843 req->length = TWS_SECTOR_SIZE; 844 req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT); 845 if ( req->data == NULL ) 846 return(ENOMEM); 847 bzero(req->data, TWS_SECTOR_SIZE); 848 req->flags = TWS_DIR_IN; 849 850 callout_reset(&req->thandle, (TWS_IO_TIMEOUT * hz), tws_timeout, req); 851 error = tws_map_request(sc, req); 852 return(error); 853 854 } 855 856 int 857 tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id, 858 u_int32_t param_size, void *data) 859 { 860 struct tws_request *req; 861 struct tws_command_packet *cmd_pkt; 862 union tws_command_giga *cmd; 863 struct tws_getset_param *param; 864 int error; 865 866 req = tws_get_request(sc, TWS_GETSET_PARAM_REQ); 867 if ( req == NULL ) { 868 TWS_TRACE_DEBUG(sc, "null req", 0, 0); 869 return(ENOMEM); 870 } 871 872 req->length = TWS_SECTOR_SIZE; 873 req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT); 874 if ( req->data == NULL ) 875 return(ENOMEM); 876 bzero(req->data, TWS_SECTOR_SIZE); 877 param = (struct tws_getset_param *)req->data; 878 879 req->cb = tws_getset_param_complete; 880 
req->flags = TWS_DIR_OUT; 881 cmd_pkt = req->cmd_pkt; 882 883 cmd = &cmd_pkt->cmd.pkt_g; 884 cmd->param.sgl_off__opcode = 885 BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM); 886 cmd->param.request_id = (u_int8_t)req->request_id; 887 cmd->param.host_id__unit = 0; 888 cmd->param.param_count = 1; 889 cmd->param.size = 2; /* map routine will add sgls */ 890 891 /* Specify which parameter we want to set. */ 892 param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR); 893 param->parameter_id = (u_int8_t)(param_id); 894 param->parameter_size_bytes = (u_int16_t)param_size; 895 memcpy(param->data, data, param_size); 896 897 callout_reset(&req->thandle, (TWS_IO_TIMEOUT * hz), tws_timeout, req); 898 error = tws_map_request(sc, req); 899 return(error); 900 901 } 902 903 int 904 tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id, 905 u_int32_t param_size, void *data) 906 { 907 struct tws_request *req; 908 struct tws_command_packet *cmd_pkt; 909 union tws_command_giga *cmd; 910 struct tws_getset_param *param; 911 u_int16_t reqid; 912 u_int64_t mfa; 913 int error = SUCCESS; 914 915 916 req = tws_get_request(sc, TWS_GETSET_PARAM_REQ); 917 if ( req == NULL ) { 918 TWS_TRACE_DEBUG(sc, "null req", 0, 0); 919 return(FAILURE); 920 } 921 922 req->length = TWS_SECTOR_SIZE; 923 req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT); 924 if ( req->data == NULL ) 925 return(FAILURE); 926 bzero(req->data, TWS_SECTOR_SIZE); 927 param = (struct tws_getset_param *)req->data; 928 929 req->cb = NULL; 930 req->flags = TWS_DIR_IN; 931 cmd_pkt = req->cmd_pkt; 932 933 cmd = &cmd_pkt->cmd.pkt_g; 934 cmd->param.sgl_off__opcode = 935 BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM); 936 cmd->param.request_id = (u_int8_t)req->request_id; 937 cmd->param.host_id__unit = 0; 938 cmd->param.param_count = 1; 939 cmd->param.size = 2; /* map routine will add sgls */ 940 941 /* Specify which parameter we want to set. 
 */
    /*
     * Tail of a get/set-parameter request routine whose head is above this
     * chunk: build the 9K parameter descriptor, run the request synchronously
     * (map -> poll for completion -> unmap), copy the result out on success,
     * then free the bounce buffer and return the request to the free pool.
     */
    param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
    param->parameter_id = (u_int8_t)(param_id);
    param->parameter_size_bytes = (u_int16_t)param_size;

    tws_map_request(sc, req);
    reqid = tws_poll4_response(sc, &mfa);
    tws_unmap_request(sc, req);

    if ( reqid == TWS_GETSET_PARAM_REQ ) {
        /* Completion matched our request id: copy parameter data back. */
        memcpy(data, param->data, param_size);
    } else {
        error = FAILURE;

    }

    kfree(req->data, M_TWS);
    /* Request-state changes are serialized under gen_lock. */
    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    req->state = TWS_REQ_STATE_FREE;
    lockmgr(&sc->gen_lock, LK_RELEASE);
    return(error);

}

/*
 * Tear down the DMA mapping of a completed request: synchronize the data
 * buffer for CPU access according to the transfer direction(s), then unload
 * the map.  The unload is done under io_lock, mirroring the load path in
 * tws_map_request().  No-op if the request carried no data.
 */
void
tws_unmap_request(struct tws_softc *sc, struct tws_request *req)
{

    if (req->data != NULL) {
        if ( req->flags & TWS_DIR_IN )
            bus_dmamap_sync(sc->data_tag, req->dma_map,
                            BUS_DMASYNC_POSTREAD);
        if ( req->flags & TWS_DIR_OUT )
            bus_dmamap_sync(sc->data_tag, req->dma_map,
                            BUS_DMASYNC_POSTWRITE);
        lockmgr(&sc->io_lock, LK_EXCLUSIVE);
        bus_dmamap_unload(sc->data_tag, req->dma_map);
        lockmgr(&sc->io_lock, LK_RELEASE);
    }
}

/*
 * Map a request for DMA and hand it to the controller.
 *
 * If the request has a data buffer, load it into bus space; the busdma
 * callback (tws_dmamap_data_load_cbfn) builds the SG list and submits the
 * command.  bus_dmamap_load() may defer the callback, in which case
 * EINPROGRESS is translated to TWS_REQ_ERR_INPROGRESS for the caller.
 * Requests without data are submitted directly.
 *
 * Returns 0 on success, TWS_REQ_ERR_INPROGRESS if the mapping was deferred,
 * or the submit/load error; the result is also recorded in req->error_code.
 */
int32_t
tws_map_request(struct tws_softc *sc, struct tws_request *req)
{
    int32_t error = 0;


    /* If the command involves data, map that too. */
    if (req->data != NULL) {
        /*
         * Map the data buffer into bus space and build the SG list.
         */
        lockmgr(&sc->io_lock, LK_EXCLUSIVE);
        error = bus_dmamap_load(sc->data_tag, req->dma_map,
                                req->data, req->length,
                                tws_dmamap_data_load_cbfn, req,
                                BUS_DMA_WAITOK);
        lockmgr(&sc->io_lock, LK_RELEASE);

        if (error == EINPROGRESS) {
            /* Callback will run later; report deferred status to caller. */
            TWS_TRACE(sc, "in progress", 0, error);
            /* tws_freeze_simq(sc); */
            error = TWS_REQ_ERR_INPROGRESS;
        }
    } else { /* no data involved */
        error = tws_submit_command(sc, req);
    }
    req->error_code = error;
    return(error);
}


/*
 * busdma load callback: runs once the data buffer's physical segments are
 * known.  Pre-syncs the map for the transfer direction(s), copies the
 * segment list into the command packet in the controller's SG format, and
 * submits the command.  The submit status is stored in req->error_code
 * (callbacks cannot return a value to bus_dmamap_load()).
 */
static void
tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
                          int nseg, int error)
{

    struct tws_request *req = (struct tws_request *)arg;
    struct tws_softc *sc = req->sc;
    u_int16_t sgls = nseg;
    void *sgl_ptr;
    struct tws_cmd_generic *gcmd;

    /* NOTE(review): EFBIG is only traced here, not propagated -- confirm
     * upstream limits nseg to the tag's segment count. */
    if ( error == EFBIG )
        TWS_TRACE(sc, "not enough data segs", 0, nseg);


    if ( req->flags & TWS_DIR_IN )
        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
                        BUS_DMASYNC_PREREAD);
    if ( req->flags & TWS_DIR_OUT )
        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
                        BUS_DMASYNC_PREWRITE);
    if ( segs ) {
        if ( (req->type == TWS_PASSTHRU_REQ &&
              GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) !=
              TWS_FW_CMD_EXECUTE_SCSI) ||
             req->type == TWS_GETSET_PARAM_REQ) {
            /*
             * Generic (non-SCSI) packet: the SG list is appended after the
             * command body.  gcmd->size is in 32-bit words, so advance by
             * 4 words per 64-bit SG entry, 2 words per 32-bit entry.
             */
            gcmd = &req->cmd_pkt->cmd.pkt_g.generic;
            sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size;
            gcmd->size += sgls *
                ((req->sc->is64bit && !tws_use_32bit_sgls) ? 4 : 2);
            tws_fill_sg_list(req->sc, (void *)segs, sgl_ptr, sgls);

        } else {
            /* SCSI packet: SG list lives at a fixed offset; record the
             * entry count in the low bits of lun_h4__sgl_entries. */
            tws_fill_sg_list(req->sc, (void *)segs,
                             (void *)req->cmd_pkt->cmd.pkt_a.sg_list, sgls);
            req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls ;
        }
    }


    req->error_code = tws_submit_command(req->sc, req);

}


/*
 * Convert a bus_dma_segment_t array into the controller's SG descriptor
 * format.  Three layouts are handled:
 *   - 64-bit controller, 64-bit SGLs: tws_sg_desc64 entries;
 *   - 64-bit controller forced to 32-bit SGLs (tws_use_32bit_sgls):
 *     tws_sg_desc32 entries built from 64-bit source descriptors;
 *   - 32-bit controller: tws_sg_desc32 to tws_sg_desc32 copy.
 * In the 64-bit source cases the source pointer is advanced by
 * sizeof(bus_dma_segment_t), since segs is really a bus_dma_segment_t
 * array viewed through the descriptor layout.
 *
 * NOTE(review): on overflow of TWS_MAX_*_SG_ELEMENTS only a trace is
 * emitted and the copy still proceeds -- confirm callers bound
 * num_sgl_entries via the DMA tag's nsegments.
 */
static void
tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest,
                 u_int16_t num_sgl_entries)
{
    int i;

    if ( sc->is64bit ) {
        struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src;

        if ( !tws_use_32bit_sgls ) {
            struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest;
            if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS )
                TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0);
            for (i = 0; i < num_sgl_entries; i++) {
                sgl_d[i].address = sgl_s->address;
                sgl_d[i].length = sgl_s->length;
                sgl_d[i].flag = 0;
                sgl_d[i].reserved = 0;
                /* Source entries are bus_dma_segment_t-sized, not
                 * descriptor-sized: step by the real element size. */
                sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
                                                  sizeof(bus_dma_segment_t));
            }
        } else {
            struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
            if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
                TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
            for (i = 0; i < num_sgl_entries; i++) {
                sgl_d[i].address = sgl_s->address;
                sgl_d[i].length = sgl_s->length;
                sgl_d[i].flag = 0;
                sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
                                                  sizeof(bus_dma_segment_t));
            }
        }
    } else {
        struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src;
        struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;

        if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
            TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);


        for (i = 0; i < num_sgl_entries; i++) {
            sgl_d[i].address = sgl_s[i].address;
            sgl_d[i].length = sgl_s[i].length;
            sgl_d[i].flag = 0;
        }
    }

}


/*
 * Main interrupt handler.  Reads the host interrupt status register and
 * dispatches: TWS_BIT2 indicates a doorbell (controller attention), where
 * TWS_BIT21 in the inbound doorbell means a controller error (handled and
 * then we bail out) and TWS_BIT18 means pending AENs; TWS_BIT3 indicates
 * completed responses to drain.
 */
void
tws_intr(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;
    u_int32_t histat=0, db=0;

    KASSERT(sc, ("null softc"));

    sc->stats.num_intrs++;
    histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4);
    if ( histat & TWS_BIT2 ) {
        TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT);
        db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
        if ( db & TWS_BIT21 ) {
            tws_intr_attn_error(sc);
            return;
        }
        if ( db & TWS_BIT18 ) {
            tws_intr_attn_aen(sc);
        }
    }

    if ( histat & TWS_BIT3 ) {
        tws_intr_resp(sc);
    }
}

/*
 * Handle an AEN attention: fetch queued AENs from the controller, then
 * clear the AEN doorbell bit (TWS_BIT18) and re-read the doorbell register.
 */
static void
tws_intr_attn_aen(struct tws_softc *sc)
{
    u_int32_t db=0;

    /* mask off doorbell interrupts until all the AENs are fetched */
    /* tws_disable_db_intr(sc); */
    tws_fetch_aen((void *)sc);
    tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4);
    db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);

}

/*
 * Handle a controller (micro-controller) error attention: acknowledge all
 * doorbell bits, log the fault, and reset the controller.
 */
static void
tws_intr_attn_error(struct tws_softc *sc)
{
    u_int32_t db=0;

    TWS_TRACE(sc, "attn error", 0, 0);
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
    device_printf(sc->tws_dev, "Micro controller error.\n");
    tws_reset(sc);
}

/*
 * Drain the response queue: for each completed response, either route an
 * invalid-id completion through the error path (tws_err_complete) or invoke
 * the per-request completion callback.
 */
static void
tws_intr_resp(struct tws_softc *sc)
{
    u_int16_t req_id;
    u_int64_t mfa;

    while ( tws_get_response(sc, &req_id, &mfa) ) {
        sc->stats.reqs_out++;
        if ( req_id == TWS_INVALID_REQID ) {
            TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id);
            sc->stats.reqs_errored++;
            tws_err_complete(sc, mfa);
            continue;
        }

        sc->reqs[req_id].cb(&sc->reqs[req_id]);
    }

}


/*
 * CAM SIM poll entry point: service completions by calling the interrupt
 * handler directly (used when interrupts are unavailable, e.g. dumping).
 */
static void
tws_poll(struct cam_sim *sim)
{
    struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    tws_intr((void *) sc);
}

/*
 * Per-request timeout handler: a request that never completed forces a full
 * controller reset, unless a reset is already in progress.
 */
void
tws_timeout(void *arg)
{
    struct tws_request *req = (struct tws_request *)arg;
    struct tws_softc *sc = req->sc;


    if ( tws_get_state(sc) != TWS_RESET ) {
        device_printf(sc->tws_dev, "Request timed out.\n");
        tws_reset((void *)sc);
    }
}

/*
 * Begin a controller reset.  Idempotent: returns immediately if a reset is
 * already in progress.  Moves the softc state machine to TWS_RESET_START,
 * masks interrupts, freezes the CAM SIM queue, asserts a soft reset on the
 * hardware, and schedules tws_reset_cb() to poll for reset completion.
 */
void
tws_reset(void *arg)
{

    struct tws_softc *sc = (struct tws_softc *)arg;

    if ( tws_get_state(sc) == TWS_RESET ) {
        return;
    }
    device_printf(sc->tws_dev, "Resetting controller\n");
    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    tws_send_event(sc, TWS_RESET_START);
    lockmgr(&sc->gen_lock, LK_RELEASE);

    tws_turn_off_interrupts(sc);
    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    tws_freeze_simq(sc);
    lockmgr(&sc->sim_lock, LK_RELEASE);

    tws_assert_soft_reset(sc);
    callout_reset(&sc->reset_cb_handle, hz/10, tws_reset_cb, sc);
}

/*
 * Reset poll callout: re-arms itself every hz/10 ticks until TWS_BIT13 of
 * scratchpad register SCRPD3 is set (presumably the firmware's reset-done
 * flag -- TODO confirm against the hardware spec).  Once set, drains all
 * outstanding response/busy/reserved queues and schedules tws_reinit().
 */
static void
tws_reset_cb(void *arg)
{

    struct tws_softc *sc = (struct tws_softc *)arg;
    u_int32_t reg;

    if ( tws_get_state(sc) != TWS_RESET ) {
        return;
    }
    reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4);
    if (!( reg & TWS_BIT13 )) {
        callout_reset(&sc->reset_cb_handle, hz/10, tws_reset_cb, sc);
        return;
    }
    tws_drain_response_queue(sc);
    tws_drain_busy_queue(sc);
    tws_drain_reserved_reqs(sc);
    callout_reset(&sc->reinit_handle, 5*hz, tws_reinit, sc);
}

/*
 * Post-reset re-initialization callout.  Re-arms itself every 5 seconds
 * until the controller reports ready; after TWS_RESET_TIMEOUT worth of
 * retries it re-asserts soft reset up to two more times (the static
 * timeout_val/try counters persist across invocations).  When ready:
 * re-establishes the init connection and outbound free-list queue, releases
 * the SIM queue, re-enables interrupts, signals TWS_RESET_COMPLETE, and
 * wakes any thread sleeping on sc->chan.
 */
static void
tws_reinit(void *arg)
{

    struct tws_softc *sc = (struct tws_softc *)arg;
    static int timeout_val=0, try=2 ;

    if ( !tws_ctlr_ready(sc) ) {
        timeout_val += 5;
        if ( timeout_val >= TWS_RESET_TIMEOUT ) {
            timeout_val = 0;
            if ( try )
                tws_assert_soft_reset(sc);
            try--;
        }
        callout_reset(&sc->reinit_handle, 5*hz, tws_reinit, sc);
        return;
    }

    timeout_val=0;
    try = 2;
    sc->obfl_q_overrun = false;
    if ( tws_init_connect(sc, tws_queue_depth) )
    {
        TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
    }
    tws_init_obfl_q(sc);

    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    tws_release_simq(sc);
    lockmgr(&sc->sim_lock, LK_RELEASE);
    tws_turn_on_interrupts(sc);

    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    tws_send_event(sc, TWS_RESET_COMPLETE);
    lockmgr(&sc->gen_lock, LK_RELEASE);
    if ( sc->chan ) {
        sc->chan = 0;
        wakeup((void *)&sc->chan);
    }

}


/*
 * Freeze the CAM SIM queue so no new CCBs are dispatched during reset.
 * Caller must hold sim_lock (asserted).
 */
static void
tws_freeze_simq(struct tws_softc *sc)
{

    TWS_TRACE_DEBUG(sc, "freezeing", 0, 0);
    KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
    xpt_freeze_simq(sc->sim, 1);

}

/*
 * Release a previously frozen SIM queue once the controller is back up.
 * Caller must hold sim_lock (asserted).
 */
static void
tws_release_simq(struct tws_softc *sc)
{

    TWS_TRACE_DEBUG(sc, "unfreezeing", 0, 0);
    KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
    xpt_release_simq(sc->sim, 1);

}


/* Loader tunable: overrides the default CAM queue depth for this driver. */
TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth);