1 /* 2 * Generic driver for the Advanced Systems Inc. SCSI controllers 3 * Product specific probe and attach routines can be found in: 4 * 5 * adv_pci.c ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U, 6 * ABP940UA, ABP950, ABP960, ABP960U, ABP960UA, 7 * ABP970, ABP970U 8 * 9 * Copyright (c) 1996-2000 Justin Gibbs. 10 * All rights reserved. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions, and the following disclaimer, 17 * without modification, immediately at the beginning of the file. 18 * 2. The name of the author may not be used to endorse or promote products 19 * derived from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * $FreeBSD: src/sys/dev/advansys/advansys.c,v 1.14.2.4 2002/01/06 21:21:42 dwmalone Exp $ 34 */ 35 /* 36 * Ported from: 37 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters 38 * 39 * Copyright (c) 1995-1997 Advanced System Products, Inc. 40 * All Rights Reserved. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that redistributions of source 44 * code retain the above copyright notice and this comment without 45 * modification. 46 */ 47 48 #include <sys/param.h> 49 #include <sys/systm.h> 50 #include <sys/malloc.h> 51 #include <sys/kernel.h> 52 #include <sys/thread2.h> 53 #include <sys/bus.h> 54 #include <sys/rman.h> 55 56 #include <bus/cam/cam.h> 57 #include <bus/cam/cam_ccb.h> 58 #include <bus/cam/cam_sim.h> 59 #include <bus/cam/cam_xpt_sim.h> 60 #include <bus/cam/cam_xpt_periph.h> 61 #include <bus/cam/cam_debug.h> 62 63 #include <bus/cam/scsi/scsi_all.h> 64 #include <bus/cam/scsi/scsi_message.h> 65 66 #include <vm/vm.h> 67 #include <vm/vm_param.h> 68 #include <vm/pmap.h> 69 70 #include "advansys.h" 71 72 static void adv_action(struct cam_sim *sim, union ccb *ccb); 73 static void adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs, 74 int nsegments, int error); 75 static void adv_poll(struct cam_sim *sim); 76 static void adv_run_doneq(struct adv_softc *adv); 77 static struct adv_ccb_info * 78 adv_alloc_ccb_info(struct adv_softc *adv); 79 static void adv_destroy_ccb_info(struct adv_softc *adv, 80 struct adv_ccb_info *cinfo); 81 static __inline struct adv_ccb_info * 82 adv_get_ccb_info(struct adv_softc *adv); 83 static __inline void adv_free_ccb_info(struct adv_softc *adv, 84 struct adv_ccb_info *cinfo); 85 static __inline void adv_set_state(struct adv_softc *adv, adv_state state); 86 static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb); 87 static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb); 88 89 static __inline struct adv_ccb_info * 90 adv_get_ccb_info(struct adv_softc *adv) 91 { 92 struct adv_ccb_info *cinfo; 93 94 crit_enter(); 95 if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) { 96 SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links); 97 } else { 98 cinfo = adv_alloc_ccb_info(adv); 99 } 100 crit_exit(); 101 102 return (cinfo); 103 } 104 105 static __inline void 106 adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo) 107 { 108 crit_enter(); 109 cinfo->state = ACCB_FREE; 110 SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links); 111 crit_exit(); 112 } 113 114 static __inline void 115 adv_set_state(struct adv_softc *adv, adv_state state) 116 { 117 if (adv->state == 0) 118 xpt_freeze_simq(adv->sim, /*count*/1); 119 adv->state |= state; 120 } 121 122 static __inline void 123 adv_clear_state(struct adv_softc *adv, union ccb* ccb) 124 { 125 if (adv->state != 0) 126 adv_clear_state_really(adv, ccb); 127 } 128 129 static void 130 adv_clear_state_really(struct adv_softc *adv, union ccb* ccb) 131 { 132 if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0) 133 adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK); 134 if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) { 135 int openings; 136 137 openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q; 138 if (openings >= adv->openings_needed) { 139 adv->state &= ~ADV_RESOURCE_SHORTAGE; 140 adv->openings_needed = 0; 141 } 142 } 143 144 if ((adv->state & ADV_IN_TIMEOUT) != 0) { 145 struct adv_ccb_info *cinfo; 146 147 cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; 148 if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) { 149 struct ccb_hdr *ccb_h; 150 151 /* 152 * We now traverse our list of pending CCBs 153 * and reinstate their timeouts. 154 */ 155 ccb_h = LIST_FIRST(&adv->pending_ccbs); 156 while (ccb_h != NULL) { 157 callout_reset(&ccb_h->timeout_ch, 158 (ccb_h->timeout * hz) / 1000, 159 adv_timeout, ccb_h); 160 ccb_h = LIST_NEXT(ccb_h, sim_links.le); 161 } 162 adv->state &= ~ADV_IN_TIMEOUT; 163 kprintf("%s: No longer in timeout\n", adv_name(adv)); 164 } 165 } 166 if (adv->state == 0) 167 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 168 } 169 170 void 171 adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) 172 { 173 bus_addr_t* physaddr; 174 175 physaddr = (bus_addr_t*)arg; 176 *physaddr = segs->ds_addr; 177 } 178 179 char * 180 adv_name(struct adv_softc *adv) 181 { 182 static char name[10]; 183 184 ksnprintf(name, sizeof(name), "adv%d", adv->unit); 185 return (name); 186 } 187 188 static void 189 adv_action(struct cam_sim *sim, union ccb *ccb) 190 { 191 struct adv_softc *adv; 192 193 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n")); 194 195 adv = (struct adv_softc *)cam_sim_softc(sim); 196 197 switch (ccb->ccb_h.func_code) { 198 /* Common cases first */ 199 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 200 { 201 struct ccb_hdr *ccb_h; 202 struct ccb_scsiio *csio; 203 struct adv_ccb_info *cinfo; 204 205 ccb_h = &ccb->ccb_h; 206 csio = &ccb->csio; 207 cinfo = adv_get_ccb_info(adv); 208 if (cinfo == NULL) 209 panic("XXX Handle CCB info error!!!"); 210 211 ccb_h->ccb_cinfo_ptr = cinfo; 212 cinfo->ccb = ccb; 213 214 /* Only use S/G if there is a transfer */ 215 if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 216 if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) { 217 /* 218 * We've been given a pointer 219 * to a single buffer 220 */ 221 if ((ccb_h->flags & CAM_DATA_PHYS) == 0) { 222 int error; 223 224 crit_enter(); 225 error = 226 bus_dmamap_load(adv->buffer_dmat, 227 cinfo->dmamap, 228 csio->data_ptr, 229 csio->dxfer_len, 230 adv_execute_ccb, 231 csio, /*flags*/0); 232 if (error == EINPROGRESS) { 233 /* 234 * So as to maintain ordering, 235 * freeze the controller queue 236 * until our mapping is 237 * returned. 238 */ 239 adv_set_state(adv, 240 ADV_BUSDMA_BLOCK); 241 } 242 crit_exit(); 243 } else { 244 struct bus_dma_segment seg; 245 246 /* Pointer to physical buffer */ 247 seg.ds_addr = 248 (bus_addr_t)csio->data_ptr; 249 seg.ds_len = csio->dxfer_len; 250 adv_execute_ccb(csio, &seg, 1, 0); 251 } 252 } else { 253 struct bus_dma_segment *segs; 254 if ((ccb_h->flags & CAM_DATA_PHYS) != 0) 255 panic("adv_setup_data - Physical " 256 "segment pointers unsupported"); 257 258 if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) 259 panic("adv_setup_data - Virtual " 260 "segment addresses unsupported"); 261 262 /* Just use the segments provided */ 263 segs = (struct bus_dma_segment *)csio->data_ptr; 264 adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0); 265 } 266 } else { 267 adv_execute_ccb(ccb, NULL, 0, 0); 268 } 269 break; 270 } 271 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 272 case XPT_TARGET_IO: /* Execute target I/O request */ 273 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ 274 case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ 275 case XPT_EN_LUN: /* Enable LUN as a target */ 276 case XPT_ABORT: /* Abort the specified CCB */ 277 /* XXX Implement */ 278 ccb->ccb_h.status = CAM_REQ_INVALID; 279 xpt_done(ccb); 280 break; 281 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 282 #define IS_USER_SETTINGS(c) (c->type == CTS_TYPE_USER_SETTINGS) 283 case XPT_SET_TRAN_SETTINGS: 284 { 285 struct ccb_trans_settings_scsi *scsi; 286 struct ccb_trans_settings_spi *spi; 287 struct ccb_trans_settings *cts; 288 target_bit_vector targ_mask; 289 struct adv_transinfo *tconf; 290 u_int update_type; 291 292 cts = &ccb->cts; 293 targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id); 294 update_type = 0; 295 296 /* 297 * The user must specify which type of settings he wishes 298 * to change. 299 */ 300 if (IS_CURRENT_SETTINGS(cts) && !IS_USER_SETTINGS(cts)) { 301 tconf = &adv->tinfo[cts->ccb_h.target_id].current; 302 update_type |= ADV_TRANS_GOAL; 303 } else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) { 304 tconf = &adv->tinfo[cts->ccb_h.target_id].user; 305 update_type |= ADV_TRANS_USER; 306 } else { 307 ccb->ccb_h.status = CAM_REQ_INVALID; 308 break; 309 } 310 311 crit_enter(); 312 scsi = &cts->proto_specific.scsi; 313 spi = &cts->xport_specific.spi; 314 if ((update_type & ADV_TRANS_GOAL) != 0) { 315 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 316 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 317 adv->disc_enable |= targ_mask; 318 else 319 adv->disc_enable &= ~targ_mask; 320 adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, 321 adv->disc_enable); 322 } 323 324 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 325 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 326 adv->cmd_qng_enabled |= targ_mask; 327 else 328 adv->cmd_qng_enabled &= ~targ_mask; 329 } 330 } 331 332 if ((update_type & ADV_TRANS_USER) != 0) { 333 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 334 if ((spi->flags & CTS_SPI_VALID_DISC) != 0) 335 adv->user_disc_enable |= targ_mask; 336 else 337 adv->user_disc_enable &= ~targ_mask; 338 } 339 340 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 341 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 342 adv->user_cmd_qng_enabled |= targ_mask; 343 else 344 adv->user_cmd_qng_enabled &= ~targ_mask; 345 } 346 } 347 348 /* 349 * If the user specifies either the sync rate, or offset, 350 * but not both, the unspecified parameter defaults to its 351 * current value in transfer negotiations. 352 */ 353 if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) 354 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { 355 /* 356 * If the user provided a sync rate but no offset, 357 * use the current offset. 358 */ 359 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) 360 spi->sync_offset = tconf->offset; 361 362 /* 363 * If the user provided an offset but no sync rate, 364 * use the current sync rate. 365 */ 366 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) 367 spi->sync_period = tconf->period; 368 369 adv_period_offset_to_sdtr(adv, &spi->sync_period, 370 &spi->sync_offset, 371 cts->ccb_h.target_id); 372 373 adv_set_syncrate(adv, /*struct cam_path */NULL, 374 cts->ccb_h.target_id, spi->sync_period, 375 spi->sync_offset, update_type); 376 } 377 378 crit_exit(); 379 ccb->ccb_h.status = CAM_REQ_CMP; 380 xpt_done(ccb); 381 break; 382 } 383 case XPT_GET_TRAN_SETTINGS: 384 /* Get default/user set transfer settings for the target */ 385 { 386 struct ccb_trans_settings_scsi *scsi; 387 struct ccb_trans_settings_spi *spi; 388 struct ccb_trans_settings *cts; 389 struct adv_transinfo *tconf; 390 target_bit_vector target_mask; 391 392 cts = &ccb->cts; 393 target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id); 394 395 scsi = &cts->proto_specific.scsi; 396 spi = &cts->xport_specific.spi; 397 398 cts->protocol = PROTO_SCSI; 399 cts->protocol_version = SCSI_REV_2; 400 cts->transport = XPORT_SPI; 401 cts->transport_version = 2; 402 403 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 404 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 405 406 crit_enter(); 407 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 408 tconf = &adv->tinfo[cts->ccb_h.target_id].current; 409 if ((adv->disc_enable & target_mask) != 0) 410 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 411 if ((adv->cmd_qng_enabled & target_mask) != 0) 412 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 413 } else { 414 tconf = &adv->tinfo[cts->ccb_h.target_id].user; 415 if ((adv->user_disc_enable & target_mask) != 0) 416 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 417 if ((adv->user_cmd_qng_enabled & target_mask) != 0) 418 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 419 } 420 spi->sync_period = tconf->period; 421 spi->sync_offset = tconf->offset; 422 crit_exit(); 423 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 424 spi->valid = CTS_SPI_VALID_SYNC_RATE 425 | CTS_SPI_VALID_SYNC_OFFSET 426 | CTS_SPI_VALID_BUS_WIDTH 427 | CTS_SPI_VALID_DISC; 428 scsi->valid = CTS_SCSI_VALID_TQ; 429 ccb->ccb_h.status = CAM_REQ_CMP; 430 xpt_done(ccb); 431 break; 432 } 433 case XPT_CALC_GEOMETRY: 434 { 435 struct ccb_calc_geometry *ccg; 436 u_int32_t size_mb; 437 u_int32_t secs_per_cylinder; 438 int extended; 439 440 ccg = &ccb->ccg; 441 size_mb = ccg->volume_size 442 / ((1024L * 1024L) / ccg->block_size); 443 extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0; 444 445 if (size_mb > 1024 && extended) { 446 ccg->heads = 255; 447 ccg->secs_per_track = 63; 448 } else { 449 ccg->heads = 64; 450 ccg->secs_per_track = 32; 451 } 452 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 453 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 454 ccb->ccb_h.status = CAM_REQ_CMP; 455 xpt_done(ccb); 456 break; 457 } 458 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 459 { 460 crit_enter(); 461 adv_stop_execution(adv); 462 adv_reset_bus(adv, /*initiate_reset*/TRUE); 463 adv_start_execution(adv); 464 crit_exit(); 465 466 ccb->ccb_h.status = CAM_REQ_CMP; 467 xpt_done(ccb); 468 break; 469 } 470 case XPT_TERM_IO: /* Terminate the I/O process */ 471 /* XXX Implement */ 472 ccb->ccb_h.status = CAM_REQ_INVALID; 473 xpt_done(ccb); 474 break; 475 case XPT_PATH_INQ: /* Path routing inquiry */ 476 { 477 struct ccb_pathinq *cpi = &ccb->cpi; 478 479 cpi->version_num = 1; /* XXX??? */ 480 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; 481 cpi->target_sprt = 0; 482 cpi->hba_misc = 0; 483 cpi->hba_eng_cnt = 0; 484 cpi->max_target = 7; 485 cpi->max_lun = 7; 486 cpi->initiator_id = adv->scsi_id; 487 cpi->bus_id = cam_sim_bus(sim); 488 cpi->base_transfer_speed = 3300; 489 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 490 strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN); 491 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 492 cpi->unit_number = cam_sim_unit(sim); 493 cpi->ccb_h.status = CAM_REQ_CMP; 494 cpi->transport = XPORT_SPI; 495 cpi->transport_version = 2; 496 cpi->protocol = PROTO_SCSI; 497 cpi->protocol_version = SCSI_REV_2; 498 xpt_done(ccb); 499 break; 500 } 501 default: 502 ccb->ccb_h.status = CAM_REQ_INVALID; 503 xpt_done(ccb); 504 break; 505 } 506 } 507 508 /* 509 * Currently, the output of bus_dmammap_load suits our needs just 510 * fine, but should it change, we'd need to do something here. 511 */ 512 #define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs) 513 514 static void 515 adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs, 516 int nsegments, int error) 517 { 518 struct ccb_scsiio *csio; 519 struct ccb_hdr *ccb_h; 520 struct cam_sim *sim; 521 struct adv_softc *adv; 522 struct adv_ccb_info *cinfo; 523 struct adv_scsi_q scsiq; 524 struct adv_sg_head sghead; 525 526 csio = (struct ccb_scsiio *)arg; 527 ccb_h = &csio->ccb_h; 528 sim = xpt_path_sim(ccb_h->path); 529 adv = (struct adv_softc *)cam_sim_softc(sim); 530 cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr; 531 532 /* 533 * Setup our done routine to release the simq on 534 * the next ccb that completes. 535 */ 536 if ((adv->state & ADV_BUSDMA_BLOCK) != 0) 537 adv->state |= ADV_BUSDMA_BLOCK_CLEARED; 538 539 if ((ccb_h->flags & CAM_CDB_POINTER) != 0) { 540 if ((ccb_h->flags & CAM_CDB_PHYS) == 0) { 541 /* XXX Need phystovirt!!!! */ 542 /* How about pmap_kenter??? */ 543 scsiq.cdbptr = csio->cdb_io.cdb_ptr; 544 } else { 545 scsiq.cdbptr = csio->cdb_io.cdb_ptr; 546 } 547 } else { 548 scsiq.cdbptr = csio->cdb_io.cdb_bytes; 549 } 550 /* 551 * Build up the request 552 */ 553 scsiq.q1.status = 0; 554 scsiq.q1.q_no = 0; 555 scsiq.q1.cntl = 0; 556 scsiq.q1.sg_queue_cnt = 0; 557 scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id); 558 scsiq.q1.target_lun = ccb_h->target_lun; 559 scsiq.q1.sense_len = csio->sense_len; 560 scsiq.q1.extra_bytes = 0; 561 scsiq.q2.ccb_index = cinfo - adv->ccb_infos; 562 scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id, 563 ccb_h->target_lun); 564 scsiq.q2.flag = 0; 565 scsiq.q2.cdb_len = csio->cdb_len; 566 if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0) 567 scsiq.q2.tag_code = csio->tag_action; 568 else 569 scsiq.q2.tag_code = 0; 570 scsiq.q2.vm_id = 0; 571 572 if (nsegments != 0) { 573 bus_dmasync_op_t op; 574 575 scsiq.q1.data_addr = dm_segs->ds_addr; 576 scsiq.q1.data_cnt = dm_segs->ds_len; 577 if (nsegments > 1) { 578 scsiq.q1.cntl |= QC_SG_HEAD; 579 sghead.entry_cnt 580 = sghead.entry_to_copy 581 = nsegments; 582 sghead.res = 0; 583 sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs); 584 scsiq.sg_head = &sghead; 585 } else { 586 scsiq.sg_head = NULL; 587 } 588 if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN) 589 op = BUS_DMASYNC_PREREAD; 590 else 591 op = BUS_DMASYNC_PREWRITE; 592 bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op); 593 } else { 594 scsiq.q1.data_addr = 0; 595 scsiq.q1.data_cnt = 0; 596 scsiq.sg_head = NULL; 597 } 598 599 600 crit_enter(); 601 /* 602 * Last time we need to check if this SCB needs to 603 * be aborted. 604 */ 605 if (ccb_h->status != CAM_REQ_INPROG) { 606 if (nsegments != 0) 607 bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); 608 adv_clear_state(adv, (union ccb *)csio); 609 adv_free_ccb_info(adv, cinfo); 610 xpt_done((union ccb *)csio); 611 crit_exit(); 612 return; 613 } 614 615 if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) { 616 /* Temporary resource shortage */ 617 adv_set_state(adv, ADV_RESOURCE_SHORTAGE); 618 if (nsegments != 0) 619 bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); 620 csio->ccb_h.status = CAM_REQUEUE_REQ; 621 adv_clear_state(adv, (union ccb *)csio); 622 adv_free_ccb_info(adv, cinfo); 623 xpt_done((union ccb *)csio); 624 crit_exit(); 625 return; 626 } 627 cinfo->state |= ACCB_ACTIVE; 628 ccb_h->status |= CAM_SIM_QUEUED; 629 LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le); 630 /* Schedule our timeout */ 631 callout_reset(&ccb_h->timeout_ch, (ccb_h->timeout * hz)/1000, 632 adv_timeout, csio); 633 crit_exit(); 634 } 635 636 static struct adv_ccb_info * 637 adv_alloc_ccb_info(struct adv_softc *adv) 638 { 639 int error; 640 struct adv_ccb_info *cinfo; 641 642 cinfo = &adv->ccb_infos[adv->ccb_infos_allocated]; 643 cinfo->state = ACCB_FREE; 644 error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0, 645 &cinfo->dmamap); 646 if (error != 0) { 647 kprintf("%s: Unable to allocate CCB info " 648 "dmamap - error %d\n", adv_name(adv), error); 649 return (NULL); 650 } 651 adv->ccb_infos_allocated++; 652 return (cinfo); 653 } 654 655 static void 656 adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo) 657 { 658 bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap); 659 } 660 661 void 662 adv_timeout(void *arg) 663 { 664 union ccb *ccb; 665 struct adv_softc *adv; 666 struct adv_ccb_info *cinfo; 667 668 ccb = (union ccb *)arg; 669 adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc; 670 cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; 671 672 xpt_print_path(ccb->ccb_h.path); 673 kprintf("Timed out\n"); 674 675 crit_enter(); 676 /* Have we been taken care of already?? */ 677 if (cinfo == NULL || cinfo->state == ACCB_FREE) { 678 crit_exit(); 679 return; 680 } 681 682 adv_stop_execution(adv); 683 684 if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) { 685 struct ccb_hdr *ccb_h; 686 687 /* 688 * In order to simplify the recovery process, we ask the XPT 689 * layer to halt the queue of new transactions and we traverse 690 * the list of pending CCBs and remove their timeouts. This 691 * means that the driver attempts to clear only one error 692 * condition at a time. In general, timeouts that occur 693 * close together are related anyway, so there is no benefit 694 * in attempting to handle errors in parrallel. Timeouts will 695 * be reinstated when the recovery process ends. 696 */ 697 adv_set_state(adv, ADV_IN_TIMEOUT); 698 699 /* This CCB is the CCB representing our recovery actions */ 700 cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED; 701 702 ccb_h = LIST_FIRST(&adv->pending_ccbs); 703 while (ccb_h != NULL) { 704 callout_stop(&ccb_h->timeout_ch); 705 ccb_h = LIST_NEXT(ccb_h, sim_links.le); 706 } 707 708 /* XXX Should send a BDR */ 709 /* Attempt an abort as our first tact */ 710 xpt_print_path(ccb->ccb_h.path); 711 kprintf("Attempting abort\n"); 712 adv_abort_ccb(adv, ccb->ccb_h.target_id, 713 ccb->ccb_h.target_lun, ccb, 714 CAM_CMD_TIMEOUT, /*queued_only*/FALSE); 715 callout_reset(&ccb->ccb_h.timeout_ch, 2 * hz, adv_timeout, ccb); 716 } else { 717 /* Our attempt to perform an abort failed, go for a reset */ 718 xpt_print_path(ccb->ccb_h.path); 719 kprintf("Resetting bus\n"); 720 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 721 ccb->ccb_h.status |= CAM_CMD_TIMEOUT; 722 adv_reset_bus(adv, /*initiate_reset*/TRUE); 723 } 724 adv_start_execution(adv); 725 crit_exit(); 726 } 727 728 struct adv_softc * 729 adv_alloc(device_t dev, bus_space_tag_t tag, bus_space_handle_t bsh) 730 { 731 struct adv_softc *adv = device_get_softc(dev); 732 733 /* 734 * Allocate a storage area for us 735 */ 736 LIST_INIT(&adv->pending_ccbs); 737 SLIST_INIT(&adv->free_ccb_infos); 738 adv->dev = dev; 739 adv->unit = device_get_unit(dev); 740 adv->tag = tag; 741 adv->bsh = bsh; 742 743 return(adv); 744 } 745 746 void 747 adv_free(struct adv_softc *adv) 748 { 749 switch (adv->init_level) { 750 case 6: 751 { 752 struct adv_ccb_info *cinfo; 753 754 while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) { 755 SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links); 756 adv_destroy_ccb_info(adv, cinfo); 757 } 758 759 bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap); 760 } 761 case 5: 762 bus_dmamem_free(adv->sense_dmat, adv->sense_buffers, 763 adv->sense_dmamap); 764 case 4: 765 bus_dma_tag_destroy(adv->sense_dmat); 766 case 3: 767 bus_dma_tag_destroy(adv->buffer_dmat); 768 case 2: 769 bus_dma_tag_destroy(adv->parent_dmat); 770 case 1: 771 if (adv->ccb_infos != NULL) 772 kfree(adv->ccb_infos, M_DEVBUF); 773 case 0: 774 break; 775 } 776 } 777 778 int 779 adv_init(struct adv_softc *adv) 780 { 781 struct adv_eeprom_config eeprom_config; 782 int checksum, i; 783 int max_sync; 784 u_int16_t config_lsw; 785 u_int16_t config_msw; 786 787 adv_lib_init(adv); 788 789 /* 790 * Stop script execution. 791 */ 792 adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE); 793 adv_stop_execution(adv); 794 if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) { 795 kprintf("adv%d: Unable to halt adapter. Initialization" 796 "failed\n", adv->unit); 797 return (1); 798 } 799 ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR); 800 if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) { 801 kprintf("adv%d: Unable to set program counter. Initialization" 802 "failed\n", adv->unit); 803 return (1); 804 } 805 806 config_msw = ADV_INW(adv, ADV_CONFIG_MSW); 807 config_lsw = ADV_INW(adv, ADV_CONFIG_LSW); 808 809 if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) { 810 config_msw &= ~ADV_CFG_MSW_CLR_MASK; 811 /* 812 * XXX The Linux code flags this as an error, 813 * but what should we report to the user??? 814 * It seems that clearing the config register 815 * makes this error recoverable. 816 */ 817 ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); 818 } 819 820 /* Suck in the configuration from the EEProm */ 821 checksum = adv_get_eeprom_config(adv, &eeprom_config); 822 823 if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) { 824 /* 825 * XXX The Linux code sets a warning level for this 826 * condition, yet nothing of meaning is printed to 827 * the user. What does this mean??? 828 */ 829 if (adv->chip_version == 3) { 830 if (eeprom_config.cfg_lsw != config_lsw) 831 eeprom_config.cfg_lsw = config_lsw; 832 if (eeprom_config.cfg_msw != config_msw) { 833 eeprom_config.cfg_msw = config_msw; 834 } 835 } 836 } 837 if (checksum == eeprom_config.chksum) { 838 839 /* Range/Sanity checking */ 840 if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) { 841 eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG; 842 } 843 if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) { 844 eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG; 845 } 846 if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) { 847 eeprom_config.max_tag_qng = eeprom_config.max_total_qng; 848 } 849 if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) { 850 eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC; 851 } 852 adv->max_openings = eeprom_config.max_total_qng; 853 adv->user_disc_enable = eeprom_config.disc_enable; 854 adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng; 855 adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config); 856 adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID; 857 EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id); 858 adv->control = eeprom_config.cntl; 859 for (i = 0; i <= ADV_MAX_TID; i++) { 860 u_int8_t sync_data; 861 862 if ((eeprom_config.init_sdtr & (0x1 << i)) == 0) 863 sync_data = 0; 864 else 865 sync_data = eeprom_config.sdtr_data[i]; 866 adv_sdtr_to_period_offset(adv, 867 sync_data, 868 &adv->tinfo[i].user.period, 869 &adv->tinfo[i].user.offset, 870 i); 871 } 872 config_lsw = eeprom_config.cfg_lsw; 873 eeprom_config.cfg_msw = config_msw; 874 } else { 875 u_int8_t sync_data; 876 877 kprintf("adv%d: Warning EEPROM Checksum mismatch. " 878 "Using default device parameters\n", adv->unit); 879 880 /* Set reasonable defaults since we can't read the EEPROM */ 881 adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1; 882 adv->max_openings = ADV_DEF_MAX_TOTAL_QNG; 883 adv->disc_enable = TARGET_BIT_VECTOR_SET; 884 adv->user_disc_enable = TARGET_BIT_VECTOR_SET; 885 adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET; 886 adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET; 887 adv->scsi_id = 7; 888 adv->control = 0xFFFF; 889 890 if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050) 891 /* Default to no Ultra to support the 3030 */ 892 adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA; 893 sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4); 894 for (i = 0; i <= ADV_MAX_TID; i++) { 895 adv_sdtr_to_period_offset(adv, sync_data, 896 &adv->tinfo[i].user.period, 897 &adv->tinfo[i].user.offset, 898 i); 899 } 900 config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON; 901 } 902 config_msw &= ~ADV_CFG_MSW_CLR_MASK; 903 config_lsw |= ADV_CFG_LSW_HOST_INT_ON; 904 if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA) 905 && (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0) 906 /* 25ns or 10MHz */ 907 max_sync = 25; 908 else 909 /* Unlimited */ 910 max_sync = 0; 911 for (i = 0; i <= ADV_MAX_TID; i++) { 912 if (adv->tinfo[i].user.period < max_sync) 913 adv->tinfo[i].user.period = max_sync; 914 } 915 916 if (adv_test_external_lram(adv) == 0) { 917 if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) { 918 eeprom_config.max_total_qng = 919 ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG; 920 eeprom_config.max_tag_qng = 921 ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG; 922 } else { 923 eeprom_config.cfg_msw |= 0x0800; 924 config_msw |= 0x0800; 925 eeprom_config.max_total_qng = 926 ADV_MAX_PCI_INRAM_TOTAL_QNG; 927 eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG; 928 } 929 adv->max_openings = eeprom_config.max_total_qng; 930 } 931 ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); 932 ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw); 933 #if 0 934 /* 935 * Don't write the eeprom data back for now. 936 * I'd rather not mess up the user's card. We also don't 937 * fully sanitize the eeprom settings above for the write-back 938 * to be 100% correct. 939 */ 940 if (adv_set_eeprom_config(adv, &eeprom_config) != 0) 941 kprintf("%s: WARNING! Failure writing to EEPROM.\n", 942 adv_name(adv)); 943 #endif 944 945 adv_set_chip_scsiid(adv, adv->scsi_id); 946 if (adv_init_lram_and_mcode(adv)) 947 return (1); 948 949 adv->disc_enable = adv->user_disc_enable; 950 951 adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable); 952 for (i = 0; i <= ADV_MAX_TID; i++) { 953 /* 954 * Start off in async mode. 955 */ 956 adv_set_syncrate(adv, /*struct cam_path */NULL, 957 i, /*period*/0, /*offset*/0, 958 ADV_TRANS_CUR); 959 /* 960 * Enable the use of tagged commands on all targets. 961 * This allows the kernel driver to make up it's own mind 962 * as it sees fit to tag queue instead of having the 963 * firmware try and second guess the tag_code settins. 964 */ 965 adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i, 966 adv->max_openings); 967 } 968 adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET); 969 adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET); 970 kprintf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n", 971 adv->unit, (adv->type & ADV_ULTRA) && (max_sync == 0) 972 ? "Ultra SCSI" : "SCSI", 973 adv->scsi_id, adv->max_openings); 974 return (0); 975 } 976 977 void 978 adv_intr(void *arg) 979 { 980 struct adv_softc *adv; 981 u_int16_t chipstat; 982 u_int16_t saved_ram_addr; 983 u_int8_t ctrl_reg; 984 u_int8_t saved_ctrl_reg; 985 u_int8_t host_flag; 986 987 adv = (struct adv_softc *)arg; 988 989 chipstat = ADV_INW(adv, ADV_CHIP_STATUS); 990 991 /* Is it for us? */ 992 if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0) 993 return; 994 995 ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL); 996 saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET | 997 ADV_CC_SINGLE_STEP | ADV_CC_DIAG | 998 ADV_CC_TEST)); 999 1000 if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) { 1001 kprintf("Detected Bus Reset\n"); 1002 adv_reset_bus(adv, /*initiate_reset*/FALSE); 1003 return; 1004 } 1005 1006 if ((chipstat & ADV_CSW_INT_PENDING) != 0) { 1007 1008 saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR); 1009 host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B); 1010 adv_write_lram_8(adv, ADVV_HOST_FLAG_B, 1011 host_flag | ADV_HOST_FLAG_IN_ISR); 1012 1013 adv_ack_interrupt(adv); 1014 1015 if ((chipstat & ADV_CSW_HALTED) != 0 1016 && (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) { 1017 adv_isr_chip_halted(adv); 1018 saved_ctrl_reg &= ~ADV_CC_HALT; 1019 } else { 1020 adv_run_doneq(adv); 1021 } 1022 ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr); 1023 #ifdef DIAGNOSTIC 1024 if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr) 1025 panic("adv_intr: Unable to set LRAM addr"); 1026 #endif 1027 adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag); 1028 } 1029 1030 ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg); 1031 } 1032 1033 static void 1034 adv_run_doneq(struct adv_softc *adv) 1035 { 1036 struct adv_q_done_info scsiq; 1037 u_int doneq_head; 1038 u_int done_qno; 1039 1040 doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF; 1041 done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head) 1042 + ADV_SCSIQ_B_FWD); 1043 while (done_qno != ADV_QLINK_END) { 1044 union ccb* ccb; 1045 struct adv_ccb_info *cinfo; 1046 u_int done_qaddr; 1047 u_int sg_queue_cnt; 1048 1049 done_qaddr = ADV_QNO_TO_QADDR(done_qno); 1050 1051 /* Pull status from this request */ 1052 sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq, 1053 adv->max_dma_count); 1054 1055 /* Mark it as free */ 1056 adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS, 1057 scsiq.q_status & ~(QS_READY|QS_ABORTED)); 1058 1059 /* Process request based on retrieved info */ 1060 if ((scsiq.cntl & QC_SG_HEAD) != 0) { 1061 u_int i; 1062 1063 /* 1064 * S/G based request. Free all of the queue 1065 * structures that contained S/G information. 1066 */ 1067 for (i = 0; i < sg_queue_cnt; i++) { 1068 done_qno = adv_read_lram_8(adv, done_qaddr 1069 + ADV_SCSIQ_B_FWD); 1070 1071 #ifdef DIAGNOSTIC 1072 if (done_qno == ADV_QLINK_END) { 1073 panic("adv_qdone: Corrupted SG " 1074 "list encountered"); 1075 } 1076 #endif 1077 done_qaddr = ADV_QNO_TO_QADDR(done_qno); 1078 1079 /* Mark SG queue as free */ 1080 adv_write_lram_8(adv, done_qaddr 1081 + ADV_SCSIQ_B_STATUS, QS_FREE); 1082 } 1083 } else 1084 sg_queue_cnt = 0; 1085 #ifdef DIAGNOSTIC 1086 if (adv->cur_active < (sg_queue_cnt + 1)) 1087 panic("adv_qdone: Attempting to free more " 1088 "queues than are active"); 1089 #endif 1090 adv->cur_active -= sg_queue_cnt + 1; 1091 1092 if ((scsiq.q_status != QS_DONE) 1093 && (scsiq.q_status & QS_ABORTED) == 0) 1094 panic("adv_qdone: completed scsiq with unknown status"); 1095 1096 scsiq.remain_bytes += scsiq.extra_bytes; 1097 1098 if ((scsiq.d3.done_stat == QD_WITH_ERROR) && 1099 (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) { 1100 if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) { 1101 scsiq.d3.done_stat = QD_NO_ERROR; 1102 scsiq.d3.host_stat = QHSTA_NO_ERROR; 1103 } 1104 } 1105 1106 cinfo = &adv->ccb_infos[scsiq.d2.ccb_index]; 1107 ccb = cinfo->ccb; 1108 ccb->csio.resid = scsiq.remain_bytes; 1109 adv_done(adv, ccb, 1110 scsiq.d3.done_stat, scsiq.d3.host_stat, 1111 scsiq.d3.scsi_stat, scsiq.q_no); 1112 1113 doneq_head = done_qno; 1114 done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD); 1115 } 1116 adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head); 1117 } 1118 1119 1120 void 1121 adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat, 1122 u_int host_stat, u_int scsi_status, u_int q_no) 1123 { 1124 struct adv_ccb_info *cinfo; 1125 1126 cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; 1127 LIST_REMOVE(&ccb->ccb_h, sim_links.le); 1128 callout_stop(&ccb->ccb_h.timeout_ch); 1129 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1130 bus_dmasync_op_t op; 1131 1132 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1133 op = BUS_DMASYNC_POSTREAD; 1134 else 1135 op = BUS_DMASYNC_POSTWRITE; 1136 bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op); 1137 bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); 1138 } 1139 1140 switch (done_stat) { 1141 case QD_NO_ERROR: 1142 if (host_stat == QHSTA_NO_ERROR) { 1143 ccb->ccb_h.status = CAM_REQ_CMP; 1144 break; 1145 } 1146 xpt_print_path(ccb->ccb_h.path); 1147 kprintf("adv_done - queue done without error, " 1148 "but host status non-zero(%x)\n", host_stat); 1149 /*FALLTHROUGH*/ 1150 case QD_WITH_ERROR: 1151 switch (host_stat) { 1152 case QHSTA_M_TARGET_STATUS_BUSY: 1153 case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY: 1154 /* 1155 * Assume that if we were a tagged transaction 1156 * the target reported queue full. Otherwise, 1157 * report busy. The firmware really should just 1158 * pass the original status back up to us even 1159 * if it thinks the target was in error for 1160 * returning this status as no other transactions 1161 * from this initiator are in effect, but this 1162 * ignores multi-initiator setups and there is 1163 * evidence that the firmware gets its per-device 1164 * transaction counts screwed up occassionally. 1165 */ 1166 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1167 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 1168 && host_stat != QHSTA_M_TARGET_STATUS_BUSY) 1169 scsi_status = SCSI_STATUS_QUEUE_FULL; 1170 else 1171 scsi_status = SCSI_STATUS_BUSY; 1172 adv_abort_ccb(adv, ccb->ccb_h.target_id, 1173 ccb->ccb_h.target_lun, 1174 /*ccb*/NULL, CAM_REQUEUE_REQ, 1175 /*queued_only*/TRUE); 1176 /*FALLTHROUGH*/ 1177 case QHSTA_M_NO_AUTO_REQ_SENSE: 1178 case QHSTA_NO_ERROR: 1179 ccb->csio.scsi_status = scsi_status; 1180 switch (scsi_status) { 1181 case SCSI_STATUS_CHECK_COND: 1182 case SCSI_STATUS_CMD_TERMINATED: 1183 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1184 /* Structure copy */ 1185 ccb->csio.sense_data = 1186 adv->sense_buffers[q_no - 1]; 1187 /* FALLTHROUGH */ 1188 case SCSI_STATUS_BUSY: 1189 case SCSI_STATUS_RESERV_CONFLICT: 1190 case SCSI_STATUS_QUEUE_FULL: 1191 case SCSI_STATUS_COND_MET: 1192 case SCSI_STATUS_INTERMED: 1193 case SCSI_STATUS_INTERMED_COND_MET: 1194 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1195 break; 1196 case SCSI_STATUS_OK: 1197 ccb->ccb_h.status |= CAM_REQ_CMP; 1198 break; 1199 } 1200 break; 1201 case QHSTA_M_SEL_TIMEOUT: 1202 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 1203 break; 1204 case QHSTA_M_DATA_OVER_RUN: 1205 ccb->ccb_h.status = CAM_DATA_RUN_ERR; 1206 break; 1207 case QHSTA_M_UNEXPECTED_BUS_FREE: 1208 ccb->ccb_h.status = CAM_UNEXP_BUSFREE; 1209 break; 1210 case QHSTA_M_BAD_BUS_PHASE_SEQ: 1211 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1212 break; 1213 case QHSTA_M_BAD_CMPL_STATUS_IN: 1214 /* No command complete after a status message */ 1215 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1216 break; 1217 case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT: 1218 case QHSTA_M_WTM_TIMEOUT: 1219 case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET: 1220 /* The SCSI bus hung in a phase */ 1221 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1222 adv_reset_bus(adv, /*initiate_reset*/TRUE); 1223 break; 1224 case QHSTA_M_AUTO_REQ_SENSE_FAIL: 1225 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; 1226 break; 1227 case QHSTA_D_QDONE_SG_LIST_CORRUPTED: 1228 case QHSTA_D_ASC_DVC_ERROR_CODE_SET: 1229 case QHSTA_D_HOST_ABORT_FAILED: 1230 case QHSTA_D_EXE_SCSI_Q_FAILED: 1231 case QHSTA_D_ASPI_NO_BUF_POOL: 1232 case QHSTA_M_BAD_TAG_CODE: 1233 case QHSTA_D_LRAM_CMP_ERROR: 1234 case QHSTA_M_MICRO_CODE_ERROR_HALT: 1235 default: 1236 panic("%s: Unhandled Host status error %x", 1237 adv_name(adv), host_stat); 1238 /* NOTREACHED */ 1239 } 1240 break; 1241 1242 case QD_ABORTED_BY_HOST: 1243 /* Don't clobber any, more explicit, error codes we've set */ 1244 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) 1245 ccb->ccb_h.status = CAM_REQ_ABORTED; 1246 break; 1247 1248 default: 1249 xpt_print_path(ccb->ccb_h.path); 1250 kprintf("adv_done - queue done with unknown status %x:%x\n", 1251 done_stat, host_stat); 1252 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1253 break; 1254 } 1255 adv_clear_state(adv, ccb); 1256 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP 1257 && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 1258 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1259 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1260 } 1261 adv_free_ccb_info(adv, cinfo); 1262 /* 1263 * Null this out so that we catch driver bugs that cause a 1264 * ccb to be completed twice. 1265 */ 1266 ccb->ccb_h.ccb_cinfo_ptr = NULL; 1267 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1268 xpt_done(ccb); 1269 } 1270 1271 /* 1272 * Function to poll for command completion when 1273 * interrupts are disabled (crash dumps) 1274 */ 1275 static void 1276 adv_poll(struct cam_sim *sim) 1277 { 1278 adv_intr(cam_sim_softc(sim)); 1279 } 1280 1281 /* 1282 * Attach all the sub-devices we can find 1283 */ 1284 int 1285 adv_attach(struct adv_softc *adv) 1286 { 1287 struct ccb_setasync csa; 1288 int max_sg; 1289 1290 /* 1291 * Allocate an array of ccb mapping structures. We put the 1292 * index of the ccb_info structure into the queue representing 1293 * a transaction and use it for mapping the queue to the 1294 * upper level SCSI transaction it represents. 1295 */ 1296 adv->ccb_infos = kmalloc(sizeof(*adv->ccb_infos) * adv->max_openings, 1297 M_DEVBUF, M_WAITOK); 1298 adv->init_level++; 1299 1300 /* 1301 * Create our DMA tags. These tags define the kinds of device 1302 * accessible memory allocations and memory mappings we will 1303 * need to perform during normal operation. 1304 * 1305 * Unless we need to further restrict the allocation, we rely 1306 * on the restrictions of the parent dmat, hence the common 1307 * use of MAXADDR and MAXSIZE. 1308 * 1309 * The ASC boards use chains of "queues" (the transactional 1310 * resources on the board) to represent long S/G lists. 1311 * The first queue represents the command and holds a 1312 * single address and data pair. The queues that follow 1313 * can each hold ADV_SG_LIST_PER_Q entries. Given the 1314 * total number of queues, we can express the largest 1315 * transaction we can map. We reserve a few queues for 1316 * error recovery. Take those into account as well. 1317 * 1318 * There is a way to take an interrupt to download the 1319 * next batch of S/G entries if there are more than 255 1320 * of them (the counter in the queue structure is a u_int8_t). 1321 * We don't use this feature, so limit the S/G list size 1322 * accordingly. 1323 */ 1324 max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q; 1325 if (max_sg > 255) 1326 max_sg = 255; 1327 1328 /* DMA tag for mapping buffers into device visible space. */ 1329 if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/1, /*boundary*/0, 1330 /*lowaddr*/BUS_SPACE_MAXADDR, 1331 /*highaddr*/BUS_SPACE_MAXADDR, 1332 /*filter*/NULL, /*filterarg*/NULL, 1333 /*maxsize*/MAXPHYS, 1334 /*nsegments*/max_sg, 1335 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1336 /*flags*/BUS_DMA_ALLOCNOW, 1337 &adv->buffer_dmat) != 0) { 1338 return (ENXIO); 1339 } 1340 adv->init_level++; 1341 1342 /* DMA tag for our sense buffers */ 1343 if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/1, /*boundary*/0, 1344 /*lowaddr*/BUS_SPACE_MAXADDR, 1345 /*highaddr*/BUS_SPACE_MAXADDR, 1346 /*filter*/NULL, /*filterarg*/NULL, 1347 sizeof(struct scsi_sense_data)*adv->max_openings, 1348 /*nsegments*/1, 1349 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1350 /*flags*/0, &adv->sense_dmat) != 0) { 1351 return (ENXIO); 1352 } 1353 1354 adv->init_level++; 1355 1356 /* Allocation for our sense buffers */ 1357 if (bus_dmamem_alloc(adv->sense_dmat, (void *)&adv->sense_buffers, 1358 BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) { 1359 return (ENOMEM); 1360 } 1361 1362 adv->init_level++; 1363 1364 /* And permanently map them */ 1365 bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap, 1366 adv->sense_buffers, 1367 sizeof(struct scsi_sense_data)*adv->max_openings, 1368 adv_map, &adv->sense_physbase, /*flags*/0); 1369 1370 adv->init_level++; 1371 1372 /* 1373 * Fire up the chip 1374 */ 1375 if (adv_start_chip(adv) != 1) { 1376 kprintf("adv%d: Unable to start on board processor. Aborting.\n", 1377 adv->unit); 1378 return (ENXIO); 1379 } 1380 1381 /* 1382 * Construct our SIM entry. 1383 */ 1384 adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit, 1385 &sim_mplock, 1, adv->max_openings, NULL); 1386 if (adv->sim == NULL) 1387 return (ENOMEM); 1388 1389 /* 1390 * Register the bus. 1391 * 1392 * XXX Twin Channel EISA Cards??? 1393 */ 1394 if (xpt_bus_register(adv->sim, 0) != CAM_SUCCESS) { 1395 cam_sim_free(adv->sim); 1396 return (ENXIO); 1397 } 1398 1399 if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim), 1400 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) 1401 != CAM_REQ_CMP) { 1402 xpt_bus_deregister(cam_sim_path(adv->sim)); 1403 cam_sim_free(adv->sim); 1404 return (ENXIO); 1405 } 1406 1407 xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5); 1408 csa.ccb_h.func_code = XPT_SASYNC_CB; 1409 csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE; 1410 csa.callback = advasync; 1411 csa.callback_arg = adv; 1412 xpt_action((union ccb *)&csa); 1413 return (0); 1414 } 1415