/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 * Product specific probe and attach routines can be found in:
 *
 * i386/isa/adv_isa.c	ABP5140, ABP542, ABP5150, ABP842, ABP852
 * pci/adv_pci.c	ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U,
 *			ABP940UA, ABP950, ABP960, ABP960U, ABP960UA,
 *			ABP970, ABP970U
 *
 * Copyright (c) 1996-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/advansys/advansys.c,v 1.14.2.4 2002/01/06 21:21:42 dwmalone Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1997 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/thread2.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_xpt_periph.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "advansys.h"

static void	adv_action(struct cam_sim *sim, union ccb *ccb);
static void	adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	adv_poll(struct cam_sim *sim);
static void	adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
		adv_alloc_ccb_info(struct adv_softc *adv);
static void	adv_destroy_ccb_info(struct adv_softc *adv,
				     struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
		adv_get_ccb_info(struct adv_softc *adv);
static __inline void adv_free_ccb_info(struct adv_softc *adv,
				       struct adv_ccb_info *cinfo);
static __inline void adv_set_state(struct adv_softc *adv, adv_state state);
static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb);
static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb);

static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
	struct adv_ccb_info *cinfo;

	crit_enter();
	if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
		SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
	} else {
		cinfo = adv_alloc_ccb_info(adv);
	}
	crit_exit();

	return (cinfo);
}

static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	crit_enter();
	cinfo->state = ACCB_FREE;
	SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
	crit_exit();
}
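
/*
 * adv_set_state() and adv_clear_state() below implement a small
 * counting scheme on top of the CAM simq: setting the first state bit
 * freezes the queue, and adv_clear_state_really() marks a completing
 * CCB with CAM_RELEASE_SIMQ once the last bit has been cleared.  An
 * illustrative sequence (a sketch, not a verbatim call chain from
 * this file):
 *
 *	adv_set_state(adv, ADV_BUSDMA_BLOCK);	first bit freezes the simq
 *	...deferred bus_dmamap_load() callback runs adv_execute_ccb()...
 *	adv_clear_state(adv, ccb);		last bit clears; the CCB
 *						carries CAM_RELEASE_SIMQ up
 */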
static __inline void
adv_set_state(struct adv_softc *adv, adv_state state)
{
	if (adv->state == 0)
		xpt_freeze_simq(adv->sim, /*count*/1);
	adv->state |= state;
}

static __inline void
adv_clear_state(struct adv_softc *adv, union ccb* ccb)
{
	if (adv->state != 0)
		adv_clear_state_really(adv, ccb);
}

static void
adv_clear_state_really(struct adv_softc *adv, union ccb* ccb)
{
	if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0)
		adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK);
	if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) {
		int openings;

		openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
		if (openings >= adv->openings_needed) {
			adv->state &= ~ADV_RESOURCE_SHORTAGE;
			adv->openings_needed = 0;
		}
	}

	if ((adv->state & ADV_IN_TIMEOUT) != 0) {
		struct adv_ccb_info *cinfo;

		cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
		if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
			struct ccb_hdr *ccb_h;

			/*
			 * We now traverse our list of pending CCBs
			 * and reinstate their timeouts.
			 */
			ccb_h = LIST_FIRST(&adv->pending_ccbs);
			while (ccb_h != NULL) {
				callout_reset(&ccb_h->timeout_ch,
					      (ccb_h->timeout * hz) / 1000,
					      adv_timeout, ccb_h);
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
			}
			adv->state &= ~ADV_IN_TIMEOUT;
			kprintf("%s: No longer in timeout\n", adv_name(adv));
		}
	}
	if (adv->state == 0)
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}

void
adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t* physaddr;

	physaddr = (bus_addr_t*)arg;
	*physaddr = segs->ds_addr;
}

char *
adv_name(struct adv_softc *adv)
{
	static char name[10];

	ksnprintf(name, sizeof(name), "adv%d", adv->unit);
	return (name);
}

static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adv_softc *adv;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));

	adv = (struct adv_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct	ccb_hdr *ccb_h;
		struct	ccb_scsiio *csio;
		struct	adv_ccb_info *cinfo;

		ccb_h = &ccb->ccb_h;
		csio = &ccb->csio;
		cinfo = adv_get_ccb_info(adv);
		if (cinfo == NULL)
			panic("XXX Handle CCB info error!!!");

		ccb_h->ccb_cinfo_ptr = cinfo;
		cinfo->ccb = ccb;

		/* Only use S/G if there is a transfer */
		if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer
				 */
				if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
					int error;

					crit_enter();
					error =
					    bus_dmamap_load(adv->buffer_dmat,
							    cinfo->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adv_execute_ccb,
							    csio, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						adv_set_state(adv,
							      ADV_BUSDMA_BLOCK);
					}
					crit_exit();
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adv_execute_ccb(csio, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;
				if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
					panic("adv_setup_data - Physical "
					      "segment pointers unsupported");

				if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adv_setup_data - Virtual "
					      "segment addresses unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0);
			}
		} else {
			adv_execute_ccb(ccb, NULL, 0, 0);
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	case XPT_TARGET_IO:	/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#define	IS_USER_SETTINGS(c)	(c->type == CTS_TYPE_USER_SETTINGS)
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		target_bit_vector targ_mask;
		struct adv_transinfo *tconf;
		u_int  update_type;

		cts = &ccb->cts;
		targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
		update_type = 0;

		/*
		 * The user must specify which type of settings he wishes
		 * to change.
		 */
		if (IS_CURRENT_SETTINGS(cts) && !IS_USER_SETTINGS(cts)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			update_type |= ADV_TRANS_GOAL;
		} else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			update_type |= ADV_TRANS_USER;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}

		crit_enter();
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if ((update_type & ADV_TRANS_GOAL) != 0) {
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					adv->disc_enable |= targ_mask;
				else
					adv->disc_enable &= ~targ_mask;
				adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
						 adv->disc_enable);
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adv->cmd_qng_enabled |= targ_mask;
				else
					adv->cmd_qng_enabled &= ~targ_mask;
			}
		}

		if ((update_type & ADV_TRANS_USER) != 0) {
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					adv->user_disc_enable |= targ_mask;
				else
					adv->user_disc_enable &= ~targ_mask;
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adv->user_cmd_qng_enabled |= targ_mask;
				else
					adv->user_cmd_qng_enabled &= ~targ_mask;
			}
		}

		/*
		 * If the user specifies either the sync rate or offset,
		 * but not both, the unspecified parameter defaults to its
		 * current value in transfer negotiations.
		 */
		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			/*
			 * If the user provided a sync rate but no offset,
			 * use the current offset.
			 */
			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
				spi->sync_offset = tconf->offset;

			/*
			 * If the user provided an offset but no sync rate,
			 * use the current sync rate.
			 */
			if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
				spi->sync_period = tconf->period;

			adv_period_offset_to_sdtr(adv, &spi->sync_period,
						  &spi->sync_offset,
						  cts->ccb_h.target_id);

			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 cts->ccb_h.target_id, spi->sync_period,
					 spi->sync_offset, update_type);
		}

		crit_exit();
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		struct adv_transinfo *tconf;
		target_bit_vector target_mask;

		cts = &ccb->cts;
		target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
		spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;

		crit_enter();
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			if ((adv->disc_enable & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			if ((adv->cmd_qng_enabled & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		} else {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			if ((adv->user_disc_enable & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			if ((adv->user_cmd_qng_enabled & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		}
		spi->sync_period = tconf->period;
		spi->sync_offset = tconf->offset;
		crit_exit();
		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int	  extended;

		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
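
	/*
	 * Worked example for the translation above (illustrative numbers):
	 * a 2GB disk with 512 byte blocks has volume_size == 4194304
	 * sectors, so size_mb == 4194304 / 2048 == 2048.  With the BIOS
	 * ">1GB" option enabled the extended geometry applies, giving
	 * 255 heads and 63 sectors per track, i.e.
	 * 4194304 / (255 * 63) == 261 cylinders.
	 */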
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		crit_enter();
		adv_stop_execution(adv);
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
		adv_start_execution(adv);
		crit_exit();

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = 7;
		cpi->initiator_id = adv->scsi_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

/*
 * Currently, the output of bus_dmamap_load suits our needs just
 * fine, but should it change, we'd need to do something here.
 */
#define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs)

static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
		int nsegments, int error)
{
	struct	ccb_scsiio *csio;
	struct	ccb_hdr *ccb_h;
	struct	cam_sim *sim;
	struct	adv_softc *adv;
	struct	adv_ccb_info *cinfo;
	struct	adv_scsi_q scsiq;
	struct	adv_sg_head sghead;

	csio = (struct ccb_scsiio *)arg;
	ccb_h = &csio->ccb_h;
	sim = xpt_path_sim(ccb_h->path);
	adv = (struct adv_softc *)cam_sim_softc(sim);
	cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;

	/*
	 * Setup our done routine to release the simq on
	 * the next ccb that completes.
	 */
	if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
		adv->state |= ADV_BUSDMA_BLOCK_CLEARED;

	if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
		if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
			/* XXX Need phystovirt!!!! */
			/* How about pmap_kenter??? */
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		} else {
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		}
	} else {
		scsiq.cdbptr = csio->cdb_io.cdb_bytes;
	}
	/*
	 * Build up the request
	 */
	scsiq.q1.status = 0;
	scsiq.q1.q_no = 0;
	scsiq.q1.cntl = 0;
	scsiq.q1.sg_queue_cnt = 0;
	scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
	scsiq.q1.target_lun = ccb_h->target_lun;
	scsiq.q1.sense_len = csio->sense_len;
	scsiq.q1.extra_bytes = 0;
	scsiq.q2.ccb_index = cinfo - adv->ccb_infos;
	scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
					      ccb_h->target_lun);
	scsiq.q2.flag = 0;
	scsiq.q2.cdb_len = csio->cdb_len;
	if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
		scsiq.q2.tag_code = csio->tag_action;
	else
		scsiq.q2.tag_code = 0;
	scsiq.q2.vm_id = 0;

	if (nsegments != 0) {
		bus_dmasync_op_t op;

		scsiq.q1.data_addr = dm_segs->ds_addr;
		scsiq.q1.data_cnt = dm_segs->ds_len;
		if (nsegments > 1) {
			scsiq.q1.cntl |= QC_SG_HEAD;
			sghead.entry_cnt
			    = sghead.entry_to_copy
			    = nsegments;
			sghead.res = 0;
			sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
			scsiq.sg_head = &sghead;
		} else {
			scsiq.sg_head = NULL;
		}
		if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
	} else {
		scsiq.q1.data_addr = 0;
		scsiq.q1.data_cnt = 0;
		scsiq.sg_head = NULL;
	}

	crit_enter();
	/*
	 * Last chance to check whether this CCB needs to
	 * be aborted.
	 */
	if (ccb_h->status != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		crit_exit();
		return;
	}

	if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
		/* Temporary resource shortage */
		adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		csio->ccb_h.status = CAM_REQUEUE_REQ;
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		crit_exit();
		return;
	}
	cinfo->state |= ACCB_ACTIVE;
	ccb_h->status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
	/* Schedule our timeout; the CCB timeout is in milliseconds */
	callout_reset(&ccb_h->timeout_ch, (ccb_h->timeout * hz) / 1000,
		      adv_timeout, csio);
	crit_exit();
}
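
/*
 * CCB info structures come from the adv->ccb_infos array sized to
 * max_openings in adv_attach().  adv_get_ccb_info() recycles entries
 * from the free list and falls back to adv_alloc_ccb_info() below,
 * which hands out the next unused slot and creates its DMA map.  The
 * array index (cinfo - adv->ccb_infos) travels through the firmware
 * in q2.ccb_index, which is how adv_run_doneq() maps a completed
 * queue (via d2.ccb_index) back to its CCB.
 */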
static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
	int error;
	struct adv_ccb_info *cinfo;

	cinfo = &adv->ccb_infos[adv->ccb_infos_allocated];
	cinfo->state = ACCB_FREE;
	error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
				  &cinfo->dmamap);
	if (error != 0) {
		kprintf("%s: Unable to allocate CCB info "
		       "dmamap - error %d\n", adv_name(adv), error);
		return (NULL);
	}
	adv->ccb_infos_allocated++;
	return (cinfo);
}

static void
adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
}

void
adv_timeout(void *arg)
{
	union ccb *ccb;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;

	ccb = (union ccb *)arg;
	adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;

	xpt_print_path(ccb->ccb_h.path);
	kprintf("Timed out\n");

	crit_enter();
	/* Have we been taken care of already?? */
	if (cinfo == NULL || cinfo->state == ACCB_FREE) {
		crit_exit();
		return;
	}

	adv_stop_execution(adv);

	if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
		struct ccb_hdr *ccb_h;

		/*
		 * In order to simplify the recovery process, we ask the XPT
		 * layer to halt the queue of new transactions and we traverse
		 * the list of pending CCBs and remove their timeouts.  This
		 * means that the driver attempts to clear only one error
		 * condition at a time.  In general, timeouts that occur
		 * close together are related anyway, so there is no benefit
		 * in attempting to handle errors in parallel.  Timeouts will
		 * be reinstated when the recovery process ends.
		 */
		adv_set_state(adv, ADV_IN_TIMEOUT);

		/* This CCB is the CCB representing our recovery actions */
		cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;

		ccb_h = LIST_FIRST(&adv->pending_ccbs);
		while (ccb_h != NULL) {
			callout_stop(&ccb_h->timeout_ch);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}

		/* XXX Should send a BDR */
		/* Attempt an abort as our first tack */
		xpt_print_path(ccb->ccb_h.path);
		kprintf("Attempting abort\n");
		adv_abort_ccb(adv, ccb->ccb_h.target_id,
			      ccb->ccb_h.target_lun, ccb,
			      CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
		callout_reset(&ccb->ccb_h.timeout_ch, 2 * hz, adv_timeout, ccb);
	} else {
		/* Our attempt to perform an abort failed, go for a reset */
		xpt_print_path(ccb->ccb_h.path);
		kprintf("Resetting bus\n");
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
	}
	adv_start_execution(adv);
	crit_exit();
}
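
/*
 * To summarize the escalation above: the first timeout on a CCB
 * freezes the driver (ADV_IN_TIMEOUT), cancels every pending timeout,
 * and queues an abort for the offending command with a fresh two
 * second deadline.  If that deadline also passes, we fall through to
 * a full bus reset.  adv_clear_state_really() reinstates the pending
 * timeouts once recovery completes.
 */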
struct adv_softc *
adv_alloc(device_t dev, bus_space_tag_t tag, bus_space_handle_t bsh)
{
	struct adv_softc *adv = device_get_softc(dev);

	/*
	 * Allocate a storage area for us
	 */
	LIST_INIT(&adv->pending_ccbs);
	SLIST_INIT(&adv->free_ccb_infos);
	adv->dev = dev;
	adv->unit = device_get_unit(dev);
	adv->tag = tag;
	adv->bsh = bsh;

	return(adv);
}

void
adv_free(struct adv_softc *adv)
{
	switch (adv->init_level) {
	case 6:
	{
		struct adv_ccb_info *cinfo;

		while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
			SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
			adv_destroy_ccb_info(adv, cinfo);
		}

		bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
	}
	case 5:
		bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
				adv->sense_dmamap);
	case 4:
		bus_dma_tag_destroy(adv->sense_dmat);
	case 3:
		bus_dma_tag_destroy(adv->buffer_dmat);
	case 2:
		bus_dma_tag_destroy(adv->parent_dmat);
	case 1:
		if (adv->ccb_infos != NULL)
			kfree(adv->ccb_infos, M_DEVBUF);
	case 0:
		break;
	}
}

int
adv_init(struct adv_softc *adv)
{
	struct	  adv_eeprom_config eeprom_config;
	int	  checksum, i;
	int	  max_sync;
	u_int16_t config_lsw;
	u_int16_t config_msw;

	adv_lib_init(adv);

	/*
	 * Stop script execution.
	 */
	adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
	adv_stop_execution(adv);
	if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) {
		kprintf("adv%d: Unable to halt adapter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		kprintf("adv%d: Unable to set program counter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}

	config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
	config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
		config_msw &= ~ADV_CFG_MSW_CLR_MASK;
		/*
		 * XXX The Linux code flags this as an error,
		 * but what should we report to the user???
		 * It seems that clearing the config register
		 * makes this error recoverable.
		 */
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	}

	/* Suck in the configuration from the EEProm */
	checksum = adv_get_eeprom_config(adv, &eeprom_config);

	if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) {
		/*
		 * XXX The Linux code sets a warning level for this
		 * condition, yet nothing of meaning is printed to
		 * the user.  What does this mean???
		 */
		if (adv->chip_version == 3) {
			if (eeprom_config.cfg_lsw != config_lsw)
				eeprom_config.cfg_lsw = config_lsw;
			if (eeprom_config.cfg_msw != config_msw) {
				eeprom_config.cfg_msw = config_msw;
			}
		}
	}
	if (checksum == eeprom_config.chksum) {

		/* Range/Sanity checking */
		if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG;
		}
		if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG;
		}
		if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) {
			eeprom_config.max_tag_qng = eeprom_config.max_total_qng;
		}
		if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) {
			eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC;
		}
		adv->max_openings = eeprom_config.max_total_qng;
		adv->user_disc_enable = eeprom_config.disc_enable;
		adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng;
		adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config);
		adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID;
		EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id);
		adv->control = eeprom_config.cntl;
		for (i = 0; i <= ADV_MAX_TID; i++) {
			u_int8_t sync_data;

			if ((eeprom_config.init_sdtr & (0x1 << i)) == 0)
				sync_data = 0;
			else
				sync_data = eeprom_config.sdtr_data[i];
			adv_sdtr_to_period_offset(adv,
						  sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
		}
		config_lsw = eeprom_config.cfg_lsw;
		eeprom_config.cfg_msw = config_msw;
	} else {
		u_int8_t sync_data;

		kprintf("adv%d: Warning: EEPROM checksum mismatch. "
		       "Using default device parameters\n", adv->unit);

		/* Set reasonable defaults since we can't read the EEPROM */
		adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
		adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
		adv->disc_enable = TARGET_BIT_VECTOR_SET;
		adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
		adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->scsi_id = 7;
		adv->control = 0xFFFF;

		if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050)
			/* Default to no Ultra to support the 3030 */
			adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA;
		sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4);
		for (i = 0; i <= ADV_MAX_TID; i++) {
			adv_sdtr_to_period_offset(adv, sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
		}
		config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON;
	}
	config_msw &= ~ADV_CFG_MSW_CLR_MASK;
	config_lsw |= ADV_CFG_LSW_HOST_INT_ON;
	if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)
	 && (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0)
		/* 25ns or 10MHz */
		max_sync = 25;
	else
		/* Unlimited */
		max_sync = 0;
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if (adv->tinfo[i].user.period < max_sync)
			adv->tinfo[i].user.period = max_sync;
	}

	if (adv_test_external_lram(adv) == 0) {
		if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) {
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG;
		} else {
			eeprom_config.cfg_msw |= 0x0800;
			config_msw |= 0x0800;
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG;
		}
		adv->max_openings = eeprom_config.max_total_qng;
	}
	ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw);
#if 0
	/*
	 * Don't write the eeprom data back for now.
	 * I'd rather not mess up the user's card.  We also don't
	 * fully sanitize the eeprom settings above for the write-back
	 * to be 100% correct.
	 */
	if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
		kprintf("%s: WARNING! Failure writing to EEPROM.\n",
		       adv_name(adv));
#endif

	adv_set_chip_scsiid(adv, adv->scsi_id);
	if (adv_init_lram_and_mcode(adv))
		return (1);

	adv->disc_enable = adv->user_disc_enable;

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	for (i = 0; i <= ADV_MAX_TID; i++) {
		/*
		 * Start off in async mode.
		 */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 i, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		/*
		 * Enable the use of tagged commands on all targets.
		 * This allows the kernel driver to make up its own mind
		 * about tagged queueing instead of having the firmware
		 * try to second-guess the tag_code settings.
		 */
		adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
				 adv->max_openings);
	}
	adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	kprintf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
	       adv->unit, (adv->type & ADV_ULTRA) && (max_sync == 0)
			  ? "Ultra SCSI" : "SCSI",
	       adv->scsi_id, adv->max_openings);
	return (0);
}

void
adv_intr(void *arg)
{
	struct	  adv_softc *adv;
	u_int16_t chipstat;
	u_int16_t saved_ram_addr;
	u_int8_t  ctrl_reg;
	u_int8_t  saved_ctrl_reg;
	u_int8_t  host_flag;

	adv = (struct adv_softc *)arg;

	chipstat = ADV_INW(adv, ADV_CHIP_STATUS);

	/* Is it for us? */
	if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0)
		return;

	ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
	saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
				       ADV_CC_SINGLE_STEP | ADV_CC_DIAG |
				       ADV_CC_TEST));

	if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) {
		kprintf("Detected Bus Reset\n");
		adv_reset_bus(adv, /*initiate_reset*/FALSE);
		return;
	}

	if ((chipstat & ADV_CSW_INT_PENDING) != 0) {

		saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
		host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
				 host_flag | ADV_HOST_FLAG_IN_ISR);

		adv_ack_interrupt(adv);

		if ((chipstat & ADV_CSW_HALTED) != 0
		 && (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) {
			adv_isr_chip_halted(adv);
			saved_ctrl_reg &= ~ADV_CC_HALT;
		} else {
			adv_run_doneq(adv);
		}
		ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr);
#ifdef DIAGNOSTIC
		if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr)
			panic("adv_intr: Unable to set LRAM addr");
#endif
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
	}

	ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg);
}
void
adv_run_doneq(struct adv_softc *adv)
{
	struct adv_q_done_info scsiq;
	u_int		  doneq_head;
	u_int		  done_qno;

	doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF;
	done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head)
				   + ADV_SCSIQ_B_FWD);
	while (done_qno != ADV_QLINK_END) {
		union ccb* ccb;
		struct adv_ccb_info *cinfo;
		u_int done_qaddr;
		u_int sg_queue_cnt;
		int   aborted;

		done_qaddr = ADV_QNO_TO_QADDR(done_qno);

		/* Pull status from this request */
		sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq,
						   adv->max_dma_count);

		/* Mark it as free */
		adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS,
				 scsiq.q_status & ~(QS_READY|QS_ABORTED));

		/* Process request based on retrieved info */
		if ((scsiq.cntl & QC_SG_HEAD) != 0) {
			u_int i;

			/*
			 * S/G based request.  Free all of the queue
			 * structures that contained S/G information.
			 */
			for (i = 0; i < sg_queue_cnt; i++) {
				done_qno = adv_read_lram_8(adv, done_qaddr
							   + ADV_SCSIQ_B_FWD);

#ifdef DIAGNOSTIC
				if (done_qno == ADV_QLINK_END) {
					panic("adv_qdone: Corrupted SG "
					      "list encountered");
				}
#endif
				done_qaddr = ADV_QNO_TO_QADDR(done_qno);

				/* Mark SG queue as free */
				adv_write_lram_8(adv, done_qaddr
						 + ADV_SCSIQ_B_STATUS, QS_FREE);
			}
		} else
			sg_queue_cnt = 0;
#ifdef DIAGNOSTIC
		if (adv->cur_active < (sg_queue_cnt + 1))
			panic("adv_qdone: Attempting to free more "
			      "queues than are active");
#endif
		adv->cur_active -= sg_queue_cnt + 1;

		aborted = (scsiq.q_status & QS_ABORTED) != 0;

		if ((scsiq.q_status != QS_DONE)
		 && (scsiq.q_status & QS_ABORTED) == 0)
			panic("adv_qdone: completed scsiq with unknown status");

		scsiq.remain_bytes += scsiq.extra_bytes;

		if ((scsiq.d3.done_stat == QD_WITH_ERROR) &&
		    (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
			if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) {
				scsiq.d3.done_stat = QD_NO_ERROR;
				scsiq.d3.host_stat = QHSTA_NO_ERROR;
			}
		}

		cinfo = &adv->ccb_infos[scsiq.d2.ccb_index];
		ccb = cinfo->ccb;
		ccb->csio.resid = scsiq.remain_bytes;
		adv_done(adv, ccb,
			 scsiq.d3.done_stat, scsiq.d3.host_stat,
			 scsiq.d3.scsi_stat, scsiq.q_no);

		doneq_head = done_qno;
		done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD);
	}
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head);
}

void
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
	 u_int host_stat, u_int scsi_status, u_int q_no)
{
	struct adv_ccb_info *cinfo;

	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
	LIST_REMOVE(&ccb->ccb_h, sim_links.le);
	callout_stop(&ccb->ccb_h.timeout_ch);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
		bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
	}

	switch (done_stat) {
	case QD_NO_ERROR:
		if (host_stat == QHSTA_NO_ERROR) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		xpt_print_path(ccb->ccb_h.path);
		kprintf("adv_done - queue done without error, "
		       "but host status non-zero(%x)\n", host_stat);
		/*FALLTHROUGH*/
	case QD_WITH_ERROR:
		switch (host_stat) {
		case QHSTA_M_TARGET_STATUS_BUSY:
		case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY:
			/*
			 * Assume that if we were a tagged transaction
			 * the target reported queue full.  Otherwise,
			 * report busy.  The firmware really should just
			 * pass the original status back up to us even
			 * if it thinks the target was in error for
			 * returning this status as no other transactions
			 * from this initiator are in effect, but this
			 * ignores multi-initiator setups and there is
			 * evidence that the firmware gets its per-device
			 * transaction counts screwed up occasionally.
			 */
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
			 && host_stat != QHSTA_M_TARGET_STATUS_BUSY)
				scsi_status = SCSI_STATUS_QUEUE_FULL;
			else
				scsi_status = SCSI_STATUS_BUSY;
			adv_abort_ccb(adv, ccb->ccb_h.target_id,
				      ccb->ccb_h.target_lun,
				      /*ccb*/NULL, CAM_REQUEUE_REQ,
				      /*queued_only*/TRUE);
			/*FALLTHROUGH*/
		case QHSTA_M_NO_AUTO_REQ_SENSE:
		case QHSTA_NO_ERROR:
			ccb->csio.scsi_status = scsi_status;
			switch (scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* Structure copy */
				ccb->csio.sense_data =
				    adv->sense_buffers[q_no - 1];
				/* FALLTHROUGH */
			case SCSI_STATUS_BUSY:
			case SCSI_STATUS_RESERV_CONFLICT:
			case SCSI_STATUS_QUEUE_FULL:
			case SCSI_STATUS_COND_MET:
			case SCSI_STATUS_INTERMED:
			case SCSI_STATUS_INTERMED_COND_MET:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				break;
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			}
			break;
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_BAD_BUS_PHASE_SEQ:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT:
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET:
			/* The SCSI bus hung in a phase */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			adv_reset_bus(adv, /*initiate_reset*/TRUE);
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_D_QDONE_SG_LIST_CORRUPTED:
		case QHSTA_D_ASC_DVC_ERROR_CODE_SET:
		case QHSTA_D_HOST_ABORT_FAILED:
		case QHSTA_D_EXE_SCSI_Q_FAILED:
		case QHSTA_D_ASPI_NO_BUF_POOL:
		case QHSTA_M_BAD_TAG_CODE:
		case QHSTA_D_LRAM_CMP_ERROR:
		case QHSTA_M_MICRO_CODE_ERROR_HALT:
		default:
			panic("%s: Unhandled Host status error %x",
			      adv_name(adv), host_stat);
			/* NOTREACHED */
		}
		break;

	case QD_ABORTED_BY_HOST:
		/* Don't clobber any more explicit error codes we've set */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;

	default:
		xpt_print_path(ccb->ccb_h.path);
		kprintf("adv_done - queue done with unknown status %x:%x\n",
		       done_stat, host_stat);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}
	adv_clear_state(adv, ccb);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
	 && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adv_free_ccb_info(adv, cinfo);
	/*
	 * Null this out so that we catch driver bugs that cause a
	 * ccb to be completed twice.
	 */
	ccb->ccb_h.ccb_cinfo_ptr = NULL;
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
}

/*
 * Function to poll for command completion when
 * interrupts are disabled (crash dumps)
 */
static void
adv_poll(struct cam_sim *sim)
{
	adv_intr(cam_sim_softc(sim));
}

/*
 * Attach all the sub-devices we can find
 */
int
adv_attach(struct adv_softc *adv)
{
	struct ccb_setasync csa;
	int max_sg;

	/*
	 * Allocate an array of ccb mapping structures.  We put the
	 * index of the ccb_info structure into the queue representing
	 * a transaction and use it for mapping the queue to the
	 * upper level SCSI transaction it represents.
	 */
	adv->ccb_infos = kmalloc(sizeof(*adv->ccb_infos) * adv->max_openings,
				 M_DEVBUF, M_WAITOK);
	adv->init_level++;

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 *
	 * The ASC boards use chains of "queues" (the transactional
	 * resources on the board) to represent long S/G lists.
	 * The first queue represents the command and holds a
	 * single address and data pair.  The queues that follow
	 * can each hold ADV_SG_LIST_PER_Q entries.  Given the
	 * total number of queues, we can express the largest
	 * transaction we can map.  We reserve a few queues for
	 * error recovery.  Take those into account as well.
	 *
	 * There is a way to take an interrupt to download the
	 * next batch of S/G entries if there are more than 255
	 * of them (the counter in the queue structure is a u_int8_t).
	 * We don't use this feature, so limit the S/G list size
	 * accordingly.
	 */
	max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q;
	if (max_sg > 255)
		max_sg = 255;
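
	/*
	 * For example, with illustrative values (the real constants are
	 * defined in advansys.h): max_openings == 240, ADV_MIN_FREE_Q == 2
	 * and ADV_SG_LIST_PER_Q == 7 would give
	 * (240 - 2 - 1) * 7 == 1659 S/G entries, which the cap above
	 * trims to 255 to keep the u_int8_t entry counter from wrapping.
	 */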

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXPHYS,
			       /*nsegments*/max_sg,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adv->buffer_dmat) != 0) {
		return (ENXIO);
	}
	adv->init_level++;

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       sizeof(struct scsi_sense_data)*adv->max_openings,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adv->sense_dmat) != 0) {
		return (ENXIO);
	}

	adv->init_level++;

	/* Allocation for our sense buffers */
	if (bus_dmamem_alloc(adv->sense_dmat, (void *)&adv->sense_buffers,
			     BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
		return (ENOMEM);
	}

	adv->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
			adv->sense_buffers,
			sizeof(struct scsi_sense_data)*adv->max_openings,
			adv_map, &adv->sense_physbase, /*flags*/0);

	adv->init_level++;

	/*
	 * Fire up the chip
	 */
	if (adv_start_chip(adv) != 1) {
		kprintf("adv%d: Unable to start on-board processor. Aborting.\n",
		       adv->unit);
		return (ENXIO);
	}

	/*
	 * Construct our SIM entry.
	 */
	adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit,
				 &sim_mplock, 1, adv->max_openings, NULL);
	if (adv->sim == NULL)
		return (ENOMEM);

	/*
	 * Register the bus.
	 *
	 * XXX Twin Channel EISA Cards???
	 */
	if (xpt_bus_register(adv->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adv->sim);
		return (ENXIO);
	}

	if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(adv->sim));
		cam_sim_free(adv->sim);
		return (ENXIO);
	}

	xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
	csa.callback = advasync;
	csa.callback_arg = adv;
	xpt_action((union ccb *)&csa);
	return (0);
}
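
/*
 * Rough life cycle, as driven by the bus front ends (adv_isa.c and
 * adv_pci.c); a sketch of the expected sequence rather than a copy of
 * their code:
 *
 *	adv = adv_alloc(dev, tag, bsh);		softc and list setup
 *	if (adv_init(adv) != 0)			halt chip, read EEPROM,
 *		fail;				load LRAM and microcode
 *	...wire adv_intr() up as the interrupt handler...
 *	adv_attach(adv);			DMA tags, SIM and bus
 *						registration with CAM
 */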