1 /* 2 * Generic driver for the Advanced Systems Inc. SCSI controllers 3 * Product specific probe and attach routines can be found in: 4 * 5 * i386/isa/adv_isa.c ABP5140, ABP542, ABP5150, ABP842, ABP852 6 * i386/eisa/adv_eisa.c ABP742, ABP752 7 * pci/adv_pci.c ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U, 8 * ABP940UA, ABP950, ABP960, ABP960U, ABP960UA, 9 * ABP970, ABP970U 10 * 11 * Copyright (c) 1996-2000 Justin Gibbs. 12 * All rights reserved. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions, and the following disclaimer, 19 * without modification, immediately at the beginning of the file. 20 * 2. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 27 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * $FreeBSD: src/sys/dev/advansys/advansys.c,v 1.14.2.4 2002/01/06 21:21:42 dwmalone Exp $ 36 * $DragonFly: src/sys/dev/disk/advansys/advansys.c,v 1.12 2007/12/23 07:00:55 pavalos Exp $ 37 */ 38 /* 39 * Ported from: 40 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters 41 * 42 * Copyright (c) 1995-1997 Advanced System Products, Inc. 43 * All Rights Reserved. 44 * 45 * Redistribution and use in source and binary forms, with or without 46 * modification, are permitted provided that redistributions of source 47 * code retain the above copyright notice and this comment without 48 * modification. 49 */ 50 51 #include <sys/param.h> 52 #include <sys/systm.h> 53 #include <sys/malloc.h> 54 #include <sys/kernel.h> 55 #include <sys/thread2.h> 56 #include <sys/bus.h> 57 #include <sys/rman.h> 58 59 #include <bus/cam/cam.h> 60 #include <bus/cam/cam_ccb.h> 61 #include <bus/cam/cam_sim.h> 62 #include <bus/cam/cam_xpt_sim.h> 63 #include <bus/cam/cam_xpt_periph.h> 64 #include <bus/cam/cam_debug.h> 65 66 #include <bus/cam/scsi/scsi_all.h> 67 #include <bus/cam/scsi/scsi_message.h> 68 69 #include <vm/vm.h> 70 #include <vm/vm_param.h> 71 #include <vm/pmap.h> 72 73 #include "advansys.h" 74 75 static void adv_action(struct cam_sim *sim, union ccb *ccb); 76 static void adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs, 77 int nsegments, int error); 78 static void adv_poll(struct cam_sim *sim); 79 static void adv_run_doneq(struct adv_softc *adv); 80 static struct adv_ccb_info * 81 adv_alloc_ccb_info(struct adv_softc *adv); 82 static void adv_destroy_ccb_info(struct adv_softc *adv, 83 struct adv_ccb_info *cinfo); 84 static __inline struct adv_ccb_info * 85 adv_get_ccb_info(struct adv_softc *adv); 86 static __inline void adv_free_ccb_info(struct adv_softc *adv, 87 struct adv_ccb_info *cinfo); 88 static __inline void adv_set_state(struct adv_softc *adv, adv_state state); 89 static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb); 90 static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb); 91 92 static __inline struct adv_ccb_info * 93 adv_get_ccb_info(struct adv_softc *adv) 94 { 95 struct adv_ccb_info *cinfo; 96 97 crit_enter(); 98 if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) { 99 SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links); 100 } else { 101 cinfo = adv_alloc_ccb_info(adv); 102 } 103 crit_exit(); 104 105 return (cinfo); 106 } 107 108 static __inline void 109 adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo) 110 { 111 crit_enter(); 112 cinfo->state = ACCB_FREE; 113 SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links); 114 crit_exit(); 115 } 116 117 static __inline void 118 adv_set_state(struct adv_softc *adv, adv_state state) 119 { 120 if (adv->state == 0) 121 xpt_freeze_simq(adv->sim, /*count*/1); 122 adv->state |= state; 123 } 124 125 static __inline void 126 adv_clear_state(struct adv_softc *adv, union ccb* ccb) 127 { 128 if (adv->state != 0) 129 adv_clear_state_really(adv, ccb); 130 } 131 132 static void 133 adv_clear_state_really(struct adv_softc *adv, union ccb* ccb) 134 { 135 if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0) 136 adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK); 137 if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) { 138 int openings; 139 140 openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q; 141 if (openings >= adv->openings_needed) { 142 adv->state &= ~ADV_RESOURCE_SHORTAGE; 143 adv->openings_needed = 0; 144 } 145 } 146 147 if ((adv->state & ADV_IN_TIMEOUT) != 0) { 148 struct adv_ccb_info *cinfo; 149 150 cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; 151 if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) { 152 struct ccb_hdr *ccb_h; 153 154 /* 155 * We now traverse our list of pending CCBs 156 * and reinstate their timeouts. 157 */ 158 ccb_h = LIST_FIRST(&adv->pending_ccbs); 159 while (ccb_h != NULL) { 160 callout_reset(&ccb_h->timeout_ch, 161 (ccb_h->timeout * hz) / 1000, 162 adv_timeout, ccb_h); 163 ccb_h = LIST_NEXT(ccb_h, sim_links.le); 164 } 165 adv->state &= ~ADV_IN_TIMEOUT; 166 kprintf("%s: No longer in timeout\n", adv_name(adv)); 167 } 168 } 169 if (adv->state == 0) 170 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 171 } 172 173 void 174 adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) 175 { 176 bus_addr_t* physaddr; 177 178 physaddr = (bus_addr_t*)arg; 179 *physaddr = segs->ds_addr; 180 } 181 182 char * 183 adv_name(struct adv_softc *adv) 184 { 185 static char name[10]; 186 187 ksnprintf(name, sizeof(name), "adv%d", adv->unit); 188 return (name); 189 } 190 191 static void 192 adv_action(struct cam_sim *sim, union ccb *ccb) 193 { 194 struct adv_softc *adv; 195 196 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n")); 197 198 adv = (struct adv_softc *)cam_sim_softc(sim); 199 200 switch (ccb->ccb_h.func_code) { 201 /* Common cases first */ 202 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 203 { 204 struct ccb_hdr *ccb_h; 205 struct ccb_scsiio *csio; 206 struct adv_ccb_info *cinfo; 207 208 ccb_h = &ccb->ccb_h; 209 csio = &ccb->csio; 210 cinfo = adv_get_ccb_info(adv); 211 if (cinfo == NULL) 212 panic("XXX Handle CCB info error!!!"); 213 214 ccb_h->ccb_cinfo_ptr = cinfo; 215 cinfo->ccb = ccb; 216 217 /* Only use S/G if there is a transfer */ 218 if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 219 if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) { 220 /* 221 * We've been given a pointer 222 * to a single buffer 223 */ 224 if ((ccb_h->flags & CAM_DATA_PHYS) == 0) { 225 int error; 226 227 crit_enter(); 228 error = 229 bus_dmamap_load(adv->buffer_dmat, 230 cinfo->dmamap, 231 csio->data_ptr, 232 csio->dxfer_len, 233 adv_execute_ccb, 234 csio, /*flags*/0); 235 if (error == EINPROGRESS) { 236 /* 237 * So as to maintain ordering, 238 * freeze the controller queue 239 * until our mapping is 240 * returned. 241 */ 242 adv_set_state(adv, 243 ADV_BUSDMA_BLOCK); 244 } 245 crit_exit(); 246 } else { 247 struct bus_dma_segment seg; 248 249 /* Pointer to physical buffer */ 250 seg.ds_addr = 251 (bus_addr_t)csio->data_ptr; 252 seg.ds_len = csio->dxfer_len; 253 adv_execute_ccb(csio, &seg, 1, 0); 254 } 255 } else { 256 struct bus_dma_segment *segs; 257 if ((ccb_h->flags & CAM_DATA_PHYS) != 0) 258 panic("adv_setup_data - Physical " 259 "segment pointers unsupported"); 260 261 if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) 262 panic("adv_setup_data - Virtual " 263 "segment addresses unsupported"); 264 265 /* Just use the segments provided */ 266 segs = (struct bus_dma_segment *)csio->data_ptr; 267 adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0); 268 } 269 } else { 270 adv_execute_ccb(ccb, NULL, 0, 0); 271 } 272 break; 273 } 274 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 275 case XPT_TARGET_IO: /* Execute target I/O request */ 276 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ 277 case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ 278 case XPT_EN_LUN: /* Enable LUN as a target */ 279 case XPT_ABORT: /* Abort the specified CCB */ 280 /* XXX Implement */ 281 ccb->ccb_h.status = CAM_REQ_INVALID; 282 xpt_done(ccb); 283 break; 284 #ifdef CAM_NEW_TRAN_CODE 285 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 286 #define IS_USER_SETTINGS(c) (c->type == CTS_TYPE_USER_SETTINGS) 287 #else 288 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 289 #define IS_USER_SETTINGS(c) (c->flags & CCB_TRANS_USER_SETTINGS) 290 #endif 291 case XPT_SET_TRAN_SETTINGS: 292 { 293 #ifdef CAM_NEW_TRAN_CODE 294 struct ccb_trans_settings_scsi *scsi; 295 struct ccb_trans_settings_spi *spi; 296 #endif 297 struct ccb_trans_settings *cts; 298 target_bit_vector targ_mask; 299 struct adv_transinfo *tconf; 300 u_int update_type; 301 302 cts = &ccb->cts; 303 targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id); 304 update_type = 0; 305 306 /* 307 * The user must specify which type of settings he wishes 308 * to change. 309 */ 310 if (IS_CURRENT_SETTINGS(cts) && !IS_USER_SETTINGS(cts)) { 311 tconf = &adv->tinfo[cts->ccb_h.target_id].current; 312 update_type |= ADV_TRANS_GOAL; 313 } else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) { 314 tconf = &adv->tinfo[cts->ccb_h.target_id].user; 315 update_type |= ADV_TRANS_USER; 316 } else { 317 ccb->ccb_h.status = CAM_REQ_INVALID; 318 break; 319 } 320 321 crit_enter(); 322 #ifdef CAM_NEW_TRAN_CODE 323 scsi = &cts->proto_specific.scsi; 324 spi = &cts->xport_specific.spi; 325 if ((update_type & ADV_TRANS_GOAL) != 0) { 326 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 327 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 328 adv->disc_enable |= targ_mask; 329 else 330 adv->disc_enable &= ~targ_mask; 331 adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, 332 adv->disc_enable); 333 } 334 335 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 336 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 337 adv->cmd_qng_enabled |= targ_mask; 338 else 339 adv->cmd_qng_enabled &= ~targ_mask; 340 } 341 } 342 343 if ((update_type & ADV_TRANS_USER) != 0) { 344 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 345 if ((spi->flags & CTS_SPI_VALID_DISC) != 0) 346 adv->user_disc_enable |= targ_mask; 347 else 348 adv->user_disc_enable &= ~targ_mask; 349 } 350 351 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 352 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 353 adv->user_cmd_qng_enabled |= targ_mask; 354 else 355 adv->user_cmd_qng_enabled &= ~targ_mask; 356 } 357 } 358 359 /* 360 * If the user specifies either the sync rate, or offset, 361 * but not both, the unspecified parameter defaults to its 362 * current value in transfer negotiations. 363 */ 364 if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) 365 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { 366 /* 367 * If the user provided a sync rate but no offset, 368 * use the current offset. 369 */ 370 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) 371 spi->sync_offset = tconf->offset; 372 373 /* 374 * If the user provided an offset but no sync rate, 375 * use the current sync rate. 376 */ 377 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) 378 spi->sync_period = tconf->period; 379 380 adv_period_offset_to_sdtr(adv, &spi->sync_period, 381 &spi->sync_offset, 382 cts->ccb_h.target_id); 383 384 adv_set_syncrate(adv, /*struct cam_path */NULL, 385 cts->ccb_h.target_id, spi->sync_period, 386 spi->sync_offset, update_type); 387 } 388 #else 389 if ((update_type & ADV_TRANS_GOAL) != 0) { 390 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 391 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) 392 adv->disc_enable |= targ_mask; 393 else 394 adv->disc_enable &= ~targ_mask; 395 adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, 396 adv->disc_enable); 397 } 398 399 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 400 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) 401 adv->cmd_qng_enabled |= targ_mask; 402 else 403 adv->cmd_qng_enabled &= ~targ_mask; 404 } 405 } 406 407 if ((update_type & ADV_TRANS_USER) != 0) { 408 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 409 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) 410 adv->user_disc_enable |= targ_mask; 411 else 412 adv->user_disc_enable &= ~targ_mask; 413 } 414 415 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 416 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) 417 adv->user_cmd_qng_enabled |= targ_mask; 418 else 419 adv->user_cmd_qng_enabled &= ~targ_mask; 420 } 421 } 422 423 /* 424 * If the user specifies either the sync rate, or offset, 425 * but not both, the unspecified parameter defaults to its 426 * current value in transfer negotiations. 427 */ 428 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) 429 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) { 430 /* 431 * If the user provided a sync rate but no offset, 432 * use the current offset. 433 */ 434 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) 435 cts->sync_offset = tconf->offset; 436 437 /* 438 * If the user provided an offset but no sync rate, 439 * use the current sync rate. 440 */ 441 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) 442 cts->sync_period = tconf->period; 443 444 adv_period_offset_to_sdtr(adv, &cts->sync_period, 445 &cts->sync_offset, 446 cts->ccb_h.target_id); 447 448 adv_set_syncrate(adv, /*struct cam_path */NULL, 449 cts->ccb_h.target_id, cts->sync_period, 450 cts->sync_offset, update_type); 451 } 452 #endif 453 454 crit_exit(); 455 ccb->ccb_h.status = CAM_REQ_CMP; 456 xpt_done(ccb); 457 break; 458 } 459 case XPT_GET_TRAN_SETTINGS: 460 /* Get default/user set transfer settings for the target */ 461 { 462 #ifdef CAM_NEW_TRAN_CODE 463 struct ccb_trans_settings_scsi *scsi; 464 struct ccb_trans_settings_spi *spi; 465 #endif 466 struct ccb_trans_settings *cts; 467 struct adv_transinfo *tconf; 468 target_bit_vector target_mask; 469 470 cts = &ccb->cts; 471 target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id); 472 473 #ifdef CAM_NEW_TRAN_CODE 474 scsi = &cts->proto_specific.scsi; 475 spi = &cts->xport_specific.spi; 476 477 cts->protocol = PROTO_SCSI; 478 cts->protocol_version = SCSI_REV_2; 479 cts->transport = XPORT_SPI; 480 cts->transport_version = 2; 481 482 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 483 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 484 485 crit_enter(); 486 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 487 tconf = &adv->tinfo[cts->ccb_h.target_id].current; 488 if ((adv->disc_enable & target_mask) != 0) 489 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 490 if ((adv->cmd_qng_enabled & target_mask) != 0) 491 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 492 } else { 493 tconf = &adv->tinfo[cts->ccb_h.target_id].user; 494 if ((adv->user_disc_enable & target_mask) != 0) 495 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 496 if ((adv->user_cmd_qng_enabled & target_mask) != 0) 497 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 498 } 499 spi->sync_period = tconf->period; 500 spi->sync_offset = tconf->offset; 501 crit_exit(); 502 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 503 spi->valid = CTS_SPI_VALID_SYNC_RATE 504 | CTS_SPI_VALID_SYNC_OFFSET 505 | CTS_SPI_VALID_BUS_WIDTH 506 | CTS_SPI_VALID_DISC; 507 scsi->valid = CTS_SCSI_VALID_TQ; 508 #else 509 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 510 crit_enter(); 511 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { 512 tconf = &adv->tinfo[cts->ccb_h.target_id].current; 513 if ((adv->disc_enable & target_mask) != 0) 514 cts->flags |= CCB_TRANS_DISC_ENB; 515 if ((adv->cmd_qng_enabled & target_mask) != 0) 516 cts->flags |= CCB_TRANS_TAG_ENB; 517 } else { 518 tconf = &adv->tinfo[cts->ccb_h.target_id].user; 519 if ((adv->user_disc_enable & target_mask) != 0) 520 cts->flags |= CCB_TRANS_DISC_ENB; 521 if ((adv->user_cmd_qng_enabled & target_mask) != 0) 522 cts->flags |= CCB_TRANS_TAG_ENB; 523 } 524 525 cts->sync_period = tconf->period; 526 cts->sync_offset = tconf->offset; 527 crit_exit(); 528 529 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 530 cts->valid = CCB_TRANS_SYNC_RATE_VALID 531 | CCB_TRANS_SYNC_OFFSET_VALID 532 | CCB_TRANS_BUS_WIDTH_VALID 533 | CCB_TRANS_DISC_VALID 534 | CCB_TRANS_TQ_VALID; 535 #endif 536 ccb->ccb_h.status = CAM_REQ_CMP; 537 xpt_done(ccb); 538 break; 539 } 540 case XPT_CALC_GEOMETRY: 541 { 542 struct ccb_calc_geometry *ccg; 543 u_int32_t size_mb; 544 u_int32_t secs_per_cylinder; 545 int extended; 546 547 ccg = &ccb->ccg; 548 size_mb = ccg->volume_size 549 / ((1024L * 1024L) / ccg->block_size); 550 extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0; 551 552 if (size_mb > 1024 && extended) { 553 ccg->heads = 255; 554 ccg->secs_per_track = 63; 555 } else { 556 ccg->heads = 64; 557 ccg->secs_per_track = 32; 558 } 559 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 560 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 561 ccb->ccb_h.status = CAM_REQ_CMP; 562 xpt_done(ccb); 563 break; 564 } 565 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 566 { 567 crit_enter(); 568 adv_stop_execution(adv); 569 adv_reset_bus(adv, /*initiate_reset*/TRUE); 570 adv_start_execution(adv); 571 crit_exit(); 572 573 ccb->ccb_h.status = CAM_REQ_CMP; 574 xpt_done(ccb); 575 break; 576 } 577 case XPT_TERM_IO: /* Terminate the I/O process */ 578 /* XXX Implement */ 579 ccb->ccb_h.status = CAM_REQ_INVALID; 580 xpt_done(ccb); 581 break; 582 case XPT_PATH_INQ: /* Path routing inquiry */ 583 { 584 struct ccb_pathinq *cpi = &ccb->cpi; 585 586 cpi->version_num = 1; /* XXX??? */ 587 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; 588 cpi->target_sprt = 0; 589 cpi->hba_misc = 0; 590 cpi->hba_eng_cnt = 0; 591 cpi->max_target = 7; 592 cpi->max_lun = 7; 593 cpi->initiator_id = adv->scsi_id; 594 cpi->bus_id = cam_sim_bus(sim); 595 cpi->base_transfer_speed = 3300; 596 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 597 strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN); 598 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 599 cpi->unit_number = cam_sim_unit(sim); 600 cpi->ccb_h.status = CAM_REQ_CMP; 601 #ifdef CAM_NEW_TRAN_CODE 602 cpi->transport = XPORT_SPI; 603 cpi->transport_version = 2; 604 cpi->protocol = PROTO_SCSI; 605 cpi->protocol_version = SCSI_REV_2; 606 #endif 607 xpt_done(ccb); 608 break; 609 } 610 default: 611 ccb->ccb_h.status = CAM_REQ_INVALID; 612 xpt_done(ccb); 613 break; 614 } 615 } 616 617 /* 618 * Currently, the output of bus_dmammap_load suits our needs just 619 * fine, but should it change, we'd need to do something here. 620 */ 621 #define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs) 622 623 static void 624 adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs, 625 int nsegments, int error) 626 { 627 struct ccb_scsiio *csio; 628 struct ccb_hdr *ccb_h; 629 struct cam_sim *sim; 630 struct adv_softc *adv; 631 struct adv_ccb_info *cinfo; 632 struct adv_scsi_q scsiq; 633 struct adv_sg_head sghead; 634 635 csio = (struct ccb_scsiio *)arg; 636 ccb_h = &csio->ccb_h; 637 sim = xpt_path_sim(ccb_h->path); 638 adv = (struct adv_softc *)cam_sim_softc(sim); 639 cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr; 640 641 /* 642 * Setup our done routine to release the simq on 643 * the next ccb that completes. 644 */ 645 if ((adv->state & ADV_BUSDMA_BLOCK) != 0) 646 adv->state |= ADV_BUSDMA_BLOCK_CLEARED; 647 648 if ((ccb_h->flags & CAM_CDB_POINTER) != 0) { 649 if ((ccb_h->flags & CAM_CDB_PHYS) == 0) { 650 /* XXX Need phystovirt!!!! */ 651 /* How about pmap_kenter??? */ 652 scsiq.cdbptr = csio->cdb_io.cdb_ptr; 653 } else { 654 scsiq.cdbptr = csio->cdb_io.cdb_ptr; 655 } 656 } else { 657 scsiq.cdbptr = csio->cdb_io.cdb_bytes; 658 } 659 /* 660 * Build up the request 661 */ 662 scsiq.q1.status = 0; 663 scsiq.q1.q_no = 0; 664 scsiq.q1.cntl = 0; 665 scsiq.q1.sg_queue_cnt = 0; 666 scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id); 667 scsiq.q1.target_lun = ccb_h->target_lun; 668 scsiq.q1.sense_len = csio->sense_len; 669 scsiq.q1.extra_bytes = 0; 670 scsiq.q2.ccb_index = cinfo - adv->ccb_infos; 671 scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id, 672 ccb_h->target_lun); 673 scsiq.q2.flag = 0; 674 scsiq.q2.cdb_len = csio->cdb_len; 675 if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0) 676 scsiq.q2.tag_code = csio->tag_action; 677 else 678 scsiq.q2.tag_code = 0; 679 scsiq.q2.vm_id = 0; 680 681 if (nsegments != 0) { 682 bus_dmasync_op_t op; 683 684 scsiq.q1.data_addr = dm_segs->ds_addr; 685 scsiq.q1.data_cnt = dm_segs->ds_len; 686 if (nsegments > 1) { 687 scsiq.q1.cntl |= QC_SG_HEAD; 688 sghead.entry_cnt 689 = sghead.entry_to_copy 690 = nsegments; 691 sghead.res = 0; 692 sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs); 693 scsiq.sg_head = &sghead; 694 } else { 695 scsiq.sg_head = NULL; 696 } 697 if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN) 698 op = BUS_DMASYNC_PREREAD; 699 else 700 op = BUS_DMASYNC_PREWRITE; 701 bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op); 702 } else { 703 scsiq.q1.data_addr = 0; 704 scsiq.q1.data_cnt = 0; 705 scsiq.sg_head = NULL; 706 } 707 708 709 crit_enter(); 710 /* 711 * Last time we need to check if this SCB needs to 712 * be aborted. 713 */ 714 if (ccb_h->status != CAM_REQ_INPROG) { 715 if (nsegments != 0) 716 bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); 717 adv_clear_state(adv, (union ccb *)csio); 718 adv_free_ccb_info(adv, cinfo); 719 xpt_done((union ccb *)csio); 720 crit_exit(); 721 return; 722 } 723 724 if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) { 725 /* Temporary resource shortage */ 726 adv_set_state(adv, ADV_RESOURCE_SHORTAGE); 727 if (nsegments != 0) 728 bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); 729 csio->ccb_h.status = CAM_REQUEUE_REQ; 730 adv_clear_state(adv, (union ccb *)csio); 731 adv_free_ccb_info(adv, cinfo); 732 xpt_done((union ccb *)csio); 733 crit_exit(); 734 return; 735 } 736 cinfo->state |= ACCB_ACTIVE; 737 ccb_h->status |= CAM_SIM_QUEUED; 738 LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le); 739 /* Schedule our timeout */ 740 callout_reset(&ccb_h->timeout_ch, (ccb_h->timeout * hz)/1000, 741 adv_timeout, csio); 742 crit_exit(); 743 } 744 745 static struct adv_ccb_info * 746 adv_alloc_ccb_info(struct adv_softc *adv) 747 { 748 int error; 749 struct adv_ccb_info *cinfo; 750 751 cinfo = &adv->ccb_infos[adv->ccb_infos_allocated]; 752 cinfo->state = ACCB_FREE; 753 error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0, 754 &cinfo->dmamap); 755 if (error != 0) { 756 kprintf("%s: Unable to allocate CCB info " 757 "dmamap - error %d\n", adv_name(adv), error); 758 return (NULL); 759 } 760 adv->ccb_infos_allocated++; 761 return (cinfo); 762 } 763 764 static void 765 adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo) 766 { 767 bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap); 768 } 769 770 void 771 adv_timeout(void *arg) 772 { 773 union ccb *ccb; 774 struct adv_softc *adv; 775 struct adv_ccb_info *cinfo; 776 777 ccb = (union ccb *)arg; 778 adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc; 779 cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; 780 781 xpt_print_path(ccb->ccb_h.path); 782 kprintf("Timed out\n"); 783 784 crit_enter(); 785 /* Have we been taken care of already?? */ 786 if (cinfo == NULL || cinfo->state == ACCB_FREE) { 787 crit_exit(); 788 return; 789 } 790 791 adv_stop_execution(adv); 792 793 if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) { 794 struct ccb_hdr *ccb_h; 795 796 /* 797 * In order to simplify the recovery process, we ask the XPT 798 * layer to halt the queue of new transactions and we traverse 799 * the list of pending CCBs and remove their timeouts. This 800 * means that the driver attempts to clear only one error 801 * condition at a time. In general, timeouts that occur 802 * close together are related anyway, so there is no benefit 803 * in attempting to handle errors in parrallel. Timeouts will 804 * be reinstated when the recovery process ends. 805 */ 806 adv_set_state(adv, ADV_IN_TIMEOUT); 807 808 /* This CCB is the CCB representing our recovery actions */ 809 cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED; 810 811 ccb_h = LIST_FIRST(&adv->pending_ccbs); 812 while (ccb_h != NULL) { 813 callout_stop(&ccb_h->timeout_ch); 814 ccb_h = LIST_NEXT(ccb_h, sim_links.le); 815 } 816 817 /* XXX Should send a BDR */ 818 /* Attempt an abort as our first tact */ 819 xpt_print_path(ccb->ccb_h.path); 820 kprintf("Attempting abort\n"); 821 adv_abort_ccb(adv, ccb->ccb_h.target_id, 822 ccb->ccb_h.target_lun, ccb, 823 CAM_CMD_TIMEOUT, /*queued_only*/FALSE); 824 callout_reset(&ccb->ccb_h.timeout_ch, 2 * hz, adv_timeout, ccb); 825 } else { 826 /* Our attempt to perform an abort failed, go for a reset */ 827 xpt_print_path(ccb->ccb_h.path); 828 kprintf("Resetting bus\n"); 829 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 830 ccb->ccb_h.status |= CAM_CMD_TIMEOUT; 831 adv_reset_bus(adv, /*initiate_reset*/TRUE); 832 } 833 adv_start_execution(adv); 834 crit_exit(); 835 } 836 837 struct adv_softc * 838 adv_alloc(device_t dev, bus_space_tag_t tag, bus_space_handle_t bsh) 839 { 840 struct adv_softc *adv = device_get_softc(dev); 841 842 /* 843 * Allocate a storage area for us 844 */ 845 LIST_INIT(&adv->pending_ccbs); 846 SLIST_INIT(&adv->free_ccb_infos); 847 adv->dev = dev; 848 adv->unit = device_get_unit(dev); 849 adv->tag = tag; 850 adv->bsh = bsh; 851 852 return(adv); 853 } 854 855 void 856 adv_free(struct adv_softc *adv) 857 { 858 switch (adv->init_level) { 859 case 6: 860 { 861 struct adv_ccb_info *cinfo; 862 863 while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) { 864 SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links); 865 adv_destroy_ccb_info(adv, cinfo); 866 } 867 868 bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap); 869 } 870 case 5: 871 bus_dmamem_free(adv->sense_dmat, adv->sense_buffers, 872 adv->sense_dmamap); 873 case 4: 874 bus_dma_tag_destroy(adv->sense_dmat); 875 case 3: 876 bus_dma_tag_destroy(adv->buffer_dmat); 877 case 2: 878 bus_dma_tag_destroy(adv->parent_dmat); 879 case 1: 880 if (adv->ccb_infos != NULL) 881 kfree(adv->ccb_infos, M_DEVBUF); 882 case 0: 883 break; 884 } 885 } 886 887 int 888 adv_init(struct adv_softc *adv) 889 { 890 struct adv_eeprom_config eeprom_config; 891 int checksum, i; 892 int max_sync; 893 u_int16_t config_lsw; 894 u_int16_t config_msw; 895 896 adv_lib_init(adv); 897 898 /* 899 * Stop script execution. 900 */ 901 adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE); 902 adv_stop_execution(adv); 903 if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) { 904 kprintf("adv%d: Unable to halt adapter. Initialization" 905 "failed\n", adv->unit); 906 return (1); 907 } 908 ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR); 909 if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) { 910 kprintf("adv%d: Unable to set program counter. Initialization" 911 "failed\n", adv->unit); 912 return (1); 913 } 914 915 config_msw = ADV_INW(adv, ADV_CONFIG_MSW); 916 config_lsw = ADV_INW(adv, ADV_CONFIG_LSW); 917 918 if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) { 919 config_msw &= ~ADV_CFG_MSW_CLR_MASK; 920 /* 921 * XXX The Linux code flags this as an error, 922 * but what should we report to the user??? 923 * It seems that clearing the config register 924 * makes this error recoverable. 925 */ 926 ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); 927 } 928 929 /* Suck in the configuration from the EEProm */ 930 checksum = adv_get_eeprom_config(adv, &eeprom_config); 931 932 if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) { 933 /* 934 * XXX The Linux code sets a warning level for this 935 * condition, yet nothing of meaning is printed to 936 * the user. What does this mean??? 937 */ 938 if (adv->chip_version == 3) { 939 if (eeprom_config.cfg_lsw != config_lsw) 940 eeprom_config.cfg_lsw = config_lsw; 941 if (eeprom_config.cfg_msw != config_msw) { 942 eeprom_config.cfg_msw = config_msw; 943 } 944 } 945 } 946 if (checksum == eeprom_config.chksum) { 947 948 /* Range/Sanity checking */ 949 if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) { 950 eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG; 951 } 952 if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) { 953 eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG; 954 } 955 if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) { 956 eeprom_config.max_tag_qng = eeprom_config.max_total_qng; 957 } 958 if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) { 959 eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC; 960 } 961 adv->max_openings = eeprom_config.max_total_qng; 962 adv->user_disc_enable = eeprom_config.disc_enable; 963 adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng; 964 adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config); 965 adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID; 966 EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id); 967 adv->control = eeprom_config.cntl; 968 for (i = 0; i <= ADV_MAX_TID; i++) { 969 u_int8_t sync_data; 970 971 if ((eeprom_config.init_sdtr & (0x1 << i)) == 0) 972 sync_data = 0; 973 else 974 sync_data = eeprom_config.sdtr_data[i]; 975 adv_sdtr_to_period_offset(adv, 976 sync_data, 977 &adv->tinfo[i].user.period, 978 &adv->tinfo[i].user.offset, 979 i); 980 } 981 config_lsw = eeprom_config.cfg_lsw; 982 eeprom_config.cfg_msw = config_msw; 983 } else { 984 u_int8_t sync_data; 985 986 kprintf("adv%d: Warning EEPROM Checksum mismatch. " 987 "Using default device parameters\n", adv->unit); 988 989 /* Set reasonable defaults since we can't read the EEPROM */ 990 adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1; 991 adv->max_openings = ADV_DEF_MAX_TOTAL_QNG; 992 adv->disc_enable = TARGET_BIT_VECTOR_SET; 993 adv->user_disc_enable = TARGET_BIT_VECTOR_SET; 994 adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET; 995 adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET; 996 adv->scsi_id = 7; 997 adv->control = 0xFFFF; 998 999 if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050) 1000 /* Default to no Ultra to support the 3030 */ 1001 adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA; 1002 sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4); 1003 for (i = 0; i <= ADV_MAX_TID; i++) { 1004 adv_sdtr_to_period_offset(adv, sync_data, 1005 &adv->tinfo[i].user.period, 1006 &adv->tinfo[i].user.offset, 1007 i); 1008 } 1009 config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON; 1010 } 1011 config_msw &= ~ADV_CFG_MSW_CLR_MASK; 1012 config_lsw |= ADV_CFG_LSW_HOST_INT_ON; 1013 if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA) 1014 && (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0) 1015 /* 25ns or 10MHz */ 1016 max_sync = 25; 1017 else 1018 /* Unlimited */ 1019 max_sync = 0; 1020 for (i = 0; i <= ADV_MAX_TID; i++) { 1021 if (adv->tinfo[i].user.period < max_sync) 1022 adv->tinfo[i].user.period = max_sync; 1023 } 1024 1025 if (adv_test_external_lram(adv) == 0) { 1026 if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) { 1027 eeprom_config.max_total_qng = 1028 ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG; 1029 eeprom_config.max_tag_qng = 1030 ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG; 1031 } else { 1032 eeprom_config.cfg_msw |= 0x0800; 1033 config_msw |= 0x0800; 1034 eeprom_config.max_total_qng = 1035 ADV_MAX_PCI_INRAM_TOTAL_QNG; 1036 eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG; 1037 } 1038 adv->max_openings = eeprom_config.max_total_qng; 1039 } 1040 ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); 1041 ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw); 1042 #if 0 1043 /* 1044 * Don't write the eeprom data back for now. 1045 * I'd rather not mess up the user's card. We also don't 1046 * fully sanitize the eeprom settings above for the write-back 1047 * to be 100% correct. 1048 */ 1049 if (adv_set_eeprom_config(adv, &eeprom_config) != 0) 1050 kprintf("%s: WARNING! Failure writing to EEPROM.\n", 1051 adv_name(adv)); 1052 #endif 1053 1054 adv_set_chip_scsiid(adv, adv->scsi_id); 1055 if (adv_init_lram_and_mcode(adv)) 1056 return (1); 1057 1058 adv->disc_enable = adv->user_disc_enable; 1059 1060 adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable); 1061 for (i = 0; i <= ADV_MAX_TID; i++) { 1062 /* 1063 * Start off in async mode. 1064 */ 1065 adv_set_syncrate(adv, /*struct cam_path */NULL, 1066 i, /*period*/0, /*offset*/0, 1067 ADV_TRANS_CUR); 1068 /* 1069 * Enable the use of tagged commands on all targets. 1070 * This allows the kernel driver to make up it's own mind 1071 * as it sees fit to tag queue instead of having the 1072 * firmware try and second guess the tag_code settins. 1073 */ 1074 adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i, 1075 adv->max_openings); 1076 } 1077 adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET); 1078 adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET); 1079 kprintf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n", 1080 adv->unit, (adv->type & ADV_ULTRA) && (max_sync == 0) 1081 ? "Ultra SCSI" : "SCSI", 1082 adv->scsi_id, adv->max_openings); 1083 return (0); 1084 } 1085 1086 void 1087 adv_intr(void *arg) 1088 { 1089 struct adv_softc *adv; 1090 u_int16_t chipstat; 1091 u_int16_t saved_ram_addr; 1092 u_int8_t ctrl_reg; 1093 u_int8_t saved_ctrl_reg; 1094 u_int8_t host_flag; 1095 1096 adv = (struct adv_softc *)arg; 1097 1098 chipstat = ADV_INW(adv, ADV_CHIP_STATUS); 1099 1100 /* Is it for us? */ 1101 if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0) 1102 return; 1103 1104 ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL); 1105 saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET | 1106 ADV_CC_SINGLE_STEP | ADV_CC_DIAG | 1107 ADV_CC_TEST)); 1108 1109 if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) { 1110 kprintf("Detected Bus Reset\n"); 1111 adv_reset_bus(adv, /*initiate_reset*/FALSE); 1112 return; 1113 } 1114 1115 if ((chipstat & ADV_CSW_INT_PENDING) != 0) { 1116 1117 saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR); 1118 host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B); 1119 adv_write_lram_8(adv, ADVV_HOST_FLAG_B, 1120 host_flag | ADV_HOST_FLAG_IN_ISR); 1121 1122 adv_ack_interrupt(adv); 1123 1124 if ((chipstat & ADV_CSW_HALTED) != 0 1125 && (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) { 1126 adv_isr_chip_halted(adv); 1127 saved_ctrl_reg &= ~ADV_CC_HALT; 1128 } else { 1129 adv_run_doneq(adv); 1130 } 1131 ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr); 1132 #ifdef DIAGNOSTIC 1133 if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr) 1134 panic("adv_intr: Unable to set LRAM addr"); 1135 #endif 1136 adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag); 1137 } 1138 1139 ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg); 1140 } 1141 1142 void 1143 adv_run_doneq(struct adv_softc *adv) 1144 { 1145 struct adv_q_done_info scsiq; 1146 u_int doneq_head; 1147 u_int done_qno; 1148 1149 doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF; 1150 done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head) 1151 + ADV_SCSIQ_B_FWD); 1152 while (done_qno != ADV_QLINK_END) { 1153 union ccb* ccb; 1154 struct adv_ccb_info *cinfo; 1155 u_int done_qaddr; 1156 u_int sg_queue_cnt; 1157 int aborted; 1158 1159 done_qaddr = ADV_QNO_TO_QADDR(done_qno); 1160 1161 /* Pull status from this request */ 1162 sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq, 1163 adv->max_dma_count); 1164 1165 /* Mark it as free */ 1166 adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS, 1167 scsiq.q_status & ~(QS_READY|QS_ABORTED)); 1168 1169 /* Process request based on retrieved info */ 1170 if ((scsiq.cntl & QC_SG_HEAD) != 0) { 1171 u_int i; 1172 1173 /* 1174 * S/G based request. Free all of the queue 1175 * structures that contained S/G information. 1176 */ 1177 for (i = 0; i < sg_queue_cnt; i++) { 1178 done_qno = adv_read_lram_8(adv, done_qaddr 1179 + ADV_SCSIQ_B_FWD); 1180 1181 #ifdef DIAGNOSTIC 1182 if (done_qno == ADV_QLINK_END) { 1183 panic("adv_qdone: Corrupted SG " 1184 "list encountered"); 1185 } 1186 #endif 1187 done_qaddr = ADV_QNO_TO_QADDR(done_qno); 1188 1189 /* Mark SG queue as free */ 1190 adv_write_lram_8(adv, done_qaddr 1191 + ADV_SCSIQ_B_STATUS, QS_FREE); 1192 } 1193 } else 1194 sg_queue_cnt = 0; 1195 #ifdef DIAGNOSTIC 1196 if (adv->cur_active < (sg_queue_cnt + 1)) 1197 panic("adv_qdone: Attempting to free more " 1198 "queues than are active"); 1199 #endif 1200 adv->cur_active -= sg_queue_cnt + 1; 1201 1202 aborted = (scsiq.q_status & QS_ABORTED) != 0; 1203 1204 if ((scsiq.q_status != QS_DONE) 1205 && (scsiq.q_status & QS_ABORTED) == 0) 1206 panic("adv_qdone: completed scsiq with unknown status"); 1207 1208 scsiq.remain_bytes += scsiq.extra_bytes; 1209 1210 if ((scsiq.d3.done_stat == QD_WITH_ERROR) && 1211 (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) { 1212 if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) { 1213 scsiq.d3.done_stat = QD_NO_ERROR; 1214 scsiq.d3.host_stat = QHSTA_NO_ERROR; 1215 } 1216 } 1217 1218 cinfo = &adv->ccb_infos[scsiq.d2.ccb_index]; 1219 ccb = cinfo->ccb; 1220 ccb->csio.resid = scsiq.remain_bytes; 1221 adv_done(adv, ccb, 1222 scsiq.d3.done_stat, scsiq.d3.host_stat, 1223 scsiq.d3.scsi_stat, scsiq.q_no); 1224 1225 doneq_head = done_qno; 1226 done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD); 1227 } 1228 adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head); 1229 } 1230 1231 1232 void 1233 adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat, 1234 u_int host_stat, u_int scsi_status, u_int q_no) 1235 { 1236 struct adv_ccb_info *cinfo; 1237 1238 cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; 1239 LIST_REMOVE(&ccb->ccb_h, sim_links.le); 1240 callout_stop(&ccb->ccb_h.timeout_ch); 1241 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1242 bus_dmasync_op_t op; 1243 1244 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1245 op = BUS_DMASYNC_POSTREAD; 1246 else 1247 op = BUS_DMASYNC_POSTWRITE; 1248 bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op); 1249 bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); 1250 } 1251 1252 switch (done_stat) { 1253 case QD_NO_ERROR: 1254 if (host_stat == QHSTA_NO_ERROR) { 1255 ccb->ccb_h.status = CAM_REQ_CMP; 1256 break; 1257 } 1258 xpt_print_path(ccb->ccb_h.path); 1259 kprintf("adv_done - queue done without error, " 1260 "but host status non-zero(%x)\n", host_stat); 1261 /*FALLTHROUGH*/ 1262 case QD_WITH_ERROR: 1263 switch (host_stat) { 1264 case QHSTA_M_TARGET_STATUS_BUSY: 1265 case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY: 1266 /* 1267 * Assume that if we were a tagged transaction 1268 * the target reported queue full. Otherwise, 1269 * report busy. The firmware really should just 1270 * pass the original status back up to us even 1271 * if it thinks the target was in error for 1272 * returning this status as no other transactions 1273 * from this initiator are in effect, but this 1274 * ignores multi-initiator setups and there is 1275 * evidence that the firmware gets its per-device 1276 * transaction counts screwed up occassionally. 1277 */ 1278 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1279 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 1280 && host_stat != QHSTA_M_TARGET_STATUS_BUSY) 1281 scsi_status = SCSI_STATUS_QUEUE_FULL; 1282 else 1283 scsi_status = SCSI_STATUS_BUSY; 1284 adv_abort_ccb(adv, ccb->ccb_h.target_id, 1285 ccb->ccb_h.target_lun, 1286 /*ccb*/NULL, CAM_REQUEUE_REQ, 1287 /*queued_only*/TRUE); 1288 /*FALLTHROUGH*/ 1289 case QHSTA_M_NO_AUTO_REQ_SENSE: 1290 case QHSTA_NO_ERROR: 1291 ccb->csio.scsi_status = scsi_status; 1292 switch (scsi_status) { 1293 case SCSI_STATUS_CHECK_COND: 1294 case SCSI_STATUS_CMD_TERMINATED: 1295 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1296 /* Structure copy */ 1297 ccb->csio.sense_data = 1298 adv->sense_buffers[q_no - 1]; 1299 /* FALLTHROUGH */ 1300 case SCSI_STATUS_BUSY: 1301 case SCSI_STATUS_RESERV_CONFLICT: 1302 case SCSI_STATUS_QUEUE_FULL: 1303 case SCSI_STATUS_COND_MET: 1304 case SCSI_STATUS_INTERMED: 1305 case SCSI_STATUS_INTERMED_COND_MET: 1306 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1307 break; 1308 case SCSI_STATUS_OK: 1309 ccb->ccb_h.status |= CAM_REQ_CMP; 1310 break; 1311 } 1312 break; 1313 case QHSTA_M_SEL_TIMEOUT: 1314 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 1315 break; 1316 case QHSTA_M_DATA_OVER_RUN: 1317 ccb->ccb_h.status = CAM_DATA_RUN_ERR; 1318 break; 1319 case QHSTA_M_UNEXPECTED_BUS_FREE: 1320 ccb->ccb_h.status = CAM_UNEXP_BUSFREE; 1321 break; 1322 case QHSTA_M_BAD_BUS_PHASE_SEQ: 1323 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1324 break; 1325 case QHSTA_M_BAD_CMPL_STATUS_IN: 1326 /* No command complete after a status message */ 1327 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1328 break; 1329 case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT: 1330 case QHSTA_M_WTM_TIMEOUT: 1331 case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET: 1332 /* The SCSI bus hung in a phase */ 1333 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1334 adv_reset_bus(adv, /*initiate_reset*/TRUE); 1335 break; 1336 case QHSTA_M_AUTO_REQ_SENSE_FAIL: 1337 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; 1338 break; 1339 case QHSTA_D_QDONE_SG_LIST_CORRUPTED: 1340 case QHSTA_D_ASC_DVC_ERROR_CODE_SET: 1341 case QHSTA_D_HOST_ABORT_FAILED: 1342 case QHSTA_D_EXE_SCSI_Q_FAILED: 1343 case QHSTA_D_ASPI_NO_BUF_POOL: 1344 case QHSTA_M_BAD_TAG_CODE: 1345 case QHSTA_D_LRAM_CMP_ERROR: 1346 case QHSTA_M_MICRO_CODE_ERROR_HALT: 1347 default: 1348 panic("%s: Unhandled Host status error %x", 1349 adv_name(adv), host_stat); 1350 /* NOTREACHED */ 1351 } 1352 break; 1353 1354 case QD_ABORTED_BY_HOST: 1355 /* Don't clobber any, more explicit, error codes we've set */ 1356 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) 1357 ccb->ccb_h.status = CAM_REQ_ABORTED; 1358 break; 1359 1360 default: 1361 xpt_print_path(ccb->ccb_h.path); 1362 kprintf("adv_done - queue done with unknown status %x:%x\n", 1363 done_stat, host_stat); 1364 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1365 break; 1366 } 1367 adv_clear_state(adv, ccb); 1368 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP 1369 && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 1370 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1371 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1372 } 1373 adv_free_ccb_info(adv, cinfo); 1374 /* 1375 * Null this out so that we catch driver bugs that cause a 1376 * ccb to be completed twice. 1377 */ 1378 ccb->ccb_h.ccb_cinfo_ptr = NULL; 1379 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1380 xpt_done(ccb); 1381 } 1382 1383 /* 1384 * Function to poll for command completion when 1385 * interrupts are disabled (crash dumps) 1386 */ 1387 static void 1388 adv_poll(struct cam_sim *sim) 1389 { 1390 adv_intr(cam_sim_softc(sim)); 1391 } 1392 1393 /* 1394 * Attach all the sub-devices we can find 1395 */ 1396 int 1397 adv_attach(adv) 1398 struct adv_softc *adv; 1399 { 1400 struct ccb_setasync csa; 1401 int max_sg; 1402 1403 /* 1404 * Allocate an array of ccb mapping structures. We put the 1405 * index of the ccb_info structure into the queue representing 1406 * a transaction and use it for mapping the queue to the 1407 * upper level SCSI transaction it represents. 1408 */ 1409 adv->ccb_infos = kmalloc(sizeof(*adv->ccb_infos) * adv->max_openings, 1410 M_DEVBUF, M_WAITOK); 1411 adv->init_level++; 1412 1413 /* 1414 * Create our DMA tags. These tags define the kinds of device 1415 * accessible memory allocations and memory mappings we will 1416 * need to perform during normal operation. 1417 * 1418 * Unless we need to further restrict the allocation, we rely 1419 * on the restrictions of the parent dmat, hence the common 1420 * use of MAXADDR and MAXSIZE. 1421 * 1422 * The ASC boards use chains of "queues" (the transactional 1423 * resources on the board) to represent long S/G lists. 1424 * The first queue represents the command and holds a 1425 * single address and data pair. The queues that follow 1426 * can each hold ADV_SG_LIST_PER_Q entries. Given the 1427 * total number of queues, we can express the largest 1428 * transaction we can map. We reserve a few queues for 1429 * error recovery. Take those into account as well. 1430 * 1431 * There is a way to take an interrupt to download the 1432 * next batch of S/G entries if there are more than 255 1433 * of them (the counter in the queue structure is a u_int8_t). 1434 * We don't use this feature, so limit the S/G list size 1435 * accordingly. 1436 */ 1437 max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q; 1438 if (max_sg > 255) 1439 max_sg = 255; 1440 1441 /* DMA tag for mapping buffers into device visible space. */ 1442 if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/1, /*boundary*/0, 1443 /*lowaddr*/BUS_SPACE_MAXADDR, 1444 /*highaddr*/BUS_SPACE_MAXADDR, 1445 /*filter*/NULL, /*filterarg*/NULL, 1446 /*maxsize*/MAXPHYS, 1447 /*nsegments*/max_sg, 1448 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1449 /*flags*/BUS_DMA_ALLOCNOW, 1450 &adv->buffer_dmat) != 0) { 1451 return (ENXIO); 1452 } 1453 adv->init_level++; 1454 1455 /* DMA tag for our sense buffers */ 1456 if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/1, /*boundary*/0, 1457 /*lowaddr*/BUS_SPACE_MAXADDR, 1458 /*highaddr*/BUS_SPACE_MAXADDR, 1459 /*filter*/NULL, /*filterarg*/NULL, 1460 sizeof(struct scsi_sense_data)*adv->max_openings, 1461 /*nsegments*/1, 1462 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1463 /*flags*/0, &adv->sense_dmat) != 0) { 1464 return (ENXIO); 1465 } 1466 1467 adv->init_level++; 1468 1469 /* Allocation for our sense buffers */ 1470 if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers, 1471 BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) { 1472 return (ENOMEM); 1473 } 1474 1475 adv->init_level++; 1476 1477 /* And permanently map them */ 1478 bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap, 1479 adv->sense_buffers, 1480 sizeof(struct scsi_sense_data)*adv->max_openings, 1481 adv_map, &adv->sense_physbase, /*flags*/0); 1482 1483 adv->init_level++; 1484 1485 /* 1486 * Fire up the chip 1487 */ 1488 if (adv_start_chip(adv) != 1) { 1489 kprintf("adv%d: Unable to start on board processor. Aborting.\n", 1490 adv->unit); 1491 return (ENXIO); 1492 } 1493 1494 /* 1495 * Construct our SIM entry. 1496 */ 1497 adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit, 1498 1, adv->max_openings, NULL); 1499 if (adv->sim == NULL) 1500 return (ENOMEM); 1501 1502 /* 1503 * Register the bus. 1504 * 1505 * XXX Twin Channel EISA Cards??? 1506 */ 1507 if (xpt_bus_register(adv->sim, 0) != CAM_SUCCESS) { 1508 cam_sim_free(adv->sim); 1509 return (ENXIO); 1510 } 1511 1512 if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim), 1513 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) 1514 != CAM_REQ_CMP) { 1515 xpt_bus_deregister(cam_sim_path(adv->sim)); 1516 cam_sim_free(adv->sim); 1517 return (ENXIO); 1518 } 1519 1520 xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5); 1521 csa.ccb_h.func_code = XPT_SASYNC_CB; 1522 csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE; 1523 csa.callback = advasync; 1524 csa.callback_arg = adv; 1525 xpt_action((union ccb *)&csa); 1526 return (0); 1527 } 1528