/*
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 *	adw_pci.c	ABP[3]940UW, ABP950UW, ABP3940U2W
 *
 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/advansys/adwcam.c,v 1.7.2.2 2001/03/05 13:08:55 obrien Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include <sys/rman.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>
#include <bus/cam/cam_xpt_periph.h>

#include <bus/cam/scsi/scsi_message.h>

#include "adwvar.h"

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

u_long adw_unit;

static __inline cam_status	adwccbstatus(union ccb*);
static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

static __inline cam_status
adwccbstatus(union ccb* ccb)
{
	return (ccb->ccb_h.status & CAM_STATUS_MASK);
}

static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct acb* acb;

	crit_enter();
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			kprintf("%s: Can't malloc ACB\n", adw_name(adw));
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}
	crit_exit();

	return (acb);
}

static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{
	crit_enter();
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
	crit_exit();
}

static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = kmalloc(sizeof(*sg_map), M_DEVBUF, M_INTWAIT);

	/* Allocate S/G space for the next batch of ACBs */
	if (bus_dmamem_alloc(adw->sg_dmat, (void *)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		kfree(sg_map, M_DEVBUF);
		return (NULL);
	}

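	/*
	 * Remember this map so adw_free() can unload and release it
	 * when the controller is torn down.
	 */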
	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);
	return (sg_map);
}

/*
 * Allocate another chunk of ACBs.  Return count of entries added.
 * Assumed to be called under crit_enter().
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	next_acb = &adw->acbs[adw->num_acbs];
	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
		next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
		next_acb->queue.sense_baddr =
		    acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		next_acb->state = ACB_FREE;
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}

static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct acb *acb;
	union ccb *ccb;
	struct adw_softc *adw;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (error != 0) {
		if (error != EFBIG)
			kprintf("%s: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", adw_name(adw), error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int i;

				sg = sg_block->sg_list;
				for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
					if (dm_segs >= end_seg)
						break;

					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
				}
				sg_block->sg_cnt = i;
				sg_index += i;
				if (dm_segs == end_seg) {
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

	} else {
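		/* No data phase for this request; clear the DMA fields. */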
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}

	crit_enter();

	/*
	 * Last chance to check whether this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		crit_exit();
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	callout_reset(ccb->ccb_h.timeout_ch,
		      (ccb->ccb_h.timeout * hz) / 1000,
		      adwtimeout, acb);

	adw_send_acb(adw, acb, acbvtob(adw, acb));

	crit_exit();
}

static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct ccb_scsiio *csio;
		struct ccb_hdr *ccbh;
		struct acb *acb;

		csio = &ccb->csio;
		ccbh = &ccb->ccb_h;

		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			crit_enter();
			adw->state |= ADW_RESOURCE_SHORTAGE;
			crit_exit();
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_cmd = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.mflag = 0;
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
			switch (csio->tag_action) {
			case MSG_SIMPLE_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
				break;
			case MSG_HEAD_OF_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
				break;
			case MSG_ORDERED_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
				break;
			default:
				acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
				break;
			}
		} else
			acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;

		if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
			acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.sg_wk_ix = 0;
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

		/*
		 * If we have any data to send with this command,
		 * map it into bus space.
		 */
		if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
436 */ 437 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 438 int error; 439 440 crit_enter(); 441 error = 442 bus_dmamap_load(adw->buffer_dmat, 443 acb->dmamap, 444 csio->data_ptr, 445 csio->dxfer_len, 446 adwexecuteacb, 447 acb, /*flags*/0); 448 if (error == EINPROGRESS) { 449 /* 450 * So as to maintain ordering, 451 * freeze the controller queue 452 * until our mapping is 453 * returned. 454 */ 455 xpt_freeze_simq(sim, 1); 456 acb->state |= CAM_RELEASE_SIMQ; 457 } 458 crit_exit(); 459 } else { 460 struct bus_dma_segment seg; 461 462 /* Pointer to physical buffer */ 463 seg.ds_addr = 464 (bus_addr_t)csio->data_ptr; 465 seg.ds_len = csio->dxfer_len; 466 adwexecuteacb(acb, &seg, 1, 0); 467 } 468 } else { 469 struct bus_dma_segment *segs; 470 471 if ((ccbh->flags & CAM_DATA_PHYS) != 0) 472 panic("adw_action - Physical " 473 "segment pointers " 474 "unsupported"); 475 476 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) 477 panic("adw_action - Virtual " 478 "segment addresses " 479 "unsupported"); 480 481 /* Just use the segments provided */ 482 segs = (struct bus_dma_segment *)csio->data_ptr; 483 adwexecuteacb(acb, segs, csio->sglist_cnt, 484 (csio->sglist_cnt < ADW_SGSIZE) 485 ? 0 : EFBIG); 486 } 487 } else { 488 adwexecuteacb(acb, NULL, 0, 0); 489 } 490 break; 491 } 492 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 493 { 494 adw_idle_cmd_status_t status; 495 496 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, 497 ccb->ccb_h.target_id); 498 if (status == ADW_IDLE_CMD_SUCCESS) { 499 ccb->ccb_h.status = CAM_REQ_CMP; 500 if (bootverbose) { 501 xpt_print_path(ccb->ccb_h.path); 502 kprintf("BDR Delivered\n"); 503 } 504 } else 505 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 506 xpt_done(ccb); 507 break; 508 } 509 case XPT_ABORT: /* Abort the specified CCB */ 510 /* XXX Implement */ 511 ccb->ccb_h.status = CAM_REQ_INVALID; 512 xpt_done(ccb); 513 break; 514 case XPT_SET_TRAN_SETTINGS: 515 { 516 struct ccb_trans_settings_scsi *scsi; 517 struct ccb_trans_settings_spi *spi; 518 struct ccb_trans_settings *cts; 519 u_int target_mask; 520 521 cts = &ccb->cts; 522 target_mask = 0x01 << ccb->ccb_h.target_id; 523 524 crit_enter(); 525 scsi = &cts->proto_specific.scsi; 526 spi = &cts->xport_specific.spi; 527 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 528 u_int sdtrdone; 529 530 sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE); 531 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 532 u_int discenb; 533 534 discenb = 535 adw_lram_read_16(adw, ADW_MC_DISC_ENABLE); 536 537 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 538 discenb |= target_mask; 539 else 540 discenb &= ~target_mask; 541 542 adw_lram_write_16(adw, ADW_MC_DISC_ENABLE, 543 discenb); 544 } 545 546 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 547 548 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 549 adw->tagenb |= target_mask; 550 else 551 adw->tagenb &= ~target_mask; 552 } 553 554 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 555 u_int wdtrenb_orig; 556 u_int wdtrenb; 557 u_int wdtrdone; 558 559 wdtrenb_orig = 560 adw_lram_read_16(adw, ADW_MC_WDTR_ABLE); 561 wdtrenb = wdtrenb_orig; 562 wdtrdone = adw_lram_read_16(adw, 563 ADW_MC_WDTR_DONE); 564 switch (spi->bus_width) { 565 case MSG_EXT_WDTR_BUS_32_BIT: 566 case MSG_EXT_WDTR_BUS_16_BIT: 567 wdtrenb |= target_mask; 568 break; 569 case MSG_EXT_WDTR_BUS_8_BIT: 570 default: 571 wdtrenb &= ~target_mask; 572 break; 573 } 574 if (wdtrenb != wdtrenb_orig) { 575 adw_lram_write_16(adw, 576 ADW_MC_WDTR_ABLE, 577 wdtrenb); 578 wdtrdone &= ~target_mask; 579 adw_lram_write_16(adw, 580 
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
					/* Wide negotiation forces async */
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}

			if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
			 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
				u_int sdtr_orig;
				u_int sdtr;
				u_int sdtrable_orig;
				u_int sdtrable;

				sdtr = adw_get_chip_sdtr(adw,
							 ccb->ccb_h.target_id);
				sdtr_orig = sdtr;
				sdtrable = adw_lram_read_16(adw,
							    ADW_MC_SDTR_ABLE);
				sdtrable_orig = sdtrable;

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_RATE) != 0) {

					sdtr =
					    adw_find_sdtr(adw,
							  spi->sync_period);
				}

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_OFFSET) != 0) {
					if (spi->sync_offset == 0)
						sdtr = ADW_MC_SDTR_ASYNC;
				}

				if (sdtr == ADW_MC_SDTR_ASYNC)
					sdtrable &= ~target_mask;
				else
					sdtrable |= target_mask;
				if (sdtr != sdtr_orig
				 || sdtrable != sdtrable_orig) {
					adw_set_chip_sdtr(adw,
							  ccb->ccb_h.target_id,
							  sdtr);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrable);
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);

				}
			}
		}
		crit_exit();
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		u_int target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			u_int mc_sdtr;

			spi->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
			spi->sync_period = adw_find_period(adw, mc_sdtr);
			if (spi->sync_period != 0)
				spi->sync_offset = 15; /* XXX ??? */
			else
				spi->sync_offset = 0;

		} else {
			u_int targ_tinfo;

			spi->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			spi->sync_period =
			    adw_hshk_cfg_period_factor(targ_tinfo);

			spi->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (spi->sync_period == 0)
				spi->sync_offset = 0;

			if (spi->sync_offset == 0)
				spi->sync_period = 0;
		}

		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int extended;

		/*
		 * XXX Use Adaptec translation until I find out how to
		 *     get this information from the card.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = 1;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int failure;

		failure = adw_reset_bus(adw);
		if (failure != 0) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			if (bootverbose) {
				xpt_print_path(adw->path);
				kprintf("Bus Reset Delivered\n");
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

static void
adw_poll(struct cam_sim *sim)
{
	adw_intr(cam_sim_softc(sim));
}

static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}

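/*
 * Allocate and minimally initialize a softc for the register window
 * resource provided by the bus specific front end (e.g. adw_pci.c).
 */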
struct adw_softc *
adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
{
	struct adw_softc *adw;
	int i;

	/*
	 * Allocate a storage area for us
	 */
	adw = kmalloc(sizeof(struct adw_softc), M_DEVBUF, M_INTWAIT | M_ZERO);
	LIST_INIT(&adw->pending_ccbs);
	SLIST_INIT(&adw->sg_maps);
	adw->device = dev;
	adw->unit = device_get_unit(dev);
	adw->regs_res_type = regs_type;
	adw->regs_res_id = regs_id;
	adw->regs = regs;
	adw->tag = rman_get_bustag(regs);
	adw->bsh = rman_get_bushandle(regs);
	KKASSERT(adw->unit >= 0 && adw->unit < 100);
	i = adw->unit / 10;
	adw->name = kmalloc(sizeof("adw") + i + 1, M_DEVBUF, M_INTWAIT);
	ksprintf(adw->name, "adw%d", adw->unit);
	return(adw);
}

void
adw_free(struct adw_softc *adw)
{
	switch (adw->init_level) {
	case 9:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			kfree(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	case 8:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	case 7:
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
		bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
	case 6:
		bus_dma_tag_destroy(adw->acb_dmat);
	case 5:
		bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
	case 4:
		bus_dmamem_free(adw->carrier_dmat, adw->carriers,
				adw->carrier_dmamap);
		bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
	case 3:
		bus_dma_tag_destroy(adw->carrier_dmat);
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	case 0:
		break;
	}
	kfree(adw->name, M_DEVBUF);
	kfree(adw, M_DEVBUF);
}

int
adw_init(struct adw_softc *adw)
{
	struct adw_eeprom eep_config;
	u_int	  tid;
	u_int	  i;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		adw->flags |= ADW_EEPROM_FAILED;
		kprintf("%s: EEPROM checksum failed.  Restoring Defaults\n",
		       adw_name(adw));

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	for (tid = 0; tid < ADW_MAX_TID; tid++) {
		u_int	  mc_sdtr;
		u_int16_t tid_mask;

		tid_mask = 0x1 << tid;
		if ((adw->features & ADW_ULTRA) != 0) {
			/*
			 * Ultra chips store sdtr and ultraenb
			 * bits in their seeprom, so we must
			 * construct valid mc_sdtr entries
			 * indirectly.
			 */
933 */ 934 if (eep_config.sync1.sync_enable & tid_mask) { 935 if (eep_config.sync2.ultra_enable & tid_mask) 936 mc_sdtr = ADW_MC_SDTR_20; 937 else 938 mc_sdtr = ADW_MC_SDTR_10; 939 } else 940 mc_sdtr = ADW_MC_SDTR_ASYNC; 941 } else { 942 switch (ADW_TARGET_GROUP(tid)) { 943 case 3: 944 mc_sdtr = eep_config.sync4.sdtr4; 945 break; 946 case 2: 947 mc_sdtr = eep_config.sync3.sdtr3; 948 break; 949 case 1: 950 mc_sdtr = eep_config.sync2.sdtr2; 951 break; 952 default: /* Shut up compiler */ 953 case 0: 954 mc_sdtr = eep_config.sync1.sdtr1; 955 break; 956 } 957 mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid); 958 mc_sdtr &= 0xFF; 959 } 960 adw_set_user_sdtr(adw, tid, mc_sdtr); 961 } 962 adw->user_tagenb = eep_config.tagqng_able; 963 adw->user_discenb = eep_config.disc_enable; 964 adw->max_acbs = eep_config.max_host_qng; 965 adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID); 966 967 /* 968 * Sanity check the number of host openings. 969 */ 970 if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG) 971 adw->max_acbs = ADW_DEF_MAX_HOST_QNG; 972 else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) { 973 /* If the value is zero, assume it is uninitialized. */ 974 if (adw->max_acbs == 0) 975 adw->max_acbs = ADW_DEF_MAX_HOST_QNG; 976 else 977 adw->max_acbs = ADW_DEF_MIN_HOST_QNG; 978 } 979 980 scsicfg1 = 0; 981 if ((adw->features & ADW_ULTRA2) != 0) { 982 switch (eep_config.termination_lvd) { 983 default: 984 kprintf("%s: Invalid EEPROM LVD Termination Settings.\n", 985 adw_name(adw)); 986 kprintf("%s: Reverting to Automatic LVD Termination\n", 987 adw_name(adw)); 988 /* FALLTHROUGH */ 989 case ADW_EEPROM_TERM_AUTO: 990 break; 991 case ADW_EEPROM_TERM_BOTH_ON: 992 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO; 993 /* FALLTHROUGH */ 994 case ADW_EEPROM_TERM_HIGH_ON: 995 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI; 996 /* FALLTHROUGH */ 997 case ADW_EEPROM_TERM_OFF: 998 scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV; 999 break; 1000 } 1001 } 1002 1003 switch (eep_config.termination_se) { 1004 default: 1005 kprintf("%s: Invalid SE EEPROM Termination Settings.\n", 1006 adw_name(adw)); 1007 kprintf("%s: Reverting to Automatic SE Termination\n", 1008 adw_name(adw)); 1009 /* FALLTHROUGH */ 1010 case ADW_EEPROM_TERM_AUTO: 1011 break; 1012 case ADW_EEPROM_TERM_BOTH_ON: 1013 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L; 1014 /* FALLTHROUGH */ 1015 case ADW_EEPROM_TERM_HIGH_ON: 1016 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H; 1017 /* FALLTHROUGH */ 1018 case ADW_EEPROM_TERM_OFF: 1019 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL; 1020 break; 1021 } 1022 kprintf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id); 1023 1024 /* DMA tag for mapping buffers into device visible space. 
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adw->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* DMA tag for our ccb carrier structures */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0x10,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
				* sizeof(struct adw_carrier),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->carrier_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccb carrier structures */
	if (bus_dmamem_alloc(adw->carrier_dmat, (void *)&adw->carriers,
			     BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
			adw->carriers,
			(adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			 * sizeof(struct adw_carrier),
			adwmapmem, &adw->carrier_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			     * sizeof(struct adw_carrier));

	/* Setup our free carrier list */
	adw->free_carriers = adw->carriers;
	for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
		adw->carriers[i].carr_offset =
			carriervtobo(adw, &adw->carriers[i]);
		adw->carriers[i].carr_ba =
			carriervtob(adw, &adw->carriers[i]);
		adw->carriers[i].areq_ba = 0;
		adw->carriers[i].next_ba =
			carriervtobo(adw, &adw->carriers[i+1]);
	}
	/* Terminal carrier.  Never leaves the freelist */
	adw->carriers[i].carr_offset =
		carriervtobo(adw, &adw->carriers[i]);
	adw->carriers[i].carr_ba =
		carriervtob(adw, &adw->carriers[i]);
	adw->carriers[i].areq_ba = 0;
	adw->carriers[i].next_ba = ~0;

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       adw->max_acbs * sizeof(struct acb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->acb_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void *)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
		return (ENOMEM);

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->sg_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocate our first batch of ccbs */
	if (adwallocacbs(adw) == 0)
		return (ENOMEM);

	if (adw_init_chip(adw, scsicfg1) != 0)
		return (ENXIO);

	kprintf("Queue Depth %d\n", adw->max_acbs);

	return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
	struct ccb_setasync *csa;
	int error;

	error = 0;
	crit_enter();
	/* Hook up our interrupt handler */
	if ((error = bus_setup_intr(adw->device, adw->irq, 0,
				    adw_intr, adw, &adw->ih, NULL)) != 0) {
		device_printf(adw->device, "bus_setup_intr() failed: %d\n",
			      error);
		goto fail;
	}

	/* Start the Risc processor now that we are fully configured. */
	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

	/*
	 * Construct our SIM entry.
	 */
	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit,
				 &sim_mplock, 1, adw->max_acbs, NULL);
	if (adw->sim == NULL) {
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Register the bus.
	 */
	if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adw->sim);
		error = ENOMEM;
		goto fail;
	}

	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    == CAM_REQ_CMP) {
		csa = &xpt_alloc_ccb()->csa;
		xpt_setup_ccb(&csa->ccb_h, adw->path, /*priority*/5);
		csa->ccb_h.func_code = XPT_SASYNC_CB;
		csa->event_enable = AC_LOST_DEVICE;
		csa->callback = adw_async;
		csa->callback_arg = adw;
		xpt_action((union ccb *)csa);
		xpt_free_ccb(&csa->ccb_h);
	}

fail:
	crit_exit();
	return (error);
}

void
adw_intr(void *arg)
{
	struct adw_softc *adw;
	u_int	int_stat;

	adw = (struct adw_softc *)arg;
	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		u_int intrb_code;

		/* Async Microcode Event */
		intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
		switch (intrb_code) {
		case ADW_ASYNC_CARRIER_READY_FAILURE:
			/*
			 * The RISC missed our update of
			 * the commandq.
			 */
			if (LIST_FIRST(&adw->pending_ccbs) != NULL)
				adw_tickle_risc(adw, ADW_TICKLE_A);
			break;
		case ADW_ASYNC_SCSI_BUS_RESET_DET:
			/*
			 * The firmware detected a SCSI Bus reset.
			 */
			kprintf("Someone Reset the Bus\n");
			adw_handle_bus_reset(adw, /*initiated*/FALSE);
			break;
		case ADW_ASYNC_RDMA_FAILURE:
			/*
			 * Handle RDMA failure by resetting the
			 * SCSI Bus and chip.
			 */
#if 0 /* XXX */
			AdvResetChipAndSB(adv_dvc_varp);
#endif
			break;

		case ADW_ASYNC_HOST_SCSI_BUS_RESET:
			/*
			 * Host generated SCSI bus reset occurred.
			 */
			adw_handle_bus_reset(adw, /*initiated*/TRUE);
			break;
		default:
			kprintf("adw_intr: unknown async code 0x%x\n",
			       intrb_code);
			break;
		}
	}

	/*
	 * Run down the ResponseQ.
	 */
	while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
		struct adw_carrier *free_carrier;
		struct acb *acb;
		union ccb *ccb;

#if 0
		kprintf("0x%x, 0x%x, 0x%x, 0x%x\n",
			adw->responseq->carr_offset,
			adw->responseq->carr_ba,
			adw->responseq->areq_ba,
			adw->responseq->next_ba);
#endif
		/*
		 * The firmware copies the adw_scsi_req_q.acb_baddr
		 * field into the areq_ba field of the carrier.
		 */
		acb = acbbotov(adw, adw->responseq->areq_ba);

		/*
		 * The least significant four bits of the next_ba
		 * field are used as flags.  Mask them out and then
		 * advance through the list.
		 */
		free_carrier = adw->responseq;
		adw->responseq =
		    carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
		free_carrier->next_ba = adw->free_carriers->carr_offset;
		adw->free_carriers = free_carrier;

		/* Process CCB */
		ccb = acb->ccb;
		callout_stop(ccb->ccb_h.timeout_ch);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
			ccb->csio.resid = acb->queue.data_cnt;
		} else
			ccb->csio.resid = 0;

		/* Common Cases inline... */
		if (acb->queue.host_status == QHSTA_NO_ERROR
		 && (acb->queue.done_status == QD_NO_ERROR
		  || acb->queue.done_status == QD_WITH_ERROR)) {
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = 0;
			switch (ccb->csio.scsi_status) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				bcopy(&acb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid = acb->queue.sense_len;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
						  |  CAM_DEV_QFRZN;
				xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
				break;
			}
			adwfreeacb(adw, acb);
			xpt_done(ccb);
		} else {
			adwprocesserror(adw, acb);
		}
	}
}

static void
adwprocesserror(struct adw_softc *adw, struct acb *acb)
{
	union ccb *ccb;

	ccb = acb->ccb;
	if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	} else {

		switch (acb->queue.host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			break;
		case QHSTA_M_BUS_DEVICE_RESET:
			ccb->ccb_h.status = CAM_BDR_SENT;
			break;
		case QHSTA_M_QUEUE_ABORTED:
			/* BDR or Bus Reset */
			kprintf("Saw Queue Aborted\n");
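			/*
			 * adw->last_reset records whether a BDR or a bus
			 * reset was the cause, so report that status.
			 */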
			ccb->ccb_h.status = adw->last_reset;
			break;
		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			ccb->ccb_h.status = CAM_UNCOR_PARITY;
			break;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
		{
			/* The SCSI bus hung in a phase */
			xpt_print_path(adw->path);
			kprintf("Watch Dog timer expired.  Resetting bus\n");
			adw_reset_bus(adw);
			break;
		}
		case QHSTA_M_SXFR_XFR_PH_ERR:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_M_INVALID_DEVICE:
			ccb->ccb_h.status = CAM_PATH_INVALID;
			break;
		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		default:
			panic("%s: Unhandled Host status error %x",
			      adw_name(adw), acb->queue.host_status);
			/* NOTREACHED */
		}
	}
	if ((acb->state & ACB_RECOVERY_ACB) != 0) {
		if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
		 || ccb->ccb_h.status == CAM_BDR_SENT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adwfreeacb(adw, acb);
	xpt_done(ccb);
}

static void
adwtimeout(void *arg)
{
	struct acb	     *acb;
	union  ccb	     *ccb;
	struct adw_softc     *adw;
	adw_idle_cmd_status_t status;
	int		      target_id;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
	xpt_print_path(ccb->ccb_h.path);
	kprintf("ACB %p - timed out\n", (void *)acb);

	crit_enter();

	if ((acb->state & ACB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		kprintf("ACB %p - timed out CCB already completed\n",
		       (void *)acb);
		crit_exit();
		return;
	}

	acb->state |= ACB_RECOVERY_ACB;
	target_id = ccb->ccb_h.target_id;

	/* Attempt a BDR first */
	status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
				   ccb->ccb_h.target_id);
	crit_exit();
	if (status == ADW_IDLE_CMD_SUCCESS) {
		kprintf("%s: BDR Delivered.  No longer in timeout\n",
		       adw_name(adw));
		adw_handle_device_reset(adw, target_id);
	} else {
		adw_reset_bus(adw);
		xpt_print_path(adw->path);
		kprintf("Bus Reset Delivered.  No longer in timeout\n");
	}
}

static void
adw_handle_device_reset(struct adw_softc *adw, u_int target)
{
	struct cam_path *path;
	cam_status error;

	error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
				target, CAM_LUN_WILDCARD);

	if (error == CAM_REQ_CMP) {
		xpt_async(AC_SENT_BDR, path, NULL);
		xpt_free_path(path);
	}
	adw->last_reset = CAM_BDR_SENT;
}

static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
	if (initiated) {
		/*
		 * The microcode currently sets the SCSI Bus Reset signal
		 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
		 * command above.
		 * But the SCSI Bus Reset Hold Time in the microcode is
		 * not deterministic (it may in fact be for less than the
		 * SCSI Spec. minimum of 25 us).  Therefore on return the
		 * Adv Library sets the SCSI Bus Reset signal for
		 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be
		 * greater than 25 us.
		 */
		u_int scsi_ctrl;

		scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
		DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

		/*
		 * We will perform the async notification when the
		 * SCSI Reset interrupt occurs.
		 */
	} else
		xpt_async(AC_BUS_RESET, adw->path, NULL);
	adw->last_reset = CAM_SCSI_BUS_RESET;
}