1 /* 2 * CAM SCSI interface for the the Advanced Systems Inc. 3 * Second Generation SCSI controllers. 4 * 5 * Product specific probe and attach routines can be found in: 6 * 7 * adw_pci.c ABP[3]940UW, ABP950UW, ABP3940U2W 8 * 9 * Copyright (c) 1998, 1999, 2000 Justin Gibbs. 10 * All rights reserved. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions, and the following disclaimer, 17 * without modification. 18 * 2. The name of the author may not be used to endorse or promote products 19 * derived from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * $FreeBSD: src/sys/dev/advansys/adwcam.c,v 1.7.2.2 2001/03/05 13:08:55 obrien Exp $ 34 */ 35 /* 36 * Ported from: 37 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters 38 * 39 * Copyright (c) 1995-1998 Advanced System Products, Inc. 40 * All Rights Reserved. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that redistributions of source 44 * code retain the above copyright notice and this comment without 45 * modification. 46 */ 47 48 #include <sys/param.h> 49 #include <sys/systm.h> 50 #include <sys/kernel.h> 51 #include <sys/malloc.h> 52 #include <sys/bus.h> 53 #include <sys/thread2.h> 54 55 #include <machine/clock.h> 56 57 #include <sys/rman.h> 58 59 #include <bus/cam/cam.h> 60 #include <bus/cam/cam_ccb.h> 61 #include <bus/cam/cam_sim.h> 62 #include <bus/cam/cam_xpt_sim.h> 63 #include <bus/cam/cam_debug.h> 64 65 #include <bus/cam/scsi/scsi_message.h> 66 67 #include "adwvar.h" 68 69 /* Definitions for our use of the SIM private CCB area */ 70 #define ccb_acb_ptr spriv_ptr0 71 #define ccb_adw_ptr spriv_ptr1 72 73 u_long adw_unit; 74 75 static __inline cam_status adwccbstatus(union ccb*); 76 static __inline struct acb* adwgetacb(struct adw_softc *adw); 77 static __inline void adwfreeacb(struct adw_softc *adw, 78 struct acb *acb); 79 80 static void adwmapmem(void *arg, bus_dma_segment_t *segs, 81 int nseg, int error); 82 static struct sg_map_node* 83 adwallocsgmap(struct adw_softc *adw); 84 static int adwallocacbs(struct adw_softc *adw); 85 86 static void adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, 87 int nseg, int error); 88 static void adw_action(struct cam_sim *sim, union ccb *ccb); 89 static void adw_poll(struct cam_sim *sim); 90 static void adw_async(void *callback_arg, u_int32_t code, 91 struct cam_path *path, void *arg); 92 static void adwprocesserror(struct adw_softc *adw, struct acb *acb); 93 static void adwtimeout(void *arg); 94 static void adw_handle_device_reset(struct adw_softc *adw, 95 u_int target); 96 static void adw_handle_bus_reset(struct adw_softc *adw, 97 int initiated); 98 99 static __inline cam_status 100 adwccbstatus(union ccb* ccb) 101 { 102 return (ccb->ccb_h.status & CAM_STATUS_MASK); 103 } 104 105 static __inline struct acb* 106 adwgetacb(struct adw_softc *adw) 107 { 108 struct acb* acb; 109 110 crit_enter(); 111 if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) { 112 SLIST_REMOVE_HEAD(&adw->free_acb_list, links); 113 } else if (adw->num_acbs < adw->max_acbs) { 114 adwallocacbs(adw); 115 acb = SLIST_FIRST(&adw->free_acb_list); 116 if (acb == NULL) 117 kprintf("%s: Can't malloc ACB\n", adw_name(adw)); 118 else { 119 SLIST_REMOVE_HEAD(&adw->free_acb_list, links); 120 } 121 } 122 crit_exit(); 123 124 return (acb); 125 } 126 127 static __inline void 128 adwfreeacb(struct adw_softc *adw, struct acb *acb) 129 { 130 crit_enter(); 131 if ((acb->state & ACB_ACTIVE) != 0) 132 LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le); 133 if ((acb->state & ACB_RELEASE_SIMQ) != 0) 134 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 135 else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0 136 && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { 137 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 138 adw->state &= ~ADW_RESOURCE_SHORTAGE; 139 } 140 acb->state = ACB_FREE; 141 SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links); 142 crit_exit(); 143 } 144 145 static void 146 adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error) 147 { 148 bus_addr_t *busaddrp; 149 150 busaddrp = (bus_addr_t *)arg; 151 *busaddrp = segs->ds_addr; 152 } 153 154 static struct sg_map_node * 155 adwallocsgmap(struct adw_softc *adw) 156 { 157 struct sg_map_node *sg_map; 158 159 sg_map = kmalloc(sizeof(*sg_map), M_DEVBUF, M_INTWAIT); 160 161 /* Allocate S/G space for the next batch of ACBS */ 162 if (bus_dmamem_alloc(adw->sg_dmat, (void *)&sg_map->sg_vaddr, 163 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { 164 kfree(sg_map, M_DEVBUF); 165 return (NULL); 166 } 167 168 SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links); 169 170 bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, 171 PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0); 172 173 bzero(sg_map->sg_vaddr, PAGE_SIZE); 174 return (sg_map); 175 } 176 177 /* 178 * Allocate another chunk of CCB's. Return count of entries added. 179 * Assumed to be called under crit_enter(). 180 */ 181 static int 182 adwallocacbs(struct adw_softc *adw) 183 { 184 struct acb *next_acb; 185 struct sg_map_node *sg_map; 186 bus_addr_t busaddr; 187 struct adw_sg_block *blocks; 188 int newcount; 189 int i; 190 191 next_acb = &adw->acbs[adw->num_acbs]; 192 sg_map = adwallocsgmap(adw); 193 194 if (sg_map == NULL) 195 return (0); 196 197 blocks = sg_map->sg_vaddr; 198 busaddr = sg_map->sg_physaddr; 199 200 newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks))); 201 for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) { 202 int error; 203 204 error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0, 205 &next_acb->dmamap); 206 if (error != 0) 207 break; 208 next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb); 209 next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb); 210 next_acb->queue.sense_baddr = 211 acbvtob(adw, next_acb) + offsetof(struct acb, sense_data); 212 next_acb->sg_blocks = blocks; 213 next_acb->sg_busaddr = busaddr; 214 next_acb->state = ACB_FREE; 215 SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links); 216 blocks += ADW_SG_BLOCKCNT; 217 busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks); 218 next_acb++; 219 adw->num_acbs++; 220 } 221 return (i); 222 } 223 224 static void 225 adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 226 { 227 struct acb *acb; 228 union ccb *ccb; 229 struct adw_softc *adw; 230 231 acb = (struct acb *)arg; 232 ccb = acb->ccb; 233 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; 234 235 if (error != 0) { 236 if (error != EFBIG) 237 kprintf("%s: Unexpected error 0x%x returned from " 238 "bus_dmamap_load\n", adw_name(adw), error); 239 if (ccb->ccb_h.status == CAM_REQ_INPROG) { 240 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 241 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; 242 } 243 adwfreeacb(adw, acb); 244 xpt_done(ccb); 245 return; 246 } 247 248 if (nseg != 0) { 249 bus_dmasync_op_t op; 250 251 acb->queue.data_addr = dm_segs[0].ds_addr; 252 acb->queue.data_cnt = ccb->csio.dxfer_len; 253 if (nseg > 1) { 254 struct adw_sg_block *sg_block; 255 struct adw_sg_elm *sg; 256 bus_addr_t sg_busaddr; 257 u_int sg_index; 258 bus_dma_segment_t *end_seg; 259 260 end_seg = dm_segs + nseg; 261 262 sg_busaddr = acb->sg_busaddr; 263 sg_index = 0; 264 /* Copy the segments into our SG list */ 265 for (sg_block = acb->sg_blocks;; sg_block++) { 266 u_int i; 267 268 sg = sg_block->sg_list; 269 for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) { 270 if (dm_segs >= end_seg) 271 break; 272 273 sg->sg_addr = dm_segs->ds_addr; 274 sg->sg_count = dm_segs->ds_len; 275 sg++; 276 dm_segs++; 277 } 278 sg_block->sg_cnt = i; 279 sg_index += i; 280 if (dm_segs == end_seg) { 281 sg_block->sg_busaddr_next = 0; 282 break; 283 } else { 284 sg_busaddr += 285 sizeof(struct adw_sg_block); 286 sg_block->sg_busaddr_next = sg_busaddr; 287 } 288 } 289 acb->queue.sg_real_addr = acb->sg_busaddr; 290 } else { 291 acb->queue.sg_real_addr = 0; 292 } 293 294 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 295 op = BUS_DMASYNC_PREREAD; 296 else 297 op = BUS_DMASYNC_PREWRITE; 298 299 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); 300 301 } else { 302 acb->queue.data_addr = 0; 303 acb->queue.data_cnt = 0; 304 acb->queue.sg_real_addr = 0; 305 } 306 307 crit_enter(); 308 309 /* 310 * Last time we need to check if this CCB needs to 311 * be aborted. 312 */ 313 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 314 if (nseg != 0) 315 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); 316 adwfreeacb(adw, acb); 317 xpt_done(ccb); 318 crit_exit(); 319 return; 320 } 321 322 acb->state |= ACB_ACTIVE; 323 ccb->ccb_h.status |= CAM_SIM_QUEUED; 324 LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le); 325 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000, 326 adwtimeout, acb); 327 328 adw_send_acb(adw, acb, acbvtob(adw, acb)); 329 330 crit_exit(); 331 } 332 333 static void 334 adw_action(struct cam_sim *sim, union ccb *ccb) 335 { 336 struct adw_softc *adw; 337 338 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n")); 339 340 adw = (struct adw_softc *)cam_sim_softc(sim); 341 342 switch (ccb->ccb_h.func_code) { 343 /* Common cases first */ 344 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 345 { 346 struct ccb_scsiio *csio; 347 struct ccb_hdr *ccbh; 348 struct acb *acb; 349 350 csio = &ccb->csio; 351 ccbh = &ccb->ccb_h; 352 353 /* Max supported CDB length is 12 bytes */ 354 if (csio->cdb_len > 12) { 355 ccb->ccb_h.status = CAM_REQ_INVALID; 356 xpt_done(ccb); 357 return; 358 } 359 360 if ((acb = adwgetacb(adw)) == NULL) { 361 crit_enter(); 362 adw->state |= ADW_RESOURCE_SHORTAGE; 363 crit_exit(); 364 xpt_freeze_simq(sim, /*count*/1); 365 ccb->ccb_h.status = CAM_REQUEUE_REQ; 366 xpt_done(ccb); 367 return; 368 } 369 370 /* Link acb and ccb so we can find one from the other */ 371 acb->ccb = ccb; 372 ccb->ccb_h.ccb_acb_ptr = acb; 373 ccb->ccb_h.ccb_adw_ptr = adw; 374 375 acb->queue.cntl = 0; 376 acb->queue.target_cmd = 0; 377 acb->queue.target_id = ccb->ccb_h.target_id; 378 acb->queue.target_lun = ccb->ccb_h.target_lun; 379 380 acb->queue.mflag = 0; 381 acb->queue.sense_len = 382 MIN(csio->sense_len, sizeof(acb->sense_data)); 383 acb->queue.cdb_len = csio->cdb_len; 384 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 385 switch (csio->tag_action) { 386 case MSG_SIMPLE_Q_TAG: 387 acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG; 388 break; 389 case MSG_HEAD_OF_Q_TAG: 390 acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG; 391 break; 392 case MSG_ORDERED_Q_TAG: 393 acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG; 394 break; 395 default: 396 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG; 397 break; 398 } 399 } else 400 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG; 401 402 if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) 403 acb->queue.scsi_cntl |= ADW_QSC_NO_DISC; 404 405 acb->queue.done_status = 0; 406 acb->queue.scsi_status = 0; 407 acb->queue.host_status = 0; 408 acb->queue.sg_wk_ix = 0; 409 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 410 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { 411 bcopy(csio->cdb_io.cdb_ptr, 412 acb->queue.cdb, csio->cdb_len); 413 } else { 414 /* I guess I could map it in... */ 415 ccb->ccb_h.status = CAM_REQ_INVALID; 416 adwfreeacb(adw, acb); 417 xpt_done(ccb); 418 return; 419 } 420 } else { 421 bcopy(csio->cdb_io.cdb_bytes, 422 acb->queue.cdb, csio->cdb_len); 423 } 424 425 /* 426 * If we have any data to send with this command, 427 * map it into bus space. 428 */ 429 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 430 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 431 /* 432 * We've been given a pointer 433 * to a single buffer. 434 */ 435 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 436 int error; 437 438 crit_enter(); 439 error = 440 bus_dmamap_load(adw->buffer_dmat, 441 acb->dmamap, 442 csio->data_ptr, 443 csio->dxfer_len, 444 adwexecuteacb, 445 acb, /*flags*/0); 446 if (error == EINPROGRESS) { 447 /* 448 * So as to maintain ordering, 449 * freeze the controller queue 450 * until our mapping is 451 * returned. 452 */ 453 xpt_freeze_simq(sim, 1); 454 acb->state |= CAM_RELEASE_SIMQ; 455 } 456 crit_exit(); 457 } else { 458 struct bus_dma_segment seg; 459 460 /* Pointer to physical buffer */ 461 seg.ds_addr = 462 (bus_addr_t)csio->data_ptr; 463 seg.ds_len = csio->dxfer_len; 464 adwexecuteacb(acb, &seg, 1, 0); 465 } 466 } else { 467 struct bus_dma_segment *segs; 468 469 if ((ccbh->flags & CAM_DATA_PHYS) != 0) 470 panic("adw_action - Physical " 471 "segment pointers " 472 "unsupported"); 473 474 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) 475 panic("adw_action - Virtual " 476 "segment addresses " 477 "unsupported"); 478 479 /* Just use the segments provided */ 480 segs = (struct bus_dma_segment *)csio->data_ptr; 481 adwexecuteacb(acb, segs, csio->sglist_cnt, 482 (csio->sglist_cnt < ADW_SGSIZE) 483 ? 0 : EFBIG); 484 } 485 } else { 486 adwexecuteacb(acb, NULL, 0, 0); 487 } 488 break; 489 } 490 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 491 { 492 adw_idle_cmd_status_t status; 493 494 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, 495 ccb->ccb_h.target_id); 496 if (status == ADW_IDLE_CMD_SUCCESS) { 497 ccb->ccb_h.status = CAM_REQ_CMP; 498 if (bootverbose) { 499 xpt_print_path(ccb->ccb_h.path); 500 kprintf("BDR Delivered\n"); 501 } 502 } else 503 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 504 xpt_done(ccb); 505 break; 506 } 507 case XPT_ABORT: /* Abort the specified CCB */ 508 /* XXX Implement */ 509 ccb->ccb_h.status = CAM_REQ_INVALID; 510 xpt_done(ccb); 511 break; 512 case XPT_SET_TRAN_SETTINGS: 513 { 514 struct ccb_trans_settings_scsi *scsi; 515 struct ccb_trans_settings_spi *spi; 516 struct ccb_trans_settings *cts; 517 u_int target_mask; 518 519 cts = &ccb->cts; 520 target_mask = 0x01 << ccb->ccb_h.target_id; 521 522 crit_enter(); 523 scsi = &cts->proto_specific.scsi; 524 spi = &cts->xport_specific.spi; 525 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 526 u_int sdtrdone; 527 528 sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE); 529 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 530 u_int discenb; 531 532 discenb = 533 adw_lram_read_16(adw, ADW_MC_DISC_ENABLE); 534 535 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 536 discenb |= target_mask; 537 else 538 discenb &= ~target_mask; 539 540 adw_lram_write_16(adw, ADW_MC_DISC_ENABLE, 541 discenb); 542 } 543 544 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 545 546 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 547 adw->tagenb |= target_mask; 548 else 549 adw->tagenb &= ~target_mask; 550 } 551 552 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 553 u_int wdtrenb_orig; 554 u_int wdtrenb; 555 u_int wdtrdone; 556 557 wdtrenb_orig = 558 adw_lram_read_16(adw, ADW_MC_WDTR_ABLE); 559 wdtrenb = wdtrenb_orig; 560 wdtrdone = adw_lram_read_16(adw, 561 ADW_MC_WDTR_DONE); 562 switch (spi->bus_width) { 563 case MSG_EXT_WDTR_BUS_32_BIT: 564 case MSG_EXT_WDTR_BUS_16_BIT: 565 wdtrenb |= target_mask; 566 break; 567 case MSG_EXT_WDTR_BUS_8_BIT: 568 default: 569 wdtrenb &= ~target_mask; 570 break; 571 } 572 if (wdtrenb != wdtrenb_orig) { 573 adw_lram_write_16(adw, 574 ADW_MC_WDTR_ABLE, 575 wdtrenb); 576 wdtrdone &= ~target_mask; 577 adw_lram_write_16(adw, 578 ADW_MC_WDTR_DONE, 579 wdtrdone); 580 /* Wide negotiation forces async */ 581 sdtrdone &= ~target_mask; 582 adw_lram_write_16(adw, 583 ADW_MC_SDTR_DONE, 584 sdtrdone); 585 } 586 } 587 588 if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) 589 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { 590 u_int sdtr_orig; 591 u_int sdtr; 592 u_int sdtrable_orig; 593 u_int sdtrable; 594 595 sdtr = adw_get_chip_sdtr(adw, 596 ccb->ccb_h.target_id); 597 sdtr_orig = sdtr; 598 sdtrable = adw_lram_read_16(adw, 599 ADW_MC_SDTR_ABLE); 600 sdtrable_orig = sdtrable; 601 602 if ((spi->valid 603 & CTS_SPI_VALID_SYNC_RATE) != 0) { 604 605 sdtr = 606 adw_find_sdtr(adw, 607 spi->sync_period); 608 } 609 610 if ((spi->valid 611 & CTS_SPI_VALID_SYNC_OFFSET) != 0) { 612 if (spi->sync_offset == 0) 613 sdtr = ADW_MC_SDTR_ASYNC; 614 } 615 616 if (sdtr == ADW_MC_SDTR_ASYNC) 617 sdtrable &= ~target_mask; 618 else 619 sdtrable |= target_mask; 620 if (sdtr != sdtr_orig 621 || sdtrable != sdtrable_orig) { 622 adw_set_chip_sdtr(adw, 623 ccb->ccb_h.target_id, 624 sdtr); 625 sdtrdone &= ~target_mask; 626 adw_lram_write_16(adw, ADW_MC_SDTR_ABLE, 627 sdtrable); 628 adw_lram_write_16(adw, ADW_MC_SDTR_DONE, 629 sdtrdone); 630 631 } 632 } 633 } 634 crit_exit(); 635 ccb->ccb_h.status = CAM_REQ_CMP; 636 xpt_done(ccb); 637 break; 638 } 639 case XPT_GET_TRAN_SETTINGS: 640 /* Get default/user set transfer settings for the target */ 641 { 642 struct ccb_trans_settings_scsi *scsi; 643 struct ccb_trans_settings_spi *spi; 644 struct ccb_trans_settings *cts; 645 u_int target_mask; 646 647 cts = &ccb->cts; 648 target_mask = 0x01 << ccb->ccb_h.target_id; 649 cts->protocol = PROTO_SCSI; 650 cts->protocol_version = SCSI_REV_2; 651 cts->transport = XPORT_SPI; 652 cts->transport_version = 2; 653 654 scsi = &cts->proto_specific.scsi; 655 spi = &cts->xport_specific.spi; 656 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 657 u_int mc_sdtr; 658 659 spi->flags = 0; 660 if ((adw->user_discenb & target_mask) != 0) 661 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 662 663 if ((adw->user_tagenb & target_mask) != 0) 664 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 665 666 if ((adw->user_wdtr & target_mask) != 0) 667 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 668 else 669 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 670 671 mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id); 672 spi->sync_period = adw_find_period(adw, mc_sdtr); 673 if (spi->sync_period != 0) 674 spi->sync_offset = 15; /* XXX ??? */ 675 else 676 spi->sync_offset = 0; 677 678 679 } else { 680 u_int targ_tinfo; 681 682 spi->flags = 0; 683 if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE) 684 & target_mask) != 0) 685 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 686 687 if ((adw->tagenb & target_mask) != 0) 688 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 689 690 targ_tinfo = 691 adw_lram_read_16(adw, 692 ADW_MC_DEVICE_HSHK_CFG_TABLE 693 + (2 * ccb->ccb_h.target_id)); 694 695 if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0) 696 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 697 else 698 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 699 700 spi->sync_period = 701 adw_hshk_cfg_period_factor(targ_tinfo); 702 703 spi->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET; 704 if (spi->sync_period == 0) 705 spi->sync_offset = 0; 706 707 if (spi->sync_offset == 0) 708 spi->sync_period = 0; 709 } 710 711 spi->valid = CTS_SPI_VALID_SYNC_RATE 712 | CTS_SPI_VALID_SYNC_OFFSET 713 | CTS_SPI_VALID_BUS_WIDTH 714 | CTS_SPI_VALID_DISC; 715 scsi->valid = CTS_SCSI_VALID_TQ; 716 ccb->ccb_h.status = CAM_REQ_CMP; 717 xpt_done(ccb); 718 break; 719 } 720 case XPT_CALC_GEOMETRY: 721 { 722 struct ccb_calc_geometry *ccg; 723 u_int32_t size_mb; 724 u_int32_t secs_per_cylinder; 725 int extended; 726 727 /* 728 * XXX Use Adaptec translation until I find out how to 729 * get this information from the card. 730 */ 731 ccg = &ccb->ccg; 732 size_mb = ccg->volume_size 733 / ((1024L * 1024L) / ccg->block_size); 734 extended = 1; 735 736 if (size_mb > 1024 && extended) { 737 ccg->heads = 255; 738 ccg->secs_per_track = 63; 739 } else { 740 ccg->heads = 64; 741 ccg->secs_per_track = 32; 742 } 743 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 744 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 745 ccb->ccb_h.status = CAM_REQ_CMP; 746 xpt_done(ccb); 747 break; 748 } 749 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 750 { 751 int failure; 752 753 failure = adw_reset_bus(adw); 754 if (failure != 0) { 755 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 756 } else { 757 if (bootverbose) { 758 xpt_print_path(adw->path); 759 kprintf("Bus Reset Delivered\n"); 760 } 761 ccb->ccb_h.status = CAM_REQ_CMP; 762 } 763 xpt_done(ccb); 764 break; 765 } 766 case XPT_TERM_IO: /* Terminate the I/O process */ 767 /* XXX Implement */ 768 ccb->ccb_h.status = CAM_REQ_INVALID; 769 xpt_done(ccb); 770 break; 771 case XPT_PATH_INQ: /* Path routing inquiry */ 772 { 773 struct ccb_pathinq *cpi = &ccb->cpi; 774 775 cpi->version_num = 1; 776 cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE; 777 cpi->target_sprt = 0; 778 cpi->hba_misc = 0; 779 cpi->hba_eng_cnt = 0; 780 cpi->max_target = ADW_MAX_TID; 781 cpi->max_lun = ADW_MAX_LUN; 782 cpi->initiator_id = adw->initiator_id; 783 cpi->bus_id = cam_sim_bus(sim); 784 cpi->base_transfer_speed = 3300; 785 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 786 strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN); 787 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 788 cpi->unit_number = cam_sim_unit(sim); 789 cpi->transport = XPORT_SPI; 790 cpi->transport_version = 2; 791 cpi->protocol = PROTO_SCSI; 792 cpi->protocol_version = SCSI_REV_2; 793 cpi->ccb_h.status = CAM_REQ_CMP; 794 xpt_done(ccb); 795 break; 796 } 797 default: 798 ccb->ccb_h.status = CAM_REQ_INVALID; 799 xpt_done(ccb); 800 break; 801 } 802 } 803 804 static void 805 adw_poll(struct cam_sim *sim) 806 { 807 adw_intr(cam_sim_softc(sim)); 808 } 809 810 static void 811 adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) 812 { 813 } 814 815 struct adw_softc * 816 adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id) 817 { 818 struct adw_softc *adw; 819 int i; 820 821 /* 822 * Allocate a storage area for us 823 */ 824 adw = kmalloc(sizeof(struct adw_softc), M_DEVBUF, M_INTWAIT | M_ZERO); 825 LIST_INIT(&adw->pending_ccbs); 826 SLIST_INIT(&adw->sg_maps); 827 adw->device = dev; 828 adw->unit = device_get_unit(dev); 829 adw->regs_res_type = regs_type; 830 adw->regs_res_id = regs_id; 831 adw->regs = regs; 832 adw->tag = rman_get_bustag(regs); 833 adw->bsh = rman_get_bushandle(regs); 834 KKASSERT(adw->unit >= 0 && adw->unit < 100); 835 i = adw->unit / 10; 836 adw->name = kmalloc(sizeof("adw") + i + 1, M_DEVBUF, M_INTWAIT); 837 ksprintf(adw->name, "adw%d", adw->unit); 838 return(adw); 839 } 840 841 void 842 adw_free(struct adw_softc *adw) 843 { 844 switch (adw->init_level) { 845 case 9: 846 { 847 struct sg_map_node *sg_map; 848 849 while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) { 850 SLIST_REMOVE_HEAD(&adw->sg_maps, links); 851 bus_dmamap_unload(adw->sg_dmat, 852 sg_map->sg_dmamap); 853 bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr, 854 sg_map->sg_dmamap); 855 kfree(sg_map, M_DEVBUF); 856 } 857 bus_dma_tag_destroy(adw->sg_dmat); 858 } 859 case 8: 860 bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap); 861 case 7: 862 bus_dmamem_free(adw->acb_dmat, adw->acbs, 863 adw->acb_dmamap); 864 bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap); 865 case 6: 866 bus_dma_tag_destroy(adw->acb_dmat); 867 case 5: 868 bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap); 869 case 4: 870 bus_dmamem_free(adw->carrier_dmat, adw->carriers, 871 adw->carrier_dmamap); 872 bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap); 873 case 3: 874 bus_dma_tag_destroy(adw->carrier_dmat); 875 case 2: 876 bus_dma_tag_destroy(adw->buffer_dmat); 877 case 1: 878 bus_dma_tag_destroy(adw->parent_dmat); 879 case 0: 880 break; 881 } 882 kfree(adw->name, M_DEVBUF); 883 kfree(adw, M_DEVBUF); 884 } 885 886 int 887 adw_init(struct adw_softc *adw) 888 { 889 struct adw_eeprom eep_config; 890 u_int tid; 891 u_int i; 892 u_int16_t checksum; 893 u_int16_t scsicfg1; 894 895 checksum = adw_eeprom_read(adw, &eep_config); 896 bcopy(eep_config.serial_number, adw->serial_number, 897 sizeof(adw->serial_number)); 898 if (checksum != eep_config.checksum) { 899 u_int16_t serial_number[3]; 900 901 adw->flags |= ADW_EEPROM_FAILED; 902 kprintf("%s: EEPROM checksum failed. Restoring Defaults\n", 903 adw_name(adw)); 904 905 /* 906 * Restore the default EEPROM settings. 907 * Assume the 6 byte board serial number that was read 908 * from EEPROM is correct even if the EEPROM checksum 909 * failed. 910 */ 911 bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config)); 912 bcopy(adw->serial_number, eep_config.serial_number, 913 sizeof(serial_number)); 914 adw_eeprom_write(adw, &eep_config); 915 } 916 917 /* Pull eeprom information into our softc. */ 918 adw->bios_ctrl = eep_config.bios_ctrl; 919 adw->user_wdtr = eep_config.wdtr_able; 920 for (tid = 0; tid < ADW_MAX_TID; tid++) { 921 u_int mc_sdtr; 922 u_int16_t tid_mask; 923 924 tid_mask = 0x1 << tid; 925 if ((adw->features & ADW_ULTRA) != 0) { 926 /* 927 * Ultra chips store sdtr and ultraenb 928 * bits in their seeprom, so we must 929 * construct valid mc_sdtr entries for 930 * indirectly. 931 */ 932 if (eep_config.sync1.sync_enable & tid_mask) { 933 if (eep_config.sync2.ultra_enable & tid_mask) 934 mc_sdtr = ADW_MC_SDTR_20; 935 else 936 mc_sdtr = ADW_MC_SDTR_10; 937 } else 938 mc_sdtr = ADW_MC_SDTR_ASYNC; 939 } else { 940 switch (ADW_TARGET_GROUP(tid)) { 941 case 3: 942 mc_sdtr = eep_config.sync4.sdtr4; 943 break; 944 case 2: 945 mc_sdtr = eep_config.sync3.sdtr3; 946 break; 947 case 1: 948 mc_sdtr = eep_config.sync2.sdtr2; 949 break; 950 default: /* Shut up compiler */ 951 case 0: 952 mc_sdtr = eep_config.sync1.sdtr1; 953 break; 954 } 955 mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid); 956 mc_sdtr &= 0xFF; 957 } 958 adw_set_user_sdtr(adw, tid, mc_sdtr); 959 } 960 adw->user_tagenb = eep_config.tagqng_able; 961 adw->user_discenb = eep_config.disc_enable; 962 adw->max_acbs = eep_config.max_host_qng; 963 adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID); 964 965 /* 966 * Sanity check the number of host openings. 967 */ 968 if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG) 969 adw->max_acbs = ADW_DEF_MAX_HOST_QNG; 970 else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) { 971 /* If the value is zero, assume it is uninitialized. */ 972 if (adw->max_acbs == 0) 973 adw->max_acbs = ADW_DEF_MAX_HOST_QNG; 974 else 975 adw->max_acbs = ADW_DEF_MIN_HOST_QNG; 976 } 977 978 scsicfg1 = 0; 979 if ((adw->features & ADW_ULTRA2) != 0) { 980 switch (eep_config.termination_lvd) { 981 default: 982 kprintf("%s: Invalid EEPROM LVD Termination Settings.\n", 983 adw_name(adw)); 984 kprintf("%s: Reverting to Automatic LVD Termination\n", 985 adw_name(adw)); 986 /* FALLTHROUGH */ 987 case ADW_EEPROM_TERM_AUTO: 988 break; 989 case ADW_EEPROM_TERM_BOTH_ON: 990 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO; 991 /* FALLTHROUGH */ 992 case ADW_EEPROM_TERM_HIGH_ON: 993 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI; 994 /* FALLTHROUGH */ 995 case ADW_EEPROM_TERM_OFF: 996 scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV; 997 break; 998 } 999 } 1000 1001 switch (eep_config.termination_se) { 1002 default: 1003 kprintf("%s: Invalid SE EEPROM Termination Settings.\n", 1004 adw_name(adw)); 1005 kprintf("%s: Reverting to Automatic SE Termination\n", 1006 adw_name(adw)); 1007 /* FALLTHROUGH */ 1008 case ADW_EEPROM_TERM_AUTO: 1009 break; 1010 case ADW_EEPROM_TERM_BOTH_ON: 1011 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L; 1012 /* FALLTHROUGH */ 1013 case ADW_EEPROM_TERM_HIGH_ON: 1014 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H; 1015 /* FALLTHROUGH */ 1016 case ADW_EEPROM_TERM_OFF: 1017 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL; 1018 break; 1019 } 1020 kprintf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id); 1021 1022 /* DMA tag for mapping buffers into device visible space. */ 1023 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, 1024 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 1025 /*highaddr*/BUS_SPACE_MAXADDR, 1026 /*filter*/NULL, /*filterarg*/NULL, 1027 /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE, 1028 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1029 /*flags*/BUS_DMA_ALLOCNOW, 1030 &adw->buffer_dmat) != 0) { 1031 return (ENOMEM); 1032 } 1033 1034 adw->init_level++; 1035 1036 /* DMA tag for our ccb carrier structures */ 1037 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0x10, 1038 /*boundary*/0, 1039 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 1040 /*highaddr*/BUS_SPACE_MAXADDR, 1041 /*filter*/NULL, /*filterarg*/NULL, 1042 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) 1043 * sizeof(struct adw_carrier), 1044 /*nsegments*/1, 1045 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1046 /*flags*/0, &adw->carrier_dmat) != 0) { 1047 return (ENOMEM); 1048 } 1049 1050 adw->init_level++; 1051 1052 /* Allocation for our ccb carrier structures */ 1053 if (bus_dmamem_alloc(adw->carrier_dmat, (void *)&adw->carriers, 1054 BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) { 1055 return (ENOMEM); 1056 } 1057 1058 adw->init_level++; 1059 1060 /* And permanently map them */ 1061 bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap, 1062 adw->carriers, 1063 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) 1064 * sizeof(struct adw_carrier), 1065 adwmapmem, &adw->carrier_busbase, /*flags*/0); 1066 1067 /* Clear them out. */ 1068 bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) 1069 * sizeof(struct adw_carrier)); 1070 1071 /* Setup our free carrier list */ 1072 adw->free_carriers = adw->carriers; 1073 for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) { 1074 adw->carriers[i].carr_offset = 1075 carriervtobo(adw, &adw->carriers[i]); 1076 adw->carriers[i].carr_ba = 1077 carriervtob(adw, &adw->carriers[i]); 1078 adw->carriers[i].areq_ba = 0; 1079 adw->carriers[i].next_ba = 1080 carriervtobo(adw, &adw->carriers[i+1]); 1081 } 1082 /* Terminal carrier. Never leaves the freelist */ 1083 adw->carriers[i].carr_offset = 1084 carriervtobo(adw, &adw->carriers[i]); 1085 adw->carriers[i].carr_ba = 1086 carriervtob(adw, &adw->carriers[i]); 1087 adw->carriers[i].areq_ba = 0; 1088 adw->carriers[i].next_ba = ~0; 1089 1090 adw->init_level++; 1091 1092 /* DMA tag for our acb structures */ 1093 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, 1094 /*lowaddr*/BUS_SPACE_MAXADDR, 1095 /*highaddr*/BUS_SPACE_MAXADDR, 1096 /*filter*/NULL, /*filterarg*/NULL, 1097 adw->max_acbs * sizeof(struct acb), 1098 /*nsegments*/1, 1099 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1100 /*flags*/0, &adw->acb_dmat) != 0) { 1101 return (ENOMEM); 1102 } 1103 1104 adw->init_level++; 1105 1106 /* Allocation for our ccbs */ 1107 if (bus_dmamem_alloc(adw->acb_dmat, (void *)&adw->acbs, 1108 BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0) 1109 return (ENOMEM); 1110 1111 adw->init_level++; 1112 1113 /* And permanently map them */ 1114 bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap, 1115 adw->acbs, 1116 adw->max_acbs * sizeof(struct acb), 1117 adwmapmem, &adw->acb_busbase, /*flags*/0); 1118 1119 /* Clear them out. */ 1120 bzero(adw->acbs, adw->max_acbs * sizeof(struct acb)); 1121 1122 /* DMA tag for our S/G structures. We allocate in page sized chunks */ 1123 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, 1124 /*lowaddr*/BUS_SPACE_MAXADDR, 1125 /*highaddr*/BUS_SPACE_MAXADDR, 1126 /*filter*/NULL, /*filterarg*/NULL, 1127 PAGE_SIZE, /*nsegments*/1, 1128 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1129 /*flags*/0, &adw->sg_dmat) != 0) { 1130 return (ENOMEM); 1131 } 1132 1133 adw->init_level++; 1134 1135 /* Allocate our first batch of ccbs */ 1136 if (adwallocacbs(adw) == 0) 1137 return (ENOMEM); 1138 1139 if (adw_init_chip(adw, scsicfg1) != 0) 1140 return (ENXIO); 1141 1142 kprintf("Queue Depth %d\n", adw->max_acbs); 1143 1144 return (0); 1145 } 1146 1147 /* 1148 * Attach all the sub-devices we can find 1149 */ 1150 int 1151 adw_attach(struct adw_softc *adw) 1152 { 1153 struct ccb_setasync csa; 1154 int error; 1155 1156 error = 0; 1157 crit_enter(); 1158 /* Hook up our interrupt handler */ 1159 if ((error = bus_setup_intr(adw->device, adw->irq, 0, 1160 adw_intr, adw, &adw->ih, NULL)) != 0) { 1161 device_printf(adw->device, "bus_setup_intr() failed: %d\n", 1162 error); 1163 goto fail; 1164 } 1165 1166 /* Start the Risc processor now that we are fully configured. */ 1167 adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN); 1168 1169 /* 1170 * Construct our SIM entry. 1171 */ 1172 adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit, 1173 &sim_mplock, 1, adw->max_acbs, NULL); 1174 if (adw->sim == NULL) { 1175 error = ENOMEM; 1176 goto fail; 1177 } 1178 1179 /* 1180 * Register the bus. 1181 */ 1182 if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) { 1183 cam_sim_free(adw->sim); 1184 error = ENOMEM; 1185 goto fail; 1186 } 1187 1188 if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim), 1189 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) 1190 == CAM_REQ_CMP) { 1191 xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5); 1192 csa.ccb_h.func_code = XPT_SASYNC_CB; 1193 csa.event_enable = AC_LOST_DEVICE; 1194 csa.callback = adw_async; 1195 csa.callback_arg = adw; 1196 xpt_action((union ccb *)&csa); 1197 } 1198 1199 fail: 1200 crit_exit(); 1201 return (error); 1202 } 1203 1204 void 1205 adw_intr(void *arg) 1206 { 1207 struct adw_softc *adw; 1208 u_int int_stat; 1209 1210 adw = (struct adw_softc *)arg; 1211 if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0) 1212 return; 1213 1214 /* Reading the register clears the interrupt. */ 1215 int_stat = adw_inb(adw, ADW_INTR_STATUS_REG); 1216 1217 if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) { 1218 u_int intrb_code; 1219 1220 /* Async Microcode Event */ 1221 intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE); 1222 switch (intrb_code) { 1223 case ADW_ASYNC_CARRIER_READY_FAILURE: 1224 /* 1225 * The RISC missed our update of 1226 * the commandq. 1227 */ 1228 if (LIST_FIRST(&adw->pending_ccbs) != NULL) 1229 adw_tickle_risc(adw, ADW_TICKLE_A); 1230 break; 1231 case ADW_ASYNC_SCSI_BUS_RESET_DET: 1232 /* 1233 * The firmware detected a SCSI Bus reset. 1234 */ 1235 kprintf("Someone Reset the Bus\n"); 1236 adw_handle_bus_reset(adw, /*initiated*/FALSE); 1237 break; 1238 case ADW_ASYNC_RDMA_FAILURE: 1239 /* 1240 * Handle RDMA failure by resetting the 1241 * SCSI Bus and chip. 1242 */ 1243 #if 0 /* XXX */ 1244 AdvResetChipAndSB(adv_dvc_varp); 1245 #endif 1246 break; 1247 1248 case ADW_ASYNC_HOST_SCSI_BUS_RESET: 1249 /* 1250 * Host generated SCSI bus reset occurred. 1251 */ 1252 adw_handle_bus_reset(adw, /*initiated*/TRUE); 1253 break; 1254 default: 1255 kprintf("adw_intr: unknown async code 0x%x\n", 1256 intrb_code); 1257 break; 1258 } 1259 } 1260 1261 /* 1262 * Run down the RequestQ. 1263 */ 1264 while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) { 1265 struct adw_carrier *free_carrier; 1266 struct acb *acb; 1267 union ccb *ccb; 1268 1269 #if 0 1270 kprintf("0x%x, 0x%x, 0x%x, 0x%x\n", 1271 adw->responseq->carr_offset, 1272 adw->responseq->carr_ba, 1273 adw->responseq->areq_ba, 1274 adw->responseq->next_ba); 1275 #endif 1276 /* 1277 * The firmware copies the adw_scsi_req_q.acb_baddr 1278 * field into the areq_ba field of the carrier. 1279 */ 1280 acb = acbbotov(adw, adw->responseq->areq_ba); 1281 1282 /* 1283 * The least significant four bits of the next_ba 1284 * field are used as flags. Mask them out and then 1285 * advance through the list. 1286 */ 1287 free_carrier = adw->responseq; 1288 adw->responseq = 1289 carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK); 1290 free_carrier->next_ba = adw->free_carriers->carr_offset; 1291 adw->free_carriers = free_carrier; 1292 1293 /* Process CCB */ 1294 ccb = acb->ccb; 1295 callout_stop(&ccb->ccb_h.timeout_ch); 1296 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1297 bus_dmasync_op_t op; 1298 1299 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1300 op = BUS_DMASYNC_POSTREAD; 1301 else 1302 op = BUS_DMASYNC_POSTWRITE; 1303 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); 1304 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); 1305 ccb->csio.resid = acb->queue.data_cnt; 1306 } else 1307 ccb->csio.resid = 0; 1308 1309 /* Common Cases inline... */ 1310 if (acb->queue.host_status == QHSTA_NO_ERROR 1311 && (acb->queue.done_status == QD_NO_ERROR 1312 || acb->queue.done_status == QD_WITH_ERROR)) { 1313 ccb->csio.scsi_status = acb->queue.scsi_status; 1314 ccb->ccb_h.status = 0; 1315 switch (ccb->csio.scsi_status) { 1316 case SCSI_STATUS_OK: 1317 ccb->ccb_h.status |= CAM_REQ_CMP; 1318 break; 1319 case SCSI_STATUS_CHECK_COND: 1320 case SCSI_STATUS_CMD_TERMINATED: 1321 bcopy(&acb->sense_data, &ccb->csio.sense_data, 1322 ccb->csio.sense_len); 1323 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1324 ccb->csio.sense_resid = acb->queue.sense_len; 1325 /* FALLTHROUGH */ 1326 default: 1327 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR 1328 | CAM_DEV_QFRZN; 1329 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1330 break; 1331 } 1332 adwfreeacb(adw, acb); 1333 xpt_done(ccb); 1334 } else { 1335 adwprocesserror(adw, acb); 1336 } 1337 } 1338 } 1339 1340 static void 1341 adwprocesserror(struct adw_softc *adw, struct acb *acb) 1342 { 1343 union ccb *ccb; 1344 1345 ccb = acb->ccb; 1346 if (acb->queue.done_status == QD_ABORTED_BY_HOST) { 1347 ccb->ccb_h.status = CAM_REQ_ABORTED; 1348 } else { 1349 1350 switch (acb->queue.host_status) { 1351 case QHSTA_M_SEL_TIMEOUT: 1352 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 1353 break; 1354 case QHSTA_M_SXFR_OFF_UFLW: 1355 case QHSTA_M_SXFR_OFF_OFLW: 1356 case QHSTA_M_DATA_OVER_RUN: 1357 ccb->ccb_h.status = CAM_DATA_RUN_ERR; 1358 break; 1359 case QHSTA_M_SXFR_DESELECTED: 1360 case QHSTA_M_UNEXPECTED_BUS_FREE: 1361 ccb->ccb_h.status = CAM_UNEXP_BUSFREE; 1362 break; 1363 case QHSTA_M_SCSI_BUS_RESET: 1364 case QHSTA_M_SCSI_BUS_RESET_UNSOL: 1365 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 1366 break; 1367 case QHSTA_M_BUS_DEVICE_RESET: 1368 ccb->ccb_h.status = CAM_BDR_SENT; 1369 break; 1370 case QHSTA_M_QUEUE_ABORTED: 1371 /* BDR or Bus Reset */ 1372 kprintf("Saw Queue Aborted\n"); 1373 ccb->ccb_h.status = adw->last_reset; 1374 break; 1375 case QHSTA_M_SXFR_SDMA_ERR: 1376 case QHSTA_M_SXFR_SXFR_PERR: 1377 case QHSTA_M_RDMA_PERR: 1378 ccb->ccb_h.status = CAM_UNCOR_PARITY; 1379 break; 1380 case QHSTA_M_WTM_TIMEOUT: 1381 case QHSTA_M_SXFR_WD_TMO: 1382 { 1383 /* The SCSI bus hung in a phase */ 1384 xpt_print_path(adw->path); 1385 kprintf("Watch Dog timer expired. Reseting bus\n"); 1386 adw_reset_bus(adw); 1387 break; 1388 } 1389 case QHSTA_M_SXFR_XFR_PH_ERR: 1390 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1391 break; 1392 case QHSTA_M_SXFR_UNKNOWN_ERROR: 1393 break; 1394 case QHSTA_M_BAD_CMPL_STATUS_IN: 1395 /* No command complete after a status message */ 1396 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1397 break; 1398 case QHSTA_M_AUTO_REQ_SENSE_FAIL: 1399 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; 1400 break; 1401 case QHSTA_M_INVALID_DEVICE: 1402 ccb->ccb_h.status = CAM_PATH_INVALID; 1403 break; 1404 case QHSTA_M_NO_AUTO_REQ_SENSE: 1405 /* 1406 * User didn't request sense, but we got a 1407 * check condition. 1408 */ 1409 ccb->csio.scsi_status = acb->queue.scsi_status; 1410 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1411 break; 1412 default: 1413 panic("%s: Unhandled Host status error %x", 1414 adw_name(adw), acb->queue.host_status); 1415 /* NOTREACHED */ 1416 } 1417 } 1418 if ((acb->state & ACB_RECOVERY_ACB) != 0) { 1419 if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET 1420 || ccb->ccb_h.status == CAM_BDR_SENT) 1421 ccb->ccb_h.status = CAM_CMD_TIMEOUT; 1422 } 1423 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1424 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1425 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1426 } 1427 adwfreeacb(adw, acb); 1428 xpt_done(ccb); 1429 } 1430 1431 static void 1432 adwtimeout(void *arg) 1433 { 1434 struct acb *acb; 1435 union ccb *ccb; 1436 struct adw_softc *adw; 1437 adw_idle_cmd_status_t status; 1438 int target_id; 1439 1440 acb = (struct acb *)arg; 1441 ccb = acb->ccb; 1442 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; 1443 xpt_print_path(ccb->ccb_h.path); 1444 kprintf("ACB %p - timed out\n", (void *)acb); 1445 1446 crit_enter(); 1447 1448 if ((acb->state & ACB_ACTIVE) == 0) { 1449 xpt_print_path(ccb->ccb_h.path); 1450 kprintf("ACB %p - timed out CCB already completed\n", 1451 (void *)acb); 1452 crit_exit(); 1453 return; 1454 } 1455 1456 acb->state |= ACB_RECOVERY_ACB; 1457 target_id = ccb->ccb_h.target_id; 1458 1459 /* Attempt a BDR first */ 1460 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, 1461 ccb->ccb_h.target_id); 1462 crit_exit(); 1463 if (status == ADW_IDLE_CMD_SUCCESS) { 1464 kprintf("%s: BDR Delivered. No longer in timeout\n", 1465 adw_name(adw)); 1466 adw_handle_device_reset(adw, target_id); 1467 } else { 1468 adw_reset_bus(adw); 1469 xpt_print_path(adw->path); 1470 kprintf("Bus Reset Delivered. No longer in timeout\n"); 1471 } 1472 } 1473 1474 static void 1475 adw_handle_device_reset(struct adw_softc *adw, u_int target) 1476 { 1477 struct cam_path *path; 1478 cam_status error; 1479 1480 error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim), 1481 target, CAM_LUN_WILDCARD); 1482 1483 if (error == CAM_REQ_CMP) { 1484 xpt_async(AC_SENT_BDR, path, NULL); 1485 xpt_free_path(path); 1486 } 1487 adw->last_reset = CAM_BDR_SENT; 1488 } 1489 1490 static void 1491 adw_handle_bus_reset(struct adw_softc *adw, int initiated) 1492 { 1493 if (initiated) { 1494 /* 1495 * The microcode currently sets the SCSI Bus Reset signal 1496 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET 1497 * command above. But the SCSI Bus Reset Hold Time in the 1498 * microcode is not deterministic (it may in fact be for less 1499 * than the SCSI Spec. minimum of 25 us). Therefore on return 1500 * the Adv Library sets the SCSI Bus Reset signal for 1501 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater 1502 * than 25 us. 1503 */ 1504 u_int scsi_ctrl; 1505 1506 scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT; 1507 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT); 1508 DELAY(ADW_SCSI_RESET_HOLD_TIME_US); 1509 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl); 1510 1511 /* 1512 * We will perform the async notification when the 1513 * SCSI Reset interrupt occurs. 1514 */ 1515 } else 1516 xpt_async(AC_BUS_RESET, adw->path, NULL); 1517 adw->last_reset = CAM_SCSI_BUS_RESET; 1518 } 1519