1 /* 2 * CAM SCSI interface for the the Advanced Systems Inc. 3 * Second Generation SCSI controllers. 4 * 5 * Product specific probe and attach routines can be found in: 6 * 7 * adw_pci.c ABP[3]940UW, ABP950UW, ABP3940U2W 8 * 9 * Copyright (c) 1998, 1999, 2000 Justin Gibbs. 10 * All rights reserved. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions, and the following disclaimer, 17 * without modification. 18 * 2. The name of the author may not be used to endorse or promote products 19 * derived from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * $FreeBSD: src/sys/dev/advansys/adwcam.c,v 1.7.2.2 2001/03/05 13:08:55 obrien Exp $ 34 * $DragonFly: src/sys/dev/disk/advansys/adwcam.c,v 1.7 2004/09/17 03:39:38 joerg Exp $ 35 */ 36 /* 37 * Ported from: 38 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters 39 * 40 * Copyright (c) 1995-1998 Advanced System Products, Inc. 41 * All Rights Reserved. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that redistributions of source 45 * code retain the above copyright notice and this comment without 46 * modification. 47 */ 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/kernel.h> 52 #include <sys/malloc.h> 53 #include <sys/bus.h> 54 55 #include <machine/bus_pio.h> 56 #include <machine/bus_memio.h> 57 #include <machine/bus.h> 58 #include <machine/clock.h> 59 #include <machine/resource.h> 60 61 #include <sys/rman.h> 62 63 #include <bus/cam/cam.h> 64 #include <bus/cam/cam_ccb.h> 65 #include <bus/cam/cam_sim.h> 66 #include <bus/cam/cam_xpt_sim.h> 67 #include <bus/cam/cam_debug.h> 68 69 #include <bus/cam/scsi/scsi_message.h> 70 71 #include "adwvar.h" 72 73 /* Definitions for our use of the SIM private CCB area */ 74 #define ccb_acb_ptr spriv_ptr0 75 #define ccb_adw_ptr spriv_ptr1 76 77 u_long adw_unit; 78 79 static __inline cam_status adwccbstatus(union ccb*); 80 static __inline struct acb* adwgetacb(struct adw_softc *adw); 81 static __inline void adwfreeacb(struct adw_softc *adw, 82 struct acb *acb); 83 84 static void adwmapmem(void *arg, bus_dma_segment_t *segs, 85 int nseg, int error); 86 static struct sg_map_node* 87 adwallocsgmap(struct adw_softc *adw); 88 static int adwallocacbs(struct adw_softc *adw); 89 90 static void adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, 91 int nseg, int error); 92 static void adw_action(struct cam_sim *sim, union ccb *ccb); 93 static void adw_poll(struct cam_sim *sim); 94 static void adw_async(void *callback_arg, u_int32_t code, 95 struct cam_path *path, void *arg); 96 static void adwprocesserror(struct adw_softc *adw, struct acb *acb); 97 static void adwtimeout(void *arg); 98 static void adw_handle_device_reset(struct adw_softc *adw, 99 u_int target); 100 static void adw_handle_bus_reset(struct adw_softc *adw, 101 int initiated); 102 103 static __inline cam_status 104 adwccbstatus(union ccb* ccb) 105 { 106 return (ccb->ccb_h.status & CAM_STATUS_MASK); 107 } 108 109 static __inline struct acb* 110 adwgetacb(struct adw_softc *adw) 111 { 112 struct acb* acb; 113 int s; 114 115 s = splcam(); 116 if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) { 117 SLIST_REMOVE_HEAD(&adw->free_acb_list, links); 118 } else if (adw->num_acbs < adw->max_acbs) { 119 adwallocacbs(adw); 120 acb = SLIST_FIRST(&adw->free_acb_list); 121 if (acb == NULL) 122 printf("%s: Can't malloc ACB\n", adw_name(adw)); 123 else { 124 SLIST_REMOVE_HEAD(&adw->free_acb_list, links); 125 } 126 } 127 splx(s); 128 129 return (acb); 130 } 131 132 static __inline void 133 adwfreeacb(struct adw_softc *adw, struct acb *acb) 134 { 135 int s; 136 137 s = splcam(); 138 if ((acb->state & ACB_ACTIVE) != 0) 139 LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le); 140 if ((acb->state & ACB_RELEASE_SIMQ) != 0) 141 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 142 else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0 143 && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { 144 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 145 adw->state &= ~ADW_RESOURCE_SHORTAGE; 146 } 147 acb->state = ACB_FREE; 148 SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links); 149 splx(s); 150 } 151 152 static void 153 adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error) 154 { 155 bus_addr_t *busaddrp; 156 157 busaddrp = (bus_addr_t *)arg; 158 *busaddrp = segs->ds_addr; 159 } 160 161 static struct sg_map_node * 162 adwallocsgmap(struct adw_softc *adw) 163 { 164 struct sg_map_node *sg_map; 165 166 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_INTWAIT); 167 168 /* Allocate S/G space for the next batch of ACBS */ 169 if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr, 170 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { 171 free(sg_map, M_DEVBUF); 172 return (NULL); 173 } 174 175 SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links); 176 177 bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, 178 PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0); 179 180 bzero(sg_map->sg_vaddr, PAGE_SIZE); 181 return (sg_map); 182 } 183 184 /* 185 * Allocate another chunk of CCB's. Return count of entries added. 186 * Assumed to be called at splcam(). 187 */ 188 static int 189 adwallocacbs(struct adw_softc *adw) 190 { 191 struct acb *next_acb; 192 struct sg_map_node *sg_map; 193 bus_addr_t busaddr; 194 struct adw_sg_block *blocks; 195 int newcount; 196 int i; 197 198 next_acb = &adw->acbs[adw->num_acbs]; 199 sg_map = adwallocsgmap(adw); 200 201 if (sg_map == NULL) 202 return (0); 203 204 blocks = sg_map->sg_vaddr; 205 busaddr = sg_map->sg_physaddr; 206 207 newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks))); 208 for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) { 209 int error; 210 211 error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0, 212 &next_acb->dmamap); 213 if (error != 0) 214 break; 215 next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb); 216 next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb); 217 next_acb->queue.sense_baddr = 218 acbvtob(adw, next_acb) + offsetof(struct acb, sense_data); 219 next_acb->sg_blocks = blocks; 220 next_acb->sg_busaddr = busaddr; 221 next_acb->state = ACB_FREE; 222 SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links); 223 blocks += ADW_SG_BLOCKCNT; 224 busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks); 225 next_acb++; 226 adw->num_acbs++; 227 } 228 return (i); 229 } 230 231 static void 232 adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 233 { 234 struct acb *acb; 235 union ccb *ccb; 236 struct adw_softc *adw; 237 int s; 238 239 acb = (struct acb *)arg; 240 ccb = acb->ccb; 241 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; 242 243 if (error != 0) { 244 if (error != EFBIG) 245 printf("%s: Unexepected error 0x%x returned from " 246 "bus_dmamap_load\n", adw_name(adw), error); 247 if (ccb->ccb_h.status == CAM_REQ_INPROG) { 248 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 249 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; 250 } 251 adwfreeacb(adw, acb); 252 xpt_done(ccb); 253 return; 254 } 255 256 if (nseg != 0) { 257 bus_dmasync_op_t op; 258 259 acb->queue.data_addr = dm_segs[0].ds_addr; 260 acb->queue.data_cnt = ccb->csio.dxfer_len; 261 if (nseg > 1) { 262 struct adw_sg_block *sg_block; 263 struct adw_sg_elm *sg; 264 bus_addr_t sg_busaddr; 265 u_int sg_index; 266 bus_dma_segment_t *end_seg; 267 268 end_seg = dm_segs + nseg; 269 270 sg_busaddr = acb->sg_busaddr; 271 sg_index = 0; 272 /* Copy the segments into our SG list */ 273 for (sg_block = acb->sg_blocks;; sg_block++) { 274 u_int i; 275 276 sg = sg_block->sg_list; 277 for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) { 278 if (dm_segs >= end_seg) 279 break; 280 281 sg->sg_addr = dm_segs->ds_addr; 282 sg->sg_count = dm_segs->ds_len; 283 sg++; 284 dm_segs++; 285 } 286 sg_block->sg_cnt = i; 287 sg_index += i; 288 if (dm_segs == end_seg) { 289 sg_block->sg_busaddr_next = 0; 290 break; 291 } else { 292 sg_busaddr += 293 sizeof(struct adw_sg_block); 294 sg_block->sg_busaddr_next = sg_busaddr; 295 } 296 } 297 acb->queue.sg_real_addr = acb->sg_busaddr; 298 } else { 299 acb->queue.sg_real_addr = 0; 300 } 301 302 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 303 op = BUS_DMASYNC_PREREAD; 304 else 305 op = BUS_DMASYNC_PREWRITE; 306 307 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); 308 309 } else { 310 acb->queue.data_addr = 0; 311 acb->queue.data_cnt = 0; 312 acb->queue.sg_real_addr = 0; 313 } 314 315 s = splcam(); 316 317 /* 318 * Last time we need to check if this CCB needs to 319 * be aborted. 320 */ 321 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 322 if (nseg != 0) 323 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); 324 adwfreeacb(adw, acb); 325 xpt_done(ccb); 326 splx(s); 327 return; 328 } 329 330 acb->state |= ACB_ACTIVE; 331 ccb->ccb_h.status |= CAM_SIM_QUEUED; 332 LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le); 333 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000, 334 adwtimeout, acb); 335 336 adw_send_acb(adw, acb, acbvtob(adw, acb)); 337 338 splx(s); 339 } 340 341 static void 342 adw_action(struct cam_sim *sim, union ccb *ccb) 343 { 344 struct adw_softc *adw; 345 346 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n")); 347 348 adw = (struct adw_softc *)cam_sim_softc(sim); 349 350 switch (ccb->ccb_h.func_code) { 351 /* Common cases first */ 352 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 353 { 354 struct ccb_scsiio *csio; 355 struct ccb_hdr *ccbh; 356 struct acb *acb; 357 358 csio = &ccb->csio; 359 ccbh = &ccb->ccb_h; 360 361 /* Max supported CDB length is 12 bytes */ 362 if (csio->cdb_len > 12) { 363 ccb->ccb_h.status = CAM_REQ_INVALID; 364 xpt_done(ccb); 365 return; 366 } 367 368 if ((acb = adwgetacb(adw)) == NULL) { 369 int s; 370 371 s = splcam(); 372 adw->state |= ADW_RESOURCE_SHORTAGE; 373 splx(s); 374 xpt_freeze_simq(sim, /*count*/1); 375 ccb->ccb_h.status = CAM_REQUEUE_REQ; 376 xpt_done(ccb); 377 return; 378 } 379 380 /* Link acb and ccb so we can find one from the other */ 381 acb->ccb = ccb; 382 ccb->ccb_h.ccb_acb_ptr = acb; 383 ccb->ccb_h.ccb_adw_ptr = adw; 384 385 acb->queue.cntl = 0; 386 acb->queue.target_cmd = 0; 387 acb->queue.target_id = ccb->ccb_h.target_id; 388 acb->queue.target_lun = ccb->ccb_h.target_lun; 389 390 acb->queue.mflag = 0; 391 acb->queue.sense_len = 392 MIN(csio->sense_len, sizeof(acb->sense_data)); 393 acb->queue.cdb_len = csio->cdb_len; 394 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 395 switch (csio->tag_action) { 396 case MSG_SIMPLE_Q_TAG: 397 acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG; 398 break; 399 case MSG_HEAD_OF_Q_TAG: 400 acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG; 401 break; 402 case MSG_ORDERED_Q_TAG: 403 acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG; 404 break; 405 default: 406 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG; 407 break; 408 } 409 } else 410 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG; 411 412 if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) 413 acb->queue.scsi_cntl |= ADW_QSC_NO_DISC; 414 415 acb->queue.done_status = 0; 416 acb->queue.scsi_status = 0; 417 acb->queue.host_status = 0; 418 acb->queue.sg_wk_ix = 0; 419 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 420 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { 421 bcopy(csio->cdb_io.cdb_ptr, 422 acb->queue.cdb, csio->cdb_len); 423 } else { 424 /* I guess I could map it in... */ 425 ccb->ccb_h.status = CAM_REQ_INVALID; 426 adwfreeacb(adw, acb); 427 xpt_done(ccb); 428 return; 429 } 430 } else { 431 bcopy(csio->cdb_io.cdb_bytes, 432 acb->queue.cdb, csio->cdb_len); 433 } 434 435 /* 436 * If we have any data to send with this command, 437 * map it into bus space. 438 */ 439 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 440 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 441 /* 442 * We've been given a pointer 443 * to a single buffer. 444 */ 445 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 446 int s; 447 int error; 448 449 s = splsoftvm(); 450 error = 451 bus_dmamap_load(adw->buffer_dmat, 452 acb->dmamap, 453 csio->data_ptr, 454 csio->dxfer_len, 455 adwexecuteacb, 456 acb, /*flags*/0); 457 if (error == EINPROGRESS) { 458 /* 459 * So as to maintain ordering, 460 * freeze the controller queue 461 * until our mapping is 462 * returned. 463 */ 464 xpt_freeze_simq(sim, 1); 465 acb->state |= CAM_RELEASE_SIMQ; 466 } 467 splx(s); 468 } else { 469 struct bus_dma_segment seg; 470 471 /* Pointer to physical buffer */ 472 seg.ds_addr = 473 (bus_addr_t)csio->data_ptr; 474 seg.ds_len = csio->dxfer_len; 475 adwexecuteacb(acb, &seg, 1, 0); 476 } 477 } else { 478 struct bus_dma_segment *segs; 479 480 if ((ccbh->flags & CAM_DATA_PHYS) != 0) 481 panic("adw_action - Physical " 482 "segment pointers " 483 "unsupported"); 484 485 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) 486 panic("adw_action - Virtual " 487 "segment addresses " 488 "unsupported"); 489 490 /* Just use the segments provided */ 491 segs = (struct bus_dma_segment *)csio->data_ptr; 492 adwexecuteacb(acb, segs, csio->sglist_cnt, 493 (csio->sglist_cnt < ADW_SGSIZE) 494 ? 0 : EFBIG); 495 } 496 } else { 497 adwexecuteacb(acb, NULL, 0, 0); 498 } 499 break; 500 } 501 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 502 { 503 adw_idle_cmd_status_t status; 504 505 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, 506 ccb->ccb_h.target_id); 507 if (status == ADW_IDLE_CMD_SUCCESS) { 508 ccb->ccb_h.status = CAM_REQ_CMP; 509 if (bootverbose) { 510 xpt_print_path(ccb->ccb_h.path); 511 printf("BDR Delivered\n"); 512 } 513 } else 514 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 515 xpt_done(ccb); 516 break; 517 } 518 case XPT_ABORT: /* Abort the specified CCB */ 519 /* XXX Implement */ 520 ccb->ccb_h.status = CAM_REQ_INVALID; 521 xpt_done(ccb); 522 break; 523 case XPT_SET_TRAN_SETTINGS: 524 { 525 struct ccb_trans_settings *cts; 526 u_int target_mask; 527 int s; 528 529 cts = &ccb->cts; 530 target_mask = 0x01 << ccb->ccb_h.target_id; 531 532 s = splcam(); 533 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { 534 u_int sdtrdone; 535 536 sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE); 537 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 538 u_int discenb; 539 540 discenb = 541 adw_lram_read_16(adw, ADW_MC_DISC_ENABLE); 542 543 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) 544 discenb |= target_mask; 545 else 546 discenb &= ~target_mask; 547 548 adw_lram_write_16(adw, ADW_MC_DISC_ENABLE, 549 discenb); 550 } 551 552 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 553 554 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) 555 adw->tagenb |= target_mask; 556 else 557 adw->tagenb &= ~target_mask; 558 } 559 560 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 561 u_int wdtrenb_orig; 562 u_int wdtrenb; 563 u_int wdtrdone; 564 565 wdtrenb_orig = 566 adw_lram_read_16(adw, ADW_MC_WDTR_ABLE); 567 wdtrenb = wdtrenb_orig; 568 wdtrdone = adw_lram_read_16(adw, 569 ADW_MC_WDTR_DONE); 570 switch (cts->bus_width) { 571 case MSG_EXT_WDTR_BUS_32_BIT: 572 case MSG_EXT_WDTR_BUS_16_BIT: 573 wdtrenb |= target_mask; 574 break; 575 case MSG_EXT_WDTR_BUS_8_BIT: 576 default: 577 wdtrenb &= ~target_mask; 578 break; 579 } 580 if (wdtrenb != wdtrenb_orig) { 581 adw_lram_write_16(adw, 582 ADW_MC_WDTR_ABLE, 583 wdtrenb); 584 wdtrdone &= ~target_mask; 585 adw_lram_write_16(adw, 586 ADW_MC_WDTR_DONE, 587 wdtrdone); 588 /* Wide negotiation forces async */ 589 sdtrdone &= ~target_mask; 590 adw_lram_write_16(adw, 591 ADW_MC_SDTR_DONE, 592 sdtrdone); 593 } 594 } 595 596 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) 597 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) { 598 u_int sdtr_orig; 599 u_int sdtr; 600 u_int sdtrable_orig; 601 u_int sdtrable; 602 603 sdtr = adw_get_chip_sdtr(adw, 604 ccb->ccb_h.target_id); 605 sdtr_orig = sdtr; 606 sdtrable = adw_lram_read_16(adw, 607 ADW_MC_SDTR_ABLE); 608 sdtrable_orig = sdtrable; 609 610 if ((cts->valid 611 & CCB_TRANS_SYNC_RATE_VALID) != 0) { 612 613 sdtr = 614 adw_find_sdtr(adw, 615 cts->sync_period); 616 } 617 618 if ((cts->valid 619 & CCB_TRANS_SYNC_OFFSET_VALID) != 0) { 620 if (cts->sync_offset == 0) 621 sdtr = ADW_MC_SDTR_ASYNC; 622 } 623 624 if (sdtr == ADW_MC_SDTR_ASYNC) 625 sdtrable &= ~target_mask; 626 else 627 sdtrable |= target_mask; 628 if (sdtr != sdtr_orig 629 || sdtrable != sdtrable_orig) { 630 adw_set_chip_sdtr(adw, 631 ccb->ccb_h.target_id, 632 sdtr); 633 sdtrdone &= ~target_mask; 634 adw_lram_write_16(adw, ADW_MC_SDTR_ABLE, 635 sdtrable); 636 adw_lram_write_16(adw, ADW_MC_SDTR_DONE, 637 sdtrdone); 638 639 } 640 } 641 } 642 splx(s); 643 ccb->ccb_h.status = CAM_REQ_CMP; 644 xpt_done(ccb); 645 break; 646 } 647 case XPT_GET_TRAN_SETTINGS: 648 /* Get default/user set transfer settings for the target */ 649 { 650 struct ccb_trans_settings *cts; 651 u_int target_mask; 652 653 cts = &ccb->cts; 654 target_mask = 0x01 << ccb->ccb_h.target_id; 655 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 656 u_int mc_sdtr; 657 658 cts->flags = 0; 659 if ((adw->user_discenb & target_mask) != 0) 660 cts->flags |= CCB_TRANS_DISC_ENB; 661 662 if ((adw->user_tagenb & target_mask) != 0) 663 cts->flags |= CCB_TRANS_TAG_ENB; 664 665 if ((adw->user_wdtr & target_mask) != 0) 666 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 667 else 668 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 669 670 mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id); 671 cts->sync_period = adw_find_period(adw, mc_sdtr); 672 if (cts->sync_period != 0) 673 cts->sync_offset = 15; /* XXX ??? */ 674 else 675 cts->sync_offset = 0; 676 677 cts->valid = CCB_TRANS_SYNC_RATE_VALID 678 | CCB_TRANS_SYNC_OFFSET_VALID 679 | CCB_TRANS_BUS_WIDTH_VALID 680 | CCB_TRANS_DISC_VALID 681 | CCB_TRANS_TQ_VALID; 682 ccb->ccb_h.status = CAM_REQ_CMP; 683 } else { 684 u_int targ_tinfo; 685 686 cts->flags = 0; 687 if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE) 688 & target_mask) != 0) 689 cts->flags |= CCB_TRANS_DISC_ENB; 690 691 if ((adw->tagenb & target_mask) != 0) 692 cts->flags |= CCB_TRANS_TAG_ENB; 693 694 targ_tinfo = 695 adw_lram_read_16(adw, 696 ADW_MC_DEVICE_HSHK_CFG_TABLE 697 + (2 * ccb->ccb_h.target_id)); 698 699 if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0) 700 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 701 else 702 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 703 704 cts->sync_period = 705 adw_hshk_cfg_period_factor(targ_tinfo); 706 707 cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET; 708 if (cts->sync_period == 0) 709 cts->sync_offset = 0; 710 711 if (cts->sync_offset == 0) 712 cts->sync_period = 0; 713 } 714 cts->valid = CCB_TRANS_SYNC_RATE_VALID 715 | CCB_TRANS_SYNC_OFFSET_VALID 716 | CCB_TRANS_BUS_WIDTH_VALID 717 | CCB_TRANS_DISC_VALID 718 | CCB_TRANS_TQ_VALID; 719 ccb->ccb_h.status = CAM_REQ_CMP; 720 xpt_done(ccb); 721 break; 722 } 723 case XPT_CALC_GEOMETRY: 724 { 725 struct ccb_calc_geometry *ccg; 726 u_int32_t size_mb; 727 u_int32_t secs_per_cylinder; 728 int extended; 729 730 /* 731 * XXX Use Adaptec translation until I find out how to 732 * get this information from the card. 733 */ 734 ccg = &ccb->ccg; 735 size_mb = ccg->volume_size 736 / ((1024L * 1024L) / ccg->block_size); 737 extended = 1; 738 739 if (size_mb > 1024 && extended) { 740 ccg->heads = 255; 741 ccg->secs_per_track = 63; 742 } else { 743 ccg->heads = 64; 744 ccg->secs_per_track = 32; 745 } 746 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 747 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 748 ccb->ccb_h.status = CAM_REQ_CMP; 749 xpt_done(ccb); 750 break; 751 } 752 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 753 { 754 int failure; 755 756 failure = adw_reset_bus(adw); 757 if (failure != 0) { 758 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 759 } else { 760 if (bootverbose) { 761 xpt_print_path(adw->path); 762 printf("Bus Reset Delivered\n"); 763 } 764 ccb->ccb_h.status = CAM_REQ_CMP; 765 } 766 xpt_done(ccb); 767 break; 768 } 769 case XPT_TERM_IO: /* Terminate the I/O process */ 770 /* XXX Implement */ 771 ccb->ccb_h.status = CAM_REQ_INVALID; 772 xpt_done(ccb); 773 break; 774 case XPT_PATH_INQ: /* Path routing inquiry */ 775 { 776 struct ccb_pathinq *cpi = &ccb->cpi; 777 778 cpi->version_num = 1; 779 cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE; 780 cpi->target_sprt = 0; 781 cpi->hba_misc = 0; 782 cpi->hba_eng_cnt = 0; 783 cpi->max_target = ADW_MAX_TID; 784 cpi->max_lun = ADW_MAX_LUN; 785 cpi->initiator_id = adw->initiator_id; 786 cpi->bus_id = cam_sim_bus(sim); 787 cpi->base_transfer_speed = 3300; 788 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 789 strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN); 790 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 791 cpi->unit_number = cam_sim_unit(sim); 792 cpi->ccb_h.status = CAM_REQ_CMP; 793 xpt_done(ccb); 794 break; 795 } 796 default: 797 ccb->ccb_h.status = CAM_REQ_INVALID; 798 xpt_done(ccb); 799 break; 800 } 801 } 802 803 static void 804 adw_poll(struct cam_sim *sim) 805 { 806 adw_intr(cam_sim_softc(sim)); 807 } 808 809 static void 810 adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) 811 { 812 } 813 814 struct adw_softc * 815 adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id) 816 { 817 struct adw_softc *adw; 818 int i; 819 820 /* 821 * Allocate a storage area for us 822 */ 823 adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_INTWAIT | M_ZERO); 824 LIST_INIT(&adw->pending_ccbs); 825 SLIST_INIT(&adw->sg_maps); 826 adw->device = dev; 827 adw->unit = device_get_unit(dev); 828 adw->regs_res_type = regs_type; 829 adw->regs_res_id = regs_id; 830 adw->regs = regs; 831 adw->tag = rman_get_bustag(regs); 832 adw->bsh = rman_get_bushandle(regs); 833 KKASSERT(adw->unit >= 0 && adw->unit < 100); 834 i = adw->unit / 10; 835 adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_INTWAIT); 836 sprintf(adw->name, "adw%d", adw->unit); 837 return(adw); 838 } 839 840 void 841 adw_free(struct adw_softc *adw) 842 { 843 switch (adw->init_level) { 844 case 9: 845 { 846 struct sg_map_node *sg_map; 847 848 while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) { 849 SLIST_REMOVE_HEAD(&adw->sg_maps, links); 850 bus_dmamap_unload(adw->sg_dmat, 851 sg_map->sg_dmamap); 852 bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr, 853 sg_map->sg_dmamap); 854 free(sg_map, M_DEVBUF); 855 } 856 bus_dma_tag_destroy(adw->sg_dmat); 857 } 858 case 8: 859 bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap); 860 case 7: 861 bus_dmamem_free(adw->acb_dmat, adw->acbs, 862 adw->acb_dmamap); 863 bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap); 864 case 6: 865 bus_dma_tag_destroy(adw->acb_dmat); 866 case 5: 867 bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap); 868 case 4: 869 bus_dmamem_free(adw->carrier_dmat, adw->carriers, 870 adw->carrier_dmamap); 871 bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap); 872 case 3: 873 bus_dma_tag_destroy(adw->carrier_dmat); 874 case 2: 875 bus_dma_tag_destroy(adw->buffer_dmat); 876 case 1: 877 bus_dma_tag_destroy(adw->parent_dmat); 878 case 0: 879 break; 880 } 881 free(adw->name, M_DEVBUF); 882 free(adw, M_DEVBUF); 883 } 884 885 int 886 adw_init(struct adw_softc *adw) 887 { 888 struct adw_eeprom eep_config; 889 u_int tid; 890 u_int i; 891 u_int16_t checksum; 892 u_int16_t scsicfg1; 893 894 checksum = adw_eeprom_read(adw, &eep_config); 895 bcopy(eep_config.serial_number, adw->serial_number, 896 sizeof(adw->serial_number)); 897 if (checksum != eep_config.checksum) { 898 u_int16_t serial_number[3]; 899 900 adw->flags |= ADW_EEPROM_FAILED; 901 printf("%s: EEPROM checksum failed. Restoring Defaults\n", 902 adw_name(adw)); 903 904 /* 905 * Restore the default EEPROM settings. 906 * Assume the 6 byte board serial number that was read 907 * from EEPROM is correct even if the EEPROM checksum 908 * failed. 909 */ 910 bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config)); 911 bcopy(adw->serial_number, eep_config.serial_number, 912 sizeof(serial_number)); 913 adw_eeprom_write(adw, &eep_config); 914 } 915 916 /* Pull eeprom information into our softc. */ 917 adw->bios_ctrl = eep_config.bios_ctrl; 918 adw->user_wdtr = eep_config.wdtr_able; 919 for (tid = 0; tid < ADW_MAX_TID; tid++) { 920 u_int mc_sdtr; 921 u_int16_t tid_mask; 922 923 tid_mask = 0x1 << tid; 924 if ((adw->features & ADW_ULTRA) != 0) { 925 /* 926 * Ultra chips store sdtr and ultraenb 927 * bits in their seeprom, so we must 928 * construct valid mc_sdtr entries for 929 * indirectly. 930 */ 931 if (eep_config.sync1.sync_enable & tid_mask) { 932 if (eep_config.sync2.ultra_enable & tid_mask) 933 mc_sdtr = ADW_MC_SDTR_20; 934 else 935 mc_sdtr = ADW_MC_SDTR_10; 936 } else 937 mc_sdtr = ADW_MC_SDTR_ASYNC; 938 } else { 939 switch (ADW_TARGET_GROUP(tid)) { 940 case 3: 941 mc_sdtr = eep_config.sync4.sdtr4; 942 break; 943 case 2: 944 mc_sdtr = eep_config.sync3.sdtr3; 945 break; 946 case 1: 947 mc_sdtr = eep_config.sync2.sdtr2; 948 break; 949 default: /* Shut up compiler */ 950 case 0: 951 mc_sdtr = eep_config.sync1.sdtr1; 952 break; 953 } 954 mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid); 955 mc_sdtr &= 0xFF; 956 } 957 adw_set_user_sdtr(adw, tid, mc_sdtr); 958 } 959 adw->user_tagenb = eep_config.tagqng_able; 960 adw->user_discenb = eep_config.disc_enable; 961 adw->max_acbs = eep_config.max_host_qng; 962 adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID); 963 964 /* 965 * Sanity check the number of host openings. 966 */ 967 if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG) 968 adw->max_acbs = ADW_DEF_MAX_HOST_QNG; 969 else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) { 970 /* If the value is zero, assume it is uninitialized. */ 971 if (adw->max_acbs == 0) 972 adw->max_acbs = ADW_DEF_MAX_HOST_QNG; 973 else 974 adw->max_acbs = ADW_DEF_MIN_HOST_QNG; 975 } 976 977 scsicfg1 = 0; 978 if ((adw->features & ADW_ULTRA2) != 0) { 979 switch (eep_config.termination_lvd) { 980 default: 981 printf("%s: Invalid EEPROM LVD Termination Settings.\n", 982 adw_name(adw)); 983 printf("%s: Reverting to Automatic LVD Termination\n", 984 adw_name(adw)); 985 /* FALLTHROUGH */ 986 case ADW_EEPROM_TERM_AUTO: 987 break; 988 case ADW_EEPROM_TERM_BOTH_ON: 989 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO; 990 /* FALLTHROUGH */ 991 case ADW_EEPROM_TERM_HIGH_ON: 992 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI; 993 /* FALLTHROUGH */ 994 case ADW_EEPROM_TERM_OFF: 995 scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV; 996 break; 997 } 998 } 999 1000 switch (eep_config.termination_se) { 1001 default: 1002 printf("%s: Invalid SE EEPROM Termination Settings.\n", 1003 adw_name(adw)); 1004 printf("%s: Reverting to Automatic SE Termination\n", 1005 adw_name(adw)); 1006 /* FALLTHROUGH */ 1007 case ADW_EEPROM_TERM_AUTO: 1008 break; 1009 case ADW_EEPROM_TERM_BOTH_ON: 1010 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L; 1011 /* FALLTHROUGH */ 1012 case ADW_EEPROM_TERM_HIGH_ON: 1013 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H; 1014 /* FALLTHROUGH */ 1015 case ADW_EEPROM_TERM_OFF: 1016 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL; 1017 break; 1018 } 1019 printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id); 1020 1021 /* DMA tag for mapping buffers into device visible space. */ 1022 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, 1023 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 1024 /*highaddr*/BUS_SPACE_MAXADDR, 1025 /*filter*/NULL, /*filterarg*/NULL, 1026 /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE, 1027 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1028 /*flags*/BUS_DMA_ALLOCNOW, 1029 &adw->buffer_dmat) != 0) { 1030 return (ENOMEM); 1031 } 1032 1033 adw->init_level++; 1034 1035 /* DMA tag for our ccb carrier structures */ 1036 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0x10, 1037 /*boundary*/0, 1038 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 1039 /*highaddr*/BUS_SPACE_MAXADDR, 1040 /*filter*/NULL, /*filterarg*/NULL, 1041 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) 1042 * sizeof(struct adw_carrier), 1043 /*nsegments*/1, 1044 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1045 /*flags*/0, &adw->carrier_dmat) != 0) { 1046 return (ENOMEM); 1047 } 1048 1049 adw->init_level++; 1050 1051 /* Allocation for our ccb carrier structures */ 1052 if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers, 1053 BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) { 1054 return (ENOMEM); 1055 } 1056 1057 adw->init_level++; 1058 1059 /* And permanently map them */ 1060 bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap, 1061 adw->carriers, 1062 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) 1063 * sizeof(struct adw_carrier), 1064 adwmapmem, &adw->carrier_busbase, /*flags*/0); 1065 1066 /* Clear them out. */ 1067 bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) 1068 * sizeof(struct adw_carrier)); 1069 1070 /* Setup our free carrier list */ 1071 adw->free_carriers = adw->carriers; 1072 for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) { 1073 adw->carriers[i].carr_offset = 1074 carriervtobo(adw, &adw->carriers[i]); 1075 adw->carriers[i].carr_ba = 1076 carriervtob(adw, &adw->carriers[i]); 1077 adw->carriers[i].areq_ba = 0; 1078 adw->carriers[i].next_ba = 1079 carriervtobo(adw, &adw->carriers[i+1]); 1080 } 1081 /* Terminal carrier. Never leaves the freelist */ 1082 adw->carriers[i].carr_offset = 1083 carriervtobo(adw, &adw->carriers[i]); 1084 adw->carriers[i].carr_ba = 1085 carriervtob(adw, &adw->carriers[i]); 1086 adw->carriers[i].areq_ba = 0; 1087 adw->carriers[i].next_ba = ~0; 1088 1089 adw->init_level++; 1090 1091 /* DMA tag for our acb structures */ 1092 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, 1093 /*lowaddr*/BUS_SPACE_MAXADDR, 1094 /*highaddr*/BUS_SPACE_MAXADDR, 1095 /*filter*/NULL, /*filterarg*/NULL, 1096 adw->max_acbs * sizeof(struct acb), 1097 /*nsegments*/1, 1098 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1099 /*flags*/0, &adw->acb_dmat) != 0) { 1100 return (ENOMEM); 1101 } 1102 1103 adw->init_level++; 1104 1105 /* Allocation for our ccbs */ 1106 if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs, 1107 BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0) 1108 return (ENOMEM); 1109 1110 adw->init_level++; 1111 1112 /* And permanently map them */ 1113 bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap, 1114 adw->acbs, 1115 adw->max_acbs * sizeof(struct acb), 1116 adwmapmem, &adw->acb_busbase, /*flags*/0); 1117 1118 /* Clear them out. */ 1119 bzero(adw->acbs, adw->max_acbs * sizeof(struct acb)); 1120 1121 /* DMA tag for our S/G structures. We allocate in page sized chunks */ 1122 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, 1123 /*lowaddr*/BUS_SPACE_MAXADDR, 1124 /*highaddr*/BUS_SPACE_MAXADDR, 1125 /*filter*/NULL, /*filterarg*/NULL, 1126 PAGE_SIZE, /*nsegments*/1, 1127 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1128 /*flags*/0, &adw->sg_dmat) != 0) { 1129 return (ENOMEM); 1130 } 1131 1132 adw->init_level++; 1133 1134 /* Allocate our first batch of ccbs */ 1135 if (adwallocacbs(adw) == 0) 1136 return (ENOMEM); 1137 1138 if (adw_init_chip(adw, scsicfg1) != 0) 1139 return (ENXIO); 1140 1141 printf("Queue Depth %d\n", adw->max_acbs); 1142 1143 return (0); 1144 } 1145 1146 /* 1147 * Attach all the sub-devices we can find 1148 */ 1149 int 1150 adw_attach(struct adw_softc *adw) 1151 { 1152 struct ccb_setasync csa; 1153 int s; 1154 int error; 1155 1156 error = 0; 1157 s = splcam(); 1158 /* Hook up our interrupt handler */ 1159 if ((error = bus_setup_intr(adw->device, adw->irq, INTR_TYPE_CAM, 1160 adw_intr, adw, &adw->ih)) != 0) { 1161 device_printf(adw->device, "bus_setup_intr() failed: %d\n", 1162 error); 1163 goto fail; 1164 } 1165 1166 /* Start the Risc processor now that we are fully configured. */ 1167 adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN); 1168 1169 /* 1170 * Construct our SIM entry. 1171 */ 1172 adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit, 1173 1, adw->max_acbs, NULL); 1174 if (adw->sim == NULL) { 1175 error = ENOMEM; 1176 goto fail; 1177 } 1178 1179 /* 1180 * Register the bus. 1181 */ 1182 if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) { 1183 cam_sim_free(adw->sim); 1184 error = ENOMEM; 1185 goto fail; 1186 } 1187 1188 if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim), 1189 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) 1190 == CAM_REQ_CMP) { 1191 xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5); 1192 csa.ccb_h.func_code = XPT_SASYNC_CB; 1193 csa.event_enable = AC_LOST_DEVICE; 1194 csa.callback = adw_async; 1195 csa.callback_arg = adw; 1196 xpt_action((union ccb *)&csa); 1197 } 1198 1199 fail: 1200 splx(s); 1201 return (error); 1202 } 1203 1204 void 1205 adw_intr(void *arg) 1206 { 1207 struct adw_softc *adw; 1208 u_int int_stat; 1209 1210 adw = (struct adw_softc *)arg; 1211 if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0) 1212 return; 1213 1214 /* Reading the register clears the interrupt. */ 1215 int_stat = adw_inb(adw, ADW_INTR_STATUS_REG); 1216 1217 if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) { 1218 u_int intrb_code; 1219 1220 /* Async Microcode Event */ 1221 intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE); 1222 switch (intrb_code) { 1223 case ADW_ASYNC_CARRIER_READY_FAILURE: 1224 /* 1225 * The RISC missed our update of 1226 * the commandq. 1227 */ 1228 if (LIST_FIRST(&adw->pending_ccbs) != NULL) 1229 adw_tickle_risc(adw, ADW_TICKLE_A); 1230 break; 1231 case ADW_ASYNC_SCSI_BUS_RESET_DET: 1232 /* 1233 * The firmware detected a SCSI Bus reset. 1234 */ 1235 printf("Someone Reset the Bus\n"); 1236 adw_handle_bus_reset(adw, /*initiated*/FALSE); 1237 break; 1238 case ADW_ASYNC_RDMA_FAILURE: 1239 /* 1240 * Handle RDMA failure by resetting the 1241 * SCSI Bus and chip. 1242 */ 1243 #if XXX 1244 AdvResetChipAndSB(adv_dvc_varp); 1245 #endif 1246 break; 1247 1248 case ADW_ASYNC_HOST_SCSI_BUS_RESET: 1249 /* 1250 * Host generated SCSI bus reset occurred. 1251 */ 1252 adw_handle_bus_reset(adw, /*initiated*/TRUE); 1253 break; 1254 default: 1255 printf("adw_intr: unknown async code 0x%x\n", 1256 intrb_code); 1257 break; 1258 } 1259 } 1260 1261 /* 1262 * Run down the RequestQ. 1263 */ 1264 while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) { 1265 struct adw_carrier *free_carrier; 1266 struct acb *acb; 1267 union ccb *ccb; 1268 1269 #if 0 1270 printf("0x%x, 0x%x, 0x%x, 0x%x\n", 1271 adw->responseq->carr_offset, 1272 adw->responseq->carr_ba, 1273 adw->responseq->areq_ba, 1274 adw->responseq->next_ba); 1275 #endif 1276 /* 1277 * The firmware copies the adw_scsi_req_q.acb_baddr 1278 * field into the areq_ba field of the carrier. 1279 */ 1280 acb = acbbotov(adw, adw->responseq->areq_ba); 1281 1282 /* 1283 * The least significant four bits of the next_ba 1284 * field are used as flags. Mask them out and then 1285 * advance through the list. 1286 */ 1287 free_carrier = adw->responseq; 1288 adw->responseq = 1289 carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK); 1290 free_carrier->next_ba = adw->free_carriers->carr_offset; 1291 adw->free_carriers = free_carrier; 1292 1293 /* Process CCB */ 1294 ccb = acb->ccb; 1295 callout_stop(&ccb->ccb_h.timeout_ch); 1296 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1297 bus_dmasync_op_t op; 1298 1299 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1300 op = BUS_DMASYNC_POSTREAD; 1301 else 1302 op = BUS_DMASYNC_POSTWRITE; 1303 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); 1304 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); 1305 ccb->csio.resid = acb->queue.data_cnt; 1306 } else 1307 ccb->csio.resid = 0; 1308 1309 /* Common Cases inline... */ 1310 if (acb->queue.host_status == QHSTA_NO_ERROR 1311 && (acb->queue.done_status == QD_NO_ERROR 1312 || acb->queue.done_status == QD_WITH_ERROR)) { 1313 ccb->csio.scsi_status = acb->queue.scsi_status; 1314 ccb->ccb_h.status = 0; 1315 switch (ccb->csio.scsi_status) { 1316 case SCSI_STATUS_OK: 1317 ccb->ccb_h.status |= CAM_REQ_CMP; 1318 break; 1319 case SCSI_STATUS_CHECK_COND: 1320 case SCSI_STATUS_CMD_TERMINATED: 1321 bcopy(&acb->sense_data, &ccb->csio.sense_data, 1322 ccb->csio.sense_len); 1323 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1324 ccb->csio.sense_resid = acb->queue.sense_len; 1325 /* FALLTHROUGH */ 1326 default: 1327 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR 1328 | CAM_DEV_QFRZN; 1329 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1330 break; 1331 } 1332 adwfreeacb(adw, acb); 1333 xpt_done(ccb); 1334 } else { 1335 adwprocesserror(adw, acb); 1336 } 1337 } 1338 } 1339 1340 static void 1341 adwprocesserror(struct adw_softc *adw, struct acb *acb) 1342 { 1343 union ccb *ccb; 1344 1345 ccb = acb->ccb; 1346 if (acb->queue.done_status == QD_ABORTED_BY_HOST) { 1347 ccb->ccb_h.status = CAM_REQ_ABORTED; 1348 } else { 1349 1350 switch (acb->queue.host_status) { 1351 case QHSTA_M_SEL_TIMEOUT: 1352 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 1353 break; 1354 case QHSTA_M_SXFR_OFF_UFLW: 1355 case QHSTA_M_SXFR_OFF_OFLW: 1356 case QHSTA_M_DATA_OVER_RUN: 1357 ccb->ccb_h.status = CAM_DATA_RUN_ERR; 1358 break; 1359 case QHSTA_M_SXFR_DESELECTED: 1360 case QHSTA_M_UNEXPECTED_BUS_FREE: 1361 ccb->ccb_h.status = CAM_UNEXP_BUSFREE; 1362 break; 1363 case QHSTA_M_SCSI_BUS_RESET: 1364 case QHSTA_M_SCSI_BUS_RESET_UNSOL: 1365 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 1366 break; 1367 case QHSTA_M_BUS_DEVICE_RESET: 1368 ccb->ccb_h.status = CAM_BDR_SENT; 1369 break; 1370 case QHSTA_M_QUEUE_ABORTED: 1371 /* BDR or Bus Reset */ 1372 printf("Saw Queue Aborted\n"); 1373 ccb->ccb_h.status = adw->last_reset; 1374 break; 1375 case QHSTA_M_SXFR_SDMA_ERR: 1376 case QHSTA_M_SXFR_SXFR_PERR: 1377 case QHSTA_M_RDMA_PERR: 1378 ccb->ccb_h.status = CAM_UNCOR_PARITY; 1379 break; 1380 case QHSTA_M_WTM_TIMEOUT: 1381 case QHSTA_M_SXFR_WD_TMO: 1382 { 1383 /* The SCSI bus hung in a phase */ 1384 xpt_print_path(adw->path); 1385 printf("Watch Dog timer expired. Reseting bus\n"); 1386 adw_reset_bus(adw); 1387 break; 1388 } 1389 case QHSTA_M_SXFR_XFR_PH_ERR: 1390 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1391 break; 1392 case QHSTA_M_SXFR_UNKNOWN_ERROR: 1393 break; 1394 case QHSTA_M_BAD_CMPL_STATUS_IN: 1395 /* No command complete after a status message */ 1396 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1397 break; 1398 case QHSTA_M_AUTO_REQ_SENSE_FAIL: 1399 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; 1400 break; 1401 case QHSTA_M_INVALID_DEVICE: 1402 ccb->ccb_h.status = CAM_PATH_INVALID; 1403 break; 1404 case QHSTA_M_NO_AUTO_REQ_SENSE: 1405 /* 1406 * User didn't request sense, but we got a 1407 * check condition. 1408 */ 1409 ccb->csio.scsi_status = acb->queue.scsi_status; 1410 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1411 break; 1412 default: 1413 panic("%s: Unhandled Host status error %x", 1414 adw_name(adw), acb->queue.host_status); 1415 /* NOTREACHED */ 1416 } 1417 } 1418 if ((acb->state & ACB_RECOVERY_ACB) != 0) { 1419 if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET 1420 || ccb->ccb_h.status == CAM_BDR_SENT) 1421 ccb->ccb_h.status = CAM_CMD_TIMEOUT; 1422 } 1423 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1424 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1425 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1426 } 1427 adwfreeacb(adw, acb); 1428 xpt_done(ccb); 1429 } 1430 1431 static void 1432 adwtimeout(void *arg) 1433 { 1434 struct acb *acb; 1435 union ccb *ccb; 1436 struct adw_softc *adw; 1437 adw_idle_cmd_status_t status; 1438 int target_id; 1439 int s; 1440 1441 acb = (struct acb *)arg; 1442 ccb = acb->ccb; 1443 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; 1444 xpt_print_path(ccb->ccb_h.path); 1445 printf("ACB %p - timed out\n", (void *)acb); 1446 1447 s = splcam(); 1448 1449 if ((acb->state & ACB_ACTIVE) == 0) { 1450 xpt_print_path(ccb->ccb_h.path); 1451 printf("ACB %p - timed out CCB already completed\n", 1452 (void *)acb); 1453 splx(s); 1454 return; 1455 } 1456 1457 acb->state |= ACB_RECOVERY_ACB; 1458 target_id = ccb->ccb_h.target_id; 1459 1460 /* Attempt a BDR first */ 1461 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, 1462 ccb->ccb_h.target_id); 1463 splx(s); 1464 if (status == ADW_IDLE_CMD_SUCCESS) { 1465 printf("%s: BDR Delivered. No longer in timeout\n", 1466 adw_name(adw)); 1467 adw_handle_device_reset(adw, target_id); 1468 } else { 1469 adw_reset_bus(adw); 1470 xpt_print_path(adw->path); 1471 printf("Bus Reset Delivered. No longer in timeout\n"); 1472 } 1473 } 1474 1475 static void 1476 adw_handle_device_reset(struct adw_softc *adw, u_int target) 1477 { 1478 struct cam_path *path; 1479 cam_status error; 1480 1481 error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim), 1482 target, CAM_LUN_WILDCARD); 1483 1484 if (error == CAM_REQ_CMP) { 1485 xpt_async(AC_SENT_BDR, path, NULL); 1486 xpt_free_path(path); 1487 } 1488 adw->last_reset = CAM_BDR_SENT; 1489 } 1490 1491 static void 1492 adw_handle_bus_reset(struct adw_softc *adw, int initiated) 1493 { 1494 if (initiated) { 1495 /* 1496 * The microcode currently sets the SCSI Bus Reset signal 1497 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET 1498 * command above. But the SCSI Bus Reset Hold Time in the 1499 * microcode is not deterministic (it may in fact be for less 1500 * than the SCSI Spec. minimum of 25 us). Therefore on return 1501 * the Adv Library sets the SCSI Bus Reset signal for 1502 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater 1503 * than 25 us. 1504 */ 1505 u_int scsi_ctrl; 1506 1507 scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT; 1508 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT); 1509 DELAY(ADW_SCSI_RESET_HOLD_TIME_US); 1510 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl); 1511 1512 /* 1513 * We will perform the async notification when the 1514 * SCSI Reset interrupt occurs. 1515 */ 1516 } else 1517 xpt_async(AC_BUS_RESET, adw->path, NULL); 1518 adw->last_reset = CAM_SCSI_BUS_RESET; 1519 } 1520