/*
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 * adw_pci.c	ABP[3]940UW, ABP950UW, ABP3940U2W
 *
 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/advansys/adwcam.c,v 1.7.2.2 2001/03/05 13:08:55 obrien Exp $
 * $DragonFly: src/sys/dev/disk/advansys/adwcam.c,v 1.6 2004/05/13 19:44:31 dillon Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bus.h>

#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_message.h>

#include "adwvar.h"

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

u_long adw_unit;

static __inline cam_status	adwccbstatus(union ccb*);
static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

static __inline cam_status
adwccbstatus(union ccb* ccb)
{
	return (ccb->ccb_h.status & CAM_STATUS_MASK);
}

static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct acb* acb;
	int s;

	s = splcam();
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			printf("%s: Can't malloc ACB\n", adw_name(adw));
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}
	splx(s);

	return (acb);
}

static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{
	int s;

	s = splcam();
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
	splx(s);
}

static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_INTWAIT);

	/* Allocate S/G space for the next batch of ACBS */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
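		/* No DMA memory available; release the node and fail. */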
		free(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr,
			/*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);
	return (sg_map);
}

/*
 * Allocate another chunk of CCB's. Return count of entries added.
 * Assumed to be called at splcam().
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	next_acb = &adw->acbs[adw->num_acbs];
	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
		next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
		next_acb->queue.sense_baddr =
		    acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		next_acb->state = ACB_FREE;
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}

static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct acb *acb;
	union ccb *ccb;
	struct adw_softc *adw;
	int s;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (error != 0) {
		if (error != EFBIG)
			printf("%s: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", adw_name(adw), error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int i;

				sg = sg_block->sg_list;
				for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
					if (dm_segs >= end_seg)
						break;

					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
				}
				sg_block->sg_cnt = i;
				sg_index += i;
				if (dm_segs == end_seg) {
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

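		/*
		 * Make the data buffer visible to the controller before
		 * the request is queued.
		 */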
		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
	} else {
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}

	s = splcam();

	/*
	 * Last chance to check if this CCB needs to be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		splx(s);
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	ccb->ccb_h.timeout_ch =
	    timeout(adwtimeout, (caddr_t)acb,
		    (ccb->ccb_h.timeout * hz) / 1000);

	adw_send_acb(adw, acb, acbvtob(adw, acb));

	splx(s);
}

static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct ccb_scsiio *csio;
		struct ccb_hdr *ccbh;
		struct acb *acb;

		csio = &ccb->csio;
		ccbh = &ccb->ccb_h;

		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			int s;

			s = splcam();
			adw->state |= ADW_RESOURCE_SHORTAGE;
			splx(s);
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_cmd = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.mflag = 0;
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
			switch (csio->tag_action) {
			case MSG_SIMPLE_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
				break;
			case MSG_HEAD_OF_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
				break;
			case MSG_ORDERED_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
				break;
			default:
				acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
				break;
			}
		} else
			acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;

		if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
			acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.sg_wk_ix = 0;
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

		/*
		 * If we have any data to send with this command,
		 * map it into bus space.
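		 * The CCB may carry a single virtual or physical buffer,
		 * or a pre-built scatter/gather list.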
		 */
		if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					s = splsoftvm();
					error =
					    bus_dmamap_load(adw->buffer_dmat,
							    acb->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adwexecuteacb,
							    acb, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(sim, 1);
						acb->state |= ACB_RELEASE_SIMQ;
					}
					splx(s);
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adwexecuteacb(acb, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((ccbh->flags & CAM_DATA_PHYS) != 0)
					panic("adw_action - Physical "
					      "segment pointers "
					      "unsupported");

				if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adw_action - Virtual "
					      "segment addresses "
					      "unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adwexecuteacb(acb, segs, csio->sglist_cnt,
					      (csio->sglist_cnt < ADW_SGSIZE)
					      ? 0 : EFBIG);
			}
		} else {
			adwexecuteacb(acb, NULL, 0, 0);
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		adw_idle_cmd_status_t status;

		status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
					   ccb->ccb_h.target_id);
		if (status == ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			if (bootverbose) {
				xpt_print_path(ccb->ccb_h.path);
				printf("BDR Delivered\n");
			}
		} else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		break;
	}
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		u_int target_mask;
		int s;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;

		s = splcam();
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			u_int sdtrdone;

			sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				u_int discenb;

				discenb =
				    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);

				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					discenb |= target_mask;
				else
					discenb &= ~target_mask;

				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
						  discenb);
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {

				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adw->tagenb |= target_mask;
				else
					adw->tagenb &= ~target_mask;
			}

			if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
				u_int wdtrenb_orig;
				u_int wdtrenb;
				u_int wdtrdone;

				wdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
				wdtrenb = wdtrenb_orig;
				wdtrdone = adw_lram_read_16(adw,
							    ADW_MC_WDTR_DONE);
				switch (cts->bus_width) {
				case MSG_EXT_WDTR_BUS_32_BIT:
				case MSG_EXT_WDTR_BUS_16_BIT:
					wdtrenb |= target_mask;
					break;
				case MSG_EXT_WDTR_BUS_8_BIT:
				default:
					wdtrenb &= ~target_mask;
					break;
				}
				if (wdtrenb != wdtrenb_orig) {
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_ABLE,
							  wdtrenb);
					wdtrdone &= ~target_mask;
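					/*
					 * Clearing the "done" flag lets
					 * the firmware renegotiate width
					 * with this target.
					 */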
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
					/* Wide negotiation forces async */
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}

			if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
			 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
				u_int sdtr_orig;
				u_int sdtr;
				u_int sdtrable_orig;
				u_int sdtrable;

				sdtr = adw_get_chip_sdtr(adw,
							 ccb->ccb_h.target_id);
				sdtr_orig = sdtr;
				sdtrable = adw_lram_read_16(adw,
							    ADW_MC_SDTR_ABLE);
				sdtrable_orig = sdtrable;

				if ((cts->valid
				   & CCB_TRANS_SYNC_RATE_VALID) != 0) {

					sdtr =
					    adw_find_sdtr(adw,
							  cts->sync_period);
				}

				if ((cts->valid
				   & CCB_TRANS_SYNC_OFFSET_VALID) != 0) {
					if (cts->sync_offset == 0)
						sdtr = ADW_MC_SDTR_ASYNC;
				}

				if (sdtr == ADW_MC_SDTR_ASYNC)
					sdtrable &= ~target_mask;
				else
					sdtrable |= target_mask;
				if (sdtr != sdtr_orig
				 || sdtrable != sdtrable_orig) {
					adw_set_chip_sdtr(adw,
							  ccb->ccb_h.target_id,
							  sdtr);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrable);
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}
		}
		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings *cts;
		u_int target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			u_int mc_sdtr;

			cts->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
			cts->sync_period = adw_find_period(adw, mc_sdtr);
			if (cts->sync_period != 0)
				cts->sync_offset = 15; /* XXX ??? */
			else
				cts->sync_offset = 0;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			u_int targ_tinfo;

			cts->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			cts->sync_period =
			    adw_hshk_cfg_period_factor(targ_tinfo);

			cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (cts->sync_period == 0)
				cts->sync_offset = 0;

			if (cts->sync_offset == 0)
				cts->sync_period = 0;
		}
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int extended;

		/*
		 * XXX Use Adaptec translation until I find out how to
		 *     get this information from the card.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = 1;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int failure;

		failure = adw_reset_bus(adw);
		if (failure != 0) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			if (bootverbose) {
				xpt_print_path(adw->path);
				printf("Bus Reset Delivered\n");
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

static void
adw_poll(struct cam_sim *sim)
{
	adw_intr(cam_sim_softc(sim));
}

static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}

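/*
 * Allocate a softc, record our register space resources, and build
 * the "adw<unit>" name used in diagnostics.
 */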
struct adw_softc *
adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
{
	struct adw_softc *adw;
	int i;

	/*
	 * Allocate a storage area for us
	 */
	adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_INTWAIT | M_ZERO);
	LIST_INIT(&adw->pending_ccbs);
	SLIST_INIT(&adw->sg_maps);
	adw->device = dev;
	adw->unit = device_get_unit(dev);
	adw->regs_res_type = regs_type;
	adw->regs_res_id = regs_id;
	adw->regs = regs;
	adw->tag = rman_get_bustag(regs);
	adw->bsh = rman_get_bushandle(regs);
	KKASSERT(adw->unit >= 0 && adw->unit < 100);
	i = adw->unit / 10;
	adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_INTWAIT);
	sprintf(adw->name, "adw%d", adw->unit);
	return(adw);
}

void
adw_free(struct adw_softc *adw)
{
	switch (adw->init_level) {
	case 9:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	case 8:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	case 7:
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
		bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
	case 6:
		bus_dma_tag_destroy(adw->acb_dmat);
	case 5:
		bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
	case 4:
		bus_dmamem_free(adw->carrier_dmat, adw->carriers,
				adw->carrier_dmamap);
		bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
	case 3:
		bus_dma_tag_destroy(adw->carrier_dmat);
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	case 0:
		break;
	}
	free(adw->name, M_DEVBUF);
	free(adw, M_DEVBUF);
}

int
adw_init(struct adw_softc *adw)
{
	struct adw_eeprom eep_config;
	u_int tid;
	u_int i;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		adw->flags |= ADW_EEPROM_FAILED;
		printf("%s: EEPROM checksum failed.  Restoring Defaults\n",
		       adw_name(adw));

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	for (tid = 0; tid < ADW_MAX_TID; tid++) {
		u_int mc_sdtr;
		u_int16_t tid_mask;

		tid_mask = 0x1 << tid;
		if ((adw->features & ADW_ULTRA) != 0) {
			/*
			 * Ultra chips store sdtr and ultraenb
			 * bits in their seeprom, so we must
			 * construct valid mc_sdtr entries
			 * indirectly.
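			 * A set sync bit means 10MHz transfers, or
			 * 20MHz when the ultra bit is also set;
			 * otherwise the target runs async.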
			 */
			if (eep_config.sync1.sync_enable & tid_mask) {
				if (eep_config.sync2.ultra_enable & tid_mask)
					mc_sdtr = ADW_MC_SDTR_20;
				else
					mc_sdtr = ADW_MC_SDTR_10;
			} else
				mc_sdtr = ADW_MC_SDTR_ASYNC;
		} else {
			switch (ADW_TARGET_GROUP(tid)) {
			case 3:
				mc_sdtr = eep_config.sync4.sdtr4;
				break;
			case 2:
				mc_sdtr = eep_config.sync3.sdtr3;
				break;
			case 1:
				mc_sdtr = eep_config.sync2.sdtr2;
				break;
			default: /* Shut up compiler */
			case 0:
				mc_sdtr = eep_config.sync1.sdtr1;
				break;
			}
			mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
			mc_sdtr &= 0xFF;
		}
		adw_set_user_sdtr(adw, tid, mc_sdtr);
	}
	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	scsicfg1 = 0;
	if ((adw->features & ADW_ULTRA2) != 0) {
		switch (eep_config.termination_lvd) {
		default:
			printf("%s: Invalid EEPROM LVD Termination Settings.\n",
			       adw_name(adw));
			printf("%s: Reverting to Automatic LVD Termination\n",
			       adw_name(adw));
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_AUTO:
			break;
		case ADW_EEPROM_TERM_BOTH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_HIGH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_OFF:
			scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
			break;
		}
	}

	switch (eep_config.termination_se) {
	default:
		printf("%s: Invalid SE EEPROM Termination Settings.\n",
		       adw_name(adw));
		printf("%s: Reverting to Automatic SE Termination\n",
		       adw_name(adw));
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}
	printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id);

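	/*
	 * init_level records how far we get; adw_free() uses it to
	 * unwind a partially completed initialization.
	 */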
	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adw->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* DMA tag for our ccb carrier structures */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0x10,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
				* sizeof(struct adw_carrier),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->carrier_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccb carrier structures */
	if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
			     BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
			adw->carriers,
			(adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			 * sizeof(struct adw_carrier),
			adwmapmem, &adw->carrier_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			     * sizeof(struct adw_carrier));

	/* Setup our free carrier list */
	adw->free_carriers = adw->carriers;
	for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
		adw->carriers[i].carr_offset =
			carriervtobo(adw, &adw->carriers[i]);
		adw->carriers[i].carr_ba =
			carriervtob(adw, &adw->carriers[i]);
		adw->carriers[i].areq_ba = 0;
		adw->carriers[i].next_ba =
			carriervtobo(adw, &adw->carriers[i+1]);
	}
	/* Terminal carrier.  Never leaves the freelist */
	adw->carriers[i].carr_offset =
		carriervtobo(adw, &adw->carriers[i]);
	adw->carriers[i].carr_ba =
		carriervtob(adw, &adw->carriers[i]);
	adw->carriers[i].areq_ba = 0;
	adw->carriers[i].next_ba = ~0;

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       adw->max_acbs * sizeof(struct acb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->acb_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
		return (ENOMEM);

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
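	/*
	 * adwallocacbs() carves each page into ADW_SG_BLOCKCNT S/G blocks
	 * per ACB.
	 */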
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->sg_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocate our first batch of ccbs */
	if (adwallocacbs(adw) == 0)
		return (ENOMEM);

	if (adw_init_chip(adw, scsicfg1) != 0)
		return (ENXIO);

	printf("Queue Depth %d\n", adw->max_acbs);

	return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
	struct ccb_setasync csa;
	int s;
	int error;

	error = 0;
	s = splcam();
	/* Hook up our interrupt handler */
	if ((error = bus_setup_intr(adw->device, adw->irq, INTR_TYPE_CAM,
				    adw_intr, adw, &adw->ih)) != 0) {
		device_printf(adw->device, "bus_setup_intr() failed: %d\n",
			      error);
		goto fail;
	}

	/* Start the Risc processor now that we are fully configured. */
	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

	/*
	 * Construct our SIM entry.
	 */
	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit,
				 1, adw->max_acbs, NULL);
	if (adw->sim == NULL) {
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Register the bus.
	 */
	if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adw->sim);
		error = ENOMEM;
		goto fail;
	}

	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	   == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = adw_async;
		csa.callback_arg = adw;
		xpt_action((union ccb *)&csa);
	}

fail:
	splx(s);
	return (error);
}

void
adw_intr(void *arg)
{
	struct adw_softc *adw;
	u_int int_stat;

	adw = (struct adw_softc *)arg;
	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		u_int intrb_code;

		/* Async Microcode Event */
		intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
		switch (intrb_code) {
		case ADW_ASYNC_CARRIER_READY_FAILURE:
			/*
			 * The RISC missed our update of
			 * the commandq.
			 */
			if (LIST_FIRST(&adw->pending_ccbs) != NULL)
				adw_tickle_risc(adw, ADW_TICKLE_A);
			break;
		case ADW_ASYNC_SCSI_BUS_RESET_DET:
			/*
			 * The firmware detected a SCSI Bus reset.
			 */
			printf("Someone Reset the Bus\n");
			adw_handle_bus_reset(adw, /*initiated*/FALSE);
			break;
		case ADW_ASYNC_RDMA_FAILURE:
			/*
			 * Handle RDMA failure by resetting the
			 * SCSI Bus and chip.
			 */
#if XXX
			AdvResetChipAndSB(adv_dvc_varp);
#endif
			break;

		case ADW_ASYNC_HOST_SCSI_BUS_RESET:
			/*
			 * Host generated SCSI bus reset occurred.
			 */
			adw_handle_bus_reset(adw, /*initiated*/TRUE);
			break;
		default:
			printf("adw_intr: unknown async code 0x%x\n",
			       intrb_code);
			break;
		}
	}

	/*
	 * Run down the ResponseQ.
	 */
	while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
		struct adw_carrier *free_carrier;
		struct acb *acb;
		union ccb *ccb;

#if 0
		printf("0x%x, 0x%x, 0x%x, 0x%x\n",
		       adw->responseq->carr_offset,
		       adw->responseq->carr_ba,
		       adw->responseq->areq_ba,
		       adw->responseq->next_ba);
#endif
		/*
		 * The firmware copies the adw_scsi_req_q.acb_baddr
		 * field into the areq_ba field of the carrier.
		 */
		acb = acbbotov(adw, adw->responseq->areq_ba);

		/*
		 * The least significant four bits of the next_ba
		 * field are used as flags.  Mask them out and then
		 * advance through the list.
		 */
		free_carrier = adw->responseq;
		adw->responseq =
		    carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
		free_carrier->next_ba = adw->free_carriers->carr_offset;
		adw->free_carriers = free_carrier;

		/* Process CCB */
		ccb = acb->ccb;
		untimeout(adwtimeout, acb, ccb->ccb_h.timeout_ch);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
			ccb->csio.resid = acb->queue.data_cnt;
		} else
			ccb->csio.resid = 0;

		/* Common Cases inline... */
		if (acb->queue.host_status == QHSTA_NO_ERROR
		 && (acb->queue.done_status == QD_NO_ERROR
		  || acb->queue.done_status == QD_WITH_ERROR)) {
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = 0;
			switch (ccb->csio.scsi_status) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				bcopy(&acb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid = acb->queue.sense_len;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
						  |  CAM_DEV_QFRZN;
				xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
				break;
			}
			adwfreeacb(adw, acb);
			xpt_done(ccb);
		} else {
			adwprocesserror(adw, acb);
		}
	}
}

static void
adwprocesserror(struct adw_softc *adw, struct acb *acb)
{
	union ccb *ccb;

	ccb = acb->ccb;
	if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	} else {

		switch (acb->queue.host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			break;
		case QHSTA_M_BUS_DEVICE_RESET:
			ccb->ccb_h.status = CAM_BDR_SENT;
			break;
		case QHSTA_M_QUEUE_ABORTED:
			/* BDR or Bus Reset */
			printf("Saw Queue Aborted\n");
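			/*
			 * Complete the request with the status of the
			 * BDR or bus reset that flushed it.
			 */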
			ccb->ccb_h.status = adw->last_reset;
			break;
		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			ccb->ccb_h.status = CAM_UNCOR_PARITY;
			break;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
		{
			/* The SCSI bus hung in a phase */
			xpt_print_path(adw->path);
			printf("Watch Dog timer expired.  Resetting bus\n");
			adw_reset_bus(adw);
			break;
		}
		case QHSTA_M_SXFR_XFR_PH_ERR:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_M_INVALID_DEVICE:
			ccb->ccb_h.status = CAM_PATH_INVALID;
			break;
		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		default:
			panic("%s: Unhandled Host status error %x",
			      adw_name(adw), acb->queue.host_status);
			/* NOTREACHED */
		}
	}
	if ((acb->state & ACB_RECOVERY_ACB) != 0) {
		if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
		 || ccb->ccb_h.status == CAM_BDR_SENT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adwfreeacb(adw, acb);
	xpt_done(ccb);
}

static void
adwtimeout(void *arg)
{
	struct acb *acb;
	union ccb *ccb;
	struct adw_softc *adw;
	adw_idle_cmd_status_t status;
	int target_id;
	int s;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
	xpt_print_path(ccb->ccb_h.path);
	printf("ACB %p - timed out\n", (void *)acb);

	s = splcam();

	if ((acb->state & ACB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("ACB %p - timed out CCB already completed\n",
		       (void *)acb);
		splx(s);
		return;
	}

	acb->state |= ACB_RECOVERY_ACB;
	target_id = ccb->ccb_h.target_id;

	/* Attempt a BDR first */
	status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
				   ccb->ccb_h.target_id);
	splx(s);
	if (status == ADW_IDLE_CMD_SUCCESS) {
		printf("%s: BDR Delivered.  No longer in timeout\n",
		       adw_name(adw));
		adw_handle_device_reset(adw, target_id);
	} else {
		adw_reset_bus(adw);
		xpt_print_path(adw->path);
		printf("Bus Reset Delivered.  No longer in timeout\n");
	}
}

static void
adw_handle_device_reset(struct adw_softc *adw, u_int target)
{
	struct cam_path *path;
	cam_status error;

	error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
				target, CAM_LUN_WILDCARD);

	if (error == CAM_REQ_CMP) {
		xpt_async(AC_SENT_BDR, path, NULL);
		xpt_free_path(path);
	}
	adw->last_reset = CAM_BDR_SENT;
}

static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
	if (initiated) {
		/*
		 * The microcode currently sets the SCSI Bus Reset signal
		 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
		 * command above.  But the SCSI Bus Reset Hold Time in the
		 * microcode is not deterministic (it may in fact be for less
		 * than the SCSI Spec. minimum of 25 us).  Therefore on return
		 * the Adv Library sets the SCSI Bus Reset signal for
		 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
		 * than 25 us.
		 */
		u_int scsi_ctrl;

		scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
		DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

		/*
		 * We will perform the async notification when the
		 * SCSI Reset interrupt occurs.
		 */
	} else
		xpt_async(AC_BUS_RESET, adw->path, NULL);
	adw->last_reset = CAM_SCSI_BUS_RESET;
}