1 /* 2 * CAM SCSI interface for the the Advanced Systems Inc. 3 * Second Generation SCSI controllers. 4 * 5 * Product specific probe and attach routines can be found in: 6 * 7 * adw_pci.c ABP[3]940UW, ABP950UW, ABP3940U2W 8 * 9 * Copyright (c) 1998, 1999, 2000 Justin Gibbs. 10 * All rights reserved. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions, and the following disclaimer, 17 * without modification. 18 * 2. The name of the author may not be used to endorse or promote products 19 * derived from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * $FreeBSD: src/sys/dev/advansys/adwcam.c,v 1.7.2.2 2001/03/05 13:08:55 obrien Exp $ 34 * $DragonFly: src/sys/dev/disk/advansys/adwcam.c,v 1.15 2006/12/22 23:26:15 swildner Exp $ 35 */ 36 /* 37 * Ported from: 38 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters 39 * 40 * Copyright (c) 1995-1998 Advanced System Products, Inc. 41 * All Rights Reserved. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that redistributions of source 45 * code retain the above copyright notice and this comment without 46 * modification. 47 */ 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/kernel.h> 52 #include <sys/malloc.h> 53 #include <sys/bus.h> 54 #include <sys/thread2.h> 55 56 #include <machine/clock.h> 57 58 #include <sys/rman.h> 59 60 #include <bus/cam/cam.h> 61 #include <bus/cam/cam_ccb.h> 62 #include <bus/cam/cam_sim.h> 63 #include <bus/cam/cam_xpt_sim.h> 64 #include <bus/cam/cam_debug.h> 65 66 #include <bus/cam/scsi/scsi_message.h> 67 68 #include "adwvar.h" 69 70 /* Definitions for our use of the SIM private CCB area */ 71 #define ccb_acb_ptr spriv_ptr0 72 #define ccb_adw_ptr spriv_ptr1 73 74 u_long adw_unit; 75 76 static __inline cam_status adwccbstatus(union ccb*); 77 static __inline struct acb* adwgetacb(struct adw_softc *adw); 78 static __inline void adwfreeacb(struct adw_softc *adw, 79 struct acb *acb); 80 81 static void adwmapmem(void *arg, bus_dma_segment_t *segs, 82 int nseg, int error); 83 static struct sg_map_node* 84 adwallocsgmap(struct adw_softc *adw); 85 static int adwallocacbs(struct adw_softc *adw); 86 87 static void adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, 88 int nseg, int error); 89 static void adw_action(struct cam_sim *sim, union ccb *ccb); 90 static void adw_poll(struct cam_sim *sim); 91 static void adw_async(void *callback_arg, u_int32_t code, 92 struct cam_path *path, void *arg); 93 static void adwprocesserror(struct adw_softc *adw, struct acb *acb); 94 static void adwtimeout(void *arg); 95 static void adw_handle_device_reset(struct adw_softc *adw, 96 u_int target); 97 static void adw_handle_bus_reset(struct adw_softc *adw, 98 int initiated); 99 100 static __inline cam_status 101 adwccbstatus(union ccb* ccb) 102 { 103 return (ccb->ccb_h.status & CAM_STATUS_MASK); 104 } 105 106 static __inline struct acb* 107 adwgetacb(struct adw_softc *adw) 108 { 109 struct acb* acb; 110 111 crit_enter(); 112 if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) { 113 SLIST_REMOVE_HEAD(&adw->free_acb_list, links); 114 } else if (adw->num_acbs < adw->max_acbs) { 115 adwallocacbs(adw); 116 acb = SLIST_FIRST(&adw->free_acb_list); 117 if (acb == NULL) 118 kprintf("%s: Can't malloc ACB\n", adw_name(adw)); 119 else { 120 SLIST_REMOVE_HEAD(&adw->free_acb_list, links); 121 } 122 } 123 crit_exit(); 124 125 return (acb); 126 } 127 128 static __inline void 129 adwfreeacb(struct adw_softc *adw, struct acb *acb) 130 { 131 crit_enter(); 132 if ((acb->state & ACB_ACTIVE) != 0) 133 LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le); 134 if ((acb->state & ACB_RELEASE_SIMQ) != 0) 135 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 136 else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0 137 && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { 138 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 139 adw->state &= ~ADW_RESOURCE_SHORTAGE; 140 } 141 acb->state = ACB_FREE; 142 SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links); 143 crit_exit(); 144 } 145 146 static void 147 adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error) 148 { 149 bus_addr_t *busaddrp; 150 151 busaddrp = (bus_addr_t *)arg; 152 *busaddrp = segs->ds_addr; 153 } 154 155 static struct sg_map_node * 156 adwallocsgmap(struct adw_softc *adw) 157 { 158 struct sg_map_node *sg_map; 159 160 sg_map = kmalloc(sizeof(*sg_map), M_DEVBUF, M_INTWAIT); 161 162 /* Allocate S/G space for the next batch of ACBS */ 163 if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr, 164 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { 165 kfree(sg_map, M_DEVBUF); 166 return (NULL); 167 } 168 169 SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links); 170 171 bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, 172 PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0); 173 174 bzero(sg_map->sg_vaddr, PAGE_SIZE); 175 return (sg_map); 176 } 177 178 /* 179 * Allocate another chunk of CCB's. Return count of entries added. 180 * Assumed to be called under crit_enter(). 181 */ 182 static int 183 adwallocacbs(struct adw_softc *adw) 184 { 185 struct acb *next_acb; 186 struct sg_map_node *sg_map; 187 bus_addr_t busaddr; 188 struct adw_sg_block *blocks; 189 int newcount; 190 int i; 191 192 next_acb = &adw->acbs[adw->num_acbs]; 193 sg_map = adwallocsgmap(adw); 194 195 if (sg_map == NULL) 196 return (0); 197 198 blocks = sg_map->sg_vaddr; 199 busaddr = sg_map->sg_physaddr; 200 201 newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks))); 202 for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) { 203 int error; 204 205 error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0, 206 &next_acb->dmamap); 207 if (error != 0) 208 break; 209 next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb); 210 next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb); 211 next_acb->queue.sense_baddr = 212 acbvtob(adw, next_acb) + offsetof(struct acb, sense_data); 213 next_acb->sg_blocks = blocks; 214 next_acb->sg_busaddr = busaddr; 215 next_acb->state = ACB_FREE; 216 SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links); 217 blocks += ADW_SG_BLOCKCNT; 218 busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks); 219 next_acb++; 220 adw->num_acbs++; 221 } 222 return (i); 223 } 224 225 static void 226 adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 227 { 228 struct acb *acb; 229 union ccb *ccb; 230 struct adw_softc *adw; 231 232 acb = (struct acb *)arg; 233 ccb = acb->ccb; 234 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; 235 236 if (error != 0) { 237 if (error != EFBIG) 238 kprintf("%s: Unexepected error 0x%x returned from " 239 "bus_dmamap_load\n", adw_name(adw), error); 240 if (ccb->ccb_h.status == CAM_REQ_INPROG) { 241 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 242 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; 243 } 244 adwfreeacb(adw, acb); 245 xpt_done(ccb); 246 return; 247 } 248 249 if (nseg != 0) { 250 bus_dmasync_op_t op; 251 252 acb->queue.data_addr = dm_segs[0].ds_addr; 253 acb->queue.data_cnt = ccb->csio.dxfer_len; 254 if (nseg > 1) { 255 struct adw_sg_block *sg_block; 256 struct adw_sg_elm *sg; 257 bus_addr_t sg_busaddr; 258 u_int sg_index; 259 bus_dma_segment_t *end_seg; 260 261 end_seg = dm_segs + nseg; 262 263 sg_busaddr = acb->sg_busaddr; 264 sg_index = 0; 265 /* Copy the segments into our SG list */ 266 for (sg_block = acb->sg_blocks;; sg_block++) { 267 u_int i; 268 269 sg = sg_block->sg_list; 270 for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) { 271 if (dm_segs >= end_seg) 272 break; 273 274 sg->sg_addr = dm_segs->ds_addr; 275 sg->sg_count = dm_segs->ds_len; 276 sg++; 277 dm_segs++; 278 } 279 sg_block->sg_cnt = i; 280 sg_index += i; 281 if (dm_segs == end_seg) { 282 sg_block->sg_busaddr_next = 0; 283 break; 284 } else { 285 sg_busaddr += 286 sizeof(struct adw_sg_block); 287 sg_block->sg_busaddr_next = sg_busaddr; 288 } 289 } 290 acb->queue.sg_real_addr = acb->sg_busaddr; 291 } else { 292 acb->queue.sg_real_addr = 0; 293 } 294 295 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 296 op = BUS_DMASYNC_PREREAD; 297 else 298 op = BUS_DMASYNC_PREWRITE; 299 300 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); 301 302 } else { 303 acb->queue.data_addr = 0; 304 acb->queue.data_cnt = 0; 305 acb->queue.sg_real_addr = 0; 306 } 307 308 crit_enter(); 309 310 /* 311 * Last time we need to check if this CCB needs to 312 * be aborted. 313 */ 314 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 315 if (nseg != 0) 316 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); 317 adwfreeacb(adw, acb); 318 xpt_done(ccb); 319 crit_exit(); 320 return; 321 } 322 323 acb->state |= ACB_ACTIVE; 324 ccb->ccb_h.status |= CAM_SIM_QUEUED; 325 LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le); 326 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000, 327 adwtimeout, acb); 328 329 adw_send_acb(adw, acb, acbvtob(adw, acb)); 330 331 crit_exit(); 332 } 333 334 static void 335 adw_action(struct cam_sim *sim, union ccb *ccb) 336 { 337 struct adw_softc *adw; 338 339 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n")); 340 341 adw = (struct adw_softc *)cam_sim_softc(sim); 342 343 switch (ccb->ccb_h.func_code) { 344 /* Common cases first */ 345 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 346 { 347 struct ccb_scsiio *csio; 348 struct ccb_hdr *ccbh; 349 struct acb *acb; 350 351 csio = &ccb->csio; 352 ccbh = &ccb->ccb_h; 353 354 /* Max supported CDB length is 12 bytes */ 355 if (csio->cdb_len > 12) { 356 ccb->ccb_h.status = CAM_REQ_INVALID; 357 xpt_done(ccb); 358 return; 359 } 360 361 if ((acb = adwgetacb(adw)) == NULL) { 362 crit_enter(); 363 adw->state |= ADW_RESOURCE_SHORTAGE; 364 crit_exit(); 365 xpt_freeze_simq(sim, /*count*/1); 366 ccb->ccb_h.status = CAM_REQUEUE_REQ; 367 xpt_done(ccb); 368 return; 369 } 370 371 /* Link acb and ccb so we can find one from the other */ 372 acb->ccb = ccb; 373 ccb->ccb_h.ccb_acb_ptr = acb; 374 ccb->ccb_h.ccb_adw_ptr = adw; 375 376 acb->queue.cntl = 0; 377 acb->queue.target_cmd = 0; 378 acb->queue.target_id = ccb->ccb_h.target_id; 379 acb->queue.target_lun = ccb->ccb_h.target_lun; 380 381 acb->queue.mflag = 0; 382 acb->queue.sense_len = 383 MIN(csio->sense_len, sizeof(acb->sense_data)); 384 acb->queue.cdb_len = csio->cdb_len; 385 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 386 switch (csio->tag_action) { 387 case MSG_SIMPLE_Q_TAG: 388 acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG; 389 break; 390 case MSG_HEAD_OF_Q_TAG: 391 acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG; 392 break; 393 case MSG_ORDERED_Q_TAG: 394 acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG; 395 break; 396 default: 397 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG; 398 break; 399 } 400 } else 401 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG; 402 403 if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) 404 acb->queue.scsi_cntl |= ADW_QSC_NO_DISC; 405 406 acb->queue.done_status = 0; 407 acb->queue.scsi_status = 0; 408 acb->queue.host_status = 0; 409 acb->queue.sg_wk_ix = 0; 410 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 411 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { 412 bcopy(csio->cdb_io.cdb_ptr, 413 acb->queue.cdb, csio->cdb_len); 414 } else { 415 /* I guess I could map it in... */ 416 ccb->ccb_h.status = CAM_REQ_INVALID; 417 adwfreeacb(adw, acb); 418 xpt_done(ccb); 419 return; 420 } 421 } else { 422 bcopy(csio->cdb_io.cdb_bytes, 423 acb->queue.cdb, csio->cdb_len); 424 } 425 426 /* 427 * If we have any data to send with this command, 428 * map it into bus space. 429 */ 430 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 431 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 432 /* 433 * We've been given a pointer 434 * to a single buffer. 435 */ 436 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 437 int error; 438 439 crit_enter(); 440 error = 441 bus_dmamap_load(adw->buffer_dmat, 442 acb->dmamap, 443 csio->data_ptr, 444 csio->dxfer_len, 445 adwexecuteacb, 446 acb, /*flags*/0); 447 if (error == EINPROGRESS) { 448 /* 449 * So as to maintain ordering, 450 * freeze the controller queue 451 * until our mapping is 452 * returned. 453 */ 454 xpt_freeze_simq(sim, 1); 455 acb->state |= CAM_RELEASE_SIMQ; 456 } 457 crit_exit(); 458 } else { 459 struct bus_dma_segment seg; 460 461 /* Pointer to physical buffer */ 462 seg.ds_addr = 463 (bus_addr_t)csio->data_ptr; 464 seg.ds_len = csio->dxfer_len; 465 adwexecuteacb(acb, &seg, 1, 0); 466 } 467 } else { 468 struct bus_dma_segment *segs; 469 470 if ((ccbh->flags & CAM_DATA_PHYS) != 0) 471 panic("adw_action - Physical " 472 "segment pointers " 473 "unsupported"); 474 475 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) 476 panic("adw_action - Virtual " 477 "segment addresses " 478 "unsupported"); 479 480 /* Just use the segments provided */ 481 segs = (struct bus_dma_segment *)csio->data_ptr; 482 adwexecuteacb(acb, segs, csio->sglist_cnt, 483 (csio->sglist_cnt < ADW_SGSIZE) 484 ? 0 : EFBIG); 485 } 486 } else { 487 adwexecuteacb(acb, NULL, 0, 0); 488 } 489 break; 490 } 491 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 492 { 493 adw_idle_cmd_status_t status; 494 495 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, 496 ccb->ccb_h.target_id); 497 if (status == ADW_IDLE_CMD_SUCCESS) { 498 ccb->ccb_h.status = CAM_REQ_CMP; 499 if (bootverbose) { 500 xpt_print_path(ccb->ccb_h.path); 501 kprintf("BDR Delivered\n"); 502 } 503 } else 504 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 505 xpt_done(ccb); 506 break; 507 } 508 case XPT_ABORT: /* Abort the specified CCB */ 509 /* XXX Implement */ 510 ccb->ccb_h.status = CAM_REQ_INVALID; 511 xpt_done(ccb); 512 break; 513 case XPT_SET_TRAN_SETTINGS: 514 { 515 struct ccb_trans_settings *cts; 516 u_int target_mask; 517 518 cts = &ccb->cts; 519 target_mask = 0x01 << ccb->ccb_h.target_id; 520 521 crit_enter(); 522 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { 523 u_int sdtrdone; 524 525 sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE); 526 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 527 u_int discenb; 528 529 discenb = 530 adw_lram_read_16(adw, ADW_MC_DISC_ENABLE); 531 532 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) 533 discenb |= target_mask; 534 else 535 discenb &= ~target_mask; 536 537 adw_lram_write_16(adw, ADW_MC_DISC_ENABLE, 538 discenb); 539 } 540 541 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 542 543 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) 544 adw->tagenb |= target_mask; 545 else 546 adw->tagenb &= ~target_mask; 547 } 548 549 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 550 u_int wdtrenb_orig; 551 u_int wdtrenb; 552 u_int wdtrdone; 553 554 wdtrenb_orig = 555 adw_lram_read_16(adw, ADW_MC_WDTR_ABLE); 556 wdtrenb = wdtrenb_orig; 557 wdtrdone = adw_lram_read_16(adw, 558 ADW_MC_WDTR_DONE); 559 switch (cts->bus_width) { 560 case MSG_EXT_WDTR_BUS_32_BIT: 561 case MSG_EXT_WDTR_BUS_16_BIT: 562 wdtrenb |= target_mask; 563 break; 564 case MSG_EXT_WDTR_BUS_8_BIT: 565 default: 566 wdtrenb &= ~target_mask; 567 break; 568 } 569 if (wdtrenb != wdtrenb_orig) { 570 adw_lram_write_16(adw, 571 ADW_MC_WDTR_ABLE, 572 wdtrenb); 573 wdtrdone &= ~target_mask; 574 adw_lram_write_16(adw, 575 ADW_MC_WDTR_DONE, 576 wdtrdone); 577 /* Wide negotiation forces async */ 578 sdtrdone &= ~target_mask; 579 adw_lram_write_16(adw, 580 ADW_MC_SDTR_DONE, 581 sdtrdone); 582 } 583 } 584 585 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) 586 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) { 587 u_int sdtr_orig; 588 u_int sdtr; 589 u_int sdtrable_orig; 590 u_int sdtrable; 591 592 sdtr = adw_get_chip_sdtr(adw, 593 ccb->ccb_h.target_id); 594 sdtr_orig = sdtr; 595 sdtrable = adw_lram_read_16(adw, 596 ADW_MC_SDTR_ABLE); 597 sdtrable_orig = sdtrable; 598 599 if ((cts->valid 600 & CCB_TRANS_SYNC_RATE_VALID) != 0) { 601 602 sdtr = 603 adw_find_sdtr(adw, 604 cts->sync_period); 605 } 606 607 if ((cts->valid 608 & CCB_TRANS_SYNC_OFFSET_VALID) != 0) { 609 if (cts->sync_offset == 0) 610 sdtr = ADW_MC_SDTR_ASYNC; 611 } 612 613 if (sdtr == ADW_MC_SDTR_ASYNC) 614 sdtrable &= ~target_mask; 615 else 616 sdtrable |= target_mask; 617 if (sdtr != sdtr_orig 618 || sdtrable != sdtrable_orig) { 619 adw_set_chip_sdtr(adw, 620 ccb->ccb_h.target_id, 621 sdtr); 622 sdtrdone &= ~target_mask; 623 adw_lram_write_16(adw, ADW_MC_SDTR_ABLE, 624 sdtrable); 625 adw_lram_write_16(adw, ADW_MC_SDTR_DONE, 626 sdtrdone); 627 628 } 629 } 630 } 631 crit_exit(); 632 ccb->ccb_h.status = CAM_REQ_CMP; 633 xpt_done(ccb); 634 break; 635 } 636 case XPT_GET_TRAN_SETTINGS: 637 /* Get default/user set transfer settings for the target */ 638 { 639 struct ccb_trans_settings *cts; 640 u_int target_mask; 641 642 cts = &ccb->cts; 643 target_mask = 0x01 << ccb->ccb_h.target_id; 644 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 645 u_int mc_sdtr; 646 647 cts->flags = 0; 648 if ((adw->user_discenb & target_mask) != 0) 649 cts->flags |= CCB_TRANS_DISC_ENB; 650 651 if ((adw->user_tagenb & target_mask) != 0) 652 cts->flags |= CCB_TRANS_TAG_ENB; 653 654 if ((adw->user_wdtr & target_mask) != 0) 655 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 656 else 657 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 658 659 mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id); 660 cts->sync_period = adw_find_period(adw, mc_sdtr); 661 if (cts->sync_period != 0) 662 cts->sync_offset = 15; /* XXX ??? */ 663 else 664 cts->sync_offset = 0; 665 666 cts->valid = CCB_TRANS_SYNC_RATE_VALID 667 | CCB_TRANS_SYNC_OFFSET_VALID 668 | CCB_TRANS_BUS_WIDTH_VALID 669 | CCB_TRANS_DISC_VALID 670 | CCB_TRANS_TQ_VALID; 671 ccb->ccb_h.status = CAM_REQ_CMP; 672 } else { 673 u_int targ_tinfo; 674 675 cts->flags = 0; 676 if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE) 677 & target_mask) != 0) 678 cts->flags |= CCB_TRANS_DISC_ENB; 679 680 if ((adw->tagenb & target_mask) != 0) 681 cts->flags |= CCB_TRANS_TAG_ENB; 682 683 targ_tinfo = 684 adw_lram_read_16(adw, 685 ADW_MC_DEVICE_HSHK_CFG_TABLE 686 + (2 * ccb->ccb_h.target_id)); 687 688 if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0) 689 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 690 else 691 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 692 693 cts->sync_period = 694 adw_hshk_cfg_period_factor(targ_tinfo); 695 696 cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET; 697 if (cts->sync_period == 0) 698 cts->sync_offset = 0; 699 700 if (cts->sync_offset == 0) 701 cts->sync_period = 0; 702 } 703 cts->valid = CCB_TRANS_SYNC_RATE_VALID 704 | CCB_TRANS_SYNC_OFFSET_VALID 705 | CCB_TRANS_BUS_WIDTH_VALID 706 | CCB_TRANS_DISC_VALID 707 | CCB_TRANS_TQ_VALID; 708 ccb->ccb_h.status = CAM_REQ_CMP; 709 xpt_done(ccb); 710 break; 711 } 712 case XPT_CALC_GEOMETRY: 713 { 714 struct ccb_calc_geometry *ccg; 715 u_int32_t size_mb; 716 u_int32_t secs_per_cylinder; 717 int extended; 718 719 /* 720 * XXX Use Adaptec translation until I find out how to 721 * get this information from the card. 722 */ 723 ccg = &ccb->ccg; 724 size_mb = ccg->volume_size 725 / ((1024L * 1024L) / ccg->block_size); 726 extended = 1; 727 728 if (size_mb > 1024 && extended) { 729 ccg->heads = 255; 730 ccg->secs_per_track = 63; 731 } else { 732 ccg->heads = 64; 733 ccg->secs_per_track = 32; 734 } 735 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 736 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 737 ccb->ccb_h.status = CAM_REQ_CMP; 738 xpt_done(ccb); 739 break; 740 } 741 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 742 { 743 int failure; 744 745 failure = adw_reset_bus(adw); 746 if (failure != 0) { 747 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 748 } else { 749 if (bootverbose) { 750 xpt_print_path(adw->path); 751 kprintf("Bus Reset Delivered\n"); 752 } 753 ccb->ccb_h.status = CAM_REQ_CMP; 754 } 755 xpt_done(ccb); 756 break; 757 } 758 case XPT_TERM_IO: /* Terminate the I/O process */ 759 /* XXX Implement */ 760 ccb->ccb_h.status = CAM_REQ_INVALID; 761 xpt_done(ccb); 762 break; 763 case XPT_PATH_INQ: /* Path routing inquiry */ 764 { 765 struct ccb_pathinq *cpi = &ccb->cpi; 766 767 cpi->version_num = 1; 768 cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE; 769 cpi->target_sprt = 0; 770 cpi->hba_misc = 0; 771 cpi->hba_eng_cnt = 0; 772 cpi->max_target = ADW_MAX_TID; 773 cpi->max_lun = ADW_MAX_LUN; 774 cpi->initiator_id = adw->initiator_id; 775 cpi->bus_id = cam_sim_bus(sim); 776 cpi->base_transfer_speed = 3300; 777 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 778 strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN); 779 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 780 cpi->unit_number = cam_sim_unit(sim); 781 cpi->ccb_h.status = CAM_REQ_CMP; 782 xpt_done(ccb); 783 break; 784 } 785 default: 786 ccb->ccb_h.status = CAM_REQ_INVALID; 787 xpt_done(ccb); 788 break; 789 } 790 } 791 792 static void 793 adw_poll(struct cam_sim *sim) 794 { 795 adw_intr(cam_sim_softc(sim)); 796 } 797 798 static void 799 adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) 800 { 801 } 802 803 struct adw_softc * 804 adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id) 805 { 806 struct adw_softc *adw; 807 int i; 808 809 /* 810 * Allocate a storage area for us 811 */ 812 adw = kmalloc(sizeof(struct adw_softc), M_DEVBUF, M_INTWAIT | M_ZERO); 813 LIST_INIT(&adw->pending_ccbs); 814 SLIST_INIT(&adw->sg_maps); 815 adw->device = dev; 816 adw->unit = device_get_unit(dev); 817 adw->regs_res_type = regs_type; 818 adw->regs_res_id = regs_id; 819 adw->regs = regs; 820 adw->tag = rman_get_bustag(regs); 821 adw->bsh = rman_get_bushandle(regs); 822 KKASSERT(adw->unit >= 0 && adw->unit < 100); 823 i = adw->unit / 10; 824 adw->name = kmalloc(sizeof("adw") + i + 1, M_DEVBUF, M_INTWAIT); 825 ksprintf(adw->name, "adw%d", adw->unit); 826 return(adw); 827 } 828 829 void 830 adw_free(struct adw_softc *adw) 831 { 832 switch (adw->init_level) { 833 case 9: 834 { 835 struct sg_map_node *sg_map; 836 837 while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) { 838 SLIST_REMOVE_HEAD(&adw->sg_maps, links); 839 bus_dmamap_unload(adw->sg_dmat, 840 sg_map->sg_dmamap); 841 bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr, 842 sg_map->sg_dmamap); 843 kfree(sg_map, M_DEVBUF); 844 } 845 bus_dma_tag_destroy(adw->sg_dmat); 846 } 847 case 8: 848 bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap); 849 case 7: 850 bus_dmamem_free(adw->acb_dmat, adw->acbs, 851 adw->acb_dmamap); 852 bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap); 853 case 6: 854 bus_dma_tag_destroy(adw->acb_dmat); 855 case 5: 856 bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap); 857 case 4: 858 bus_dmamem_free(adw->carrier_dmat, adw->carriers, 859 adw->carrier_dmamap); 860 bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap); 861 case 3: 862 bus_dma_tag_destroy(adw->carrier_dmat); 863 case 2: 864 bus_dma_tag_destroy(adw->buffer_dmat); 865 case 1: 866 bus_dma_tag_destroy(adw->parent_dmat); 867 case 0: 868 break; 869 } 870 kfree(adw->name, M_DEVBUF); 871 kfree(adw, M_DEVBUF); 872 } 873 874 int 875 adw_init(struct adw_softc *adw) 876 { 877 struct adw_eeprom eep_config; 878 u_int tid; 879 u_int i; 880 u_int16_t checksum; 881 u_int16_t scsicfg1; 882 883 checksum = adw_eeprom_read(adw, &eep_config); 884 bcopy(eep_config.serial_number, adw->serial_number, 885 sizeof(adw->serial_number)); 886 if (checksum != eep_config.checksum) { 887 u_int16_t serial_number[3]; 888 889 adw->flags |= ADW_EEPROM_FAILED; 890 kprintf("%s: EEPROM checksum failed. Restoring Defaults\n", 891 adw_name(adw)); 892 893 /* 894 * Restore the default EEPROM settings. 895 * Assume the 6 byte board serial number that was read 896 * from EEPROM is correct even if the EEPROM checksum 897 * failed. 898 */ 899 bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config)); 900 bcopy(adw->serial_number, eep_config.serial_number, 901 sizeof(serial_number)); 902 adw_eeprom_write(adw, &eep_config); 903 } 904 905 /* Pull eeprom information into our softc. */ 906 adw->bios_ctrl = eep_config.bios_ctrl; 907 adw->user_wdtr = eep_config.wdtr_able; 908 for (tid = 0; tid < ADW_MAX_TID; tid++) { 909 u_int mc_sdtr; 910 u_int16_t tid_mask; 911 912 tid_mask = 0x1 << tid; 913 if ((adw->features & ADW_ULTRA) != 0) { 914 /* 915 * Ultra chips store sdtr and ultraenb 916 * bits in their seeprom, so we must 917 * construct valid mc_sdtr entries for 918 * indirectly. 919 */ 920 if (eep_config.sync1.sync_enable & tid_mask) { 921 if (eep_config.sync2.ultra_enable & tid_mask) 922 mc_sdtr = ADW_MC_SDTR_20; 923 else 924 mc_sdtr = ADW_MC_SDTR_10; 925 } else 926 mc_sdtr = ADW_MC_SDTR_ASYNC; 927 } else { 928 switch (ADW_TARGET_GROUP(tid)) { 929 case 3: 930 mc_sdtr = eep_config.sync4.sdtr4; 931 break; 932 case 2: 933 mc_sdtr = eep_config.sync3.sdtr3; 934 break; 935 case 1: 936 mc_sdtr = eep_config.sync2.sdtr2; 937 break; 938 default: /* Shut up compiler */ 939 case 0: 940 mc_sdtr = eep_config.sync1.sdtr1; 941 break; 942 } 943 mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid); 944 mc_sdtr &= 0xFF; 945 } 946 adw_set_user_sdtr(adw, tid, mc_sdtr); 947 } 948 adw->user_tagenb = eep_config.tagqng_able; 949 adw->user_discenb = eep_config.disc_enable; 950 adw->max_acbs = eep_config.max_host_qng; 951 adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID); 952 953 /* 954 * Sanity check the number of host openings. 955 */ 956 if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG) 957 adw->max_acbs = ADW_DEF_MAX_HOST_QNG; 958 else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) { 959 /* If the value is zero, assume it is uninitialized. */ 960 if (adw->max_acbs == 0) 961 adw->max_acbs = ADW_DEF_MAX_HOST_QNG; 962 else 963 adw->max_acbs = ADW_DEF_MIN_HOST_QNG; 964 } 965 966 scsicfg1 = 0; 967 if ((adw->features & ADW_ULTRA2) != 0) { 968 switch (eep_config.termination_lvd) { 969 default: 970 kprintf("%s: Invalid EEPROM LVD Termination Settings.\n", 971 adw_name(adw)); 972 kprintf("%s: Reverting to Automatic LVD Termination\n", 973 adw_name(adw)); 974 /* FALLTHROUGH */ 975 case ADW_EEPROM_TERM_AUTO: 976 break; 977 case ADW_EEPROM_TERM_BOTH_ON: 978 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO; 979 /* FALLTHROUGH */ 980 case ADW_EEPROM_TERM_HIGH_ON: 981 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI; 982 /* FALLTHROUGH */ 983 case ADW_EEPROM_TERM_OFF: 984 scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV; 985 break; 986 } 987 } 988 989 switch (eep_config.termination_se) { 990 default: 991 kprintf("%s: Invalid SE EEPROM Termination Settings.\n", 992 adw_name(adw)); 993 kprintf("%s: Reverting to Automatic SE Termination\n", 994 adw_name(adw)); 995 /* FALLTHROUGH */ 996 case ADW_EEPROM_TERM_AUTO: 997 break; 998 case ADW_EEPROM_TERM_BOTH_ON: 999 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L; 1000 /* FALLTHROUGH */ 1001 case ADW_EEPROM_TERM_HIGH_ON: 1002 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H; 1003 /* FALLTHROUGH */ 1004 case ADW_EEPROM_TERM_OFF: 1005 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL; 1006 break; 1007 } 1008 kprintf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id); 1009 1010 /* DMA tag for mapping buffers into device visible space. */ 1011 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, 1012 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 1013 /*highaddr*/BUS_SPACE_MAXADDR, 1014 /*filter*/NULL, /*filterarg*/NULL, 1015 /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE, 1016 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1017 /*flags*/BUS_DMA_ALLOCNOW, 1018 &adw->buffer_dmat) != 0) { 1019 return (ENOMEM); 1020 } 1021 1022 adw->init_level++; 1023 1024 /* DMA tag for our ccb carrier structures */ 1025 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0x10, 1026 /*boundary*/0, 1027 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 1028 /*highaddr*/BUS_SPACE_MAXADDR, 1029 /*filter*/NULL, /*filterarg*/NULL, 1030 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) 1031 * sizeof(struct adw_carrier), 1032 /*nsegments*/1, 1033 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1034 /*flags*/0, &adw->carrier_dmat) != 0) { 1035 return (ENOMEM); 1036 } 1037 1038 adw->init_level++; 1039 1040 /* Allocation for our ccb carrier structures */ 1041 if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers, 1042 BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) { 1043 return (ENOMEM); 1044 } 1045 1046 adw->init_level++; 1047 1048 /* And permanently map them */ 1049 bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap, 1050 adw->carriers, 1051 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) 1052 * sizeof(struct adw_carrier), 1053 adwmapmem, &adw->carrier_busbase, /*flags*/0); 1054 1055 /* Clear them out. */ 1056 bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) 1057 * sizeof(struct adw_carrier)); 1058 1059 /* Setup our free carrier list */ 1060 adw->free_carriers = adw->carriers; 1061 for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) { 1062 adw->carriers[i].carr_offset = 1063 carriervtobo(adw, &adw->carriers[i]); 1064 adw->carriers[i].carr_ba = 1065 carriervtob(adw, &adw->carriers[i]); 1066 adw->carriers[i].areq_ba = 0; 1067 adw->carriers[i].next_ba = 1068 carriervtobo(adw, &adw->carriers[i+1]); 1069 } 1070 /* Terminal carrier. Never leaves the freelist */ 1071 adw->carriers[i].carr_offset = 1072 carriervtobo(adw, &adw->carriers[i]); 1073 adw->carriers[i].carr_ba = 1074 carriervtob(adw, &adw->carriers[i]); 1075 adw->carriers[i].areq_ba = 0; 1076 adw->carriers[i].next_ba = ~0; 1077 1078 adw->init_level++; 1079 1080 /* DMA tag for our acb structures */ 1081 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, 1082 /*lowaddr*/BUS_SPACE_MAXADDR, 1083 /*highaddr*/BUS_SPACE_MAXADDR, 1084 /*filter*/NULL, /*filterarg*/NULL, 1085 adw->max_acbs * sizeof(struct acb), 1086 /*nsegments*/1, 1087 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1088 /*flags*/0, &adw->acb_dmat) != 0) { 1089 return (ENOMEM); 1090 } 1091 1092 adw->init_level++; 1093 1094 /* Allocation for our ccbs */ 1095 if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs, 1096 BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0) 1097 return (ENOMEM); 1098 1099 adw->init_level++; 1100 1101 /* And permanently map them */ 1102 bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap, 1103 adw->acbs, 1104 adw->max_acbs * sizeof(struct acb), 1105 adwmapmem, &adw->acb_busbase, /*flags*/0); 1106 1107 /* Clear them out. */ 1108 bzero(adw->acbs, adw->max_acbs * sizeof(struct acb)); 1109 1110 /* DMA tag for our S/G structures. We allocate in page sized chunks */ 1111 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, 1112 /*lowaddr*/BUS_SPACE_MAXADDR, 1113 /*highaddr*/BUS_SPACE_MAXADDR, 1114 /*filter*/NULL, /*filterarg*/NULL, 1115 PAGE_SIZE, /*nsegments*/1, 1116 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1117 /*flags*/0, &adw->sg_dmat) != 0) { 1118 return (ENOMEM); 1119 } 1120 1121 adw->init_level++; 1122 1123 /* Allocate our first batch of ccbs */ 1124 if (adwallocacbs(adw) == 0) 1125 return (ENOMEM); 1126 1127 if (adw_init_chip(adw, scsicfg1) != 0) 1128 return (ENXIO); 1129 1130 kprintf("Queue Depth %d\n", adw->max_acbs); 1131 1132 return (0); 1133 } 1134 1135 /* 1136 * Attach all the sub-devices we can find 1137 */ 1138 int 1139 adw_attach(struct adw_softc *adw) 1140 { 1141 struct ccb_setasync csa; 1142 int error; 1143 1144 error = 0; 1145 crit_enter(); 1146 /* Hook up our interrupt handler */ 1147 if ((error = bus_setup_intr(adw->device, adw->irq, 0, 1148 adw_intr, adw, &adw->ih, NULL)) != 0) { 1149 device_printf(adw->device, "bus_setup_intr() failed: %d\n", 1150 error); 1151 goto fail; 1152 } 1153 1154 /* Start the Risc processor now that we are fully configured. */ 1155 adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN); 1156 1157 /* 1158 * Construct our SIM entry. 1159 */ 1160 adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit, 1161 1, adw->max_acbs, NULL); 1162 if (adw->sim == NULL) { 1163 error = ENOMEM; 1164 goto fail; 1165 } 1166 1167 /* 1168 * Register the bus. 1169 */ 1170 if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) { 1171 cam_sim_free(adw->sim); 1172 error = ENOMEM; 1173 goto fail; 1174 } 1175 1176 if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim), 1177 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) 1178 == CAM_REQ_CMP) { 1179 xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5); 1180 csa.ccb_h.func_code = XPT_SASYNC_CB; 1181 csa.event_enable = AC_LOST_DEVICE; 1182 csa.callback = adw_async; 1183 csa.callback_arg = adw; 1184 xpt_action((union ccb *)&csa); 1185 } 1186 1187 fail: 1188 crit_exit(); 1189 return (error); 1190 } 1191 1192 void 1193 adw_intr(void *arg) 1194 { 1195 struct adw_softc *adw; 1196 u_int int_stat; 1197 1198 adw = (struct adw_softc *)arg; 1199 if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0) 1200 return; 1201 1202 /* Reading the register clears the interrupt. */ 1203 int_stat = adw_inb(adw, ADW_INTR_STATUS_REG); 1204 1205 if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) { 1206 u_int intrb_code; 1207 1208 /* Async Microcode Event */ 1209 intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE); 1210 switch (intrb_code) { 1211 case ADW_ASYNC_CARRIER_READY_FAILURE: 1212 /* 1213 * The RISC missed our update of 1214 * the commandq. 1215 */ 1216 if (LIST_FIRST(&adw->pending_ccbs) != NULL) 1217 adw_tickle_risc(adw, ADW_TICKLE_A); 1218 break; 1219 case ADW_ASYNC_SCSI_BUS_RESET_DET: 1220 /* 1221 * The firmware detected a SCSI Bus reset. 1222 */ 1223 kprintf("Someone Reset the Bus\n"); 1224 adw_handle_bus_reset(adw, /*initiated*/FALSE); 1225 break; 1226 case ADW_ASYNC_RDMA_FAILURE: 1227 /* 1228 * Handle RDMA failure by resetting the 1229 * SCSI Bus and chip. 1230 */ 1231 #if XXX 1232 AdvResetChipAndSB(adv_dvc_varp); 1233 #endif 1234 break; 1235 1236 case ADW_ASYNC_HOST_SCSI_BUS_RESET: 1237 /* 1238 * Host generated SCSI bus reset occurred. 1239 */ 1240 adw_handle_bus_reset(adw, /*initiated*/TRUE); 1241 break; 1242 default: 1243 kprintf("adw_intr: unknown async code 0x%x\n", 1244 intrb_code); 1245 break; 1246 } 1247 } 1248 1249 /* 1250 * Run down the RequestQ. 1251 */ 1252 while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) { 1253 struct adw_carrier *free_carrier; 1254 struct acb *acb; 1255 union ccb *ccb; 1256 1257 #if 0 1258 kprintf("0x%x, 0x%x, 0x%x, 0x%x\n", 1259 adw->responseq->carr_offset, 1260 adw->responseq->carr_ba, 1261 adw->responseq->areq_ba, 1262 adw->responseq->next_ba); 1263 #endif 1264 /* 1265 * The firmware copies the adw_scsi_req_q.acb_baddr 1266 * field into the areq_ba field of the carrier. 1267 */ 1268 acb = acbbotov(adw, adw->responseq->areq_ba); 1269 1270 /* 1271 * The least significant four bits of the next_ba 1272 * field are used as flags. Mask them out and then 1273 * advance through the list. 1274 */ 1275 free_carrier = adw->responseq; 1276 adw->responseq = 1277 carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK); 1278 free_carrier->next_ba = adw->free_carriers->carr_offset; 1279 adw->free_carriers = free_carrier; 1280 1281 /* Process CCB */ 1282 ccb = acb->ccb; 1283 callout_stop(&ccb->ccb_h.timeout_ch); 1284 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1285 bus_dmasync_op_t op; 1286 1287 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1288 op = BUS_DMASYNC_POSTREAD; 1289 else 1290 op = BUS_DMASYNC_POSTWRITE; 1291 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); 1292 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); 1293 ccb->csio.resid = acb->queue.data_cnt; 1294 } else 1295 ccb->csio.resid = 0; 1296 1297 /* Common Cases inline... */ 1298 if (acb->queue.host_status == QHSTA_NO_ERROR 1299 && (acb->queue.done_status == QD_NO_ERROR 1300 || acb->queue.done_status == QD_WITH_ERROR)) { 1301 ccb->csio.scsi_status = acb->queue.scsi_status; 1302 ccb->ccb_h.status = 0; 1303 switch (ccb->csio.scsi_status) { 1304 case SCSI_STATUS_OK: 1305 ccb->ccb_h.status |= CAM_REQ_CMP; 1306 break; 1307 case SCSI_STATUS_CHECK_COND: 1308 case SCSI_STATUS_CMD_TERMINATED: 1309 bcopy(&acb->sense_data, &ccb->csio.sense_data, 1310 ccb->csio.sense_len); 1311 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1312 ccb->csio.sense_resid = acb->queue.sense_len; 1313 /* FALLTHROUGH */ 1314 default: 1315 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR 1316 | CAM_DEV_QFRZN; 1317 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1318 break; 1319 } 1320 adwfreeacb(adw, acb); 1321 xpt_done(ccb); 1322 } else { 1323 adwprocesserror(adw, acb); 1324 } 1325 } 1326 } 1327 1328 static void 1329 adwprocesserror(struct adw_softc *adw, struct acb *acb) 1330 { 1331 union ccb *ccb; 1332 1333 ccb = acb->ccb; 1334 if (acb->queue.done_status == QD_ABORTED_BY_HOST) { 1335 ccb->ccb_h.status = CAM_REQ_ABORTED; 1336 } else { 1337 1338 switch (acb->queue.host_status) { 1339 case QHSTA_M_SEL_TIMEOUT: 1340 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 1341 break; 1342 case QHSTA_M_SXFR_OFF_UFLW: 1343 case QHSTA_M_SXFR_OFF_OFLW: 1344 case QHSTA_M_DATA_OVER_RUN: 1345 ccb->ccb_h.status = CAM_DATA_RUN_ERR; 1346 break; 1347 case QHSTA_M_SXFR_DESELECTED: 1348 case QHSTA_M_UNEXPECTED_BUS_FREE: 1349 ccb->ccb_h.status = CAM_UNEXP_BUSFREE; 1350 break; 1351 case QHSTA_M_SCSI_BUS_RESET: 1352 case QHSTA_M_SCSI_BUS_RESET_UNSOL: 1353 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 1354 break; 1355 case QHSTA_M_BUS_DEVICE_RESET: 1356 ccb->ccb_h.status = CAM_BDR_SENT; 1357 break; 1358 case QHSTA_M_QUEUE_ABORTED: 1359 /* BDR or Bus Reset */ 1360 kprintf("Saw Queue Aborted\n"); 1361 ccb->ccb_h.status = adw->last_reset; 1362 break; 1363 case QHSTA_M_SXFR_SDMA_ERR: 1364 case QHSTA_M_SXFR_SXFR_PERR: 1365 case QHSTA_M_RDMA_PERR: 1366 ccb->ccb_h.status = CAM_UNCOR_PARITY; 1367 break; 1368 case QHSTA_M_WTM_TIMEOUT: 1369 case QHSTA_M_SXFR_WD_TMO: 1370 { 1371 /* The SCSI bus hung in a phase */ 1372 xpt_print_path(adw->path); 1373 kprintf("Watch Dog timer expired. Reseting bus\n"); 1374 adw_reset_bus(adw); 1375 break; 1376 } 1377 case QHSTA_M_SXFR_XFR_PH_ERR: 1378 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1379 break; 1380 case QHSTA_M_SXFR_UNKNOWN_ERROR: 1381 break; 1382 case QHSTA_M_BAD_CMPL_STATUS_IN: 1383 /* No command complete after a status message */ 1384 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1385 break; 1386 case QHSTA_M_AUTO_REQ_SENSE_FAIL: 1387 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; 1388 break; 1389 case QHSTA_M_INVALID_DEVICE: 1390 ccb->ccb_h.status = CAM_PATH_INVALID; 1391 break; 1392 case QHSTA_M_NO_AUTO_REQ_SENSE: 1393 /* 1394 * User didn't request sense, but we got a 1395 * check condition. 1396 */ 1397 ccb->csio.scsi_status = acb->queue.scsi_status; 1398 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1399 break; 1400 default: 1401 panic("%s: Unhandled Host status error %x", 1402 adw_name(adw), acb->queue.host_status); 1403 /* NOTREACHED */ 1404 } 1405 } 1406 if ((acb->state & ACB_RECOVERY_ACB) != 0) { 1407 if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET 1408 || ccb->ccb_h.status == CAM_BDR_SENT) 1409 ccb->ccb_h.status = CAM_CMD_TIMEOUT; 1410 } 1411 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1412 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1413 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1414 } 1415 adwfreeacb(adw, acb); 1416 xpt_done(ccb); 1417 } 1418 1419 static void 1420 adwtimeout(void *arg) 1421 { 1422 struct acb *acb; 1423 union ccb *ccb; 1424 struct adw_softc *adw; 1425 adw_idle_cmd_status_t status; 1426 int target_id; 1427 1428 acb = (struct acb *)arg; 1429 ccb = acb->ccb; 1430 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; 1431 xpt_print_path(ccb->ccb_h.path); 1432 kprintf("ACB %p - timed out\n", (void *)acb); 1433 1434 crit_enter(); 1435 1436 if ((acb->state & ACB_ACTIVE) == 0) { 1437 xpt_print_path(ccb->ccb_h.path); 1438 kprintf("ACB %p - timed out CCB already completed\n", 1439 (void *)acb); 1440 crit_exit(); 1441 return; 1442 } 1443 1444 acb->state |= ACB_RECOVERY_ACB; 1445 target_id = ccb->ccb_h.target_id; 1446 1447 /* Attempt a BDR first */ 1448 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, 1449 ccb->ccb_h.target_id); 1450 crit_exit(); 1451 if (status == ADW_IDLE_CMD_SUCCESS) { 1452 kprintf("%s: BDR Delivered. No longer in timeout\n", 1453 adw_name(adw)); 1454 adw_handle_device_reset(adw, target_id); 1455 } else { 1456 adw_reset_bus(adw); 1457 xpt_print_path(adw->path); 1458 kprintf("Bus Reset Delivered. No longer in timeout\n"); 1459 } 1460 } 1461 1462 static void 1463 adw_handle_device_reset(struct adw_softc *adw, u_int target) 1464 { 1465 struct cam_path *path; 1466 cam_status error; 1467 1468 error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim), 1469 target, CAM_LUN_WILDCARD); 1470 1471 if (error == CAM_REQ_CMP) { 1472 xpt_async(AC_SENT_BDR, path, NULL); 1473 xpt_free_path(path); 1474 } 1475 adw->last_reset = CAM_BDR_SENT; 1476 } 1477 1478 static void 1479 adw_handle_bus_reset(struct adw_softc *adw, int initiated) 1480 { 1481 if (initiated) { 1482 /* 1483 * The microcode currently sets the SCSI Bus Reset signal 1484 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET 1485 * command above. But the SCSI Bus Reset Hold Time in the 1486 * microcode is not deterministic (it may in fact be for less 1487 * than the SCSI Spec. minimum of 25 us). Therefore on return 1488 * the Adv Library sets the SCSI Bus Reset signal for 1489 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater 1490 * than 25 us. 1491 */ 1492 u_int scsi_ctrl; 1493 1494 scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT; 1495 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT); 1496 DELAY(ADW_SCSI_RESET_HOLD_TIME_US); 1497 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl); 1498 1499 /* 1500 * We will perform the async notification when the 1501 * SCSI Reset interrupt occurs. 1502 */ 1503 } else 1504 xpt_async(AC_BUS_RESET, adw->path, NULL); 1505 adw->last_reset = CAM_SCSI_BUS_RESET; 1506 } 1507