/*
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 * adw_pci.c	ABP[3]940UW, ABP950UW, ABP3940U2W
 *
 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/advansys/adwcam.c,v 1.7.2.2 2001/03/05 13:08:55 obrien Exp $
 * $DragonFly: src/sys/dev/disk/advansys/adwcam.c,v 1.10 2005/06/06 21:48:15 eirikn Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/thread2.h>

#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_message.h>

#include "adwvar.h"

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

u_long adw_unit;

static __inline cam_status      adwccbstatus(union ccb*);
static __inline struct acb*     adwgetacb(struct adw_softc *adw);
static __inline void            adwfreeacb(struct adw_softc *adw,
                                           struct acb *acb);

static void             adwmapmem(void *arg, bus_dma_segment_t *segs,
                                  int nseg, int error);
static struct sg_map_node*
                        adwallocsgmap(struct adw_softc *adw);
static int              adwallocacbs(struct adw_softc *adw);

static void             adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
                                      int nseg, int error);
static void             adw_action(struct cam_sim *sim, union ccb *ccb);
static void             adw_poll(struct cam_sim *sim);
static void             adw_async(void *callback_arg, u_int32_t code,
                                  struct cam_path *path, void *arg);
static void             adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void             adwtimeout(void *arg);
static void             adw_handle_device_reset(struct adw_softc *adw,
                                                u_int target);
static void             adw_handle_bus_reset(struct adw_softc *adw,
                                             int initiated);

static __inline cam_status
adwccbstatus(union ccb* ccb)
{
        return (ccb->ccb_h.status & CAM_STATUS_MASK);
}

static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
        struct acb *acb;

        crit_enter();
        if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
                SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
        } else if (adw->num_acbs < adw->max_acbs) {
                adwallocacbs(adw);
                acb = SLIST_FIRST(&adw->free_acb_list);
                if (acb == NULL)
                        printf("%s: Can't malloc ACB\n", adw_name(adw));
                else {
                        SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
                }
        }
        crit_exit();

        return (acb);
}

static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{
        crit_enter();
        if ((acb->state & ACB_ACTIVE) != 0)
                LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
        if ((acb->state & ACB_RELEASE_SIMQ) != 0)
                acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
        else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
              && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
                acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
                adw->state &= ~ADW_RESOURCE_SHORTAGE;
        }
        acb->state = ACB_FREE;
        SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
        crit_exit();
}

static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        bus_addr_t *busaddrp;

        busaddrp = (bus_addr_t *)arg;
        *busaddrp = segs->ds_addr;
}
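
/*
 * S/G elements are allocated one page at a time (see the sg_dmat setup
 * in adw_init()) and carved up by adwallocacbs() below, which hands each
 * new ACB a run of ADW_SG_BLOCKCNT S/G blocks.  One page therefore backs
 * at most
 *
 *      PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(struct adw_sg_block))
 *
 * additional ACBs per call (the "newcount" computed below); the exact
 * figure depends on the structure sizes in adwvar.h.
 */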

static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
        struct sg_map_node *sg_map;

        sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_INTWAIT);

        /* Allocate S/G space for the next batch of ACBs */
        if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
                             BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
                free(sg_map, M_DEVBUF);
                return (NULL);
        }

        SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

        bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
                        PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);

        bzero(sg_map->sg_vaddr, PAGE_SIZE);
        return (sg_map);
}

/*
 * Allocate another chunk of CCBs.  Return count of entries added.
 * Assumed to be called under crit_enter().
 */
static int
adwallocacbs(struct adw_softc *adw)
{
        struct acb *next_acb;
        struct sg_map_node *sg_map;
        bus_addr_t busaddr;
        struct adw_sg_block *blocks;
        int newcount;
        int i;

        next_acb = &adw->acbs[adw->num_acbs];
        sg_map = adwallocsgmap(adw);

        if (sg_map == NULL)
                return (0);

        blocks = sg_map->sg_vaddr;
        busaddr = sg_map->sg_physaddr;

        newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
        for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
                int error;

                error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
                                          &next_acb->dmamap);
                if (error != 0)
                        break;
                next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
                next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
                next_acb->queue.sense_baddr =
                    acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
                next_acb->sg_blocks = blocks;
                next_acb->sg_busaddr = busaddr;
                next_acb->state = ACB_FREE;
                SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
                blocks += ADW_SG_BLOCKCNT;
                busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
                next_acb++;
                adw->num_acbs++;
        }
        return (i);
}
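
/*
 * adwexecuteacb() is the bus_dmamap_load() callback: it may run
 * synchronously from adw_action() or later from the busdma subsystem
 * (the EINPROGRESS case handled in adw_action() below).  The first
 * segment's address rides in the request itself; multi-segment
 * transfers also have the complete list copied into the ACB's chained
 * S/G blocks.
 */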

static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
        struct acb *acb;
        union ccb *ccb;
        struct adw_softc *adw;

        acb = (struct acb *)arg;
        ccb = acb->ccb;
        adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

        if (error != 0) {
                if (error != EFBIG)
                        printf("%s: Unexpected error 0x%x returned from "
                               "bus_dmamap_load\n", adw_name(adw), error);
                if (ccb->ccb_h.status == CAM_REQ_INPROG) {
                        xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
                        ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
                }
                adwfreeacb(adw, acb);
                xpt_done(ccb);
                return;
        }

        if (nseg != 0) {
                bus_dmasync_op_t op;

                acb->queue.data_addr = dm_segs[0].ds_addr;
                acb->queue.data_cnt = ccb->csio.dxfer_len;
                if (nseg > 1) {
                        struct adw_sg_block *sg_block;
                        struct adw_sg_elm *sg;
                        bus_addr_t sg_busaddr;
                        u_int sg_index;
                        bus_dma_segment_t *end_seg;

                        end_seg = dm_segs + nseg;

                        sg_busaddr = acb->sg_busaddr;
                        sg_index = 0;
                        /* Copy the segments into our SG list */
                        for (sg_block = acb->sg_blocks;; sg_block++) {
                                u_int i;

                                sg = sg_block->sg_list;
                                for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
                                        if (dm_segs >= end_seg)
                                                break;

                                        sg->sg_addr = dm_segs->ds_addr;
                                        sg->sg_count = dm_segs->ds_len;
                                        sg++;
                                        dm_segs++;
                                }
                                sg_block->sg_cnt = i;
                                sg_index += i;
                                if (dm_segs == end_seg) {
                                        sg_block->sg_busaddr_next = 0;
                                        break;
                                } else {
                                        sg_busaddr +=
                                            sizeof(struct adw_sg_block);
                                        sg_block->sg_busaddr_next = sg_busaddr;
                                }
                        }
                        acb->queue.sg_real_addr = acb->sg_busaddr;
                } else {
                        acb->queue.sg_real_addr = 0;
                }

                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
                        op = BUS_DMASYNC_PREREAD;
                else
                        op = BUS_DMASYNC_PREWRITE;

                bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

        } else {
                acb->queue.data_addr = 0;
                acb->queue.data_cnt = 0;
                acb->queue.sg_real_addr = 0;
        }

        crit_enter();

        /*
         * This is the last chance to check whether this
         * CCB needs to be aborted.
         */
        if (ccb->ccb_h.status != CAM_REQ_INPROG) {
                if (nseg != 0)
                        bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
                adwfreeacb(adw, acb);
                xpt_done(ccb);
                crit_exit();
                return;
        }

        acb->state |= ACB_ACTIVE;
        ccb->ccb_h.status |= CAM_SIM_QUEUED;
        LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
        callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
                      adwtimeout, acb);

        adw_send_acb(adw, acb, acbvtob(adw, acb));

        crit_exit();
}
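
/*
 * adw_action() is the SIM action routine handed to cam_sim_alloc() in
 * adw_attach() below; CAM dispatches every CCB for this bus through it.
 */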

static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
        struct adw_softc *adw;

        CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

        adw = (struct adw_softc *)cam_sim_softc(sim);

        switch (ccb->ccb_h.func_code) {
        /* Common cases first */
        case XPT_SCSI_IO:       /* Execute the requested I/O operation */
        {
                struct ccb_scsiio *csio;
                struct ccb_hdr *ccbh;
                struct acb *acb;

                csio = &ccb->csio;
                ccbh = &ccb->ccb_h;

                /* Max supported CDB length is 12 bytes */
                if (csio->cdb_len > 12) {
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        xpt_done(ccb);
                        return;
                }

                if ((acb = adwgetacb(adw)) == NULL) {
                        crit_enter();
                        adw->state |= ADW_RESOURCE_SHORTAGE;
                        crit_exit();
                        xpt_freeze_simq(sim, /*count*/1);
                        ccb->ccb_h.status = CAM_REQUEUE_REQ;
                        xpt_done(ccb);
                        return;
                }

                /* Link acb and ccb so we can find one from the other */
                acb->ccb = ccb;
                ccb->ccb_h.ccb_acb_ptr = acb;
                ccb->ccb_h.ccb_adw_ptr = adw;

                acb->queue.cntl = 0;
                acb->queue.target_cmd = 0;
                acb->queue.target_id = ccb->ccb_h.target_id;
                acb->queue.target_lun = ccb->ccb_h.target_lun;

                acb->queue.mflag = 0;
                acb->queue.sense_len =
                        MIN(csio->sense_len, sizeof(acb->sense_data));
                acb->queue.cdb_len = csio->cdb_len;
                if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
                        switch (csio->tag_action) {
                        case MSG_SIMPLE_Q_TAG:
                                acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
                                break;
                        case MSG_HEAD_OF_Q_TAG:
                                acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
                                break;
                        case MSG_ORDERED_Q_TAG:
                                acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
                                break;
                        default:
                                acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
                                break;
                        }
                } else
                        acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;

                if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
                        acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;

                acb->queue.done_status = 0;
                acb->queue.scsi_status = 0;
                acb->queue.host_status = 0;
                acb->queue.sg_wk_ix = 0;
                if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
                        if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
                                bcopy(csio->cdb_io.cdb_ptr,
                                      acb->queue.cdb, csio->cdb_len);
                        } else {
                                /* I guess I could map it in... */
                                ccb->ccb_h.status = CAM_REQ_INVALID;
                                adwfreeacb(adw, acb);
                                xpt_done(ccb);
                                return;
                        }
                } else {
                        bcopy(csio->cdb_io.cdb_bytes,
                              acb->queue.cdb, csio->cdb_len);
                }

                /*
                 * If we have any data to send with this command,
                 * map it into bus space.
                 */
                if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
                        if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
                                /*
                                 * We've been given a pointer
                                 * to a single buffer.
                                 */
                                if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
                                        int error;

                                        crit_enter();
                                        error =
                                            bus_dmamap_load(adw->buffer_dmat,
                                                            acb->dmamap,
                                                            csio->data_ptr,
                                                            csio->dxfer_len,
                                                            adwexecuteacb,
                                                            acb, /*flags*/0);
                                        if (error == EINPROGRESS) {
                                                /*
                                                 * So as to maintain ordering,
                                                 * freeze the controller queue
                                                 * until our mapping is
                                                 * returned.
                                                 */
                                                xpt_freeze_simq(sim, 1);
                                                acb->state |= ACB_RELEASE_SIMQ;
                                        }
                                        crit_exit();
                                } else {
                                        struct bus_dma_segment seg;

                                        /* Pointer to physical buffer */
                                        seg.ds_addr =
                                            (bus_addr_t)csio->data_ptr;
                                        seg.ds_len = csio->dxfer_len;
                                        adwexecuteacb(acb, &seg, 1, 0);
                                }
                        } else {
                                struct bus_dma_segment *segs;

                                if ((ccbh->flags & CAM_DATA_PHYS) != 0)
                                        panic("adw_action - Physical "
                                              "segment pointers "
                                              "unsupported");

                                if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0)
                                        panic("adw_action - Virtual "
                                              "segment addresses "
                                              "unsupported");

                                /* Just use the segments provided */
                                segs = (struct bus_dma_segment *)csio->data_ptr;
                                adwexecuteacb(acb, segs, csio->sglist_cnt,
                                              (csio->sglist_cnt < ADW_SGSIZE)
                                              ? 0 : EFBIG);
                        }
                } else {
                        adwexecuteacb(acb, NULL, 0, 0);
                }
                break;
        }
        case XPT_RESET_DEV:     /* Bus Device Reset the specified SCSI device */
        {
                adw_idle_cmd_status_t status;

                status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
                                           ccb->ccb_h.target_id);
                if (status == ADW_IDLE_CMD_SUCCESS) {
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        if (bootverbose) {
                                xpt_print_path(ccb->ccb_h.path);
                                printf("BDR Delivered\n");
                        }
                } else
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                xpt_done(ccb);
                break;
        }
        case XPT_ABORT:                 /* Abort the specified CCB */
                /* XXX Implement */
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        case XPT_SET_TRAN_SETTINGS:
        {
                struct ccb_trans_settings *cts;
                u_int target_mask;

                cts = &ccb->cts;
                target_mask = 0x01 << ccb->ccb_h.target_id;

                crit_enter();
                if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
                        u_int sdtrdone;

                        sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
                        if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
                                u_int discenb;

                                discenb =
                                    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);

                                if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
                                        discenb |= target_mask;
                                else
                                        discenb &= ~target_mask;

                                adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
                                                  discenb);
                        }

                        if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
                                if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
                                        adw->tagenb |= target_mask;
                                else
                                        adw->tagenb &= ~target_mask;
                        }
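
                        /*
                         * The WDTR/SDTR "ABLE" words in LRAM gate what the
                         * microcode may negotiate, while clearing a target's
                         * bit in the matching "DONE" word appears to prompt
                         * a renegotiation on the next command; that is why
                         * the blocks below clear those bits whenever a
                         * setting changes.
                         */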
                        if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
                                u_int wdtrenb_orig;
                                u_int wdtrenb;
                                u_int wdtrdone;

                                wdtrenb_orig =
                                    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
                                wdtrenb = wdtrenb_orig;
                                wdtrdone = adw_lram_read_16(adw,
                                                            ADW_MC_WDTR_DONE);
                                switch (cts->bus_width) {
                                case MSG_EXT_WDTR_BUS_32_BIT:
                                case MSG_EXT_WDTR_BUS_16_BIT:
                                        wdtrenb |= target_mask;
                                        break;
                                case MSG_EXT_WDTR_BUS_8_BIT:
                                default:
                                        wdtrenb &= ~target_mask;
                                        break;
                                }
                                if (wdtrenb != wdtrenb_orig) {
                                        adw_lram_write_16(adw,
                                                          ADW_MC_WDTR_ABLE,
                                                          wdtrenb);
                                        wdtrdone &= ~target_mask;
                                        adw_lram_write_16(adw,
                                                          ADW_MC_WDTR_DONE,
                                                          wdtrdone);
                                        /* Wide negotiation forces async */
                                        sdtrdone &= ~target_mask;
                                        adw_lram_write_16(adw,
                                                          ADW_MC_SDTR_DONE,
                                                          sdtrdone);
                                }
                        }

                        if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
                         || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
                                u_int sdtr_orig;
                                u_int sdtr;
                                u_int sdtrable_orig;
                                u_int sdtrable;

                                sdtr = adw_get_chip_sdtr(adw,
                                                         ccb->ccb_h.target_id);
                                sdtr_orig = sdtr;
                                sdtrable = adw_lram_read_16(adw,
                                                            ADW_MC_SDTR_ABLE);
                                sdtrable_orig = sdtrable;

                                if ((cts->valid
                                   & CCB_TRANS_SYNC_RATE_VALID) != 0) {
                                        sdtr =
                                            adw_find_sdtr(adw,
                                                          cts->sync_period);
                                }

                                if ((cts->valid
                                   & CCB_TRANS_SYNC_OFFSET_VALID) != 0) {
                                        if (cts->sync_offset == 0)
                                                sdtr = ADW_MC_SDTR_ASYNC;
                                }

                                if (sdtr == ADW_MC_SDTR_ASYNC)
                                        sdtrable &= ~target_mask;
                                else
                                        sdtrable |= target_mask;
                                if (sdtr != sdtr_orig
                                 || sdtrable != sdtrable_orig) {
                                        adw_set_chip_sdtr(adw,
                                                          ccb->ccb_h.target_id,
                                                          sdtr);
                                        sdtrdone &= ~target_mask;
                                        adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
                                                          sdtrable);
                                        adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
                                                          sdtrdone);
                                }
                        }
                }
                crit_exit();
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
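
        /*
         * For XPT_GET_TRAN_SETTINGS we report either the user defaults
         * captured from the EEPROM in adw_init() or, for current settings,
         * the live negotiation state read back from the microcode's
         * per-target handshake table in LRAM.
         */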
        case XPT_GET_TRAN_SETTINGS:
        /* Get default/user set transfer settings for the target */
        {
                struct ccb_trans_settings *cts;
                u_int target_mask;

                cts = &ccb->cts;
                target_mask = 0x01 << ccb->ccb_h.target_id;
                if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
                        u_int mc_sdtr;

                        cts->flags = 0;
                        if ((adw->user_discenb & target_mask) != 0)
                                cts->flags |= CCB_TRANS_DISC_ENB;

                        if ((adw->user_tagenb & target_mask) != 0)
                                cts->flags |= CCB_TRANS_TAG_ENB;

                        if ((adw->user_wdtr & target_mask) != 0)
                                cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
                        else
                                cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

                        mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
                        cts->sync_period = adw_find_period(adw, mc_sdtr);
                        if (cts->sync_period != 0)
                                cts->sync_offset = 15; /* XXX ??? */
                        else
                                cts->sync_offset = 0;

                        cts->valid = CCB_TRANS_SYNC_RATE_VALID
                                   | CCB_TRANS_SYNC_OFFSET_VALID
                                   | CCB_TRANS_BUS_WIDTH_VALID
                                   | CCB_TRANS_DISC_VALID
                                   | CCB_TRANS_TQ_VALID;
                        ccb->ccb_h.status = CAM_REQ_CMP;
                } else {
                        u_int targ_tinfo;

                        cts->flags = 0;
                        if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
                           & target_mask) != 0)
                                cts->flags |= CCB_TRANS_DISC_ENB;

                        if ((adw->tagenb & target_mask) != 0)
                                cts->flags |= CCB_TRANS_TAG_ENB;

                        targ_tinfo =
                            adw_lram_read_16(adw,
                                             ADW_MC_DEVICE_HSHK_CFG_TABLE
                                             + (2 * ccb->ccb_h.target_id));

                        if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
                                cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
                        else
                                cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

                        cts->sync_period =
                            adw_hshk_cfg_period_factor(targ_tinfo);

                        cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
                        if (cts->sync_period == 0)
                                cts->sync_offset = 0;

                        if (cts->sync_offset == 0)
                                cts->sync_period = 0;
                }
                cts->valid = CCB_TRANS_SYNC_RATE_VALID
                           | CCB_TRANS_SYNC_OFFSET_VALID
                           | CCB_TRANS_BUS_WIDTH_VALID
                           | CCB_TRANS_DISC_VALID
                           | CCB_TRANS_TQ_VALID;
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        case XPT_CALC_GEOMETRY:
        {
                struct ccb_calc_geometry *ccg;
                u_int32_t size_mb;
                u_int32_t secs_per_cylinder;
                int extended;

                /*
                 * XXX Use Adaptec translation until I find out how to
                 *     get this information from the card.
                 */
                ccg = &ccb->ccg;
                size_mb = ccg->volume_size
                        / ((1024L * 1024L) / ccg->block_size);
                extended = 1;

                if (size_mb > 1024 && extended) {
                        ccg->heads = 255;
                        ccg->secs_per_track = 63;
                } else {
                        ccg->heads = 64;
                        ccg->secs_per_track = 32;
                }
                secs_per_cylinder = ccg->heads * ccg->secs_per_track;
                ccg->cylinders = ccg->volume_size / secs_per_cylinder;
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        case XPT_RESET_BUS:             /* Reset the specified SCSI bus */
        {
                int failure;

                failure = adw_reset_bus(adw);
                if (failure != 0) {
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                } else {
                        if (bootverbose) {
                                xpt_print_path(adw->path);
                                printf("Bus Reset Delivered\n");
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                }
                xpt_done(ccb);
                break;
        }
        case XPT_TERM_IO:               /* Terminate the I/O process */
                /* XXX Implement */
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        case XPT_PATH_INQ:              /* Path routing inquiry */
        {
                struct ccb_pathinq *cpi = &ccb->cpi;

                cpi->version_num = 1;
                cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
                cpi->target_sprt = 0;
                cpi->hba_misc = 0;
                cpi->hba_eng_cnt = 0;
                cpi->max_target = ADW_MAX_TID;
                cpi->max_lun = ADW_MAX_LUN;
                cpi->initiator_id = adw->initiator_id;
                cpi->bus_id = cam_sim_bus(sim);
                cpi->base_transfer_speed = 3300;
                strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
                strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
                cpi->unit_number = cam_sim_unit(sim);
                cpi->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        default:
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        }
}

static void
adw_poll(struct cam_sim *sim)
{
        adw_intr(cam_sim_softc(sim));
}

static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}
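
/*
 * adw_alloc(), adw_init(), and adw_attach() are called in that order by
 * the product specific probe code.  A minimal sketch of the expected
 * sequence, with error unwinding via adw_free() (see adw_pci.c for the
 * real thing):
 *
 *      adw = adw_alloc(dev, regs, regs_type, regs_id);
 *      if (adw_init(adw) != 0) {
 *              adw_free(adw);
 *              return (ENXIO);
 *      }
 *      if (adw_attach(adw) != 0)
 *              ...
 */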

struct adw_softc *
adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
{
        struct adw_softc *adw;
        int i;

        /*
         * Allocate a storage area for us
         */
        adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_INTWAIT | M_ZERO);
        LIST_INIT(&adw->pending_ccbs);
        SLIST_INIT(&adw->sg_maps);
        adw->device = dev;
        adw->unit = device_get_unit(dev);
        adw->regs_res_type = regs_type;
        adw->regs_res_id = regs_id;
        adw->regs = regs;
        adw->tag = rman_get_bustag(regs);
        adw->bsh = rman_get_bushandle(regs);
        KKASSERT(adw->unit >= 0 && adw->unit < 100);
        i = adw->unit / 10;
        adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_INTWAIT);
        sprintf(adw->name, "adw%d", adw->unit);
        return(adw);
}

void
adw_free(struct adw_softc *adw)
{
        switch (adw->init_level) {
        case 9:
        {
                struct sg_map_node *sg_map;

                while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
                        SLIST_REMOVE_HEAD(&adw->sg_maps, links);
                        bus_dmamap_unload(adw->sg_dmat,
                                          sg_map->sg_dmamap);
                        bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
                                        sg_map->sg_dmamap);
                        free(sg_map, M_DEVBUF);
                }
                bus_dma_tag_destroy(adw->sg_dmat);
        }
        /* FALLTHROUGH */
        case 8:
                bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
                /* FALLTHROUGH */
        case 7:
                bus_dmamem_free(adw->acb_dmat, adw->acbs,
                                adw->acb_dmamap);
                bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
                /* FALLTHROUGH */
        case 6:
                bus_dma_tag_destroy(adw->acb_dmat);
                /* FALLTHROUGH */
        case 5:
                bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
                /* FALLTHROUGH */
        case 4:
                bus_dmamem_free(adw->carrier_dmat, adw->carriers,
                                adw->carrier_dmamap);
                bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
                /* FALLTHROUGH */
        case 3:
                bus_dma_tag_destroy(adw->carrier_dmat);
                /* FALLTHROUGH */
        case 2:
                bus_dma_tag_destroy(adw->buffer_dmat);
                /* FALLTHROUGH */
        case 1:
                bus_dma_tag_destroy(adw->parent_dmat);
                /* FALLTHROUGH */
        case 0:
                break;
        }
        free(adw->name, M_DEVBUF);
        free(adw, M_DEVBUF);
}

int
adw_init(struct adw_softc *adw)
{
        struct adw_eeprom eep_config;
        u_int     tid;
        u_int     i;
        u_int16_t checksum;
        u_int16_t scsicfg1;

        checksum = adw_eeprom_read(adw, &eep_config);
        bcopy(eep_config.serial_number, adw->serial_number,
              sizeof(adw->serial_number));
        if (checksum != eep_config.checksum) {
                u_int16_t serial_number[3];

                adw->flags |= ADW_EEPROM_FAILED;
                printf("%s: EEPROM checksum failed.  Restoring Defaults\n",
                       adw_name(adw));

                /*
                 * Restore the default EEPROM settings.
                 * Assume the 6 byte board serial number that was read
                 * from EEPROM is correct even if the EEPROM checksum
                 * failed.
                 */
                bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
                bcopy(adw->serial_number, eep_config.serial_number,
                      sizeof(serial_number));
                adw_eeprom_write(adw, &eep_config);
        }

        /* Pull eeprom information into our softc. */
        adw->bios_ctrl = eep_config.bios_ctrl;
        adw->user_wdtr = eep_config.wdtr_able;
        for (tid = 0; tid < ADW_MAX_TID; tid++) {
                u_int     mc_sdtr;
                u_int16_t tid_mask;

                tid_mask = 0x1 << tid;
                if ((adw->features & ADW_ULTRA) != 0) {
                        /*
                         * Ultra chips store sdtr and ultraenb
                         * bits in their seeprom, so we must
                         * construct valid mc_sdtr entries
                         * indirectly.
                         */
                        if (eep_config.sync1.sync_enable & tid_mask) {
                                if (eep_config.sync2.ultra_enable & tid_mask)
                                        mc_sdtr = ADW_MC_SDTR_20;
                                else
                                        mc_sdtr = ADW_MC_SDTR_10;
                        } else
                                mc_sdtr = ADW_MC_SDTR_ASYNC;
                } else {
                        switch (ADW_TARGET_GROUP(tid)) {
                        case 3:
                                mc_sdtr = eep_config.sync4.sdtr4;
                                break;
                        case 2:
                                mc_sdtr = eep_config.sync3.sdtr3;
                                break;
                        case 1:
                                mc_sdtr = eep_config.sync2.sdtr2;
                                break;
                        default: /* Shut up compiler */
                        case 0:
                                mc_sdtr = eep_config.sync1.sdtr1;
                                break;
                        }
                        mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
                        mc_sdtr &= 0xFF;
                }
                adw_set_user_sdtr(adw, tid, mc_sdtr);
        }
        adw->user_tagenb = eep_config.tagqng_able;
        adw->user_discenb = eep_config.disc_enable;
        adw->max_acbs = eep_config.max_host_qng;
        adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

        /*
         * Sanity check the number of host openings.
         */
        if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
                adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
        else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
                /* If the value is zero, assume it is uninitialized. */
                if (adw->max_acbs == 0)
                        adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
                else
                        adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
        }

        scsicfg1 = 0;
        if ((adw->features & ADW_ULTRA2) != 0) {
                switch (eep_config.termination_lvd) {
                default:
                        printf("%s: Invalid EEPROM LVD Termination Settings.\n",
                               adw_name(adw));
                        printf("%s: Reverting to Automatic LVD Termination\n",
                               adw_name(adw));
                        /* FALLTHROUGH */
                case ADW_EEPROM_TERM_AUTO:
                        break;
                case ADW_EEPROM_TERM_BOTH_ON:
                        scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
                        /* FALLTHROUGH */
                case ADW_EEPROM_TERM_HIGH_ON:
                        scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
                        /* FALLTHROUGH */
                case ADW_EEPROM_TERM_OFF:
                        scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
                        break;
                }
        }

        switch (eep_config.termination_se) {
        default:
                printf("%s: Invalid SE EEPROM Termination Settings.\n",
                       adw_name(adw));
                printf("%s: Reverting to Automatic SE Termination\n",
                       adw_name(adw));
                /* FALLTHROUGH */
        case ADW_EEPROM_TERM_AUTO:
                break;
        case ADW_EEPROM_TERM_BOTH_ON:
                scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
                /* FALLTHROUGH */
        case ADW_EEPROM_TERM_HIGH_ON:
                scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
                /* FALLTHROUGH */
        case ADW_EEPROM_TERM_OFF:
                scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
                break;
        }
        printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id);
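
        /*
         * Each resource acquired from here on bumps adw->init_level so
         * that adw_free() can unwind a partially completed initialization
         * by falling through its switch from the last level reached.
         */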

        /* DMA tag for mapping buffers into device visible space. */
        if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
                               /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
                               /*highaddr*/BUS_SPACE_MAXADDR,
                               /*filter*/NULL, /*filterarg*/NULL,
                               /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE,
                               /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
                               /*flags*/BUS_DMA_ALLOCNOW,
                               &adw->buffer_dmat) != 0) {
                return (ENOMEM);
        }

        adw->init_level++;

        /* DMA tag for our ccb carrier structures */
        if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0x10,
                               /*boundary*/0,
                               /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
                               /*highaddr*/BUS_SPACE_MAXADDR,
                               /*filter*/NULL, /*filterarg*/NULL,
                               (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
                                * sizeof(struct adw_carrier),
                               /*nsegments*/1,
                               /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
                               /*flags*/0, &adw->carrier_dmat) != 0) {
                return (ENOMEM);
        }

        adw->init_level++;

        /* Allocation for our ccb carrier structures */
        if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
                             BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
                return (ENOMEM);
        }

        adw->init_level++;

        /* And permanently map them */
        bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
                        adw->carriers,
                        (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
                         * sizeof(struct adw_carrier),
                        adwmapmem, &adw->carrier_busbase, /*flags*/0);

        /* Clear them out. */
        bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
                             * sizeof(struct adw_carrier));

        /* Setup our free carrier list */
        adw->free_carriers = adw->carriers;
        for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
                adw->carriers[i].carr_offset =
                        carriervtobo(adw, &adw->carriers[i]);
                adw->carriers[i].carr_ba =
                        carriervtob(adw, &adw->carriers[i]);
                adw->carriers[i].areq_ba = 0;
                adw->carriers[i].next_ba =
                        carriervtobo(adw, &adw->carriers[i+1]);
        }
        /* Terminal carrier.  Never leaves the freelist */
        adw->carriers[i].carr_offset =
                carriervtobo(adw, &adw->carriers[i]);
        adw->carriers[i].carr_ba =
                carriervtob(adw, &adw->carriers[i]);
        adw->carriers[i].areq_ba = 0;
        adw->carriers[i].next_ba = ~0;

        adw->init_level++;

        /* DMA tag for our acb structures */
        if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
                               /*lowaddr*/BUS_SPACE_MAXADDR,
                               /*highaddr*/BUS_SPACE_MAXADDR,
                               /*filter*/NULL, /*filterarg*/NULL,
                               adw->max_acbs * sizeof(struct acb),
                               /*nsegments*/1,
                               /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
                               /*flags*/0, &adw->acb_dmat) != 0) {
                return (ENOMEM);
        }

        adw->init_level++;

        /* Allocation for our ccbs */
        if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
                             BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
                return (ENOMEM);

        adw->init_level++;

        /* And permanently map them */
        bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
                        adw->acbs,
                        adw->max_acbs * sizeof(struct acb),
                        adwmapmem, &adw->acb_busbase, /*flags*/0);

        /* Clear them out. */
        bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

        /* DMA tag for our S/G structures.  We allocate in page sized chunks */
        if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
                               /*lowaddr*/BUS_SPACE_MAXADDR,
                               /*highaddr*/BUS_SPACE_MAXADDR,
                               /*filter*/NULL, /*filterarg*/NULL,
                               PAGE_SIZE, /*nsegments*/1,
                               /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
                               /*flags*/0, &adw->sg_dmat) != 0) {
                return (ENOMEM);
        }

        adw->init_level++;

        /* Allocate our first batch of ccbs */
        if (adwallocacbs(adw) == 0)
                return (ENOMEM);

        if (adw_init_chip(adw, scsicfg1) != 0)
                return (ENXIO);

        printf("Queue Depth %d\n", adw->max_acbs);

        return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
        struct ccb_setasync csa;
        int error;

        error = 0;
        crit_enter();
        /* Hook up our interrupt handler */
        if ((error = bus_setup_intr(adw->device, adw->irq, INTR_TYPE_CAM,
                                    adw_intr, adw, &adw->ih, NULL)) != 0) {
                device_printf(adw->device, "bus_setup_intr() failed: %d\n",
                              error);
                goto fail;
        }

        /* Start the Risc processor now that we are fully configured. */
        adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

        /*
         * Construct our SIM entry.
         */
        adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit,
                                 1, adw->max_acbs, NULL);
        if (adw->sim == NULL) {
                error = ENOMEM;
                goto fail;
        }

        /*
         * Register the bus.
         */
        if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) {
                cam_sim_free(adw->sim);
                error = ENOMEM;
                goto fail;
        }

        if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
                            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
           == CAM_REQ_CMP) {
                xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
                csa.ccb_h.func_code = XPT_SASYNC_CB;
                csa.event_enable = AC_LOST_DEVICE;
                csa.callback = adw_async;
                csa.callback_arg = adw;
                xpt_action((union ccb *)&csa);
        }

fail:
        crit_exit();
        return (error);
}
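
/*
 * Interrupt handler.  INTRB codes flag asynchronous microcode events;
 * completed commands are reaped by walking the carrier response queue
 * until we reach a carrier that does not have ADW_RQ_DONE set.
 */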
void
adw_intr(void *arg)
{
        struct adw_softc *adw;
        u_int int_stat;

        adw = (struct adw_softc *)arg;
        if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
                return;

        /* Reading the register clears the interrupt. */
        int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

        if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
                u_int intrb_code;

                /* Async Microcode Event */
                intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
                switch (intrb_code) {
                case ADW_ASYNC_CARRIER_READY_FAILURE:
                        /*
                         * The RISC missed our update of
                         * the commandq.
                         */
                        if (LIST_FIRST(&adw->pending_ccbs) != NULL)
                                adw_tickle_risc(adw, ADW_TICKLE_A);
                        break;
                case ADW_ASYNC_SCSI_BUS_RESET_DET:
                        /*
                         * The firmware detected a SCSI Bus reset.
                         */
                        printf("Someone Reset the Bus\n");
                        adw_handle_bus_reset(adw, /*initiated*/FALSE);
                        break;
                case ADW_ASYNC_RDMA_FAILURE:
                        /*
                         * Handle RDMA failure by resetting the
                         * SCSI Bus and chip.
                         */
#if XXX
                        AdvResetChipAndSB(adv_dvc_varp);
#endif
                        break;

                case ADW_ASYNC_HOST_SCSI_BUS_RESET:
                        /*
                         * Host generated SCSI bus reset occurred.
                         */
                        adw_handle_bus_reset(adw, /*initiated*/TRUE);
                        break;
                default:
                        printf("adw_intr: unknown async code 0x%x\n",
                               intrb_code);
                        break;
                }
        }

        /*
         * Run down the ResponseQ.
         */
        while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
                struct adw_carrier *free_carrier;
                struct acb *acb;
                union ccb *ccb;

#if 0
                printf("0x%x, 0x%x, 0x%x, 0x%x\n",
                       adw->responseq->carr_offset,
                       adw->responseq->carr_ba,
                       adw->responseq->areq_ba,
                       adw->responseq->next_ba);
#endif
                /*
                 * The firmware copies the adw_scsi_req_q.acb_baddr
                 * field into the areq_ba field of the carrier.
                 */
                acb = acbbotov(adw, adw->responseq->areq_ba);

                /*
                 * The least significant four bits of the next_ba
                 * field are used as flags.  Mask them out and then
                 * advance through the list.
                 */
                free_carrier = adw->responseq;
                adw->responseq =
                    carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
                free_carrier->next_ba = adw->free_carriers->carr_offset;
                adw->free_carriers = free_carrier;

                /* Process CCB */
                ccb = acb->ccb;
                callout_stop(&ccb->ccb_h.timeout_ch);
                if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
                        bus_dmasync_op_t op;

                        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
                                op = BUS_DMASYNC_POSTREAD;
                        else
                                op = BUS_DMASYNC_POSTWRITE;
                        bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
                        bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
                        ccb->csio.resid = acb->queue.data_cnt;
                } else
                        ccb->csio.resid = 0;

                /* Common Cases inline... */
                if (acb->queue.host_status == QHSTA_NO_ERROR
                 && (acb->queue.done_status == QD_NO_ERROR
                  || acb->queue.done_status == QD_WITH_ERROR)) {
                        ccb->csio.scsi_status = acb->queue.scsi_status;
                        ccb->ccb_h.status = 0;
                        switch (ccb->csio.scsi_status) {
                        case SCSI_STATUS_OK:
                                ccb->ccb_h.status |= CAM_REQ_CMP;
                                break;
                        case SCSI_STATUS_CHECK_COND:
                        case SCSI_STATUS_CMD_TERMINATED:
                                bcopy(&acb->sense_data, &ccb->csio.sense_data,
                                      ccb->csio.sense_len);
                                ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                                ccb->csio.sense_resid = acb->queue.sense_len;
                                /* FALLTHROUGH */
                        default:
                                ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
                                                  |  CAM_DEV_QFRZN;
                                xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
                                break;
                        }
                        adwfreeacb(adw, acb);
                        xpt_done(ccb);
                } else {
                        adwprocesserror(adw, acb);
                }
        }
}
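
/*
 * Translate an abnormal host or done status into the closest CAM status.
 * This is the slow path; the common QD_NO_ERROR/QD_WITH_ERROR completions
 * are handled inline in adw_intr() above.
 */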
static void
adwprocesserror(struct adw_softc *adw, struct acb *acb)
{
        union ccb *ccb;

        ccb = acb->ccb;
        if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
                ccb->ccb_h.status = CAM_REQ_ABORTED;
        } else {
                switch (acb->queue.host_status) {
                case QHSTA_M_SEL_TIMEOUT:
                        ccb->ccb_h.status = CAM_SEL_TIMEOUT;
                        break;
                case QHSTA_M_SXFR_OFF_UFLW:
                case QHSTA_M_SXFR_OFF_OFLW:
                case QHSTA_M_DATA_OVER_RUN:
                        ccb->ccb_h.status = CAM_DATA_RUN_ERR;
                        break;
                case QHSTA_M_SXFR_DESELECTED:
                case QHSTA_M_UNEXPECTED_BUS_FREE:
                        ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
                        break;
                case QHSTA_M_SCSI_BUS_RESET:
                case QHSTA_M_SCSI_BUS_RESET_UNSOL:
                        ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
                        break;
                case QHSTA_M_BUS_DEVICE_RESET:
                        ccb->ccb_h.status = CAM_BDR_SENT;
                        break;
                case QHSTA_M_QUEUE_ABORTED:
                        /* BDR or Bus Reset */
                        printf("Saw Queue Aborted\n");
                        ccb->ccb_h.status = adw->last_reset;
                        break;
                case QHSTA_M_SXFR_SDMA_ERR:
                case QHSTA_M_SXFR_SXFR_PERR:
                case QHSTA_M_RDMA_PERR:
                        ccb->ccb_h.status = CAM_UNCOR_PARITY;
                        break;
                case QHSTA_M_WTM_TIMEOUT:
                case QHSTA_M_SXFR_WD_TMO:
                {
                        /* The SCSI bus hung in a phase */
                        xpt_print_path(adw->path);
                        printf("Watch Dog timer expired.  Resetting bus\n");
                        adw_reset_bus(adw);
                        break;
                }
                case QHSTA_M_SXFR_XFR_PH_ERR:
                        ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
                        break;
                case QHSTA_M_SXFR_UNKNOWN_ERROR:
                        break;
                case QHSTA_M_BAD_CMPL_STATUS_IN:
                        /* No command complete after a status message */
                        ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
                        break;
                case QHSTA_M_AUTO_REQ_SENSE_FAIL:
                        ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
                        break;
                case QHSTA_M_INVALID_DEVICE:
                        ccb->ccb_h.status = CAM_PATH_INVALID;
                        break;
                case QHSTA_M_NO_AUTO_REQ_SENSE:
                        /*
                         * User didn't request sense, but we got a
                         * check condition.
                         */
                        ccb->csio.scsi_status = acb->queue.scsi_status;
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                default:
                        panic("%s: Unhandled Host status error %x",
                              adw_name(adw), acb->queue.host_status);
                        /* NOTREACHED */
                }
        }
        if ((acb->state & ACB_RECOVERY_ACB) != 0) {
                if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
                 || ccb->ccb_h.status == CAM_BDR_SENT)
                        ccb->ccb_h.status = CAM_CMD_TIMEOUT;
        }
        if (ccb->ccb_h.status != CAM_REQ_CMP) {
                xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
                ccb->ccb_h.status |= CAM_DEV_QFRZN;
        }
        adwfreeacb(adw, acb);
        xpt_done(ccb);
}

static void
adwtimeout(void *arg)
{
        struct acb *acb;
        union ccb *ccb;
        struct adw_softc *adw;
        adw_idle_cmd_status_t status;
        int target_id;

        acb = (struct acb *)arg;
        ccb = acb->ccb;
        adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
        xpt_print_path(ccb->ccb_h.path);
        printf("ACB %p - timed out\n", (void *)acb);

        crit_enter();

        if ((acb->state & ACB_ACTIVE) == 0) {
                xpt_print_path(ccb->ccb_h.path);
                printf("ACB %p - timed out CCB already completed\n",
                       (void *)acb);
                crit_exit();
                return;
        }

        acb->state |= ACB_RECOVERY_ACB;
        target_id = ccb->ccb_h.target_id;

        /* Attempt a BDR first */
        status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
                                   ccb->ccb_h.target_id);
        crit_exit();
        if (status == ADW_IDLE_CMD_SUCCESS) {
                printf("%s: BDR Delivered.  No longer in timeout\n",
                       adw_name(adw));
                adw_handle_device_reset(adw, target_id);
        } else {
                adw_reset_bus(adw);
                xpt_print_path(adw->path);
                printf("Bus Reset Delivered.  No longer in timeout\n");
        }
}
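
/*
 * Recovery bookkeeping: adw->last_reset records which recovery action
 * (BDR or full bus reset) was delivered most recently, so that
 * adwprocesserror() can report a meaningful status for commands the
 * firmware aborts with QHSTA_M_QUEUE_ABORTED.
 */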
static void
adw_handle_device_reset(struct adw_softc *adw, u_int target)
{
        struct cam_path *path;
        cam_status error;

        error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
                                target, CAM_LUN_WILDCARD);

        if (error == CAM_REQ_CMP) {
                xpt_async(AC_SENT_BDR, path, NULL);
                xpt_free_path(path);
        }
        adw->last_reset = CAM_BDR_SENT;
}

static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
        if (initiated) {
                /*
                 * The microcode currently sets the SCSI Bus Reset signal
                 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
                 * command above.  But the SCSI Bus Reset Hold Time in the
                 * microcode is not deterministic (it may in fact be for less
                 * than the SCSI Spec. minimum of 25 us).  Therefore on return
                 * the Adv Library sets the SCSI Bus Reset signal for
                 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
                 * than 25 us.
                 */
                u_int scsi_ctrl;

                scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
                adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
                DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
                adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

                /*
                 * We will perform the async notification when the
                 * SCSI Reset interrupt occurs.
                 */
        } else
                xpt_async(AC_BUS_RESET, adw->path, NULL);
        adw->last_reset = CAM_SCSI_BUS_RESET;
}