/*
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 *	adw_pci.c	ABP[3]940UW, ABP950UW, ABP3940U2W
 *
 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/advansys/adwcam.c,v 1.7.2.2 2001/03/05 13:08:55 obrien Exp $
 * $DragonFly: src/sys/dev/disk/advansys/adwcam.c,v 1.17 2007/12/23 07:00:56 pavalos Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include <sys/rman.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_message.h>

#include "adwvar.h"

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

u_long adw_unit;

static __inline cam_status	adwccbstatus(union ccb*);
static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

static __inline cam_status
adwccbstatus(union ccb* ccb)
{
	return (ccb->ccb_h.status & CAM_STATUS_MASK);
}

static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct acb* acb;

	crit_enter();
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			kprintf("%s: Can't malloc ACB\n", adw_name(adw));
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}
	crit_exit();

	return (acb);
}

static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{
	crit_enter();
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
	crit_exit();
}

static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = kmalloc(sizeof(*sg_map), M_DEVBUF, M_INTWAIT);

	/* Allocate S/G space for the next batch of ACBS */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		kfree(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

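	/*
	 * Permanently map the page of S/G elements.  adwmapmem() simply
	 * records the single segment's bus address in sg_physaddr.
	 */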
	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr,
			/*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);
	return (sg_map);
}

/*
 * Allocate another chunk of CCBs.  Return count of entries added.
 * Assumed to be called under crit_enter().
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	next_acb = &adw->acbs[adw->num_acbs];
	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
		next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
		next_acb->queue.sense_baddr =
		    acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		next_acb->state = ACB_FREE;
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}

static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct acb *acb;
	union ccb *ccb;
	struct adw_softc *adw;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (error != 0) {
		if (error != EFBIG)
			kprintf("%s: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", adw_name(adw), error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int i;

				sg = sg_block->sg_list;
				for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
					if (dm_segs >= end_seg)
						break;

					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
				}
				sg_block->sg_cnt = i;
				sg_index += i;
				if (dm_segs == end_seg) {
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

	} else {
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}

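	/*
	 * The final abort check and submission to the chip must happen
	 * atomically with respect to the interrupt and timeout handlers.
	 */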
	crit_enter();

	/*
	 * Last time we need to check if this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		crit_exit();
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
		      adwtimeout, acb);

	adw_send_acb(adw, acb, acbvtob(adw, acb));

	crit_exit();
}

static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct ccb_scsiio *csio;
		struct ccb_hdr *ccbh;
		struct acb *acb;

		csio = &ccb->csio;
		ccbh = &ccb->ccb_h;

		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			crit_enter();
			adw->state |= ADW_RESOURCE_SHORTAGE;
			crit_exit();
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_cmd = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.mflag = 0;
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
			switch (csio->tag_action) {
			case MSG_SIMPLE_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
				break;
			case MSG_HEAD_OF_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
				break;
			case MSG_ORDERED_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
				break;
			default:
				acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
				break;
			}
		} else
			acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;

		if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
			acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.sg_wk_ix = 0;
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

		/*
		 * If we have any data to send with this command,
		 * map it into bus space.
		 */
		if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
435 */ 436 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 437 int error; 438 439 crit_enter(); 440 error = 441 bus_dmamap_load(adw->buffer_dmat, 442 acb->dmamap, 443 csio->data_ptr, 444 csio->dxfer_len, 445 adwexecuteacb, 446 acb, /*flags*/0); 447 if (error == EINPROGRESS) { 448 /* 449 * So as to maintain ordering, 450 * freeze the controller queue 451 * until our mapping is 452 * returned. 453 */ 454 xpt_freeze_simq(sim, 1); 455 acb->state |= CAM_RELEASE_SIMQ; 456 } 457 crit_exit(); 458 } else { 459 struct bus_dma_segment seg; 460 461 /* Pointer to physical buffer */ 462 seg.ds_addr = 463 (bus_addr_t)csio->data_ptr; 464 seg.ds_len = csio->dxfer_len; 465 adwexecuteacb(acb, &seg, 1, 0); 466 } 467 } else { 468 struct bus_dma_segment *segs; 469 470 if ((ccbh->flags & CAM_DATA_PHYS) != 0) 471 panic("adw_action - Physical " 472 "segment pointers " 473 "unsupported"); 474 475 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) 476 panic("adw_action - Virtual " 477 "segment addresses " 478 "unsupported"); 479 480 /* Just use the segments provided */ 481 segs = (struct bus_dma_segment *)csio->data_ptr; 482 adwexecuteacb(acb, segs, csio->sglist_cnt, 483 (csio->sglist_cnt < ADW_SGSIZE) 484 ? 0 : EFBIG); 485 } 486 } else { 487 adwexecuteacb(acb, NULL, 0, 0); 488 } 489 break; 490 } 491 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 492 { 493 adw_idle_cmd_status_t status; 494 495 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, 496 ccb->ccb_h.target_id); 497 if (status == ADW_IDLE_CMD_SUCCESS) { 498 ccb->ccb_h.status = CAM_REQ_CMP; 499 if (bootverbose) { 500 xpt_print_path(ccb->ccb_h.path); 501 kprintf("BDR Delivered\n"); 502 } 503 } else 504 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 505 xpt_done(ccb); 506 break; 507 } 508 case XPT_ABORT: /* Abort the specified CCB */ 509 /* XXX Implement */ 510 ccb->ccb_h.status = CAM_REQ_INVALID; 511 xpt_done(ccb); 512 break; 513 case XPT_SET_TRAN_SETTINGS: 514 { 515 #ifdef CAM_NEW_TRAN_CODE 516 struct ccb_trans_settings_scsi *scsi; 517 struct ccb_trans_settings_spi *spi; 518 #endif 519 struct ccb_trans_settings *cts; 520 u_int target_mask; 521 522 cts = &ccb->cts; 523 target_mask = 0x01 << ccb->ccb_h.target_id; 524 525 crit_enter(); 526 #ifdef CAM_NEW_TRAN_CODE 527 scsi = &cts->proto_specific.scsi; 528 spi = &cts->xport_specific.spi; 529 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 530 u_int sdtrdone; 531 532 sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE); 533 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 534 u_int discenb; 535 536 discenb = 537 adw_lram_read_16(adw, ADW_MC_DISC_ENABLE); 538 539 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 540 discenb |= target_mask; 541 else 542 discenb &= ~target_mask; 543 544 adw_lram_write_16(adw, ADW_MC_DISC_ENABLE, 545 discenb); 546 } 547 548 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 549 550 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 551 adw->tagenb |= target_mask; 552 else 553 adw->tagenb &= ~target_mask; 554 } 555 556 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 557 u_int wdtrenb_orig; 558 u_int wdtrenb; 559 u_int wdtrdone; 560 561 wdtrenb_orig = 562 adw_lram_read_16(adw, ADW_MC_WDTR_ABLE); 563 wdtrenb = wdtrenb_orig; 564 wdtrdone = adw_lram_read_16(adw, 565 ADW_MC_WDTR_DONE); 566 switch (spi->bus_width) { 567 case MSG_EXT_WDTR_BUS_32_BIT: 568 case MSG_EXT_WDTR_BUS_16_BIT: 569 wdtrenb |= target_mask; 570 break; 571 case MSG_EXT_WDTR_BUS_8_BIT: 572 default: 573 wdtrenb &= ~target_mask; 574 break; 575 } 576 if (wdtrenb != wdtrenb_orig) { 577 adw_lram_write_16(adw, 578 ADW_MC_WDTR_ABLE, 579 
					wdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
					/* Wide negotiation forces async */
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}

			if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
			 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
				u_int sdtr_orig;
				u_int sdtr;
				u_int sdtrable_orig;
				u_int sdtrable;

				sdtr = adw_get_chip_sdtr(adw,
							 ccb->ccb_h.target_id);
				sdtr_orig = sdtr;
				sdtrable = adw_lram_read_16(adw,
							    ADW_MC_SDTR_ABLE);
				sdtrable_orig = sdtrable;

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_RATE) != 0) {
					sdtr =
					    adw_find_sdtr(adw,
							  spi->sync_period);
				}

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_OFFSET) != 0) {
					if (spi->sync_offset == 0)
						sdtr = ADW_MC_SDTR_ASYNC;
				}

				if (sdtr == ADW_MC_SDTR_ASYNC)
					sdtrable &= ~target_mask;
				else
					sdtrable |= target_mask;
				if (sdtr != sdtr_orig
				 || sdtrable != sdtrable_orig) {
					adw_set_chip_sdtr(adw,
							  ccb->ccb_h.target_id,
							  sdtr);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrable);
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}
		}
#else
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			u_int sdtrdone;

			sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				u_int discenb;

				discenb =
				    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);

				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					discenb |= target_mask;
				else
					discenb &= ~target_mask;

				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
						  discenb);
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adw->tagenb |= target_mask;
				else
					adw->tagenb &= ~target_mask;
			}

			if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
				u_int wdtrenb_orig;
				u_int wdtrenb;
				u_int wdtrdone;

				wdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
				wdtrenb = wdtrenb_orig;
				wdtrdone = adw_lram_read_16(adw,
							    ADW_MC_WDTR_DONE);
				switch (cts->bus_width) {
				case MSG_EXT_WDTR_BUS_32_BIT:
				case MSG_EXT_WDTR_BUS_16_BIT:
					wdtrenb |= target_mask;
					break;
				case MSG_EXT_WDTR_BUS_8_BIT:
				default:
					wdtrenb &= ~target_mask;
					break;
				}
				if (wdtrenb != wdtrenb_orig) {
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_ABLE,
							  wdtrenb);
					wdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
					/* Wide negotiation forces async */
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}

			if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
			 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
				u_int sdtr_orig;
				u_int sdtr;
				u_int sdtrable_orig;
				u_int sdtrable;

				sdtr = adw_get_chip_sdtr(adw,
							 ccb->ccb_h.target_id);
				sdtr_orig = sdtr;
				sdtrable = adw_lram_read_16(adw,
							    ADW_MC_SDTR_ABLE);
				sdtrable_orig = sdtrable;

				if ((cts->valid
				   & CCB_TRANS_SYNC_RATE_VALID) != 0) {
					sdtr =
					    adw_find_sdtr(adw,
							  cts->sync_period);
				}

				if ((cts->valid
				   & CCB_TRANS_SYNC_OFFSET_VALID) != 0) {
					if (cts->sync_offset == 0)
						sdtr = ADW_MC_SDTR_ASYNC;
				}

				if (sdtr == ADW_MC_SDTR_ASYNC)
					sdtrable &= ~target_mask;
				else
					sdtrable |= target_mask;
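				/*
				 * Only write the new sync settings out to
				 * the chip if they differ from what it is
				 * currently using.
				 */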
				if (sdtr != sdtr_orig
				 || sdtrable != sdtrable_orig) {
					adw_set_chip_sdtr(adw,
							  ccb->ccb_h.target_id,
							  sdtr);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrable);
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}
		}
#endif
		crit_exit();
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
#ifdef CAM_NEW_TRAN_CODE
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
#endif
		struct ccb_trans_settings *cts;
		u_int target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
#ifdef CAM_NEW_TRAN_CODE
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			u_int mc_sdtr;

			spi->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
			spi->sync_period = adw_find_period(adw, mc_sdtr);
			if (spi->sync_period != 0)
				spi->sync_offset = 15; /* XXX ??? */
			else
				spi->sync_offset = 0;
		} else {
			u_int targ_tinfo;

			spi->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			spi->sync_period =
			    adw_hshk_cfg_period_factor(targ_tinfo);

			spi->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (spi->sync_period == 0)
				spi->sync_offset = 0;

			if (spi->sync_offset == 0)
				spi->sync_period = 0;
		}

		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
#else
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			u_int mc_sdtr;

			cts->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
			cts->sync_period = adw_find_period(adw, mc_sdtr);
			if (cts->sync_period != 0)
				cts->sync_offset = 15; /* XXX ??? */
			else
				cts->sync_offset = 0;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			u_int targ_tinfo;

			cts->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			cts->sync_period =
			    adw_hshk_cfg_period_factor(targ_tinfo);

			cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (cts->sync_period == 0)
				cts->sync_offset = 0;

			if (cts->sync_offset == 0)
				cts->sync_period = 0;
		}
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
#endif
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int extended;

		/*
		 * XXX Use Adaptec translation until I find out how to
		 *     get this information from the card.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = 1;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int failure;

		failure = adw_reset_bus(adw);
		if (failure != 0) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			if (bootverbose) {
				xpt_print_path(adw->path);
				kprintf("Bus Reset Delivered\n");
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
#ifdef CAM_NEW_TRAN_CODE
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

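/*
 * CAM polling entry point.  Called when the XPT must make progress
 * without interrupt delivery; it simply runs the normal interrupt
 * handler against this SIM's softc.
 */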
static void
adw_poll(struct cam_sim *sim)
{
	adw_intr(cam_sim_softc(sim));
}

static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}

struct adw_softc *
adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
{
	struct adw_softc *adw;
	int i;

	/*
	 * Allocate a storage area for us
	 */
	adw = kmalloc(sizeof(struct adw_softc), M_DEVBUF, M_INTWAIT | M_ZERO);
	LIST_INIT(&adw->pending_ccbs);
	SLIST_INIT(&adw->sg_maps);
	adw->device = dev;
	adw->unit = device_get_unit(dev);
	adw->regs_res_type = regs_type;
	adw->regs_res_id = regs_id;
	adw->regs = regs;
	adw->tag = rman_get_bustag(regs);
	adw->bsh = rman_get_bushandle(regs);
	KKASSERT(adw->unit >= 0 && adw->unit < 100);
	i = adw->unit / 10;
	adw->name = kmalloc(sizeof("adw") + i + 1, M_DEVBUF, M_INTWAIT);
	ksprintf(adw->name, "adw%d", adw->unit);
	return(adw);
}

void
adw_free(struct adw_softc *adw)
{
	switch (adw->init_level) {
	case 9:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			kfree(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	case 8:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	case 7:
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
		bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
	case 6:
		bus_dma_tag_destroy(adw->acb_dmat);
	case 5:
		bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
	case 4:
		bus_dmamem_free(adw->carrier_dmat, adw->carriers,
				adw->carrier_dmamap);
		bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
	case 3:
		bus_dma_tag_destroy(adw->carrier_dmat);
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	case 0:
		break;
	}
	kfree(adw->name, M_DEVBUF);
	kfree(adw, M_DEVBUF);
}

int
adw_init(struct adw_softc *adw)
{
	struct	  adw_eeprom eep_config;
	u_int	  tid;
	u_int	  i;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		adw->flags |= ADW_EEPROM_FAILED;
		kprintf("%s: EEPROM checksum failed.  Restoring Defaults\n",
		       adw_name(adw));

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	for (tid = 0; tid < ADW_MAX_TID; tid++) {
		u_int	  mc_sdtr;
		u_int16_t tid_mask;

		tid_mask = 0x1 << tid;
		if ((adw->features & ADW_ULTRA) != 0) {
			/*
			 * Ultra chips store sdtr and ultraenb
			 * bits in their seeprom, so we must
			 * construct valid mc_sdtr entries
			 * indirectly.
			 */
			if (eep_config.sync1.sync_enable & tid_mask) {
				if (eep_config.sync2.ultra_enable & tid_mask)
					mc_sdtr = ADW_MC_SDTR_20;
				else
					mc_sdtr = ADW_MC_SDTR_10;
			} else
				mc_sdtr = ADW_MC_SDTR_ASYNC;
		} else {
			switch (ADW_TARGET_GROUP(tid)) {
			case 3:
				mc_sdtr = eep_config.sync4.sdtr4;
				break;
			case 2:
				mc_sdtr = eep_config.sync3.sdtr3;
				break;
			case 1:
				mc_sdtr = eep_config.sync2.sdtr2;
				break;
			default: /* Shut up compiler */
			case 0:
				mc_sdtr = eep_config.sync1.sdtr1;
				break;
			}
			mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
			mc_sdtr &= 0xFF;
		}
		adw_set_user_sdtr(adw, tid, mc_sdtr);
	}
	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	scsicfg1 = 0;
	if ((adw->features & ADW_ULTRA2) != 0) {
		switch (eep_config.termination_lvd) {
		default:
			kprintf("%s: Invalid EEPROM LVD Termination Settings.\n",
			       adw_name(adw));
			kprintf("%s: Reverting to Automatic LVD Termination\n",
			       adw_name(adw));
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_AUTO:
			break;
		case ADW_EEPROM_TERM_BOTH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_HIGH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_OFF:
			scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
			break;
		}
	}

	switch (eep_config.termination_se) {
	default:
		kprintf("%s: Invalid SE EEPROM Termination Settings.\n",
		       adw_name(adw));
		kprintf("%s: Reverting to Automatic SE Termination\n",
		       adw_name(adw));
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}
	kprintf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id);

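	/*
	 * All of the DMA tags created below descend from parent_dmat,
	 * so any addressing restrictions registered on the parent tag
	 * are inherited by each of these allocations.
	 */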
	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adw->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* DMA tag for our ccb carrier structures */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0x10,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
				* sizeof(struct adw_carrier),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->carrier_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccb carrier structures */
	if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
			     BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
			adw->carriers,
			(adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			 * sizeof(struct adw_carrier),
			adwmapmem, &adw->carrier_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			     * sizeof(struct adw_carrier));

	/* Setup our free carrier list */
	adw->free_carriers = adw->carriers;
	for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
		adw->carriers[i].carr_offset =
			carriervtobo(adw, &adw->carriers[i]);
		adw->carriers[i].carr_ba =
			carriervtob(adw, &adw->carriers[i]);
		adw->carriers[i].areq_ba = 0;
		adw->carriers[i].next_ba =
			carriervtobo(adw, &adw->carriers[i+1]);
	}
	/* Terminal carrier.  Never leaves the freelist */
	adw->carriers[i].carr_offset =
		carriervtobo(adw, &adw->carriers[i]);
	adw->carriers[i].carr_ba =
		carriervtob(adw, &adw->carriers[i]);
	adw->carriers[i].areq_ba = 0;
	adw->carriers[i].next_ba = ~0;

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       adw->max_acbs * sizeof(struct acb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->acb_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
		return (ENOMEM);

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

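	/*
	 * Each PAGE_SIZE chunk allocated from the tag below is later
	 * carved into per-ACB scatter/gather block chains by
	 * adwallocacbs().
	 */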
	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->sg_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocate our first batch of ccbs */
	if (adwallocacbs(adw) == 0)
		return (ENOMEM);

	if (adw_init_chip(adw, scsicfg1) != 0)
		return (ENXIO);

	kprintf("Queue Depth %d\n", adw->max_acbs);

	return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
	struct ccb_setasync csa;
	int error;

	error = 0;
	crit_enter();
	/* Hook up our interrupt handler */
	if ((error = bus_setup_intr(adw->device, adw->irq, 0,
				    adw_intr, adw, &adw->ih, NULL)) != 0) {
		device_printf(adw->device, "bus_setup_intr() failed: %d\n",
			      error);
		goto fail;
	}

	/* Start the Risc processor now that we are fully configured. */
	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

	/*
	 * Construct our SIM entry.
	 */
	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit,
				 1, adw->max_acbs, NULL);
	if (adw->sim == NULL) {
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Register the bus.
	 */
	if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adw->sim);
		error = ENOMEM;
		goto fail;
	}

	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = adw_async;
		csa.callback_arg = adw;
		xpt_action((union ccb *)&csa);
	}

fail:
	crit_exit();
	return (error);
}

void
adw_intr(void *arg)
{
	struct	adw_softc *adw;
	u_int	int_stat;

	adw = (struct adw_softc *)arg;
	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		u_int intrb_code;

		/* Async Microcode Event */
		intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
		switch (intrb_code) {
		case ADW_ASYNC_CARRIER_READY_FAILURE:
			/*
			 * The RISC missed our update of
			 * the commandq.
			 */
			if (LIST_FIRST(&adw->pending_ccbs) != NULL)
				adw_tickle_risc(adw, ADW_TICKLE_A);
			break;
		case ADW_ASYNC_SCSI_BUS_RESET_DET:
			/*
			 * The firmware detected a SCSI Bus reset.
			 */
			kprintf("Someone Reset the Bus\n");
			adw_handle_bus_reset(adw, /*initiated*/FALSE);
			break;
		case ADW_ASYNC_RDMA_FAILURE:
			/*
			 * Handle RDMA failure by resetting the
			 * SCSI Bus and chip.
			 */
#if XXX
			AdvResetChipAndSB(adv_dvc_varp);
#endif
			break;

		case ADW_ASYNC_HOST_SCSI_BUS_RESET:
			/*
			 * Host generated SCSI bus reset occurred.
			 */
1437 */ 1438 adw_handle_bus_reset(adw, /*initiated*/TRUE); 1439 break; 1440 default: 1441 kprintf("adw_intr: unknown async code 0x%x\n", 1442 intrb_code); 1443 break; 1444 } 1445 } 1446 1447 /* 1448 * Run down the RequestQ. 1449 */ 1450 while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) { 1451 struct adw_carrier *free_carrier; 1452 struct acb *acb; 1453 union ccb *ccb; 1454 1455 #if 0 1456 kprintf("0x%x, 0x%x, 0x%x, 0x%x\n", 1457 adw->responseq->carr_offset, 1458 adw->responseq->carr_ba, 1459 adw->responseq->areq_ba, 1460 adw->responseq->next_ba); 1461 #endif 1462 /* 1463 * The firmware copies the adw_scsi_req_q.acb_baddr 1464 * field into the areq_ba field of the carrier. 1465 */ 1466 acb = acbbotov(adw, adw->responseq->areq_ba); 1467 1468 /* 1469 * The least significant four bits of the next_ba 1470 * field are used as flags. Mask them out and then 1471 * advance through the list. 1472 */ 1473 free_carrier = adw->responseq; 1474 adw->responseq = 1475 carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK); 1476 free_carrier->next_ba = adw->free_carriers->carr_offset; 1477 adw->free_carriers = free_carrier; 1478 1479 /* Process CCB */ 1480 ccb = acb->ccb; 1481 callout_stop(&ccb->ccb_h.timeout_ch); 1482 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1483 bus_dmasync_op_t op; 1484 1485 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1486 op = BUS_DMASYNC_POSTREAD; 1487 else 1488 op = BUS_DMASYNC_POSTWRITE; 1489 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); 1490 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); 1491 ccb->csio.resid = acb->queue.data_cnt; 1492 } else 1493 ccb->csio.resid = 0; 1494 1495 /* Common Cases inline... */ 1496 if (acb->queue.host_status == QHSTA_NO_ERROR 1497 && (acb->queue.done_status == QD_NO_ERROR 1498 || acb->queue.done_status == QD_WITH_ERROR)) { 1499 ccb->csio.scsi_status = acb->queue.scsi_status; 1500 ccb->ccb_h.status = 0; 1501 switch (ccb->csio.scsi_status) { 1502 case SCSI_STATUS_OK: 1503 ccb->ccb_h.status |= CAM_REQ_CMP; 1504 break; 1505 case SCSI_STATUS_CHECK_COND: 1506 case SCSI_STATUS_CMD_TERMINATED: 1507 bcopy(&acb->sense_data, &ccb->csio.sense_data, 1508 ccb->csio.sense_len); 1509 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1510 ccb->csio.sense_resid = acb->queue.sense_len; 1511 /* FALLTHROUGH */ 1512 default: 1513 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR 1514 | CAM_DEV_QFRZN; 1515 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1516 break; 1517 } 1518 adwfreeacb(adw, acb); 1519 xpt_done(ccb); 1520 } else { 1521 adwprocesserror(adw, acb); 1522 } 1523 } 1524 } 1525 1526 static void 1527 adwprocesserror(struct adw_softc *adw, struct acb *acb) 1528 { 1529 union ccb *ccb; 1530 1531 ccb = acb->ccb; 1532 if (acb->queue.done_status == QD_ABORTED_BY_HOST) { 1533 ccb->ccb_h.status = CAM_REQ_ABORTED; 1534 } else { 1535 1536 switch (acb->queue.host_status) { 1537 case QHSTA_M_SEL_TIMEOUT: 1538 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 1539 break; 1540 case QHSTA_M_SXFR_OFF_UFLW: 1541 case QHSTA_M_SXFR_OFF_OFLW: 1542 case QHSTA_M_DATA_OVER_RUN: 1543 ccb->ccb_h.status = CAM_DATA_RUN_ERR; 1544 break; 1545 case QHSTA_M_SXFR_DESELECTED: 1546 case QHSTA_M_UNEXPECTED_BUS_FREE: 1547 ccb->ccb_h.status = CAM_UNEXP_BUSFREE; 1548 break; 1549 case QHSTA_M_SCSI_BUS_RESET: 1550 case QHSTA_M_SCSI_BUS_RESET_UNSOL: 1551 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 1552 break; 1553 case QHSTA_M_BUS_DEVICE_RESET: 1554 ccb->ccb_h.status = CAM_BDR_SENT; 1555 break; 1556 case QHSTA_M_QUEUE_ABORTED: 1557 /* BDR or Bus Reset */ 1558 kprintf("Saw Queue Aborted\n"); 1559 
			ccb->ccb_h.status = adw->last_reset;
			break;
		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			ccb->ccb_h.status = CAM_UNCOR_PARITY;
			break;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
		{
			/* The SCSI bus hung in a phase */
			xpt_print_path(adw->path);
			kprintf("Watch Dog timer expired.  Resetting bus\n");
			adw_reset_bus(adw);
			break;
		}
		case QHSTA_M_SXFR_XFR_PH_ERR:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_M_INVALID_DEVICE:
			ccb->ccb_h.status = CAM_PATH_INVALID;
			break;
		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		default:
			panic("%s: Unhandled Host status error %x",
			      adw_name(adw), acb->queue.host_status);
			/* NOTREACHED */
		}
	}
	if ((acb->state & ACB_RECOVERY_ACB) != 0) {
		if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
		 || ccb->ccb_h.status == CAM_BDR_SENT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adwfreeacb(adw, acb);
	xpt_done(ccb);
}

static void
adwtimeout(void *arg)
{
	struct acb *acb;
	union  ccb *ccb;
	struct adw_softc *adw;
	adw_idle_cmd_status_t status;
	int target_id;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
	xpt_print_path(ccb->ccb_h.path);
	kprintf("ACB %p - timed out\n", (void *)acb);

	crit_enter();

	if ((acb->state & ACB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		kprintf("ACB %p - timed out CCB already completed\n",
		       (void *)acb);
		crit_exit();
		return;
	}

	acb->state |= ACB_RECOVERY_ACB;
	target_id = ccb->ccb_h.target_id;

	/* Attempt a BDR first */
	status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
				   ccb->ccb_h.target_id);
	crit_exit();
	if (status == ADW_IDLE_CMD_SUCCESS) {
		kprintf("%s: BDR Delivered.  No longer in timeout\n",
		       adw_name(adw));
		adw_handle_device_reset(adw, target_id);
	} else {
		adw_reset_bus(adw);
		xpt_print_path(adw->path);
		kprintf("Bus Reset Delivered.  No longer in timeout\n");
	}
}

static void
adw_handle_device_reset(struct adw_softc *adw, u_int target)
{
	struct cam_path *path;
	cam_status error;

	error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
				target, CAM_LUN_WILDCARD);

	if (error == CAM_REQ_CMP) {
		xpt_async(AC_SENT_BDR, path, NULL);
		xpt_free_path(path);
	}
	adw->last_reset = CAM_BDR_SENT;
}

static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
	if (initiated) {
		/*
		 * The microcode currently sets the SCSI Bus Reset signal
		 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
		 * command above.
		 * But the SCSI Bus Reset Hold Time in the
		 * microcode is not deterministic (it may in fact be for less
		 * than the SCSI Spec. minimum of 25 us).  Therefore on return
		 * the Adv Library sets the SCSI Bus Reset signal for
		 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
		 * than 25 us.
		 */
		u_int scsi_ctrl;

		scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
		DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

		/*
		 * We will perform the async notification when the
		 * SCSI Reset interrupt occurs.
		 */
	} else
		xpt_async(AC_BUS_RESET, adw->path, NULL);
	adw->last_reset = CAM_SCSI_BUS_RESET;
}