1 /* 2 ********************************************************************* 3 * FILE NAME : amd.c 4 * BY : C.L. Huang (ching@tekram.com.tw) 5 * Erich Chen (erich@tekram.com.tw) 6 * Description: Device Driver for the amd53c974 PCI Bus Master 7 * SCSI Host adapter found on cards such as 8 * the Tekram DC-390(T). 9 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. The name of the author may not be used to endorse or promote products 20 * derived from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 ********************************************************************* 33 * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $ 34 */ 35 36 /* 37 ********************************************************************* 38 * HISTORY: 39 * 40 * REV# DATE NAME DESCRIPTION 41 * 1.00 07/02/96 CLH First release for RELEASE-2.1.0 42 * 1.01 08/20/96 CLH Update for RELEASE-2.1.5 43 * 1.02 11/06/96 CLH Fixed more than 1 LUN scanning 44 * 1.03 12/20/96 CLH Modify to support 2.2-ALPHA 45 * 1.04 12/26/97 CLH Modify to support RELEASE-2.2.5 46 * 1.05 01/01/99 ERICH CHEN Modify to support RELEASE-3.0.x (CAM) 47 ********************************************************************* 48 */ 49 50 /* #define AMD_DEBUG0 */ 51 /* #define AMD_DEBUG_SCSI_PHASE */ 52 53 #include <sys/param.h> 54 #include <sys/systm.h> 55 #include <sys/kernel.h> 56 #include <sys/malloc.h> 57 #include <sys/queue.h> 58 #include <sys/buf.h> 59 #include <sys/bus.h> 60 #include <sys/rman.h> 61 #include <sys/thread2.h> 62 63 #include <vm/vm.h> 64 #include <vm/pmap.h> 65 66 #include <machine/clock.h> 67 68 #include <bus/cam/cam.h> 69 #include <bus/cam/cam_ccb.h> 70 #include <bus/cam/cam_sim.h> 71 #include <bus/cam/cam_xpt_sim.h> 72 #include <bus/cam/cam_debug.h> 73 74 #include <bus/cam/scsi/scsi_all.h> 75 #include <bus/cam/scsi/scsi_message.h> 76 77 #include <bus/pci/pcivar.h> 78 #include <bus/pci/pcireg.h> 79 #include "amd.h" 80 81 #define PCI_DEVICE_ID_AMD53C974 0x20201022ul 82 #define PCI_BASE_ADDR0 0x10 83 84 typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int); 85 typedef phase_handler_t *phase_handler_func_t; 86 87 static void amd_intr(void *vamd); 88 static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB); 89 static phase_handler_t amd_NopPhase; 90 91 static phase_handler_t amd_DataOutPhase0; 92 static phase_handler_t amd_DataInPhase0; 93 #define amd_CommandPhase0 amd_NopPhase 94 static phase_handler_t amd_StatusPhase0; 95 static phase_handler_t amd_MsgOutPhase0; 96 static phase_handler_t amd_MsgInPhase0; 97 static phase_handler_t amd_DataOutPhase1; 98 static phase_handler_t amd_DataInPhase1; 99 static phase_handler_t amd_CommandPhase1; 100 static phase_handler_t amd_StatusPhase1; 101 static phase_handler_t amd_MsgOutPhase1; 102 static phase_handler_t amd_MsgInPhase1; 103 104 static void amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb); 105 static int amdparsemsg(struct amd_softc *amd); 106 static int amdhandlemsgreject(struct amd_softc *amd); 107 static void amdconstructsdtr(struct amd_softc *amd, 108 u_int period, u_int offset); 109 static u_int amdfindclockrate(struct amd_softc *amd, u_int *period); 110 static int amdsentmsg(struct amd_softc *amd, u_int msgtype, int full); 111 112 static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir); 113 static void amd_Disconnect(struct amd_softc *amd); 114 static void amd_Reselect(struct amd_softc *amd); 115 static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB); 116 static void amd_ScsiRstDetect(struct amd_softc *amd); 117 static void amd_ResetSCSIBus(struct amd_softc *amd); 118 static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB); 119 static void amd_InvalidCmd(struct amd_softc *amd); 120 121 #if 0 122 static void amd_timeout(void *arg1); 123 static void amd_reset(struct amd_softc *amd); 124 #endif 125 static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt); 126 127 void amd_linkSRB(struct amd_softc *amd); 128 static int amd_init(device_t); 129 static void 
amd_load_defaults(struct amd_softc *amd); 130 static void amd_load_eeprom_or_defaults(struct amd_softc *amd); 131 static int amd_EEpromInDO(struct amd_softc *amd); 132 static u_int16_t EEpromGetData1(struct amd_softc *amd); 133 static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval); 134 static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry); 135 static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd); 136 static void amd_ReadEEprom(struct amd_softc *amd); 137 138 static int amd_probe(device_t); 139 static int amd_attach(device_t); 140 static void amdcompletematch(struct amd_softc *amd, target_id_t target, 141 lun_id_t lun, u_int tag, struct srb_queue *queue, 142 cam_status status); 143 static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate, 144 u_int period, u_int offset, u_int type); 145 static void amdsettags(struct amd_softc *amd, u_int target, int tagenb); 146 147 static __inline void amd_clear_msg_state(struct amd_softc *amd); 148 149 static __inline void 150 amd_clear_msg_state(struct amd_softc *amd) 151 { 152 amd->msgout_len = 0; 153 amd->msgout_index = 0; 154 amd->msgin_index = 0; 155 } 156 157 /* CAM SIM entry points */ 158 #define ccb_srb_ptr spriv_ptr0 159 #define ccb_amd_ptr spriv_ptr1 160 static void amd_action(struct cam_sim *sim, union ccb *ccb); 161 static void amd_poll(struct cam_sim *sim); 162 163 /* 164 * State engine function tables indexed by SCSI phase number 165 */ 166 phase_handler_func_t amd_SCSI_phase0[] = { 167 amd_DataOutPhase0, 168 amd_DataInPhase0, 169 amd_CommandPhase0, 170 amd_StatusPhase0, 171 amd_NopPhase, 172 amd_NopPhase, 173 amd_MsgOutPhase0, 174 amd_MsgInPhase0 175 }; 176 177 phase_handler_func_t amd_SCSI_phase1[] = { 178 amd_DataOutPhase1, 179 amd_DataInPhase1, 180 amd_CommandPhase1, 181 amd_StatusPhase1, 182 amd_NopPhase, 183 amd_NopPhase, 184 amd_MsgOutPhase1, 185 amd_MsgInPhase1 186 }; 187 188 /* 189 * EEProm/BIOS negotiation periods 190 */ 191 u_int8_t eeprom_period[] = { 192 25, /* 10.0MHz */ 193 32, /* 8.0MHz */ 194 38, /* 6.6MHz */ 195 44, /* 5.7MHz */ 196 50, /* 5.0MHz */ 197 63, /* 4.0MHz */ 198 83, /* 3.0MHz */ 199 125 /* 2.0MHz */ 200 }; 201 202 /* 203 * chip clock setting to SCSI specified sync parameter table. 204 */ 205 u_int8_t tinfo_sync_period[] = { 206 25, /* 10.0 */ 207 32, /* 8.0 */ 208 38, /* 6.6 */ 209 44, /* 5.7 */ 210 50, /* 5.0 */ 211 57, /* 4.4 */ 212 63, /* 4.0 */ 213 70, /* 3.6 */ 214 76, /* 3.3 */ 215 83 /* 3.0 */ 216 }; 217 218 static __inline struct amd_srb * 219 amdgetsrb(struct amd_softc * amd) 220 { 221 struct amd_srb * pSRB; 222 223 crit_enter(); 224 pSRB = TAILQ_FIRST(&amd->free_srbs); 225 if (pSRB) 226 TAILQ_REMOVE(&amd->free_srbs, pSRB, links); 227 crit_exit(); 228 return (pSRB); 229 } 230 231 static void 232 amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb) 233 { 234 struct scsi_request_sense sense_cmd; 235 struct ccb_scsiio *csio; 236 u_int8_t *cdb; 237 u_int cdb_len; 238 239 csio = &srb->pccb->csio; 240 241 if (srb->SRBFlag & AUTO_REQSENSE) { 242 sense_cmd.opcode = REQUEST_SENSE; 243 sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5; 244 sense_cmd.unused[0] = 0; 245 sense_cmd.unused[1] = 0; 246 sense_cmd.length = csio->sense_len; 247 sense_cmd.control = 0; 248 cdb = &sense_cmd.opcode; 249 cdb_len = sizeof(sense_cmd); 250 } else { 251 cdb = &srb->CmdBlock[0]; 252 cdb_len = srb->ScsiCmdLen; 253 } 254 amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len); 255 } 256 257 /* 258 * Attempt to start a waiting transaction. 
Interrupts must be disabled 259 * upon entry to this function. 260 */ 261 static void 262 amdrunwaiting(struct amd_softc *amd) { 263 struct amd_srb *srb; 264 265 if (amd->last_phase != SCSI_BUS_FREE) 266 return; 267 268 srb = TAILQ_FIRST(&amd->waiting_srbs); 269 if (srb == NULL) 270 return; 271 272 if (amdstart(amd, srb) == 0) { 273 TAILQ_REMOVE(&amd->waiting_srbs, srb, links); 274 TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links); 275 } 276 } 277 278 static void 279 amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 280 { 281 struct amd_srb *srb; 282 union ccb *ccb; 283 struct amd_softc *amd; 284 285 srb = (struct amd_srb *)arg; 286 ccb = srb->pccb; 287 amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr; 288 289 if (error != 0) { 290 if (error != EFBIG) 291 kprintf("amd%d: Unexpected error 0x%x returned from " 292 "bus_dmamap_load\n", amd->unit, error); 293 if (ccb->ccb_h.status == CAM_REQ_INPROG) { 294 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 295 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; 296 } 297 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links); 298 xpt_done(ccb); 299 return; 300 } 301 302 if (nseg != 0) { 303 struct amd_sg *sg; 304 bus_dma_segment_t *end_seg; 305 bus_dmasync_op_t op; 306 307 end_seg = dm_segs + nseg; 308 309 /* Copy the segments into our SG list */ 310 srb->pSGlist = &srb->SGsegment[0]; 311 sg = srb->pSGlist; 312 while (dm_segs < end_seg) { 313 sg->SGXLen = dm_segs->ds_len; 314 sg->SGXPtr = dm_segs->ds_addr; 315 sg++; 316 dm_segs++; 317 } 318 319 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 320 op = BUS_DMASYNC_PREREAD; 321 else 322 op = BUS_DMASYNC_PREWRITE; 323 324 bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op); 325 326 } 327 srb->SGcount = nseg; 328 srb->SGIndex = 0; 329 srb->AdaptStatus = 0; 330 srb->TargetStatus = 0; 331 srb->MsgCnt = 0; 332 srb->SRBStatus = 0; 333 srb->SRBFlag = 0; 334 srb->SRBState = 0; 335 srb->TotalXferredLen = 0; 336 srb->SGPhysAddr = 0; 337 srb->SGToBeXferLen = 0; 338 srb->EndMessage = 0; 339 340 crit_enter(); 341 342 /* 343 * Last time we need to check if this CCB needs to 344 * be aborted. 345 */ 346 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 347 if (nseg != 0) 348 bus_dmamap_unload(amd->buffer_dmat, srb->dmamap); 349 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links); 350 xpt_done(ccb); 351 crit_exit(); 352 return; 353 } 354 ccb->ccb_h.status |= CAM_SIM_QUEUED; 355 #if 0 356 /* XXX Need a timeout handler */ 357 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000, 358 amdtimeout, srb); 359 #endif 360 TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links); 361 amdrunwaiting(amd); 362 crit_exit(); 363 } 364 365 static void 366 amd_action(struct cam_sim * psim, union ccb * pccb) 367 { 368 struct amd_softc * amd; 369 u_int target_id; 370 371 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n")); 372 373 amd = (struct amd_softc *) cam_sim_softc(psim); 374 target_id = pccb->ccb_h.target_id; 375 376 switch (pccb->ccb_h.func_code) { 377 case XPT_SCSI_IO: 378 { 379 struct amd_srb * pSRB; 380 struct ccb_scsiio *pcsio; 381 382 pcsio = &pccb->csio; 383 384 /* 385 * Assign an SRB and connect it with this ccb. 
386 */ 387 pSRB = amdgetsrb(amd); 388 389 if (!pSRB) { 390 /* Freeze SIMQ */ 391 pccb->ccb_h.status = CAM_RESRC_UNAVAIL; 392 xpt_done(pccb); 393 return; 394 } 395 pSRB->pccb = pccb; 396 pccb->ccb_h.ccb_srb_ptr = pSRB; 397 pccb->ccb_h.ccb_amd_ptr = amd; 398 pSRB->ScsiCmdLen = pcsio->cdb_len; 399 bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len); 400 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 401 if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 402 /* 403 * We've been given a pointer 404 * to a single buffer. 405 */ 406 if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 407 int error; 408 409 crit_enter(); 410 error = 411 bus_dmamap_load(amd->buffer_dmat, 412 pSRB->dmamap, 413 pcsio->data_ptr, 414 pcsio->dxfer_len, 415 amdexecutesrb, 416 pSRB, /*flags*/0); 417 if (error == EINPROGRESS) { 418 /* 419 * So as to maintain 420 * ordering, freeze the 421 * controller queue 422 * until our mapping is 423 * returned. 424 */ 425 xpt_freeze_simq(amd->psim, 1); 426 pccb->ccb_h.status |= 427 CAM_RELEASE_SIMQ; 428 } 429 crit_exit(); 430 } else { 431 struct bus_dma_segment seg; 432 433 /* Pointer to physical buffer */ 434 seg.ds_addr = 435 (bus_addr_t)pcsio->data_ptr; 436 seg.ds_len = pcsio->dxfer_len; 437 amdexecutesrb(pSRB, &seg, 1, 0); 438 } 439 } else { 440 struct bus_dma_segment *segs; 441 442 if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 443 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { 444 TAILQ_INSERT_HEAD(&amd->free_srbs, 445 pSRB, links); 446 pccb->ccb_h.status = CAM_PROVIDE_FAIL; 447 xpt_done(pccb); 448 return; 449 } 450 451 /* Just use the segments provided */ 452 segs = 453 (struct bus_dma_segment *)pcsio->data_ptr; 454 amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0); 455 } 456 } else 457 amdexecutesrb(pSRB, NULL, 0, 0); 458 break; 459 } 460 case XPT_PATH_INQ: 461 { 462 struct ccb_pathinq *cpi = &pccb->cpi; 463 464 cpi->version_num = 1; 465 cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; 466 cpi->target_sprt = 0; 467 cpi->hba_misc = 0; 468 cpi->hba_eng_cnt = 0; 469 cpi->max_target = 7; 470 cpi->max_lun = amd->max_lun; /* 7 or 0 */ 471 cpi->initiator_id = amd->AdaptSCSIID; 472 cpi->bus_id = cam_sim_bus(psim); 473 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 474 strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN); 475 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); 476 cpi->unit_number = cam_sim_unit(psim); 477 cpi->transport = XPORT_SPI; 478 cpi->transport_version = 2; 479 cpi->protocol = PROTO_SCSI; 480 cpi->protocol_version = SCSI_REV_2; 481 cpi->ccb_h.status = CAM_REQ_CMP; 482 xpt_done(pccb); 483 break; 484 } 485 case XPT_ABORT: 486 pccb->ccb_h.status = CAM_REQ_INVALID; 487 xpt_done(pccb); 488 break; 489 case XPT_RESET_BUS: 490 { 491 492 int i; 493 494 amd_ResetSCSIBus(amd); 495 amd->ACBFlag = 0; 496 497 for (i = 0; i < 500; i++) { 498 DELAY(1000); /* Wait until our interrupt 499 * handler sees it */ 500 } 501 502 pccb->ccb_h.status = CAM_REQ_CMP; 503 xpt_done(pccb); 504 break; 505 } 506 case XPT_RESET_DEV: 507 pccb->ccb_h.status = CAM_REQ_INVALID; 508 xpt_done(pccb); 509 break; 510 case XPT_TERM_IO: 511 pccb->ccb_h.status = CAM_REQ_INVALID; 512 xpt_done(pccb); 513 break; 514 case XPT_GET_TRAN_SETTINGS: 515 { 516 struct ccb_trans_settings *cts = &pccb->cts; 517 struct amd_target_info *targ_info = &amd->tinfo[target_id]; 518 struct amd_transinfo *tinfo; 519 struct ccb_trans_settings_scsi *scsi = 520 &cts->proto_specific.scsi; 521 struct ccb_trans_settings_spi *spi = 522 &cts->xport_specific.spi; 523 524 cts->protocol = PROTO_SCSI; 525 cts->protocol_version = 
SCSI_REV_2; 526 cts->transport = XPORT_SPI; 527 cts->transport_version = 2; 528 529 crit_enter(); 530 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 531 /* current transfer settings */ 532 if (targ_info->disc_tag & AMD_CUR_DISCENB) { 533 spi->flags = CTS_SPI_FLAGS_DISC_ENB; 534 } else { 535 spi->flags = 0; 536 } 537 if (targ_info->disc_tag & AMD_CUR_TAGENB) { 538 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 539 } else { 540 scsi->flags = 0; 541 } 542 tinfo = &targ_info->current; 543 } else { 544 /* default(user) transfer settings */ 545 if (targ_info->disc_tag & AMD_USR_DISCENB) { 546 spi->flags = CTS_SPI_FLAGS_DISC_ENB; 547 } else { 548 spi->flags = 0; 549 } 550 if (targ_info->disc_tag & AMD_USR_TAGENB) { 551 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 552 } else { 553 scsi->flags = 0; 554 } 555 tinfo = &targ_info->user; 556 } 557 spi->sync_period = tinfo->period; 558 spi->sync_offset = tinfo->offset; 559 crit_exit(); 560 561 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 562 spi->valid = CTS_SPI_VALID_SYNC_RATE 563 | CTS_SPI_VALID_SYNC_OFFSET 564 | CTS_SPI_VALID_BUS_WIDTH 565 | CTS_SPI_VALID_DISC; 566 scsi->valid = CTS_SCSI_VALID_TQ; 567 pccb->ccb_h.status = CAM_REQ_CMP; 568 xpt_done(pccb); 569 break; 570 } 571 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 572 #define IS_USER_SETTINGS(c) (c->type == CTS_TYPE_USER_SETTINGS) 573 case XPT_SET_TRAN_SETTINGS: 574 { 575 struct ccb_trans_settings *cts = &pccb->cts; 576 struct amd_target_info *targ_info; 577 u_int update_type = 0; 578 int last_entry; 579 580 struct ccb_trans_settings_scsi *scsi = 581 &cts->proto_specific.scsi; 582 struct ccb_trans_settings_spi *spi = 583 &cts->xport_specific.spi; 584 if (IS_CURRENT_SETTINGS(cts)) { 585 update_type |= AMD_TRANS_GOAL; 586 } else if (IS_USER_SETTINGS(cts)) { 587 update_type |= AMD_TRANS_USER; 588 } 589 if (update_type == 0 590 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) { 591 cts->ccb_h.status = CAM_REQ_INVALID; 592 xpt_done(pccb); 593 } 594 595 crit_enter(); 596 targ_info = &amd->tinfo[target_id]; 597 598 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 599 if (update_type & AMD_TRANS_GOAL) { 600 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) 601 != 0) { 602 targ_info->disc_tag |= AMD_CUR_DISCENB; 603 } else { 604 targ_info->disc_tag &= ~AMD_CUR_DISCENB; 605 } 606 } 607 if (update_type & AMD_TRANS_USER) { 608 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) 609 != 0) { 610 targ_info->disc_tag |= AMD_USR_DISCENB; 611 } else { 612 targ_info->disc_tag &= ~AMD_USR_DISCENB; 613 } 614 } 615 } 616 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 617 if (update_type & AMD_TRANS_GOAL) { 618 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) 619 != 0) { 620 targ_info->disc_tag |= AMD_CUR_TAGENB; 621 } else { 622 targ_info->disc_tag &= ~AMD_CUR_TAGENB; 623 } 624 } 625 if (update_type & AMD_TRANS_USER) { 626 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) 627 != 0) { 628 targ_info->disc_tag |= AMD_USR_TAGENB; 629 } else { 630 targ_info->disc_tag &= ~AMD_USR_TAGENB; 631 } 632 } 633 } 634 635 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) { 636 if (update_type & AMD_TRANS_GOAL) 637 spi->sync_offset = targ_info->goal.offset; 638 else 639 spi->sync_offset = targ_info->user.offset; 640 } 641 642 if (spi->sync_offset > AMD_MAX_SYNC_OFFSET) 643 spi->sync_offset = AMD_MAX_SYNC_OFFSET; 644 645 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) { 646 if (update_type & AMD_TRANS_GOAL) 647 spi->sync_period = targ_info->goal.period; 648 else 649 spi->sync_period = targ_info->user.period; 650 } 651 652 last_entry = sizeof(tinfo_sync_period) 
- 1; 653 if ((spi->sync_period != 0) 654 && (spi->sync_period < tinfo_sync_period[0])) 655 spi->sync_period = tinfo_sync_period[0]; 656 if (spi->sync_period > tinfo_sync_period[last_entry]) 657 spi->sync_period = 0; 658 if (spi->sync_offset == 0) 659 spi->sync_period = 0; 660 661 if ((update_type & AMD_TRANS_USER) != 0) { 662 targ_info->user.period = spi->sync_period; 663 targ_info->user.offset = spi->sync_offset; 664 } 665 if ((update_type & AMD_TRANS_GOAL) != 0) { 666 targ_info->goal.period = spi->sync_period; 667 targ_info->goal.offset = spi->sync_offset; 668 } 669 crit_exit(); 670 pccb->ccb_h.status = CAM_REQ_CMP; 671 xpt_done(pccb); 672 break; 673 } 674 case XPT_CALC_GEOMETRY: 675 { 676 struct ccb_calc_geometry *ccg; 677 u_int32_t size_mb; 678 u_int32_t secs_per_cylinder; 679 int extended; 680 681 ccg = &pccb->ccg; 682 size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size); 683 extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0; 684 685 if (size_mb > 1024 && extended) { 686 ccg->heads = 255; 687 ccg->secs_per_track = 63; 688 } else { 689 ccg->heads = 64; 690 ccg->secs_per_track = 32; 691 } 692 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 693 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 694 pccb->ccb_h.status = CAM_REQ_CMP; 695 xpt_done(pccb); 696 break; 697 } 698 default: 699 pccb->ccb_h.status = CAM_REQ_INVALID; 700 xpt_done(pccb); 701 break; 702 } 703 } 704 705 static void 706 amd_poll(struct cam_sim * psim) 707 { 708 amd_intr(cam_sim_softc(psim)); 709 } 710 711 static u_int8_t * 712 phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt) 713 { 714 intptr_t dataPtr; 715 struct ccb_scsiio *pcsio; 716 u_int8_t i; 717 struct amd_sg * pseg; 718 719 dataPtr = 0; 720 pcsio = &pSRB->pccb->csio; 721 722 dataPtr = (intptr_t) pcsio->data_ptr; 723 pseg = pSRB->SGsegment; 724 for (i = 0; i < pSRB->SGIndex; i++) { 725 dataPtr += (int) pseg->SGXLen; 726 pseg++; 727 } 728 dataPtr += (int) xferCnt; 729 return ((u_int8_t *) dataPtr); 730 } 731 732 static void 733 ResetDevParam(struct amd_softc * amd) 734 { 735 u_int target; 736 737 for (target = 0; target <= amd->max_id; target++) { 738 if (amd->AdaptSCSIID != target) { 739 amdsetsync(amd, target, /*clockrate*/0, 740 /*period*/0, /*offset*/0, AMD_TRANS_CUR); 741 } 742 } 743 } 744 745 static void 746 amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun, 747 u_int tag, struct srb_queue *queue, cam_status status) 748 { 749 struct amd_srb *srb; 750 struct amd_srb *next_srb; 751 752 for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) { 753 union ccb *ccb; 754 755 next_srb = TAILQ_NEXT(srb, links); 756 if (srb->pccb->ccb_h.target_id != target 757 && target != CAM_TARGET_WILDCARD) 758 continue; 759 760 if (srb->pccb->ccb_h.target_lun != lun 761 && lun != CAM_LUN_WILDCARD) 762 continue; 763 764 if (srb->TagNumber != tag 765 && tag != AMD_TAG_WILDCARD) 766 continue; 767 768 ccb = srb->pccb; 769 TAILQ_REMOVE(queue, srb, links); 770 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links); 771 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0 772 && (status & CAM_DEV_QFRZN) != 0) 773 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 774 ccb->ccb_h.status = status; 775 xpt_done(ccb); 776 } 777 778 } 779 780 static void 781 amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate, 782 u_int period, u_int offset, u_int type) 783 { 784 struct amd_target_info *tinfo; 785 u_int old_period; 786 u_int old_offset; 787 788 tinfo = &amd->tinfo[target]; 789 old_period = tinfo->current.period; 790 old_offset = 
tinfo->current.offset; 791 if ((type & AMD_TRANS_CUR) != 0 792 && (old_period != period || old_offset != offset)) { 793 struct cam_path *path; 794 795 tinfo->current.period = period; 796 tinfo->current.offset = offset; 797 tinfo->sync_period_reg = clockrate; 798 tinfo->sync_offset_reg = offset; 799 tinfo->CtrlR3 &= ~FAST_SCSI; 800 tinfo->CtrlR4 &= ~EATER_25NS; 801 if (clockrate > 7) 802 tinfo->CtrlR4 |= EATER_25NS; 803 else 804 tinfo->CtrlR3 |= FAST_SCSI; 805 806 if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) { 807 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg); 808 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg); 809 amd_write8(amd, CNTLREG3, tinfo->CtrlR3); 810 amd_write8(amd, CNTLREG4, tinfo->CtrlR4); 811 } 812 /* If possible, update the XPT's notion of our transfer rate */ 813 if (xpt_create_path(&path, /*periph*/NULL, 814 cam_sim_path(amd->psim), target, 815 CAM_LUN_WILDCARD) == CAM_REQ_CMP) { 816 struct ccb_trans_settings neg; 817 struct ccb_trans_settings_spi *spi = 818 &neg.xport_specific.spi; 819 xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1); 820 memset(&neg, 0, sizeof (neg)); 821 spi->sync_period = period; 822 spi->sync_offset = offset; 823 spi->valid = CTS_SPI_VALID_SYNC_RATE 824 | CTS_SPI_VALID_SYNC_OFFSET; 825 xpt_async(AC_TRANSFER_NEG, path, &neg); 826 xpt_free_path(path); 827 } 828 } 829 if ((type & AMD_TRANS_GOAL) != 0) { 830 tinfo->goal.period = period; 831 tinfo->goal.offset = offset; 832 } 833 834 if ((type & AMD_TRANS_USER) != 0) { 835 tinfo->user.period = period; 836 tinfo->user.offset = offset; 837 } 838 } 839 840 static void 841 amdsettags(struct amd_softc *amd, u_int target, int tagenb) 842 { 843 panic("Implement me!"); 844 } 845 846 847 #if 0 848 /* 849 ********************************************************************** 850 * Function : amd_reset (struct amd_softc * amd) 851 * Purpose : perform a hard reset on the SCSI bus( and AMD chip). 
852 * Inputs : cmd - command which caused the SCSI RESET 853 ********************************************************************** 854 */ 855 static void 856 amd_reset(struct amd_softc * amd) 857 { 858 u_int8_t bval; 859 u_int16_t i; 860 861 862 #ifdef AMD_DEBUG0 863 kprintf("DC390: RESET"); 864 #endif 865 866 crit_enter(); 867 bval = amd_read8(amd, CNTLREG1); 868 bval |= DIS_INT_ON_SCSI_RST; 869 amd_write8(amd, CNTLREG1, bval); /* disable interrupt */ 870 amd_ResetSCSIBus(amd); 871 872 for (i = 0; i < 500; i++) { 873 DELAY(1000); 874 } 875 876 bval = amd_read8(amd, CNTLREG1); 877 bval &= ~DIS_INT_ON_SCSI_RST; 878 amd_write8(amd, CNTLREG1, bval); /* re-enable interrupt */ 879 880 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD); 881 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 882 883 ResetDevParam(amd); 884 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, 885 AMD_TAG_WILDCARD, &amd->running_srbs, 886 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET); 887 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, 888 AMD_TAG_WILDCARD, &amd->waiting_srbs, 889 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET); 890 amd->active_srb = NULL; 891 amd->ACBFlag = 0; 892 crit_exit(); 893 return; 894 } 895 896 void 897 amd_timeout(void *arg1) 898 { 899 struct amd_srb * pSRB; 900 901 pSRB = (struct amd_srb *) arg1; 902 } 903 #endif 904 905 static int 906 amdstart(struct amd_softc *amd, struct amd_srb *pSRB) 907 { 908 union ccb *pccb; 909 struct ccb_scsiio *pcsio; 910 struct amd_target_info *targ_info; 911 u_int identify_msg; 912 u_int command; 913 u_int target; 914 u_int lun; 915 int tagged; 916 917 pccb = pSRB->pccb; 918 pcsio = &pccb->csio; 919 target = pccb->ccb_h.target_id; 920 lun = pccb->ccb_h.target_lun; 921 targ_info = &amd->tinfo[target]; 922 923 amd_clear_msg_state(amd); 924 amd_write8(amd, SCSIDESTIDREG, target); 925 amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg); 926 amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg); 927 amd_write8(amd, CNTLREG1, targ_info->CtrlR1); 928 amd_write8(amd, CNTLREG3, targ_info->CtrlR3); 929 amd_write8(amd, CNTLREG4, targ_info->CtrlR4); 930 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 931 932 identify_msg = MSG_IDENTIFYFLAG | lun; 933 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0 934 && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0 935 && (pSRB->CmdBlock[0] != REQUEST_SENSE) 936 && (pSRB->SRBFlag & AUTO_REQSENSE) == 0) 937 identify_msg |= MSG_IDENTIFY_DISCFLAG; 938 939 amd_write8(amd, SCSIFIFOREG, identify_msg); 940 tagged = 0; 941 if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0 942 || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0) 943 pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 944 if (targ_info->current.period != targ_info->goal.period 945 || targ_info->current.offset != targ_info->goal.offset) { 946 command = SEL_W_ATN_STOP; 947 amdconstructsdtr(amd, targ_info->goal.period, 948 targ_info->goal.offset); 949 } else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 950 command = SEL_W_ATN2; 951 pSRB->SRBState = SRB_START; 952 amd_write8(amd, SCSIFIFOREG, pcsio->tag_action); 953 amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber); 954 tagged++; 955 } else { 956 command = SEL_W_ATN; 957 pSRB->SRBState = SRB_START; 958 } 959 if (command != SEL_W_ATN_STOP) 960 amdsetupcommand(amd, pSRB); 961 962 if (amd_read8(amd, SCSISTATREG) & INTERRUPT) { 963 pSRB->SRBState = SRB_READY; 964 return (1); 965 } else { 966 amd->last_phase = SCSI_ARBITRATING; 967 amd_write8(amd, SCSICMDREG, command); 968 amd->active_srb = pSRB; 969 amd->cur_target = target; 970 amd->cur_lun = lun; 971 return 
(0); 972 } 973 } 974 975 /* 976 * Catch an interrupt from the adapter. 977 * Process pending device interrupts. 978 */ 979 static void 980 amd_intr(void *arg) 981 { 982 struct amd_softc *amd; 983 struct amd_srb *pSRB; 984 u_int internstat = 0; 985 u_int scsistat; 986 u_int intstat; 987 988 amd = (struct amd_softc *)arg; 989 990 if (amd == NULL) { 991 #ifdef AMD_DEBUG0 992 kprintf("amd_intr: amd NULL return......"); 993 #endif 994 return; 995 } 996 997 scsistat = amd_read8(amd, SCSISTATREG); 998 if (!(scsistat & INTERRUPT)) { 999 #ifdef AMD_DEBUG0 1000 kprintf("amd_intr: scsistat = NULL ,return......"); 1001 #endif 1002 return; 1003 } 1004 #ifdef AMD_DEBUG_SCSI_PHASE 1005 kprintf("scsistat=%2x,", scsistat); 1006 #endif 1007 1008 internstat = amd_read8(amd, INTERNSTATREG); 1009 intstat = amd_read8(amd, INTSTATREG); 1010 1011 #ifdef AMD_DEBUG_SCSI_PHASE 1012 kprintf("intstat=%2x,", intstat); 1013 #endif 1014 1015 if (intstat & DISCONNECTED) { 1016 amd_Disconnect(amd); 1017 return; 1018 } 1019 if (intstat & RESELECTED) { 1020 amd_Reselect(amd); 1021 return; 1022 } 1023 if (intstat & INVALID_CMD) { 1024 amd_InvalidCmd(amd); 1025 return; 1026 } 1027 if (intstat & SCSI_RESET_) { 1028 amd_ScsiRstDetect(amd); 1029 return; 1030 } 1031 if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) { 1032 pSRB = amd->active_srb; 1033 /* 1034 * Run our state engine. First perform 1035 * post processing for the last phase we 1036 * were in, followed by any processing 1037 * required to handle the current phase. 1038 */ 1039 scsistat = 1040 amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat); 1041 amd->last_phase = scsistat & SCSI_PHASE_MASK; 1042 (void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat); 1043 } 1044 } 1045 1046 static u_int 1047 amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1048 { 1049 struct amd_sg *psgl; 1050 u_int32_t ResidCnt, xferCnt; 1051 1052 if (!(pSRB->SRBState & SRB_XFERPAD)) { 1053 if (scsistat & PARITY_ERR) { 1054 pSRB->SRBStatus |= PARITY_ERROR; 1055 } 1056 if (scsistat & COUNT_2_ZERO) { 1057 while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0) 1058 ; 1059 pSRB->TotalXferredLen += pSRB->SGToBeXferLen; 1060 pSRB->SGIndex++; 1061 if (pSRB->SGIndex < pSRB->SGcount) { 1062 pSRB->pSGlist++; 1063 psgl = pSRB->pSGlist; 1064 pSRB->SGPhysAddr = psgl->SGXPtr; 1065 pSRB->SGToBeXferLen = psgl->SGXLen; 1066 } else { 1067 pSRB->SGToBeXferLen = 0; 1068 } 1069 } else { 1070 ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f; 1071 ResidCnt += amd_read8(amd, CTCREG_LOW) 1072 | (amd_read8(amd, CTCREG_MID) << 8) 1073 | (amd_read8(amd, CURTXTCNTREG) << 16); 1074 1075 xferCnt = pSRB->SGToBeXferLen - ResidCnt; 1076 pSRB->SGPhysAddr += xferCnt; 1077 pSRB->TotalXferredLen += xferCnt; 1078 pSRB->SGToBeXferLen = ResidCnt; 1079 } 1080 } 1081 amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD); 1082 return (scsistat); 1083 } 1084 1085 static u_int 1086 amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1087 { 1088 u_int8_t bval; 1089 u_int16_t i, residual; 1090 struct amd_sg *psgl; 1091 u_int32_t ResidCnt, xferCnt; 1092 u_int8_t * ptr; 1093 1094 if (!(pSRB->SRBState & SRB_XFERPAD)) { 1095 if (scsistat & PARITY_ERR) { 1096 pSRB->SRBStatus |= PARITY_ERROR; 1097 } 1098 if (scsistat & COUNT_2_ZERO) { 1099 while (1) { 1100 bval = amd_read8(amd, DMA_Status); 1101 if ((bval & DMA_XFER_DONE) != 0) 1102 break; 1103 } 1104 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD); 1105 1106 pSRB->TotalXferredLen += pSRB->SGToBeXferLen; 1107 pSRB->SGIndex++; 
1108 if (pSRB->SGIndex < pSRB->SGcount) { 1109 pSRB->pSGlist++; 1110 psgl = pSRB->pSGlist; 1111 pSRB->SGPhysAddr = psgl->SGXPtr; 1112 pSRB->SGToBeXferLen = psgl->SGXLen; 1113 } else { 1114 pSRB->SGToBeXferLen = 0; 1115 } 1116 } else { /* phase changed */ 1117 residual = 0; 1118 bval = amd_read8(amd, CURRENTFIFOREG); 1119 while (bval & 0x1f) { 1120 if ((bval & 0x1f) == 1) { 1121 for (i = 0; i < 0x100; i++) { 1122 bval = amd_read8(amd, CURRENTFIFOREG); 1123 if (!(bval & 0x1f)) { 1124 goto din_1; 1125 } else if (i == 0x0ff) { 1126 residual = 1; 1127 goto din_1; 1128 } 1129 } 1130 } else { 1131 bval = amd_read8(amd, CURRENTFIFOREG); 1132 } 1133 } 1134 din_1: 1135 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD); 1136 for (i = 0; i < 0x8000; i++) { 1137 if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE)) 1138 break; 1139 } 1140 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD); 1141 1142 ResidCnt = amd_read8(amd, CTCREG_LOW) 1143 | (amd_read8(amd, CTCREG_MID) << 8) 1144 | (amd_read8(amd, CURTXTCNTREG) << 16); 1145 xferCnt = pSRB->SGToBeXferLen - ResidCnt; 1146 pSRB->SGPhysAddr += xferCnt; 1147 pSRB->TotalXferredLen += xferCnt; 1148 pSRB->SGToBeXferLen = ResidCnt; 1149 if (residual) { 1150 /* get residual byte */ 1151 bval = amd_read8(amd, SCSIFIFOREG); 1152 ptr = phystovirt(pSRB, xferCnt); 1153 *ptr = bval; 1154 pSRB->SGPhysAddr++; 1155 pSRB->TotalXferredLen++; 1156 pSRB->SGToBeXferLen--; 1157 } 1158 } 1159 } 1160 return (scsistat); 1161 } 1162 1163 static u_int 1164 amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1165 { 1166 pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG); 1167 /* get message */ 1168 pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG); 1169 pSRB->SRBState = SRB_COMPLETED; 1170 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD); 1171 return (SCSI_NOP0); 1172 } 1173 1174 static u_int 1175 amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1176 { 1177 if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) { 1178 scsistat = SCSI_NOP0; 1179 } 1180 return (scsistat); 1181 } 1182 1183 static u_int 1184 amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1185 { 1186 int done; 1187 1188 amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG); 1189 1190 done = amdparsemsg(amd); 1191 if (done) 1192 amd->msgin_index = 0; 1193 else 1194 amd->msgin_index++; 1195 return (SCSI_NOP0); 1196 } 1197 1198 static int 1199 amdparsemsg(struct amd_softc *amd) 1200 { 1201 int reject; 1202 int done; 1203 int response; 1204 1205 done = FALSE; 1206 response = FALSE; 1207 reject = FALSE; 1208 1209 /* 1210 * Parse as much of the message as is availible, 1211 * rejecting it if we don't support it. When 1212 * the entire message is availible and has been 1213 * handled, return TRUE indicating that we have 1214 * parsed an entire message. 
1215 */ 1216 switch (amd->msgin_buf[0]) { 1217 case MSG_DISCONNECT: 1218 amd->active_srb->SRBState = SRB_DISCONNECT; 1219 amd->disc_count[amd->cur_target][amd->cur_lun]++; 1220 done = TRUE; 1221 break; 1222 case MSG_SIMPLE_Q_TAG: 1223 { 1224 struct amd_srb *disc_srb; 1225 1226 if (amd->msgin_index < 1) 1227 break; 1228 disc_srb = &amd->SRB_array[amd->msgin_buf[1]]; 1229 if (amd->active_srb != NULL 1230 || disc_srb->SRBState != SRB_DISCONNECT 1231 || disc_srb->pccb->ccb_h.target_id != amd->cur_target 1232 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) { 1233 kprintf("amd%d: Unexpected tagged reselection " 1234 "for target %d, Issuing Abort\n", amd->unit, 1235 amd->cur_target); 1236 amd->msgout_buf[0] = MSG_ABORT; 1237 amd->msgout_len = 1; 1238 response = TRUE; 1239 break; 1240 } 1241 amd->active_srb = disc_srb; 1242 amd->disc_count[amd->cur_target][amd->cur_lun]--; 1243 done = TRUE; 1244 break; 1245 } 1246 case MSG_MESSAGE_REJECT: 1247 response = amdhandlemsgreject(amd); 1248 if (response == FALSE) 1249 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD); 1250 /* FALLTHROUGH */ 1251 case MSG_NOOP: 1252 done = TRUE; 1253 break; 1254 case MSG_EXTENDED: 1255 { 1256 u_int clockrate; 1257 u_int period; 1258 u_int offset; 1259 u_int saved_offset; 1260 1261 /* Wait for enough of the message to begin validation */ 1262 if (amd->msgin_index < 1) 1263 break; 1264 if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 1265 reject = TRUE; 1266 break; 1267 } 1268 1269 /* Wait for opcode */ 1270 if (amd->msgin_index < 2) 1271 break; 1272 1273 if (amd->msgin_buf[2] != MSG_EXT_SDTR) { 1274 reject = TRUE; 1275 break; 1276 } 1277 1278 /* 1279 * Wait until we have both args before validating 1280 * and acting on this message. 1281 * 1282 * Add one to MSG_EXT_SDTR_LEN to account for 1283 * the extended message preamble. 1284 */ 1285 if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 1286 break; 1287 1288 period = amd->msgin_buf[3]; 1289 saved_offset = offset = amd->msgin_buf[4]; 1290 clockrate = amdfindclockrate(amd, &period); 1291 if (offset > AMD_MAX_SYNC_OFFSET) 1292 offset = AMD_MAX_SYNC_OFFSET; 1293 if (period == 0 || offset == 0) { 1294 offset = 0; 1295 period = 0; 1296 clockrate = 0; 1297 } 1298 amdsetsync(amd, amd->cur_target, clockrate, period, offset, 1299 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL); 1300 1301 /* 1302 * See if we initiated Sync Negotiation 1303 * and didn't have to fall down to async 1304 * transfers. 1305 */ 1306 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) { 1307 /* We started it */ 1308 if (saved_offset != offset) { 1309 /* Went too low - force async */ 1310 reject = TRUE; 1311 } 1312 } else { 1313 /* 1314 * Send our own SDTR in reply 1315 */ 1316 if (bootverbose) 1317 kprintf("Sending SDTR!\n"); 1318 amd->msgout_index = 0; 1319 amd->msgout_len = 0; 1320 amdconstructsdtr(amd, period, offset); 1321 amd->msgout_index = 0; 1322 response = TRUE; 1323 } 1324 done = TRUE; 1325 break; 1326 } 1327 case MSG_SAVEDATAPOINTER: 1328 case MSG_RESTOREPOINTERS: 1329 /* XXX Implement!!! 
*/ 1330 done = TRUE; 1331 break; 1332 default: 1333 reject = TRUE; 1334 break; 1335 } 1336 1337 if (reject) { 1338 amd->msgout_index = 0; 1339 amd->msgout_len = 1; 1340 amd->msgout_buf[0] = MSG_MESSAGE_REJECT; 1341 done = TRUE; 1342 response = TRUE; 1343 } 1344 1345 if (response) 1346 amd_write8(amd, SCSICMDREG, SET_ATN_CMD); 1347 1348 if (done && !response) 1349 /* Clear the outgoing message buffer */ 1350 amd->msgout_len = 0; 1351 1352 /* Drop Ack */ 1353 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD); 1354 1355 return (done); 1356 } 1357 1358 static u_int 1359 amdfindclockrate(struct amd_softc *amd, u_int *period) 1360 { 1361 u_int i; 1362 u_int clockrate; 1363 1364 for (i = 0; i < sizeof(tinfo_sync_period); i++) { 1365 u_int8_t *table_entry; 1366 1367 table_entry = &tinfo_sync_period[i]; 1368 if (*period <= *table_entry) { 1369 /* 1370 * When responding to a target that requests 1371 * sync, the requested rate may fall between 1372 * two rates that we can output, but still be 1373 * a rate that we can receive. Because of this, 1374 * we want to respond to the target with 1375 * the same rate that it sent to us even 1376 * if the period we use to send data to it 1377 * is lower. Only lower the response period 1378 * if we must. 1379 */ 1380 if (i == 0) { 1381 *period = *table_entry; 1382 } 1383 break; 1384 } 1385 } 1386 1387 if (i == sizeof(tinfo_sync_period)) { 1388 /* Too slow for us. Use asnyc transfers. */ 1389 *period = 0; 1390 clockrate = 0; 1391 } else 1392 clockrate = i + 4; 1393 1394 return (clockrate); 1395 } 1396 1397 /* 1398 * See if we sent a particular extended message to the target. 1399 * If "full" is true, the target saw the full message. 1400 * If "full" is false, the target saw at least the first 1401 * byte of the message. 1402 */ 1403 static int 1404 amdsentmsg(struct amd_softc *amd, u_int msgtype, int full) 1405 { 1406 int found; 1407 int index; 1408 1409 found = FALSE; 1410 index = 0; 1411 1412 while (index < amd->msgout_len) { 1413 if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0 1414 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT) 1415 index++; 1416 else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG 1417 && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) { 1418 /* Skip tag type and tag id */ 1419 index += 2; 1420 } else if (amd->msgout_buf[index] == MSG_EXTENDED) { 1421 /* Found a candidate */ 1422 if (amd->msgout_buf[index+2] == msgtype) { 1423 u_int end_index; 1424 1425 end_index = index + 1 1426 + amd->msgout_buf[index + 1]; 1427 if (full) { 1428 if (amd->msgout_index > end_index) 1429 found = TRUE; 1430 } else if (amd->msgout_index > index) 1431 found = TRUE; 1432 } 1433 break; 1434 } else { 1435 panic("amdsentmsg: Inconsistent msg buffer"); 1436 } 1437 } 1438 return (found); 1439 } 1440 1441 static void 1442 amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset) 1443 { 1444 amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED; 1445 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN; 1446 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR; 1447 amd->msgout_buf[amd->msgout_index++] = period; 1448 amd->msgout_buf[amd->msgout_index++] = offset; 1449 amd->msgout_len += 5; 1450 } 1451 1452 static int 1453 amdhandlemsgreject(struct amd_softc *amd) 1454 { 1455 /* 1456 * If we had an outstanding SDTR for this 1457 * target, this is a signal that the target 1458 * is refusing negotiation. Also watch out 1459 * for rejected tag messages. 
1460 */ 1461 struct amd_srb *srb; 1462 struct amd_target_info *targ_info; 1463 int response = FALSE; 1464 1465 srb = amd->active_srb; 1466 targ_info = &amd->tinfo[amd->cur_target]; 1467 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) { 1468 /* note asynch xfers and clear flag */ 1469 amdsetsync(amd, amd->cur_target, /*clockrate*/0, 1470 /*period*/0, /*offset*/0, 1471 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL); 1472 kprintf("amd%d:%d: refuses synchronous negotiation. " 1473 "Using asynchronous transfers\n", 1474 amd->unit, amd->cur_target); 1475 } else if ((srb != NULL) 1476 && (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 1477 struct ccb_trans_settings neg; 1478 struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi; 1479 1480 kprintf("amd%d:%d: refuses tagged commands. Performing " 1481 "non-tagged I/O\n", amd->unit, amd->cur_target); 1482 1483 amdsettags(amd, amd->cur_target, FALSE); 1484 memset(&neg, 0, sizeof (neg)); 1485 scsi->valid = CTS_SCSI_VALID_TQ; 1486 xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1); 1487 xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg); 1488 1489 /* 1490 * Resend the identify for this CCB as the target 1491 * may believe that the selection is invalid otherwise. 1492 */ 1493 if (amd->msgout_len != 0) 1494 bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1], 1495 amd->msgout_len); 1496 amd->msgout_buf[0] = MSG_IDENTIFYFLAG 1497 | srb->pccb->ccb_h.target_lun; 1498 amd->msgout_len++; 1499 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0 1500 && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0) 1501 amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG; 1502 1503 srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 1504 1505 /* 1506 * Requeue all tagged commands for this target 1507 * currently in our posession so they can be 1508 * converted to untagged commands. 1509 */ 1510 amdcompletematch(amd, amd->cur_target, amd->cur_lun, 1511 AMD_TAG_WILDCARD, &amd->waiting_srbs, 1512 CAM_DEV_QFRZN|CAM_REQUEUE_REQ); 1513 } else { 1514 /* 1515 * Otherwise, we ignore it. 
1516 */ 1517 kprintf("amd%d:%d: Message reject received -- ignored\n", 1518 amd->unit, amd->cur_target); 1519 } 1520 return (response); 1521 } 1522 1523 #if 0 1524 if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) { 1525 if (bval == MSG_DISCONNECT) { 1526 pSRB->SRBState = SRB_DISCONNECT; 1527 } else if (bval == MSG_SAVEDATAPOINTER) { 1528 goto min6; 1529 } else if ((bval == MSG_EXTENDED) 1530 || ((bval >= MSG_SIMPLE_Q_TAG) 1531 && (bval <= MSG_ORDERED_Q_TAG))) { 1532 pSRB->SRBState |= SRB_MSGIN_MULTI; 1533 pSRB->MsgInBuf[0] = bval; 1534 pSRB->MsgCnt = 1; 1535 pSRB->pMsgPtr = &pSRB->MsgInBuf[1]; 1536 } else if (bval == MSG_MESSAGE_REJECT) { 1537 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD); 1538 1539 if (pSRB->SRBState & DO_SYNC_NEGO) { 1540 goto set_async; 1541 } 1542 } else if (bval == MSG_RESTOREPOINTERS) { 1543 goto min6; 1544 } else { 1545 goto min6; 1546 } 1547 } else { /* minx: */ 1548 *pSRB->pMsgPtr = bval; 1549 pSRB->MsgCnt++; 1550 pSRB->pMsgPtr++; 1551 if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG) 1552 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) { 1553 if (pSRB->MsgCnt == 2) { 1554 pSRB->SRBState = 0; 1555 pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]]; 1556 if (pSRB->SRBState & SRB_DISCONNECT) == 0) { 1557 pSRB = amd->pTmpSRB; 1558 pSRB->SRBState = SRB_UNEXPECT_RESEL; 1559 pDCB->pActiveSRB = pSRB; 1560 pSRB->MsgOutBuf[0] = MSG_ABORT_TAG; 1561 EnableMsgOut2(amd, pSRB); 1562 } else { 1563 if (pDCB->DCBFlag & ABORT_DEV_) { 1564 pSRB->SRBState = SRB_ABORT_SENT; 1565 EnableMsgOut1(amd, pSRB); 1566 } 1567 pDCB->pActiveSRB = pSRB; 1568 pSRB->SRBState = SRB_DATA_XFER; 1569 } 1570 } 1571 } else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED) 1572 && (pSRB->MsgCnt == 5)) { 1573 pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO); 1574 if ((pSRB->MsgInBuf[1] != 3) 1575 || (pSRB->MsgInBuf[2] != 1)) { /* reject_msg: */ 1576 pSRB->MsgCnt = 1; 1577 pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT; 1578 amd_write8(amd, SCSICMDREG, SET_ATN_CMD); 1579 } else if (!(pSRB->MsgInBuf[3]) 1580 || !(pSRB->MsgInBuf[4])) { 1581 set_async: /* set async */ 1582 1583 pDCB = pSRB->pSRBDCB; 1584 /* disable sync & sync nego */ 1585 pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE); 1586 pDCB->SyncPeriod = 0; 1587 pDCB->SyncOffset = 0; 1588 1589 pDCB->tinfo.goal.period = 0; 1590 pDCB->tinfo.goal.offset = 0; 1591 1592 pDCB->tinfo.current.period = 0; 1593 pDCB->tinfo.current.offset = 0; 1594 pDCB->tinfo.current.width = 1595 MSG_EXT_WDTR_BUS_8_BIT; 1596 1597 pDCB->CtrlR3 = FAST_CLK; /* non_fast */ 1598 pDCB->CtrlR4 &= 0x3f; 1599 pDCB->CtrlR4 |= EATER_25NS; 1600 goto re_prog; 1601 } else {/* set sync */ 1602 1603 pDCB = pSRB->pSRBDCB; 1604 /* enable sync & sync nego */ 1605 pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE; 1606 1607 /* set sync offset */ 1608 pDCB->SyncOffset &= 0x0f0; 1609 pDCB->SyncOffset |= pSRB->MsgInBuf[4]; 1610 1611 /* set sync period */ 1612 pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3]; 1613 1614 wval = (u_int16_t) pSRB->MsgInBuf[3]; 1615 wval = wval << 2; 1616 wval--; 1617 wval1 = wval / 25; 1618 if ((wval1 * 25) != wval) { 1619 wval1++; 1620 } 1621 bval = FAST_CLK|FAST_SCSI; 1622 pDCB->CtrlR4 &= 0x3f; 1623 if (wval1 >= 8) { 1624 /* Fast SCSI */ 1625 wval1--; 1626 bval = FAST_CLK; 1627 pDCB->CtrlR4 |= EATER_25NS; 1628 } 1629 pDCB->CtrlR3 = bval; 1630 pDCB->SyncPeriod = (u_int8_t) wval1; 1631 1632 pDCB->tinfo.goal.period = 1633 tinfo_sync_period[pDCB->SyncPeriod - 4]; 1634 pDCB->tinfo.goal.offset = pDCB->SyncOffset; 1635 pDCB->tinfo.current.period = 1636 tinfo_sync_period[pDCB->SyncPeriod - 4]; 1637 pDCB->tinfo.current.offset = 
pDCB->SyncOffset; 1638 1639 /* 1640 * program SCSI control register 1641 */ 1642 re_prog: 1643 amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod); 1644 amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset); 1645 amd_write8(amd, CNTLREG3, pDCB->CtrlR3); 1646 amd_write8(amd, CNTLREG4, pDCB->CtrlR4); 1647 } 1648 } 1649 } 1650 min6: 1651 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD); 1652 return (SCSI_NOP0); 1653 } 1654 #endif 1655 1656 static u_int 1657 amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1658 { 1659 DataIO_Comm(amd, pSRB, WRITE_DIRECTION); 1660 return (scsistat); 1661 } 1662 1663 static u_int 1664 amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1665 { 1666 DataIO_Comm(amd, pSRB, READ_DIRECTION); 1667 return (scsistat); 1668 } 1669 1670 static void 1671 DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir) 1672 { 1673 struct amd_sg * psgl; 1674 u_int32_t lval; 1675 1676 if (pSRB->SGIndex < pSRB->SGcount) { 1677 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */ 1678 1679 if (!pSRB->SGToBeXferLen) { 1680 psgl = pSRB->pSGlist; 1681 pSRB->SGPhysAddr = psgl->SGXPtr; 1682 pSRB->SGToBeXferLen = psgl->SGXLen; 1683 } 1684 lval = pSRB->SGToBeXferLen; 1685 amd_write8(amd, CTCREG_LOW, lval); 1686 amd_write8(amd, CTCREG_MID, lval >> 8); 1687 amd_write8(amd, CURTXTCNTREG, lval >> 16); 1688 1689 amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen); 1690 1691 amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr); 1692 1693 pSRB->SRBState = SRB_DATA_XFER; 1694 1695 amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD); 1696 1697 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */ 1698 1699 amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */ 1700 } else { /* xfer pad */ 1701 if (pSRB->SGcount) { 1702 pSRB->AdaptStatus = H_OVER_UNDER_RUN; 1703 pSRB->SRBStatus |= OVER_RUN; 1704 } 1705 amd_write8(amd, CTCREG_LOW, 0); 1706 amd_write8(amd, CTCREG_MID, 0); 1707 amd_write8(amd, CURTXTCNTREG, 0); 1708 1709 pSRB->SRBState |= SRB_XFERPAD; 1710 amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE); 1711 } 1712 } 1713 1714 static u_int 1715 amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat) 1716 { 1717 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD); 1718 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 1719 1720 amdsetupcommand(amd, srb); 1721 1722 srb->SRBState = SRB_COMMAND; 1723 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD); 1724 return (scsistat); 1725 } 1726 1727 static u_int 1728 amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1729 { 1730 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 1731 pSRB->SRBState = SRB_STATUS; 1732 amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE); 1733 return (scsistat); 1734 } 1735 1736 static u_int 1737 amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1738 { 1739 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 1740 1741 if (amd->msgout_len == 0) { 1742 amd->msgout_buf[0] = MSG_NOOP; 1743 amd->msgout_len = 1; 1744 } 1745 amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len); 1746 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD); 1747 return (scsistat); 1748 } 1749 1750 static u_int 1751 amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1752 { 1753 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 1754 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD); 1755 return (scsistat); 1756 } 1757 1758 static u_int 1759 amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1760 { 
1761 return (scsistat); 1762 } 1763 1764 static void 1765 amd_Disconnect(struct amd_softc * amd) 1766 { 1767 struct amd_srb *srb; 1768 int target; 1769 int lun; 1770 1771 srb = amd->active_srb; 1772 amd->active_srb = NULL; 1773 amd->last_phase = SCSI_BUS_FREE; 1774 amd_write8(amd, SCSICMDREG, EN_SEL_RESEL); 1775 target = amd->cur_target; 1776 lun = amd->cur_lun; 1777 1778 if (srb == NULL) { 1779 /* Invalid reselection */ 1780 amdrunwaiting(amd); 1781 } else if (srb->SRBState & SRB_ABORT_SENT) { 1782 /* Clean up and done this srb */ 1783 #if 0 1784 while (( = TAILQ_FIRST(&amd->running_srbs)) != NULL) { 1785 /* XXX What about "done'ing" these srbs??? */ 1786 if (pSRB->pSRBDCB == pDCB) { 1787 TAILQ_REMOVE(&amd->running_srbs, pSRB, links); 1788 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links); 1789 } 1790 } 1791 amdrunwaiting(amd); 1792 #endif 1793 } else { 1794 if ((srb->SRBState & (SRB_START | SRB_MSGOUT)) 1795 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) { 1796 srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT; 1797 goto disc1; 1798 } else if (srb->SRBState & SRB_DISCONNECT) { 1799 if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID)) 1800 amd->untagged_srbs[target][lun] = srb; 1801 amdrunwaiting(amd); 1802 } else if (srb->SRBState & SRB_COMPLETED) { 1803 disc1: 1804 srb->SRBState = SRB_FREE; 1805 SRBdone(amd, srb); 1806 } 1807 } 1808 return; 1809 } 1810 1811 static void 1812 amd_Reselect(struct amd_softc *amd) 1813 { 1814 struct amd_target_info *tinfo; 1815 u_int16_t disc_count; 1816 1817 amd_clear_msg_state(amd); 1818 if (amd->active_srb != NULL) { 1819 /* Requeue the SRB for our attempted Selection */ 1820 TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links); 1821 TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links); 1822 amd->active_srb = NULL; 1823 } 1824 /* get ID */ 1825 amd->cur_target = amd_read8(amd, SCSIFIFOREG); 1826 amd->cur_target ^= amd->HostID_Bit; 1827 amd->cur_target = ffs(amd->cur_target) - 1; 1828 amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7; 1829 tinfo = &amd->tinfo[amd->cur_target]; 1830 amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun]; 1831 disc_count = amd->disc_count[amd->cur_target][amd->cur_lun]; 1832 if (disc_count == 0) { 1833 kprintf("amd%d: Unexpected reselection for target %d, " 1834 "Issuing Abort\n", amd->unit, amd->cur_target); 1835 amd->msgout_buf[0] = MSG_ABORT; 1836 amd->msgout_len = 1; 1837 amd_write8(amd, SCSICMDREG, SET_ATN_CMD); 1838 } 1839 if (amd->active_srb != NULL) { 1840 amd->disc_count[amd->cur_target][amd->cur_lun]--; 1841 amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL; 1842 } 1843 1844 amd_write8(amd, SCSIDESTIDREG, amd->cur_target); 1845 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg); 1846 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg); 1847 amd_write8(amd, CNTLREG1, tinfo->CtrlR1); 1848 amd_write8(amd, CNTLREG3, tinfo->CtrlR3); 1849 amd_write8(amd, CNTLREG4, tinfo->CtrlR4); 1850 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */ 1851 amd->last_phase = SCSI_NOP0; 1852 } 1853 1854 static void 1855 SRBdone(struct amd_softc *amd, struct amd_srb *pSRB) 1856 { 1857 u_int8_t bval, i, status; 1858 union ccb *pccb; 1859 struct ccb_scsiio *pcsio; 1860 struct amd_sg *ptr2; 1861 u_int32_t swlval; 1862 1863 pccb = pSRB->pccb; 1864 pcsio = &pccb->csio; 1865 1866 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, 1867 ("SRBdone - TagNumber %d\n", pSRB->TagNumber)); 1868 1869 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1870 bus_dmasync_op_t op; 1871 1872 if 
((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1873 op = BUS_DMASYNC_POSTREAD; 1874 else 1875 op = BUS_DMASYNC_POSTWRITE; 1876 bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op); 1877 bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap); 1878 } 1879 1880 status = pSRB->TargetStatus; 1881 pccb->ccb_h.status = CAM_REQ_CMP; 1882 if (pSRB->SRBFlag & AUTO_REQSENSE) { 1883 pSRB->SRBFlag &= ~AUTO_REQSENSE; 1884 pSRB->AdaptStatus = 0; 1885 pSRB->TargetStatus = SCSI_STATUS_CHECK_COND; 1886 1887 if (status == SCSI_STATUS_CHECK_COND) { 1888 pccb->ccb_h.status = CAM_SEL_TIMEOUT; 1889 goto ckc_e; 1890 } 1891 *((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0]; 1892 1893 pcsio->sense_resid = pcsio->sense_len 1894 - pSRB->TotalXferredLen; 1895 pSRB->TotalXferredLen = pSRB->Segment1[1]; 1896 if (pSRB->TotalXferredLen) { 1897 /* ???? */ 1898 pcsio->resid = pcsio->dxfer_len 1899 - pSRB->TotalXferredLen; 1900 /* The resid field contains valid data */ 1901 /* Flush resid bytes on complete */ 1902 } else { 1903 pcsio->scsi_status = SCSI_STATUS_CHECK_COND; 1904 } 1905 pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR; 1906 goto ckc_e; 1907 } 1908 if (status) { 1909 if (status == SCSI_STATUS_CHECK_COND) { 1910 1911 if ((pSRB->SGIndex < pSRB->SGcount) 1912 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) { 1913 bval = pSRB->SGcount; 1914 swlval = pSRB->SGToBeXferLen; 1915 ptr2 = pSRB->pSGlist; 1916 ptr2++; 1917 for (i = pSRB->SGIndex + 1; i < bval; i++) { 1918 swlval += ptr2->SGXLen; 1919 ptr2++; 1920 } 1921 /* ??????? */ 1922 pcsio->resid = swlval; 1923 1924 #ifdef AMD_DEBUG0 1925 kprintf("XferredLen=%8x,NotYetXferLen=%8x,", 1926 pSRB->TotalXferredLen, swlval); 1927 #endif 1928 } 1929 if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) { 1930 #ifdef AMD_DEBUG0 1931 kprintf("RequestSense..................\n"); 1932 #endif 1933 RequestSense(amd, pSRB); 1934 return; 1935 } 1936 pcsio->scsi_status = SCSI_STATUS_CHECK_COND; 1937 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1938 goto ckc_e; 1939 } else if (status == SCSI_STATUS_QUEUE_FULL) { 1940 pSRB->AdaptStatus = 0; 1941 pSRB->TargetStatus = 0; 1942 pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL; 1943 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1944 goto ckc_e; 1945 } else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) { 1946 pSRB->AdaptStatus = H_SEL_TIMEOUT; 1947 pSRB->TargetStatus = 0; 1948 1949 pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT; 1950 pccb->ccb_h.status = CAM_SEL_TIMEOUT; 1951 } else if (status == SCSI_STATUS_BUSY) { 1952 #ifdef AMD_DEBUG0 1953 kprintf("DC390: target busy at %s %d\n", 1954 __FILE__, __LINE__); 1955 #endif 1956 pcsio->scsi_status = SCSI_STATUS_BUSY; 1957 pccb->ccb_h.status = CAM_SCSI_BUSY; 1958 } else if (status == SCSI_STATUS_RESERV_CONFLICT) { 1959 #ifdef AMD_DEBUG0 1960 kprintf("DC390: target reserved at %s %d\n", 1961 __FILE__, __LINE__); 1962 #endif 1963 pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 1964 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */ 1965 } else { 1966 pSRB->AdaptStatus = 0; 1967 #ifdef AMD_DEBUG0 1968 kprintf("DC390: driver stuffup at %s %d\n", 1969 __FILE__, __LINE__); 1970 #endif 1971 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1972 } 1973 } else { 1974 status = pSRB->AdaptStatus; 1975 if (status & H_OVER_UNDER_RUN) { 1976 pSRB->TargetStatus = 0; 1977 1978 pccb->ccb_h.status = CAM_DATA_RUN_ERR; 1979 } else if (pSRB->SRBStatus & PARITY_ERROR) { 1980 #ifdef AMD_DEBUG0 1981 kprintf("DC390: driver stuffup %s %d\n", 1982 __FILE__, __LINE__); 1983 #endif 1984 /* Driver failed to perform operation */ 
1985 pccb->ccb_h.status = CAM_UNCOR_PARITY; 1986 } else { /* No error */ 1987 pSRB->AdaptStatus = 0; 1988 pSRB->TargetStatus = 0; 1989 pcsio->resid = 0; 1990 /* there is no error, (sense is invalid) */ 1991 } 1992 } 1993 ckc_e: 1994 crit_enter(); 1995 if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1996 /* CAM request not yet complete =>device_Q frozen */ 1997 xpt_freeze_devq(pccb->ccb_h.path, 1); 1998 pccb->ccb_h.status |= CAM_DEV_QFRZN; 1999 } 2000 TAILQ_REMOVE(&amd->running_srbs, pSRB, links); 2001 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links); 2002 amdrunwaiting(amd); 2003 crit_exit(); 2004 xpt_done(pccb); 2005 2006 } 2007 2008 static void 2009 amd_ResetSCSIBus(struct amd_softc * amd) 2010 { 2011 crit_enter(); 2012 amd->ACBFlag |= RESET_DEV; 2013 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD); 2014 amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD); 2015 crit_exit(); 2016 return; 2017 } 2018 2019 static void 2020 amd_ScsiRstDetect(struct amd_softc * amd) 2021 { 2022 u_int32_t wlval; 2023 2024 #ifdef AMD_DEBUG0 2025 kprintf("amd_ScsiRstDetect \n"); 2026 #endif 2027 2028 wlval = 1000; 2029 while (--wlval) { /* delay 1 sec */ 2030 DELAY(1000); 2031 } 2032 crit_enter(); 2033 2034 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD); 2035 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 2036 2037 if (amd->ACBFlag & RESET_DEV) { 2038 amd->ACBFlag |= RESET_DONE; 2039 } else { 2040 amd->ACBFlag |= RESET_DETECT; 2041 ResetDevParam(amd); 2042 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, 2043 AMD_TAG_WILDCARD, &amd->running_srbs, 2044 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET); 2045 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, 2046 AMD_TAG_WILDCARD, &amd->waiting_srbs, 2047 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET); 2048 amd->active_srb = NULL; 2049 amd->ACBFlag = 0; 2050 amdrunwaiting(amd); 2051 } 2052 crit_exit(); 2053 return; 2054 } 2055 2056 static void 2057 RequestSense(struct amd_softc *amd, struct amd_srb *pSRB) 2058 { 2059 union ccb *pccb; 2060 struct ccb_scsiio *pcsio; 2061 2062 pccb = pSRB->pccb; 2063 pcsio = &pccb->csio; 2064 2065 pSRB->SRBFlag |= AUTO_REQSENSE; 2066 pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0])); 2067 pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4])); 2068 pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount; 2069 pSRB->Segment1[1] = pSRB->TotalXferredLen; 2070 2071 pSRB->AdaptStatus = 0; 2072 pSRB->TargetStatus = 0; 2073 2074 pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data); 2075 pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len; 2076 2077 pSRB->pSGlist = &pSRB->Segmentx; 2078 pSRB->SGcount = 1; 2079 pSRB->SGIndex = 0; 2080 2081 *((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003; 2082 pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5; 2083 *((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len; 2084 pSRB->ScsiCmdLen = 6; 2085 2086 pSRB->TotalXferredLen = 0; 2087 pSRB->SGToBeXferLen = 0; 2088 if (amdstart(amd, pSRB) != 0) { 2089 TAILQ_REMOVE(&amd->running_srbs, pSRB, links); 2090 TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links); 2091 } 2092 } 2093 2094 static void 2095 amd_InvalidCmd(struct amd_softc * amd) 2096 { 2097 struct amd_srb *srb; 2098 2099 srb = amd->active_srb; 2100 if (srb->SRBState & (SRB_START|SRB_MSGOUT)) 2101 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 2102 } 2103 2104 void 2105 amd_linkSRB(struct amd_softc *amd) 2106 { 2107 u_int16_t count, i; 2108 struct amd_srb *psrb; 2109 int error; 2110 2111 count = amd->SRBCount; 2112 2113 for (i = 0; i < count; i++) { 2114 psrb = (struct amd_srb 
*)&amd->SRB_array[i]; 2115 psrb->TagNumber = i; 2116 2117 /* 2118 * Create the dmamap. This is no longer optional! 2119 */ 2120 error = bus_dmamap_create(amd->buffer_dmat, 0, &psrb->dmamap); 2121 if (error) { 2122 device_printf(amd->dev, "Error %d creating buffer " 2123 "dmamap!\n", error); 2124 break; 2125 } 2126 TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links); 2127 } 2128 } 2129 2130 void 2131 amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval) 2132 { 2133 if (mode == ENABLE_CE) { 2134 *regval = 0xc0; 2135 } else { 2136 *regval = 0x80; 2137 } 2138 pci_write_config(amd->dev, *regval, 0, /*bytes*/1); 2139 if (mode == DISABLE_CE) { 2140 pci_write_config(amd->dev, *regval, 0, /*bytes*/1); 2141 } 2142 DELAY(160); 2143 } 2144 2145 void 2146 amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry) 2147 { 2148 u_int bval; 2149 2150 bval = 0; 2151 if (Carry) { 2152 bval = 0x40; 2153 *regval = 0x80; 2154 pci_write_config(amd->dev, *regval, bval, /*bytes*/1); 2155 } 2156 DELAY(160); 2157 bval |= 0x80; 2158 pci_write_config(amd->dev, *regval, bval, /*bytes*/1); 2159 DELAY(160); 2160 pci_write_config(amd->dev, *regval, 0, /*bytes*/1); 2161 DELAY(160); 2162 } 2163 2164 static int 2165 amd_EEpromInDO(struct amd_softc *amd) 2166 { 2167 pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1); 2168 DELAY(160); 2169 pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1); 2170 DELAY(160); 2171 if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22) 2172 return (1); 2173 return (0); 2174 } 2175 2176 static u_int16_t 2177 EEpromGetData1(struct amd_softc *amd) 2178 { 2179 u_int i; 2180 u_int carryFlag; 2181 u_int16_t wval; 2182 2183 wval = 0; 2184 for (i = 0; i < 16; i++) { 2185 wval <<= 1; 2186 carryFlag = amd_EEpromInDO(amd); 2187 wval |= carryFlag; 2188 } 2189 return (wval); 2190 } 2191 2192 static void 2193 amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd) 2194 { 2195 u_int i, j; 2196 int carryFlag; 2197 2198 carryFlag = 1; 2199 j = 0x80; 2200 for (i = 0; i < 9; i++) { 2201 amd_EEpromOutDI(amd, regval, carryFlag); 2202 carryFlag = (EEpromCmd & j) ? 1 : 0; 2203 j >>= 1; 2204 } 2205 } 2206 2207 static void 2208 amd_ReadEEprom(struct amd_softc *amd) 2209 { 2210 int regval; 2211 u_int i; 2212 u_int16_t *ptr; 2213 u_int8_t cmd; 2214 2215 ptr = (u_int16_t *)&amd->eepromBuf[0]; 2216 cmd = EEPROM_READ; 2217 for (i = 0; i < 0x40; i++) { 2218 amd_EnDisableCE(amd, ENABLE_CE, &regval); 2219 amd_Prepare(amd, &regval, cmd); 2220 *ptr = EEpromGetData1(amd); 2221 ptr++; 2222 cmd++; 2223 amd_EnDisableCE(amd, DISABLE_CE, &regval); 2224 } 2225 } 2226 2227 static void 2228 amd_load_defaults(struct amd_softc *amd) 2229 { 2230 int target; 2231 2232 bzero(&amd->eepromBuf, sizeof amd->eepromBuf); 2233 for (target = 0; target < MAX_SCSI_ID; target++) 2234 amd->eepromBuf[target << 2] = 2235 (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK); 2236 amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7; 2237 amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G; 2238 amd->eepromBuf[EE_TAG_CMD_NUM] = 4; 2239 } 2240 2241 static void 2242 amd_load_eeprom_or_defaults(struct amd_softc *amd) 2243 { 2244 u_int16_t wval, *ptr; 2245 u_int8_t i; 2246 2247 amd_ReadEEprom(amd); 2248 wval = 0; 2249 ptr = (u_int16_t *) & amd->eepromBuf[0]; 2250 for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++) 2251 wval += *ptr; 2252 2253 if (wval != EE_CHECKSUM) { 2254 if (bootverbose) 2255 kprintf("amd%d: SEEPROM data unavailable. 
" 2256 "Using default device parameters.\n", 2257 amd->unit); 2258 amd_load_defaults(amd); 2259 } 2260 } 2261 2262 /* 2263 ********************************************************************** 2264 * Function : static int amd_init (struct Scsi_Host *host) 2265 * Purpose : initialize the internal structures for a given SCSI host 2266 * Inputs : host - pointer to this host adapter's structure/ 2267 ********************************************************************** 2268 */ 2269 static int 2270 amd_init(device_t dev) 2271 { 2272 struct amd_softc *amd = device_get_softc(dev); 2273 struct resource *iores; 2274 int i, rid; 2275 u_int bval; 2276 2277 rid = PCI_BASE_ADDR0; 2278 iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1, 2279 RF_ACTIVE); 2280 if (iores == NULL) { 2281 if (bootverbose) 2282 kprintf("amd_init: bus_alloc_resource failure!\n"); 2283 return ENXIO; 2284 } 2285 amd->tag = rman_get_bustag(iores); 2286 amd->bsh = rman_get_bushandle(iores); 2287 2288 /* DMA tag for mapping buffers into device visible space. */ 2289 if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1, 2290 /*boundary*/0, 2291 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 2292 /*highaddr*/BUS_SPACE_MAXADDR, 2293 /*filter*/NULL, /*filterarg*/NULL, 2294 /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG, 2295 /*maxsegsz*/AMD_MAXTRANSFER_SIZE, 2296 /*flags*/BUS_DMA_ALLOCNOW, 2297 &amd->buffer_dmat) != 0) { 2298 if (bootverbose) 2299 kprintf("amd_init: bus_dma_tag_create failure!\n"); 2300 return ENXIO; 2301 } 2302 TAILQ_INIT(&amd->free_srbs); 2303 TAILQ_INIT(&amd->running_srbs); 2304 TAILQ_INIT(&amd->waiting_srbs); 2305 amd->last_phase = SCSI_BUS_FREE; 2306 amd->dev = dev; 2307 amd->unit = device_get_unit(dev); 2308 amd->SRBCount = MAX_SRB_CNT; 2309 amd->status = 0; 2310 amd_load_eeprom_or_defaults(amd); 2311 amd->max_id = 7; 2312 if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) { 2313 amd->max_lun = 7; 2314 } else { 2315 amd->max_lun = 0; 2316 } 2317 amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID]; 2318 amd->HostID_Bit = (1 << amd->AdaptSCSIID); 2319 amd->AdaptSCSILUN = 0; 2320 /* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */ 2321 amd->ACBFlag = 0; 2322 amd->Gmode2 = amd->eepromBuf[EE_MODE2]; 2323 amd_linkSRB(amd); 2324 for (i = 0; i <= amd->max_id; i++) { 2325 2326 if (amd->AdaptSCSIID != i) { 2327 struct amd_target_info *tinfo; 2328 PEEprom prom; 2329 2330 tinfo = &amd->tinfo[i]; 2331 prom = (PEEprom)&amd->eepromBuf[i << 2]; 2332 if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) { 2333 tinfo->disc_tag |= AMD_USR_DISCENB; 2334 if ((prom->EE_MODE1 & TAG_QUEUING) != 0) 2335 tinfo->disc_tag |= AMD_USR_TAGENB; 2336 } 2337 if ((prom->EE_MODE1 & SYNC_NEGO) != 0) { 2338 tinfo->user.period = 2339 eeprom_period[prom->EE_SPEED]; 2340 tinfo->user.offset = AMD_MAX_SYNC_OFFSET; 2341 } 2342 tinfo->CtrlR1 = amd->AdaptSCSIID; 2343 if ((prom->EE_MODE1 & PARITY_CHK) != 0) 2344 tinfo->CtrlR1 |= PARITY_ERR_REPO; 2345 tinfo->CtrlR3 = FAST_CLK; 2346 tinfo->CtrlR4 = EATER_25NS; 2347 if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0) 2348 tinfo->CtrlR4 |= NEGATE_REQACKDATA; 2349 } 2350 } 2351 amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */ 2352 /* Conversion factor = 0 , 40MHz clock */ 2353 amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ); 2354 /* NOP cmd - clear command register */ 2355 amd_write8(amd, SCSICMDREG, NOP_CMD); 2356 amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD); 2357 amd_write8(amd, CNTLREG3, FAST_CLK); 2358 bval = EATER_25NS; 2359 if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) { 2360 bval |= NEGATE_REQACKDATA; 2361 } 
2362 amd_write8(amd, CNTLREG4, bval); 2363 2364 /* Disable SCSI bus reset interrupt */ 2365 amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST); 2366 2367 return 0; 2368 } 2369 2370 /* 2371 * attach and init a host adapter 2372 */ 2373 static int 2374 amd_attach(device_t dev) 2375 { 2376 struct cam_devq *devq; /* Device Queue to use for this SIM */ 2377 u_int8_t intstat; 2378 struct amd_softc *amd = device_get_softc(dev); 2379 int unit = device_get_unit(dev); 2380 int rid; 2381 void *ih; 2382 struct resource *irqres; 2383 2384 if (amd_init(dev)) { 2385 if (bootverbose) 2386 kprintf("amd_attach: amd_init failure!\n"); 2387 return ENXIO; 2388 } 2389 2390 /* Reset Pending INT */ 2391 intstat = amd_read8(amd, INTSTATREG); 2392 2393 /* After setting up the adapter, map our interrupt */ 2394 rid = 0; 2395 irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 2396 RF_SHAREABLE | RF_ACTIVE); 2397 if (irqres == NULL || 2398 bus_setup_intr(dev, irqres, 0, amd_intr, amd, &ih, NULL) 2399 ) { 2400 if (bootverbose) 2401 kprintf("amd%d: unable to register interrupt handler!\n", 2402 unit); 2403 return ENXIO; 2404 } 2405 2406 /* 2407 * Now let the CAM generic SCSI layer find the SCSI devices on 2408 * the bus * start queue to reset to the idle loop. * 2409 * Create device queue of SIM(s) * (MAX_START_JOB - 1) : 2410 * max_sim_transactions 2411 */ 2412 devq = cam_simq_alloc(MAX_START_JOB); 2413 if (devq == NULL) { 2414 if (bootverbose) 2415 kprintf("amd_attach: cam_simq_alloc failure!\n"); 2416 return ENXIO; 2417 } 2418 2419 amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd", 2420 amd, amd->unit, &sim_mplock, 1, 2421 MAX_TAGS_CMD_QUEUE, devq); 2422 cam_simq_release(devq); 2423 if (amd->psim == NULL) { 2424 if (bootverbose) 2425 kprintf("amd_attach: cam_sim_alloc failure!\n"); 2426 return ENXIO; 2427 } 2428 2429 if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) { 2430 cam_sim_free(amd->psim); 2431 if (bootverbose) 2432 kprintf("amd_attach: xpt_bus_register failure!\n"); 2433 return ENXIO; 2434 } 2435 2436 if (xpt_create_path(&amd->ppath, /* periph */ NULL, 2437 cam_sim_path(amd->psim), CAM_TARGET_WILDCARD, 2438 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2439 xpt_bus_deregister(cam_sim_path(amd->psim)); 2440 cam_sim_free(amd->psim); 2441 if (bootverbose) 2442 kprintf("amd_attach: xpt_create_path failure!\n"); 2443 return ENXIO; 2444 } 2445 2446 return 0; 2447 } 2448 2449 static int 2450 amd_probe(device_t dev) 2451 { 2452 if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) { 2453 device_set_desc(dev, 2454 "Tekram DC390(T)/AMD53c974 SCSI Host Adapter"); 2455 return 0; 2456 } 2457 return ENXIO; 2458 } 2459 2460 static device_method_t amd_methods[] = { 2461 /* Device interface */ 2462 DEVMETHOD(device_probe, amd_probe), 2463 DEVMETHOD(device_attach, amd_attach), 2464 DEVMETHOD_END 2465 }; 2466 2467 static driver_t amd_driver = { 2468 "amd", amd_methods, sizeof(struct amd_softc) 2469 }; 2470 2471 static devclass_t amd_devclass; 2472 DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, NULL, NULL); 2473
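/*
 * No detach method is registered above, so the module cannot be cleanly
 * unloaded.  The sketch below is illustrative only and is therefore kept
 * inside this comment: it shows roughly how the CAM and bus resources
 * acquired in amd_attach()/amd_init() could be torn down.  The "amd_detach"
 * name and the irqres/ih/iores softc fields are assumptions; the handles
 * would first have to be saved in struct amd_softc, which the code above
 * does not currently do.
 *
 *	static int
 *	amd_detach(device_t dev)
 *	{
 *		struct amd_softc *amd = device_get_softc(dev);
 *
 *		xpt_free_path(amd->ppath);
 *		xpt_bus_deregister(cam_sim_path(amd->psim));
 *		cam_sim_free(amd->psim);
 *		bus_teardown_intr(dev, amd->irqres, amd->ih);
 *		bus_release_resource(dev, SYS_RES_IRQ, 0, amd->irqres);
 *		bus_release_resource(dev, SYS_RES_IOPORT, PCI_BASE_ADDR0,
 *		    amd->iores);
 *		return (0);
 *	}
 *
 * The corresponding entry would then be added to amd_methods[]:
 *
 *	DEVMETHOD(device_detach, amd_detach),
 */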