1 /* 2 ********************************************************************* 3 * FILE NAME : amd.c 4 * BY : C.L. Huang (ching@tekram.com.tw) 5 * Erich Chen (erich@tekram.com.tw) 6 * Description: Device Driver for the amd53c974 PCI Bus Master 7 * SCSI Host adapter found on cards such as 8 * the Tekram DC-390(T). 9 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. The name of the author may not be used to endorse or promote products 20 * derived from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 ********************************************************************* 33 * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $ 34 */ 35 36 /* 37 ********************************************************************* 38 * HISTORY: 39 * 40 * REV# DATE NAME DESCRIPTION 41 * 1.00 07/02/96 CLH First release for RELEASE-2.1.0 42 * 1.01 08/20/96 CLH Update for RELEASE-2.1.5 43 * 1.02 11/06/96 CLH Fixed more than 1 LUN scanning 44 * 1.03 12/20/96 CLH Modify to support 2.2-ALPHA 45 * 1.04 12/26/97 CLH Modify to support RELEASE-2.2.5 46 * 1.05 01/01/99 ERICH CHEN Modify to support RELEASE-3.0.x (CAM) 47 ********************************************************************* 48 */ 49 50 /* #define AMD_DEBUG0 */ 51 /* #define AMD_DEBUG_SCSI_PHASE */ 52 53 #include <sys/param.h> 54 #include <sys/systm.h> 55 #include <sys/kernel.h> 56 #include <sys/malloc.h> 57 #include <sys/queue.h> 58 #include <sys/buf.h> 59 #include <sys/bus.h> 60 #include <sys/rman.h> 61 #include <sys/thread2.h> 62 63 #include <vm/vm.h> 64 #include <vm/pmap.h> 65 66 #include <machine/clock.h> 67 68 #include <bus/cam/cam.h> 69 #include <bus/cam/cam_ccb.h> 70 #include <bus/cam/cam_sim.h> 71 #include <bus/cam/cam_xpt_sim.h> 72 #include <bus/cam/cam_debug.h> 73 74 #include <bus/cam/scsi/scsi_all.h> 75 #include <bus/cam/scsi/scsi_message.h> 76 77 #include <bus/pci/pcivar.h> 78 #include <bus/pci/pcireg.h> 79 #include "amd.h" 80 81 #define PCI_DEVICE_ID_AMD53C974 0x20201022ul 82 #define PCI_BASE_ADDR0 0x10 83 84 typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int); 85 typedef phase_handler_t *phase_handler_func_t; 86 87 static void amd_intr(void *vamd); 88 static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB); 89 static phase_handler_t amd_NopPhase; 90 91 static phase_handler_t amd_DataOutPhase0; 92 static phase_handler_t amd_DataInPhase0; 93 #define amd_CommandPhase0 amd_NopPhase 94 static phase_handler_t amd_StatusPhase0; 95 static phase_handler_t amd_MsgOutPhase0; 96 static phase_handler_t amd_MsgInPhase0; 97 static phase_handler_t amd_DataOutPhase1; 98 static phase_handler_t amd_DataInPhase1; 99 static phase_handler_t amd_CommandPhase1; 100 static phase_handler_t amd_StatusPhase1; 101 static phase_handler_t amd_MsgOutPhase1; 102 static phase_handler_t amd_MsgInPhase1; 103 104 static void amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb); 105 static int amdparsemsg(struct amd_softc *amd); 106 static int amdhandlemsgreject(struct amd_softc *amd); 107 static void amdconstructsdtr(struct amd_softc *amd, 108 u_int period, u_int offset); 109 static u_int amdfindclockrate(struct amd_softc *amd, u_int *period); 110 static int amdsentmsg(struct amd_softc *amd, u_int msgtype, int full); 111 112 static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir); 113 static void amd_Disconnect(struct amd_softc *amd); 114 static void amd_Reselect(struct amd_softc *amd); 115 static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB); 116 static void amd_ScsiRstDetect(struct amd_softc *amd); 117 static void amd_ResetSCSIBus(struct amd_softc *amd); 118 static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB); 119 static void amd_InvalidCmd(struct amd_softc *amd); 120 121 #if 0 122 static void amd_timeout(void *arg1); 123 static void amd_reset(struct amd_softc *amd); 124 #endif 125 static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt); 126 127 void amd_linkSRB(struct amd_softc *amd); 128 static int amd_init(device_t); 129 static void amd_load_defaults(struct amd_softc *amd); 130 static void amd_load_eeprom_or_defaults(struct amd_softc *amd); 131 static int amd_EEpromInDO(struct amd_softc *amd); 132 static u_int16_t EEpromGetData1(struct amd_softc *amd); 133 static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval); 134 static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry); 135 static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd); 136 static void amd_ReadEEprom(struct amd_softc *amd); 137 138 static int amd_probe(device_t); 139 static int amd_attach(device_t); 140 static void amdcompletematch(struct amd_softc *amd, target_id_t target, 141 lun_id_t lun, u_int tag, struct srb_queue *queue, 142 cam_status status); 143 static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate, 144 u_int period, u_int offset, u_int type); 145 static void amdsettags(struct amd_softc *amd, u_int target, int tagenb); 146 147 static __inline void amd_clear_msg_state(struct amd_softc *amd); 148 149 static __inline void 150 amd_clear_msg_state(struct amd_softc *amd) 151 { 152 amd->msgout_len = 0; 153 amd->msgout_index = 0; 154 amd->msgin_index = 0; 155 } 156 157 /* CAM SIM entry points */ 158 #define ccb_srb_ptr spriv_ptr0 159 #define ccb_amd_ptr spriv_ptr1 160 static void amd_action(struct cam_sim *sim, union ccb *ccb); 161 static void amd_poll(struct cam_sim *sim); 162 163 /* 164 * State engine function tables indexed by SCSI phase number 165 */ 166 phase_handler_func_t amd_SCSI_phase0[] = { 167 amd_DataOutPhase0, 168 amd_DataInPhase0, 169 amd_CommandPhase0, 170 amd_StatusPhase0, 171 amd_NopPhase, 172 amd_NopPhase, 173 amd_MsgOutPhase0, 174 amd_MsgInPhase0 175 }; 176 177 phase_handler_func_t amd_SCSI_phase1[] = { 178 amd_DataOutPhase1, 179 amd_DataInPhase1, 180 amd_CommandPhase1, 181 amd_StatusPhase1, 182 amd_NopPhase, 183 amd_NopPhase, 184 amd_MsgOutPhase1, 185 amd_MsgInPhase1 186 }; 187 188 /* 189 * EEProm/BIOS negotiation periods 190 */ 191 u_int8_t eeprom_period[] = { 192 25, /* 10.0MHz */ 193 32, /* 8.0MHz */ 194 38, /* 6.6MHz */ 195 44, /* 5.7MHz */ 196 50, /* 5.0MHz */ 197 63, /* 4.0MHz */ 198 83, /* 3.0MHz */ 199 125 /* 2.0MHz */ 200 }; 201 202 /* 203 * chip clock setting to SCSI specified sync parameter table. 204 */ 205 u_int8_t tinfo_sync_period[] = { 206 25, /* 10.0 */ 207 32, /* 8.0 */ 208 38, /* 6.6 */ 209 44, /* 5.7 */ 210 50, /* 5.0 */ 211 57, /* 4.4 */ 212 63, /* 4.0 */ 213 70, /* 3.6 */ 214 76, /* 3.3 */ 215 83 /* 3.0 */ 216 }; 217 218 static __inline struct amd_srb * 219 amdgetsrb(struct amd_softc * amd) 220 { 221 struct amd_srb * pSRB; 222 223 crit_enter(); 224 pSRB = TAILQ_FIRST(&amd->free_srbs); 225 if (pSRB) 226 TAILQ_REMOVE(&amd->free_srbs, pSRB, links); 227 crit_exit(); 228 return (pSRB); 229 } 230 231 static void 232 amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb) 233 { 234 struct scsi_request_sense sense_cmd; 235 struct ccb_scsiio *csio; 236 u_int8_t *cdb; 237 u_int cdb_len; 238 239 csio = &srb->pccb->csio; 240 241 if (srb->SRBFlag & AUTO_REQSENSE) { 242 sense_cmd.opcode = REQUEST_SENSE; 243 sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5; 244 sense_cmd.unused[0] = 0; 245 sense_cmd.unused[1] = 0; 246 sense_cmd.length = csio->sense_len; 247 sense_cmd.control = 0; 248 cdb = &sense_cmd.opcode; 249 cdb_len = sizeof(sense_cmd); 250 } else { 251 cdb = &srb->CmdBlock[0]; 252 cdb_len = srb->ScsiCmdLen; 253 } 254 amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len); 255 } 256 257 /* 258 * Attempt to start a waiting transaction. Interrupts must be disabled 259 * upon entry to this function. 260 */ 261 static void 262 amdrunwaiting(struct amd_softc *amd) { 263 struct amd_srb *srb; 264 265 if (amd->last_phase != SCSI_BUS_FREE) 266 return; 267 268 srb = TAILQ_FIRST(&amd->waiting_srbs); 269 if (srb == NULL) 270 return; 271 272 if (amdstart(amd, srb) == 0) { 273 TAILQ_REMOVE(&amd->waiting_srbs, srb, links); 274 TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links); 275 } 276 } 277 278 static void 279 amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 280 { 281 struct amd_srb *srb; 282 union ccb *ccb; 283 struct amd_softc *amd; 284 285 srb = (struct amd_srb *)arg; 286 ccb = srb->pccb; 287 amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr; 288 289 if (error != 0) { 290 if (error != EFBIG) 291 kprintf("amd%d: Unexpected error 0x%x returned from " 292 "bus_dmamap_load\n", amd->unit, error); 293 if (ccb->ccb_h.status == CAM_REQ_INPROG) { 294 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 295 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; 296 } 297 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links); 298 xpt_done(ccb); 299 return; 300 } 301 302 if (nseg != 0) { 303 struct amd_sg *sg; 304 bus_dma_segment_t *end_seg; 305 bus_dmasync_op_t op; 306 307 end_seg = dm_segs + nseg; 308 309 /* Copy the segments into our SG list */ 310 srb->pSGlist = &srb->SGsegment[0]; 311 sg = srb->pSGlist; 312 while (dm_segs < end_seg) { 313 sg->SGXLen = dm_segs->ds_len; 314 sg->SGXPtr = dm_segs->ds_addr; 315 sg++; 316 dm_segs++; 317 } 318 319 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 320 op = BUS_DMASYNC_PREREAD; 321 else 322 op = BUS_DMASYNC_PREWRITE; 323 324 bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op); 325 326 } 327 srb->SGcount = nseg; 328 srb->SGIndex = 0; 329 srb->AdaptStatus = 0; 330 srb->TargetStatus = 0; 331 srb->MsgCnt = 0; 332 srb->SRBStatus = 0; 333 srb->SRBFlag = 0; 334 srb->SRBState = 0; 335 srb->TotalXferredLen = 0; 336 srb->SGPhysAddr = 0; 337 srb->SGToBeXferLen = 0; 338 srb->EndMessage = 0; 339 340 crit_enter(); 341 342 /* 343 * Last time we need to check if this CCB needs to 344 * be aborted. 345 */ 346 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 347 if (nseg != 0) 348 bus_dmamap_unload(amd->buffer_dmat, srb->dmamap); 349 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links); 350 xpt_done(ccb); 351 crit_exit(); 352 return; 353 } 354 ccb->ccb_h.status |= CAM_SIM_QUEUED; 355 #if 0 356 /* XXX Need a timeout handler */ 357 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000, 358 amdtimeout, srb); 359 #endif 360 TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links); 361 amdrunwaiting(amd); 362 crit_exit(); 363 } 364 365 static void 366 amd_action(struct cam_sim * psim, union ccb * pccb) 367 { 368 struct amd_softc * amd; 369 u_int target_id, target_lun; 370 371 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n")); 372 373 amd = (struct amd_softc *) cam_sim_softc(psim); 374 target_id = pccb->ccb_h.target_id; 375 target_lun = pccb->ccb_h.target_lun; 376 377 switch (pccb->ccb_h.func_code) { 378 case XPT_SCSI_IO: 379 { 380 struct amd_srb * pSRB; 381 struct ccb_scsiio *pcsio; 382 383 pcsio = &pccb->csio; 384 385 /* 386 * Assign an SRB and connect it with this ccb. 387 */ 388 pSRB = amdgetsrb(amd); 389 390 if (!pSRB) { 391 /* Freeze SIMQ */ 392 pccb->ccb_h.status = CAM_RESRC_UNAVAIL; 393 xpt_done(pccb); 394 return; 395 } 396 pSRB->pccb = pccb; 397 pccb->ccb_h.ccb_srb_ptr = pSRB; 398 pccb->ccb_h.ccb_amd_ptr = amd; 399 pSRB->ScsiCmdLen = pcsio->cdb_len; 400 bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len); 401 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 402 if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 403 /* 404 * We've been given a pointer 405 * to a single buffer. 406 */ 407 if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 408 int error; 409 410 crit_enter(); 411 error = 412 bus_dmamap_load(amd->buffer_dmat, 413 pSRB->dmamap, 414 pcsio->data_ptr, 415 pcsio->dxfer_len, 416 amdexecutesrb, 417 pSRB, /*flags*/0); 418 if (error == EINPROGRESS) { 419 /* 420 * So as to maintain 421 * ordering, freeze the 422 * controller queue 423 * until our mapping is 424 * returned. 425 */ 426 xpt_freeze_simq(amd->psim, 1); 427 pccb->ccb_h.status |= 428 CAM_RELEASE_SIMQ; 429 } 430 crit_exit(); 431 } else { 432 struct bus_dma_segment seg; 433 434 /* Pointer to physical buffer */ 435 seg.ds_addr = 436 (bus_addr_t)pcsio->data_ptr; 437 seg.ds_len = pcsio->dxfer_len; 438 amdexecutesrb(pSRB, &seg, 1, 0); 439 } 440 } else { 441 struct bus_dma_segment *segs; 442 443 if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 444 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { 445 TAILQ_INSERT_HEAD(&amd->free_srbs, 446 pSRB, links); 447 pccb->ccb_h.status = CAM_PROVIDE_FAIL; 448 xpt_done(pccb); 449 return; 450 } 451 452 /* Just use the segments provided */ 453 segs = 454 (struct bus_dma_segment *)pcsio->data_ptr; 455 amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0); 456 } 457 } else 458 amdexecutesrb(pSRB, NULL, 0, 0); 459 break; 460 } 461 case XPT_PATH_INQ: 462 { 463 struct ccb_pathinq *cpi = &pccb->cpi; 464 465 cpi->version_num = 1; 466 cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; 467 cpi->target_sprt = 0; 468 cpi->hba_misc = 0; 469 cpi->hba_eng_cnt = 0; 470 cpi->max_target = 7; 471 cpi->max_lun = amd->max_lun; /* 7 or 0 */ 472 cpi->initiator_id = amd->AdaptSCSIID; 473 cpi->bus_id = cam_sim_bus(psim); 474 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 475 strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN); 476 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); 477 cpi->unit_number = cam_sim_unit(psim); 478 cpi->transport = XPORT_SPI; 479 cpi->transport_version = 2; 480 cpi->protocol = PROTO_SCSI; 481 cpi->protocol_version = SCSI_REV_2; 482 cpi->ccb_h.status = CAM_REQ_CMP; 483 xpt_done(pccb); 484 break; 485 } 486 case XPT_ABORT: 487 pccb->ccb_h.status = CAM_REQ_INVALID; 488 xpt_done(pccb); 489 break; 490 case XPT_RESET_BUS: 491 { 492 493 int i; 494 495 amd_ResetSCSIBus(amd); 496 amd->ACBFlag = 0; 497 498 for (i = 0; i < 500; i++) { 499 DELAY(1000); /* Wait until our interrupt 500 * handler sees it */ 501 } 502 503 pccb->ccb_h.status = CAM_REQ_CMP; 504 xpt_done(pccb); 505 break; 506 } 507 case XPT_RESET_DEV: 508 pccb->ccb_h.status = CAM_REQ_INVALID; 509 xpt_done(pccb); 510 break; 511 case XPT_TERM_IO: 512 pccb->ccb_h.status = CAM_REQ_INVALID; 513 xpt_done(pccb); 514 break; 515 case XPT_GET_TRAN_SETTINGS: 516 { 517 struct ccb_trans_settings *cts = &pccb->cts; 518 struct amd_target_info *targ_info = &amd->tinfo[target_id]; 519 struct amd_transinfo *tinfo; 520 struct ccb_trans_settings_scsi *scsi = 521 &cts->proto_specific.scsi; 522 struct ccb_trans_settings_spi *spi = 523 &cts->xport_specific.spi; 524 525 cts->protocol = PROTO_SCSI; 526 cts->protocol_version = SCSI_REV_2; 527 cts->transport = XPORT_SPI; 528 cts->transport_version = 2; 529 530 crit_enter(); 531 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 532 /* current transfer settings */ 533 if (targ_info->disc_tag & AMD_CUR_DISCENB) { 534 spi->flags = CTS_SPI_FLAGS_DISC_ENB; 535 } else { 536 spi->flags = 0; 537 } 538 if (targ_info->disc_tag & AMD_CUR_TAGENB) { 539 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 540 } else { 541 scsi->flags = 0; 542 } 543 tinfo = &targ_info->current; 544 } else { 545 /* default(user) transfer settings */ 546 if (targ_info->disc_tag & AMD_USR_DISCENB) { 547 spi->flags = CTS_SPI_FLAGS_DISC_ENB; 548 } else { 549 spi->flags = 0; 550 } 551 if (targ_info->disc_tag & AMD_USR_TAGENB) { 552 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 553 } else { 554 scsi->flags = 0; 555 } 556 tinfo = &targ_info->user; 557 } 558 spi->sync_period = tinfo->period; 559 spi->sync_offset = tinfo->offset; 560 crit_exit(); 561 562 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 563 spi->valid = CTS_SPI_VALID_SYNC_RATE 564 | CTS_SPI_VALID_SYNC_OFFSET 565 | CTS_SPI_VALID_BUS_WIDTH 566 | CTS_SPI_VALID_DISC; 567 scsi->valid = CTS_SCSI_VALID_TQ; 568 pccb->ccb_h.status = CAM_REQ_CMP; 569 xpt_done(pccb); 570 break; 571 } 572 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 573 #define IS_USER_SETTINGS(c) (c->type == CTS_TYPE_USER_SETTINGS) 574 case XPT_SET_TRAN_SETTINGS: 575 { 576 struct ccb_trans_settings *cts = &pccb->cts; 577 struct amd_target_info *targ_info; 578 u_int update_type = 0; 579 int last_entry; 580 581 struct ccb_trans_settings_scsi *scsi = 582 &cts->proto_specific.scsi; 583 struct ccb_trans_settings_spi *spi = 584 &cts->xport_specific.spi; 585 if (IS_CURRENT_SETTINGS(cts)) { 586 update_type |= AMD_TRANS_GOAL; 587 } else if (IS_USER_SETTINGS(cts)) { 588 update_type |= AMD_TRANS_USER; 589 } 590 if (update_type == 0 591 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) { 592 cts->ccb_h.status = CAM_REQ_INVALID; 593 xpt_done(pccb); 594 } 595 596 crit_enter(); 597 targ_info = &amd->tinfo[target_id]; 598 599 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 600 if (update_type & AMD_TRANS_GOAL) { 601 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) 602 != 0) { 603 targ_info->disc_tag |= AMD_CUR_DISCENB; 604 } else { 605 targ_info->disc_tag &= ~AMD_CUR_DISCENB; 606 } 607 } 608 if (update_type & AMD_TRANS_USER) { 609 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) 610 != 0) { 611 targ_info->disc_tag |= AMD_USR_DISCENB; 612 } else { 613 targ_info->disc_tag &= ~AMD_USR_DISCENB; 614 } 615 } 616 } 617 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 618 if (update_type & AMD_TRANS_GOAL) { 619 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) 620 != 0) { 621 targ_info->disc_tag |= AMD_CUR_TAGENB; 622 } else { 623 targ_info->disc_tag &= ~AMD_CUR_TAGENB; 624 } 625 } 626 if (update_type & AMD_TRANS_USER) { 627 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) 628 != 0) { 629 targ_info->disc_tag |= AMD_USR_TAGENB; 630 } else { 631 targ_info->disc_tag &= ~AMD_USR_TAGENB; 632 } 633 } 634 } 635 636 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) { 637 if (update_type & AMD_TRANS_GOAL) 638 spi->sync_offset = targ_info->goal.offset; 639 else 640 spi->sync_offset = targ_info->user.offset; 641 } 642 643 if (spi->sync_offset > AMD_MAX_SYNC_OFFSET) 644 spi->sync_offset = AMD_MAX_SYNC_OFFSET; 645 646 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) { 647 if (update_type & AMD_TRANS_GOAL) 648 spi->sync_period = targ_info->goal.period; 649 else 650 spi->sync_period = targ_info->user.period; 651 } 652 653 last_entry = sizeof(tinfo_sync_period) - 1; 654 if ((spi->sync_period != 0) 655 && (spi->sync_period < tinfo_sync_period[0])) 656 spi->sync_period = tinfo_sync_period[0]; 657 if (spi->sync_period > tinfo_sync_period[last_entry]) 658 spi->sync_period = 0; 659 if (spi->sync_offset == 0) 660 spi->sync_period = 0; 661 662 if ((update_type & AMD_TRANS_USER) != 0) { 663 targ_info->user.period = spi->sync_period; 664 targ_info->user.offset = spi->sync_offset; 665 } 666 if ((update_type & AMD_TRANS_GOAL) != 0) { 667 targ_info->goal.period = spi->sync_period; 668 targ_info->goal.offset = spi->sync_offset; 669 } 670 crit_exit(); 671 pccb->ccb_h.status = CAM_REQ_CMP; 672 xpt_done(pccb); 673 break; 674 } 675 case XPT_CALC_GEOMETRY: 676 { 677 struct ccb_calc_geometry *ccg; 678 u_int32_t size_mb; 679 u_int32_t secs_per_cylinder; 680 int extended; 681 682 ccg = &pccb->ccg; 683 size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size); 684 extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0; 685 686 if (size_mb > 1024 && extended) { 687 ccg->heads = 255; 688 ccg->secs_per_track = 63; 689 } else { 690 ccg->heads = 64; 691 ccg->secs_per_track = 32; 692 } 693 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 694 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 695 pccb->ccb_h.status = CAM_REQ_CMP; 696 xpt_done(pccb); 697 break; 698 } 699 default: 700 pccb->ccb_h.status = CAM_REQ_INVALID; 701 xpt_done(pccb); 702 break; 703 } 704 } 705 706 static void 707 amd_poll(struct cam_sim * psim) 708 { 709 amd_intr(cam_sim_softc(psim)); 710 } 711 712 static u_int8_t * 713 phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt) 714 { 715 intptr_t dataPtr; 716 struct ccb_scsiio *pcsio; 717 u_int8_t i; 718 struct amd_sg * pseg; 719 720 dataPtr = 0; 721 pcsio = &pSRB->pccb->csio; 722 723 dataPtr = (intptr_t) pcsio->data_ptr; 724 pseg = pSRB->SGsegment; 725 for (i = 0; i < pSRB->SGIndex; i++) { 726 dataPtr += (int) pseg->SGXLen; 727 pseg++; 728 } 729 dataPtr += (int) xferCnt; 730 return ((u_int8_t *) dataPtr); 731 } 732 733 static void 734 ResetDevParam(struct amd_softc * amd) 735 { 736 u_int target; 737 738 for (target = 0; target <= amd->max_id; target++) { 739 if (amd->AdaptSCSIID != target) { 740 amdsetsync(amd, target, /*clockrate*/0, 741 /*period*/0, /*offset*/0, AMD_TRANS_CUR); 742 } 743 } 744 } 745 746 static void 747 amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun, 748 u_int tag, struct srb_queue *queue, cam_status status) 749 { 750 struct amd_srb *srb; 751 struct amd_srb *next_srb; 752 753 for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) { 754 union ccb *ccb; 755 756 next_srb = TAILQ_NEXT(srb, links); 757 if (srb->pccb->ccb_h.target_id != target 758 && target != CAM_TARGET_WILDCARD) 759 continue; 760 761 if (srb->pccb->ccb_h.target_lun != lun 762 && lun != CAM_LUN_WILDCARD) 763 continue; 764 765 if (srb->TagNumber != tag 766 && tag != AMD_TAG_WILDCARD) 767 continue; 768 769 ccb = srb->pccb; 770 TAILQ_REMOVE(queue, srb, links); 771 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links); 772 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0 773 && (status & CAM_DEV_QFRZN) != 0) 774 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 775 ccb->ccb_h.status = status; 776 xpt_done(ccb); 777 } 778 779 } 780 781 static void 782 amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate, 783 u_int period, u_int offset, u_int type) 784 { 785 struct amd_target_info *tinfo; 786 u_int old_period; 787 u_int old_offset; 788 789 tinfo = &amd->tinfo[target]; 790 old_period = tinfo->current.period; 791 old_offset = tinfo->current.offset; 792 if ((type & AMD_TRANS_CUR) != 0 793 && (old_period != period || old_offset != offset)) { 794 struct cam_path *path; 795 796 tinfo->current.period = period; 797 tinfo->current.offset = offset; 798 tinfo->sync_period_reg = clockrate; 799 tinfo->sync_offset_reg = offset; 800 tinfo->CtrlR3 &= ~FAST_SCSI; 801 tinfo->CtrlR4 &= ~EATER_25NS; 802 if (clockrate > 7) 803 tinfo->CtrlR4 |= EATER_25NS; 804 else 805 tinfo->CtrlR3 |= FAST_SCSI; 806 807 if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) { 808 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg); 809 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg); 810 amd_write8(amd, CNTLREG3, tinfo->CtrlR3); 811 amd_write8(amd, CNTLREG4, tinfo->CtrlR4); 812 } 813 /* If possible, update the XPT's notion of our transfer rate */ 814 if (xpt_create_path(&path, /*periph*/NULL, 815 cam_sim_path(amd->psim), target, 816 CAM_LUN_WILDCARD) == CAM_REQ_CMP) { 817 struct ccb_trans_settings neg; 818 struct ccb_trans_settings_spi *spi = 819 &neg.xport_specific.spi; 820 xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1); 821 memset(&neg, 0, sizeof (neg)); 822 spi->sync_period = period; 823 spi->sync_offset = offset; 824 spi->valid = CTS_SPI_VALID_SYNC_RATE 825 | CTS_SPI_VALID_SYNC_OFFSET; 826 xpt_async(AC_TRANSFER_NEG, path, &neg); 827 xpt_free_path(path); 828 } 829 } 830 if ((type & AMD_TRANS_GOAL) != 0) { 831 tinfo->goal.period = period; 832 tinfo->goal.offset = offset; 833 } 834 835 if ((type & AMD_TRANS_USER) != 0) { 836 tinfo->user.period = period; 837 tinfo->user.offset = offset; 838 } 839 } 840 841 static void 842 amdsettags(struct amd_softc *amd, u_int target, int tagenb) 843 { 844 panic("Implement me!\n"); 845 } 846 847 848 #if 0 849 /* 850 ********************************************************************** 851 * Function : amd_reset (struct amd_softc * amd) 852 * Purpose : perform a hard reset on the SCSI bus( and AMD chip). 853 * Inputs : cmd - command which caused the SCSI RESET 854 ********************************************************************** 855 */ 856 static void 857 amd_reset(struct amd_softc * amd) 858 { 859 u_int8_t bval; 860 u_int16_t i; 861 862 863 #ifdef AMD_DEBUG0 864 kprintf("DC390: RESET"); 865 #endif 866 867 crit_enter(); 868 bval = amd_read8(amd, CNTLREG1); 869 bval |= DIS_INT_ON_SCSI_RST; 870 amd_write8(amd, CNTLREG1, bval); /* disable interrupt */ 871 amd_ResetSCSIBus(amd); 872 873 for (i = 0; i < 500; i++) { 874 DELAY(1000); 875 } 876 877 bval = amd_read8(amd, CNTLREG1); 878 bval &= ~DIS_INT_ON_SCSI_RST; 879 amd_write8(amd, CNTLREG1, bval); /* re-enable interrupt */ 880 881 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD); 882 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 883 884 ResetDevParam(amd); 885 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, 886 AMD_TAG_WILDCARD, &amd->running_srbs, 887 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET); 888 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, 889 AMD_TAG_WILDCARD, &amd->waiting_srbs, 890 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET); 891 amd->active_srb = NULL; 892 amd->ACBFlag = 0; 893 crit_exit(); 894 return; 895 } 896 897 void 898 amd_timeout(void *arg1) 899 { 900 struct amd_srb * pSRB; 901 902 pSRB = (struct amd_srb *) arg1; 903 } 904 #endif 905 906 static int 907 amdstart(struct amd_softc *amd, struct amd_srb *pSRB) 908 { 909 union ccb *pccb; 910 struct ccb_scsiio *pcsio; 911 struct amd_target_info *targ_info; 912 u_int identify_msg; 913 u_int command; 914 u_int target; 915 u_int lun; 916 int tagged; 917 918 pccb = pSRB->pccb; 919 pcsio = &pccb->csio; 920 target = pccb->ccb_h.target_id; 921 lun = pccb->ccb_h.target_lun; 922 targ_info = &amd->tinfo[target]; 923 924 amd_clear_msg_state(amd); 925 amd_write8(amd, SCSIDESTIDREG, target); 926 amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg); 927 amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg); 928 amd_write8(amd, CNTLREG1, targ_info->CtrlR1); 929 amd_write8(amd, CNTLREG3, targ_info->CtrlR3); 930 amd_write8(amd, CNTLREG4, targ_info->CtrlR4); 931 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 932 933 identify_msg = MSG_IDENTIFYFLAG | lun; 934 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0 935 && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0 936 && (pSRB->CmdBlock[0] != REQUEST_SENSE) 937 && (pSRB->SRBFlag & AUTO_REQSENSE) == 0) 938 identify_msg |= MSG_IDENTIFY_DISCFLAG; 939 940 amd_write8(amd, SCSIFIFOREG, identify_msg); 941 tagged = 0; 942 if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0 943 || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0) 944 pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 945 if (targ_info->current.period != targ_info->goal.period 946 || targ_info->current.offset != targ_info->goal.offset) { 947 command = SEL_W_ATN_STOP; 948 amdconstructsdtr(amd, targ_info->goal.period, 949 targ_info->goal.offset); 950 } else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 951 command = SEL_W_ATN2; 952 pSRB->SRBState = SRB_START; 953 amd_write8(amd, SCSIFIFOREG, pcsio->tag_action); 954 amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber); 955 tagged++; 956 } else { 957 command = SEL_W_ATN; 958 pSRB->SRBState = SRB_START; 959 } 960 if (command != SEL_W_ATN_STOP) 961 amdsetupcommand(amd, pSRB); 962 963 if (amd_read8(amd, SCSISTATREG) & INTERRUPT) { 964 pSRB->SRBState = SRB_READY; 965 return (1); 966 } else { 967 amd->last_phase = SCSI_ARBITRATING; 968 amd_write8(amd, SCSICMDREG, command); 969 amd->active_srb = pSRB; 970 amd->cur_target = target; 971 amd->cur_lun = lun; 972 return (0); 973 } 974 } 975 976 /* 977 * Catch an interrupt from the adapter. 978 * Process pending device interrupts. 979 */ 980 static void 981 amd_intr(void *arg) 982 { 983 struct amd_softc *amd; 984 struct amd_srb *pSRB; 985 u_int internstat = 0; 986 u_int scsistat; 987 u_int intstat; 988 989 amd = (struct amd_softc *)arg; 990 991 if (amd == NULL) { 992 #ifdef AMD_DEBUG0 993 kprintf("amd_intr: amd NULL return......"); 994 #endif 995 return; 996 } 997 998 scsistat = amd_read8(amd, SCSISTATREG); 999 if (!(scsistat & INTERRUPT)) { 1000 #ifdef AMD_DEBUG0 1001 kprintf("amd_intr: scsistat = NULL ,return......"); 1002 #endif 1003 return; 1004 } 1005 #ifdef AMD_DEBUG_SCSI_PHASE 1006 kprintf("scsistat=%2x,", scsistat); 1007 #endif 1008 1009 internstat = amd_read8(amd, INTERNSTATREG); 1010 intstat = amd_read8(amd, INTSTATREG); 1011 1012 #ifdef AMD_DEBUG_SCSI_PHASE 1013 kprintf("intstat=%2x,", intstat); 1014 #endif 1015 1016 if (intstat & DISCONNECTED) { 1017 amd_Disconnect(amd); 1018 return; 1019 } 1020 if (intstat & RESELECTED) { 1021 amd_Reselect(amd); 1022 return; 1023 } 1024 if (intstat & INVALID_CMD) { 1025 amd_InvalidCmd(amd); 1026 return; 1027 } 1028 if (intstat & SCSI_RESET_) { 1029 amd_ScsiRstDetect(amd); 1030 return; 1031 } 1032 if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) { 1033 pSRB = amd->active_srb; 1034 /* 1035 * Run our state engine. First perform 1036 * post processing for the last phase we 1037 * were in, followed by any processing 1038 * required to handle the current phase. 1039 */ 1040 scsistat = 1041 amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat); 1042 amd->last_phase = scsistat & SCSI_PHASE_MASK; 1043 (void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat); 1044 } 1045 } 1046 1047 static u_int 1048 amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1049 { 1050 struct amd_sg *psgl; 1051 u_int32_t ResidCnt, xferCnt; 1052 1053 if (!(pSRB->SRBState & SRB_XFERPAD)) { 1054 if (scsistat & PARITY_ERR) { 1055 pSRB->SRBStatus |= PARITY_ERROR; 1056 } 1057 if (scsistat & COUNT_2_ZERO) { 1058 while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0) 1059 ; 1060 pSRB->TotalXferredLen += pSRB->SGToBeXferLen; 1061 pSRB->SGIndex++; 1062 if (pSRB->SGIndex < pSRB->SGcount) { 1063 pSRB->pSGlist++; 1064 psgl = pSRB->pSGlist; 1065 pSRB->SGPhysAddr = psgl->SGXPtr; 1066 pSRB->SGToBeXferLen = psgl->SGXLen; 1067 } else { 1068 pSRB->SGToBeXferLen = 0; 1069 } 1070 } else { 1071 ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f; 1072 ResidCnt += amd_read8(amd, CTCREG_LOW) 1073 | (amd_read8(amd, CTCREG_MID) << 8) 1074 | (amd_read8(amd, CURTXTCNTREG) << 16); 1075 1076 xferCnt = pSRB->SGToBeXferLen - ResidCnt; 1077 pSRB->SGPhysAddr += xferCnt; 1078 pSRB->TotalXferredLen += xferCnt; 1079 pSRB->SGToBeXferLen = ResidCnt; 1080 } 1081 } 1082 amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD); 1083 return (scsistat); 1084 } 1085 1086 static u_int 1087 amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1088 { 1089 u_int8_t bval; 1090 u_int16_t i, residual; 1091 struct amd_sg *psgl; 1092 u_int32_t ResidCnt, xferCnt; 1093 u_int8_t * ptr; 1094 1095 if (!(pSRB->SRBState & SRB_XFERPAD)) { 1096 if (scsistat & PARITY_ERR) { 1097 pSRB->SRBStatus |= PARITY_ERROR; 1098 } 1099 if (scsistat & COUNT_2_ZERO) { 1100 while (1) { 1101 bval = amd_read8(amd, DMA_Status); 1102 if ((bval & DMA_XFER_DONE) != 0) 1103 break; 1104 } 1105 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD); 1106 1107 pSRB->TotalXferredLen += pSRB->SGToBeXferLen; 1108 pSRB->SGIndex++; 1109 if (pSRB->SGIndex < pSRB->SGcount) { 1110 pSRB->pSGlist++; 1111 psgl = pSRB->pSGlist; 1112 pSRB->SGPhysAddr = psgl->SGXPtr; 1113 pSRB->SGToBeXferLen = psgl->SGXLen; 1114 } else { 1115 pSRB->SGToBeXferLen = 0; 1116 } 1117 } else { /* phase changed */ 1118 residual = 0; 1119 bval = amd_read8(amd, CURRENTFIFOREG); 1120 while (bval & 0x1f) { 1121 if ((bval & 0x1f) == 1) { 1122 for (i = 0; i < 0x100; i++) { 1123 bval = amd_read8(amd, CURRENTFIFOREG); 1124 if (!(bval & 0x1f)) { 1125 goto din_1; 1126 } else if (i == 0x0ff) { 1127 residual = 1; 1128 goto din_1; 1129 } 1130 } 1131 } else { 1132 bval = amd_read8(amd, CURRENTFIFOREG); 1133 } 1134 } 1135 din_1: 1136 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD); 1137 for (i = 0; i < 0x8000; i++) { 1138 if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE)) 1139 break; 1140 } 1141 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD); 1142 1143 ResidCnt = amd_read8(amd, CTCREG_LOW) 1144 | (amd_read8(amd, CTCREG_MID) << 8) 1145 | (amd_read8(amd, CURTXTCNTREG) << 16); 1146 xferCnt = pSRB->SGToBeXferLen - ResidCnt; 1147 pSRB->SGPhysAddr += xferCnt; 1148 pSRB->TotalXferredLen += xferCnt; 1149 pSRB->SGToBeXferLen = ResidCnt; 1150 if (residual) { 1151 /* get residual byte */ 1152 bval = amd_read8(amd, SCSIFIFOREG); 1153 ptr = phystovirt(pSRB, xferCnt); 1154 *ptr = bval; 1155 pSRB->SGPhysAddr++; 1156 pSRB->TotalXferredLen++; 1157 pSRB->SGToBeXferLen--; 1158 } 1159 } 1160 } 1161 return (scsistat); 1162 } 1163 1164 static u_int 1165 amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1166 { 1167 pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG); 1168 /* get message */ 1169 pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG); 1170 pSRB->SRBState = SRB_COMPLETED; 1171 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD); 1172 return (SCSI_NOP0); 1173 } 1174 1175 static u_int 1176 amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1177 { 1178 if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) { 1179 scsistat = SCSI_NOP0; 1180 } 1181 return (scsistat); 1182 } 1183 1184 static u_int 1185 amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1186 { 1187 int done; 1188 1189 amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG); 1190 1191 done = amdparsemsg(amd); 1192 if (done) 1193 amd->msgin_index = 0; 1194 else 1195 amd->msgin_index++; 1196 return (SCSI_NOP0); 1197 } 1198 1199 static int 1200 amdparsemsg(struct amd_softc *amd) 1201 { 1202 struct amd_target_info *targ_info; 1203 int reject; 1204 int done; 1205 int response; 1206 1207 done = FALSE; 1208 response = FALSE; 1209 reject = FALSE; 1210 1211 targ_info = &amd->tinfo[amd->cur_target]; 1212 1213 /* 1214 * Parse as much of the message as is availible, 1215 * rejecting it if we don't support it. When 1216 * the entire message is availible and has been 1217 * handled, return TRUE indicating that we have 1218 * parsed an entire message. 1219 */ 1220 switch (amd->msgin_buf[0]) { 1221 case MSG_DISCONNECT: 1222 amd->active_srb->SRBState = SRB_DISCONNECT; 1223 amd->disc_count[amd->cur_target][amd->cur_lun]++; 1224 done = TRUE; 1225 break; 1226 case MSG_SIMPLE_Q_TAG: 1227 { 1228 struct amd_srb *disc_srb; 1229 1230 if (amd->msgin_index < 1) 1231 break; 1232 disc_srb = &amd->SRB_array[amd->msgin_buf[1]]; 1233 if (amd->active_srb != NULL 1234 || disc_srb->SRBState != SRB_DISCONNECT 1235 || disc_srb->pccb->ccb_h.target_id != amd->cur_target 1236 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) { 1237 kprintf("amd%d: Unexpected tagged reselection " 1238 "for target %d, Issuing Abort\n", amd->unit, 1239 amd->cur_target); 1240 amd->msgout_buf[0] = MSG_ABORT; 1241 amd->msgout_len = 1; 1242 response = TRUE; 1243 break; 1244 } 1245 amd->active_srb = disc_srb; 1246 amd->disc_count[amd->cur_target][amd->cur_lun]--; 1247 done = TRUE; 1248 break; 1249 } 1250 case MSG_MESSAGE_REJECT: 1251 response = amdhandlemsgreject(amd); 1252 if (response == FALSE) 1253 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD); 1254 /* FALLTHROUGH */ 1255 case MSG_NOOP: 1256 done = TRUE; 1257 break; 1258 case MSG_EXTENDED: 1259 { 1260 u_int clockrate; 1261 u_int period; 1262 u_int offset; 1263 u_int saved_offset; 1264 1265 /* Wait for enough of the message to begin validation */ 1266 if (amd->msgin_index < 1) 1267 break; 1268 if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 1269 reject = TRUE; 1270 break; 1271 } 1272 1273 /* Wait for opcode */ 1274 if (amd->msgin_index < 2) 1275 break; 1276 1277 if (amd->msgin_buf[2] != MSG_EXT_SDTR) { 1278 reject = TRUE; 1279 break; 1280 } 1281 1282 /* 1283 * Wait until we have both args before validating 1284 * and acting on this message. 1285 * 1286 * Add one to MSG_EXT_SDTR_LEN to account for 1287 * the extended message preamble. 1288 */ 1289 if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 1290 break; 1291 1292 period = amd->msgin_buf[3]; 1293 saved_offset = offset = amd->msgin_buf[4]; 1294 clockrate = amdfindclockrate(amd, &period); 1295 if (offset > AMD_MAX_SYNC_OFFSET) 1296 offset = AMD_MAX_SYNC_OFFSET; 1297 if (period == 0 || offset == 0) { 1298 offset = 0; 1299 period = 0; 1300 clockrate = 0; 1301 } 1302 amdsetsync(amd, amd->cur_target, clockrate, period, offset, 1303 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL); 1304 1305 /* 1306 * See if we initiated Sync Negotiation 1307 * and didn't have to fall down to async 1308 * transfers. 1309 */ 1310 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) { 1311 /* We started it */ 1312 if (saved_offset != offset) { 1313 /* Went too low - force async */ 1314 reject = TRUE; 1315 } 1316 } else { 1317 /* 1318 * Send our own SDTR in reply 1319 */ 1320 if (bootverbose) 1321 kprintf("Sending SDTR!\n"); 1322 amd->msgout_index = 0; 1323 amd->msgout_len = 0; 1324 amdconstructsdtr(amd, period, offset); 1325 amd->msgout_index = 0; 1326 response = TRUE; 1327 } 1328 done = TRUE; 1329 break; 1330 } 1331 case MSG_SAVEDATAPOINTER: 1332 case MSG_RESTOREPOINTERS: 1333 /* XXX Implement!!! */ 1334 done = TRUE; 1335 break; 1336 default: 1337 reject = TRUE; 1338 break; 1339 } 1340 1341 if (reject) { 1342 amd->msgout_index = 0; 1343 amd->msgout_len = 1; 1344 amd->msgout_buf[0] = MSG_MESSAGE_REJECT; 1345 done = TRUE; 1346 response = TRUE; 1347 } 1348 1349 if (response) 1350 amd_write8(amd, SCSICMDREG, SET_ATN_CMD); 1351 1352 if (done && !response) 1353 /* Clear the outgoing message buffer */ 1354 amd->msgout_len = 0; 1355 1356 /* Drop Ack */ 1357 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD); 1358 1359 return (done); 1360 } 1361 1362 static u_int 1363 amdfindclockrate(struct amd_softc *amd, u_int *period) 1364 { 1365 u_int i; 1366 u_int clockrate; 1367 1368 for (i = 0; i < sizeof(tinfo_sync_period); i++) { 1369 u_int8_t *table_entry; 1370 1371 table_entry = &tinfo_sync_period[i]; 1372 if (*period <= *table_entry) { 1373 /* 1374 * When responding to a target that requests 1375 * sync, the requested rate may fall between 1376 * two rates that we can output, but still be 1377 * a rate that we can receive. Because of this, 1378 * we want to respond to the target with 1379 * the same rate that it sent to us even 1380 * if the period we use to send data to it 1381 * is lower. Only lower the response period 1382 * if we must. 1383 */ 1384 if (i == 0) { 1385 *period = *table_entry; 1386 } 1387 break; 1388 } 1389 } 1390 1391 if (i == sizeof(tinfo_sync_period)) { 1392 /* Too slow for us. Use asnyc transfers. */ 1393 *period = 0; 1394 clockrate = 0; 1395 } else 1396 clockrate = i + 4; 1397 1398 return (clockrate); 1399 } 1400 1401 /* 1402 * See if we sent a particular extended message to the target. 1403 * If "full" is true, the target saw the full message. 1404 * If "full" is false, the target saw at least the first 1405 * byte of the message. 1406 */ 1407 static int 1408 amdsentmsg(struct amd_softc *amd, u_int msgtype, int full) 1409 { 1410 int found; 1411 int index; 1412 1413 found = FALSE; 1414 index = 0; 1415 1416 while (index < amd->msgout_len) { 1417 if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0 1418 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT) 1419 index++; 1420 else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG 1421 && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) { 1422 /* Skip tag type and tag id */ 1423 index += 2; 1424 } else if (amd->msgout_buf[index] == MSG_EXTENDED) { 1425 /* Found a candidate */ 1426 if (amd->msgout_buf[index+2] == msgtype) { 1427 u_int end_index; 1428 1429 end_index = index + 1 1430 + amd->msgout_buf[index + 1]; 1431 if (full) { 1432 if (amd->msgout_index > end_index) 1433 found = TRUE; 1434 } else if (amd->msgout_index > index) 1435 found = TRUE; 1436 } 1437 break; 1438 } else { 1439 panic("amdsentmsg: Inconsistent msg buffer"); 1440 } 1441 } 1442 return (found); 1443 } 1444 1445 static void 1446 amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset) 1447 { 1448 amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED; 1449 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN; 1450 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR; 1451 amd->msgout_buf[amd->msgout_index++] = period; 1452 amd->msgout_buf[amd->msgout_index++] = offset; 1453 amd->msgout_len += 5; 1454 } 1455 1456 static int 1457 amdhandlemsgreject(struct amd_softc *amd) 1458 { 1459 /* 1460 * If we had an outstanding SDTR for this 1461 * target, this is a signal that the target 1462 * is refusing negotiation. Also watch out 1463 * for rejected tag messages. 1464 */ 1465 struct amd_srb *srb; 1466 struct amd_target_info *targ_info; 1467 int response = FALSE; 1468 1469 srb = amd->active_srb; 1470 targ_info = &amd->tinfo[amd->cur_target]; 1471 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) { 1472 /* note asynch xfers and clear flag */ 1473 amdsetsync(amd, amd->cur_target, /*clockrate*/0, 1474 /*period*/0, /*offset*/0, 1475 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL); 1476 kprintf("amd%d:%d: refuses synchronous negotiation. " 1477 "Using asynchronous transfers\n", 1478 amd->unit, amd->cur_target); 1479 } else if ((srb != NULL) 1480 && (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 1481 struct ccb_trans_settings neg; 1482 struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi; 1483 1484 kprintf("amd%d:%d: refuses tagged commands. Performing " 1485 "non-tagged I/O\n", amd->unit, amd->cur_target); 1486 1487 amdsettags(amd, amd->cur_target, FALSE); 1488 memset(&neg, 0, sizeof (neg)); 1489 scsi->valid = CTS_SCSI_VALID_TQ; 1490 xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1); 1491 xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg); 1492 1493 /* 1494 * Resend the identify for this CCB as the target 1495 * may believe that the selection is invalid otherwise. 1496 */ 1497 if (amd->msgout_len != 0) 1498 bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1], 1499 amd->msgout_len); 1500 amd->msgout_buf[0] = MSG_IDENTIFYFLAG 1501 | srb->pccb->ccb_h.target_lun; 1502 amd->msgout_len++; 1503 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0 1504 && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0) 1505 amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG; 1506 1507 srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 1508 1509 /* 1510 * Requeue all tagged commands for this target 1511 * currently in our posession so they can be 1512 * converted to untagged commands. 1513 */ 1514 amdcompletematch(amd, amd->cur_target, amd->cur_lun, 1515 AMD_TAG_WILDCARD, &amd->waiting_srbs, 1516 CAM_DEV_QFRZN|CAM_REQUEUE_REQ); 1517 } else { 1518 /* 1519 * Otherwise, we ignore it. 1520 */ 1521 kprintf("amd%d:%d: Message reject received -- ignored\n", 1522 amd->unit, amd->cur_target); 1523 } 1524 return (response); 1525 } 1526 1527 #if 0 1528 if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) { 1529 if (bval == MSG_DISCONNECT) { 1530 pSRB->SRBState = SRB_DISCONNECT; 1531 } else if (bval == MSG_SAVEDATAPOINTER) { 1532 goto min6; 1533 } else if ((bval == MSG_EXTENDED) 1534 || ((bval >= MSG_SIMPLE_Q_TAG) 1535 && (bval <= MSG_ORDERED_Q_TAG))) { 1536 pSRB->SRBState |= SRB_MSGIN_MULTI; 1537 pSRB->MsgInBuf[0] = bval; 1538 pSRB->MsgCnt = 1; 1539 pSRB->pMsgPtr = &pSRB->MsgInBuf[1]; 1540 } else if (bval == MSG_MESSAGE_REJECT) { 1541 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD); 1542 1543 if (pSRB->SRBState & DO_SYNC_NEGO) { 1544 goto set_async; 1545 } 1546 } else if (bval == MSG_RESTOREPOINTERS) { 1547 goto min6; 1548 } else { 1549 goto min6; 1550 } 1551 } else { /* minx: */ 1552 *pSRB->pMsgPtr = bval; 1553 pSRB->MsgCnt++; 1554 pSRB->pMsgPtr++; 1555 if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG) 1556 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) { 1557 if (pSRB->MsgCnt == 2) { 1558 pSRB->SRBState = 0; 1559 pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]]; 1560 if (pSRB->SRBState & SRB_DISCONNECT) == 0) { 1561 pSRB = amd->pTmpSRB; 1562 pSRB->SRBState = SRB_UNEXPECT_RESEL; 1563 pDCB->pActiveSRB = pSRB; 1564 pSRB->MsgOutBuf[0] = MSG_ABORT_TAG; 1565 EnableMsgOut2(amd, pSRB); 1566 } else { 1567 if (pDCB->DCBFlag & ABORT_DEV_) { 1568 pSRB->SRBState = SRB_ABORT_SENT; 1569 EnableMsgOut1(amd, pSRB); 1570 } 1571 pDCB->pActiveSRB = pSRB; 1572 pSRB->SRBState = SRB_DATA_XFER; 1573 } 1574 } 1575 } else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED) 1576 && (pSRB->MsgCnt == 5)) { 1577 pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO); 1578 if ((pSRB->MsgInBuf[1] != 3) 1579 || (pSRB->MsgInBuf[2] != 1)) { /* reject_msg: */ 1580 pSRB->MsgCnt = 1; 1581 pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT; 1582 amd_write8(amd, SCSICMDREG, SET_ATN_CMD); 1583 } else if (!(pSRB->MsgInBuf[3]) 1584 || !(pSRB->MsgInBuf[4])) { 1585 set_async: /* set async */ 1586 1587 pDCB = pSRB->pSRBDCB; 1588 /* disable sync & sync nego */ 1589 pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE); 1590 pDCB->SyncPeriod = 0; 1591 pDCB->SyncOffset = 0; 1592 1593 pDCB->tinfo.goal.period = 0; 1594 pDCB->tinfo.goal.offset = 0; 1595 1596 pDCB->tinfo.current.period = 0; 1597 pDCB->tinfo.current.offset = 0; 1598 pDCB->tinfo.current.width = 1599 MSG_EXT_WDTR_BUS_8_BIT; 1600 1601 pDCB->CtrlR3 = FAST_CLK; /* non_fast */ 1602 pDCB->CtrlR4 &= 0x3f; 1603 pDCB->CtrlR4 |= EATER_25NS; 1604 goto re_prog; 1605 } else {/* set sync */ 1606 1607 pDCB = pSRB->pSRBDCB; 1608 /* enable sync & sync nego */ 1609 pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE; 1610 1611 /* set sync offset */ 1612 pDCB->SyncOffset &= 0x0f0; 1613 pDCB->SyncOffset |= pSRB->MsgInBuf[4]; 1614 1615 /* set sync period */ 1616 pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3]; 1617 1618 wval = (u_int16_t) pSRB->MsgInBuf[3]; 1619 wval = wval << 2; 1620 wval--; 1621 wval1 = wval / 25; 1622 if ((wval1 * 25) != wval) { 1623 wval1++; 1624 } 1625 bval = FAST_CLK|FAST_SCSI; 1626 pDCB->CtrlR4 &= 0x3f; 1627 if (wval1 >= 8) { 1628 /* Fast SCSI */ 1629 wval1--; 1630 bval = FAST_CLK; 1631 pDCB->CtrlR4 |= EATER_25NS; 1632 } 1633 pDCB->CtrlR3 = bval; 1634 pDCB->SyncPeriod = (u_int8_t) wval1; 1635 1636 pDCB->tinfo.goal.period = 1637 tinfo_sync_period[pDCB->SyncPeriod - 4]; 1638 pDCB->tinfo.goal.offset = pDCB->SyncOffset; 1639 pDCB->tinfo.current.period = 1640 tinfo_sync_period[pDCB->SyncPeriod - 4]; 1641 pDCB->tinfo.current.offset = pDCB->SyncOffset; 1642 1643 /* 1644 * program SCSI control register 1645 */ 1646 re_prog: 1647 amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod); 1648 amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset); 1649 amd_write8(amd, CNTLREG3, pDCB->CtrlR3); 1650 amd_write8(amd, CNTLREG4, pDCB->CtrlR4); 1651 } 1652 } 1653 } 1654 min6: 1655 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD); 1656 return (SCSI_NOP0); 1657 } 1658 #endif 1659 1660 static u_int 1661 amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1662 { 1663 DataIO_Comm(amd, pSRB, WRITE_DIRECTION); 1664 return (scsistat); 1665 } 1666 1667 static u_int 1668 amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1669 { 1670 DataIO_Comm(amd, pSRB, READ_DIRECTION); 1671 return (scsistat); 1672 } 1673 1674 static void 1675 DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir) 1676 { 1677 struct amd_sg * psgl; 1678 u_int32_t lval; 1679 1680 if (pSRB->SGIndex < pSRB->SGcount) { 1681 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */ 1682 1683 if (!pSRB->SGToBeXferLen) { 1684 psgl = pSRB->pSGlist; 1685 pSRB->SGPhysAddr = psgl->SGXPtr; 1686 pSRB->SGToBeXferLen = psgl->SGXLen; 1687 } 1688 lval = pSRB->SGToBeXferLen; 1689 amd_write8(amd, CTCREG_LOW, lval); 1690 amd_write8(amd, CTCREG_MID, lval >> 8); 1691 amd_write8(amd, CURTXTCNTREG, lval >> 16); 1692 1693 amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen); 1694 1695 amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr); 1696 1697 pSRB->SRBState = SRB_DATA_XFER; 1698 1699 amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD); 1700 1701 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */ 1702 1703 amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */ 1704 } else { /* xfer pad */ 1705 if (pSRB->SGcount) { 1706 pSRB->AdaptStatus = H_OVER_UNDER_RUN; 1707 pSRB->SRBStatus |= OVER_RUN; 1708 } 1709 amd_write8(amd, CTCREG_LOW, 0); 1710 amd_write8(amd, CTCREG_MID, 0); 1711 amd_write8(amd, CURTXTCNTREG, 0); 1712 1713 pSRB->SRBState |= SRB_XFERPAD; 1714 amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE); 1715 } 1716 } 1717 1718 static u_int 1719 amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat) 1720 { 1721 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD); 1722 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 1723 1724 amdsetupcommand(amd, srb); 1725 1726 srb->SRBState = SRB_COMMAND; 1727 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD); 1728 return (scsistat); 1729 } 1730 1731 static u_int 1732 amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1733 { 1734 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 1735 pSRB->SRBState = SRB_STATUS; 1736 amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE); 1737 return (scsistat); 1738 } 1739 1740 static u_int 1741 amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1742 { 1743 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 1744 1745 if (amd->msgout_len == 0) { 1746 amd->msgout_buf[0] = MSG_NOOP; 1747 amd->msgout_len = 1; 1748 } 1749 amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len); 1750 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD); 1751 return (scsistat); 1752 } 1753 1754 static u_int 1755 amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1756 { 1757 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 1758 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD); 1759 return (scsistat); 1760 } 1761 1762 static u_int 1763 amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) 1764 { 1765 return (scsistat); 1766 } 1767 1768 static void 1769 amd_Disconnect(struct amd_softc * amd) 1770 { 1771 struct amd_srb *srb; 1772 int target; 1773 int lun; 1774 1775 srb = amd->active_srb; 1776 amd->active_srb = NULL; 1777 amd->last_phase = SCSI_BUS_FREE; 1778 amd_write8(amd, SCSICMDREG, EN_SEL_RESEL); 1779 target = amd->cur_target; 1780 lun = amd->cur_lun; 1781 1782 if (srb == NULL) { 1783 /* Invalid reselection */ 1784 amdrunwaiting(amd); 1785 } else if (srb->SRBState & SRB_ABORT_SENT) { 1786 /* Clean up and done this srb */ 1787 #if 0 1788 while (( = TAILQ_FIRST(&amd->running_srbs)) != NULL) { 1789 /* XXX What about "done'ing" these srbs??? */ 1790 if (pSRB->pSRBDCB == pDCB) { 1791 TAILQ_REMOVE(&amd->running_srbs, pSRB, links); 1792 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links); 1793 } 1794 } 1795 amdrunwaiting(amd); 1796 #endif 1797 } else { 1798 if ((srb->SRBState & (SRB_START | SRB_MSGOUT)) 1799 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) { 1800 srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT; 1801 goto disc1; 1802 } else if (srb->SRBState & SRB_DISCONNECT) { 1803 if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID)) 1804 amd->untagged_srbs[target][lun] = srb; 1805 amdrunwaiting(amd); 1806 } else if (srb->SRBState & SRB_COMPLETED) { 1807 disc1: 1808 srb->SRBState = SRB_FREE; 1809 SRBdone(amd, srb); 1810 } 1811 } 1812 return; 1813 } 1814 1815 static void 1816 amd_Reselect(struct amd_softc *amd) 1817 { 1818 struct amd_target_info *tinfo; 1819 u_int16_t disc_count; 1820 1821 amd_clear_msg_state(amd); 1822 if (amd->active_srb != NULL) { 1823 /* Requeue the SRB for our attempted Selection */ 1824 TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links); 1825 TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links); 1826 amd->active_srb = NULL; 1827 } 1828 /* get ID */ 1829 amd->cur_target = amd_read8(amd, SCSIFIFOREG); 1830 amd->cur_target ^= amd->HostID_Bit; 1831 amd->cur_target = ffs(amd->cur_target) - 1; 1832 amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7; 1833 tinfo = &amd->tinfo[amd->cur_target]; 1834 amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun]; 1835 disc_count = amd->disc_count[amd->cur_target][amd->cur_lun]; 1836 if (disc_count == 0) { 1837 kprintf("amd%d: Unexpected reselection for target %d, " 1838 "Issuing Abort\n", amd->unit, amd->cur_target); 1839 amd->msgout_buf[0] = MSG_ABORT; 1840 amd->msgout_len = 1; 1841 amd_write8(amd, SCSICMDREG, SET_ATN_CMD); 1842 } 1843 if (amd->active_srb != NULL) { 1844 amd->disc_count[amd->cur_target][amd->cur_lun]--; 1845 amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL; 1846 } 1847 1848 amd_write8(amd, SCSIDESTIDREG, amd->cur_target); 1849 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg); 1850 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg); 1851 amd_write8(amd, CNTLREG1, tinfo->CtrlR1); 1852 amd_write8(amd, CNTLREG3, tinfo->CtrlR3); 1853 amd_write8(amd, CNTLREG4, tinfo->CtrlR4); 1854 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */ 1855 amd->last_phase = SCSI_NOP0; 1856 } 1857 1858 static void 1859 SRBdone(struct amd_softc *amd, struct amd_srb *pSRB) 1860 { 1861 u_int8_t bval, i, status; 1862 union ccb *pccb; 1863 struct ccb_scsiio *pcsio; 1864 struct amd_sg *ptr2; 1865 u_int32_t swlval; 1866 u_int target_id, target_lun; 1867 1868 pccb = pSRB->pccb; 1869 pcsio = &pccb->csio; 1870 target_id = pSRB->pccb->ccb_h.target_id; 1871 target_lun = pSRB->pccb->ccb_h.target_lun; 1872 1873 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, 1874 ("SRBdone - TagNumber %d\n", pSRB->TagNumber)); 1875 1876 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1877 bus_dmasync_op_t op; 1878 1879 if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1880 op = BUS_DMASYNC_POSTREAD; 1881 else 1882 op = BUS_DMASYNC_POSTWRITE; 1883 bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op); 1884 bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap); 1885 } 1886 1887 status = pSRB->TargetStatus; 1888 pccb->ccb_h.status = CAM_REQ_CMP; 1889 pccb->ccb_h.status = CAM_REQ_CMP; 1890 if (pSRB->SRBFlag & AUTO_REQSENSE) { 1891 pSRB->SRBFlag &= ~AUTO_REQSENSE; 1892 pSRB->AdaptStatus = 0; 1893 pSRB->TargetStatus = SCSI_STATUS_CHECK_COND; 1894 1895 if (status == SCSI_STATUS_CHECK_COND) { 1896 pccb->ccb_h.status = CAM_SEL_TIMEOUT; 1897 goto ckc_e; 1898 } 1899 *((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0]; 1900 1901 pcsio->sense_resid = pcsio->sense_len 1902 - pSRB->TotalXferredLen; 1903 pSRB->TotalXferredLen = pSRB->Segment1[1]; 1904 if (pSRB->TotalXferredLen) { 1905 /* ???? */ 1906 pcsio->resid = pcsio->dxfer_len 1907 - pSRB->TotalXferredLen; 1908 /* The resid field contains valid data */ 1909 /* Flush resid bytes on complete */ 1910 } else { 1911 pcsio->scsi_status = SCSI_STATUS_CHECK_COND; 1912 } 1913 pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR; 1914 goto ckc_e; 1915 } 1916 if (status) { 1917 if (status == SCSI_STATUS_CHECK_COND) { 1918 1919 if ((pSRB->SGIndex < pSRB->SGcount) 1920 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) { 1921 bval = pSRB->SGcount; 1922 swlval = pSRB->SGToBeXferLen; 1923 ptr2 = pSRB->pSGlist; 1924 ptr2++; 1925 for (i = pSRB->SGIndex + 1; i < bval; i++) { 1926 swlval += ptr2->SGXLen; 1927 ptr2++; 1928 } 1929 /* ??????? */ 1930 pcsio->resid = (u_int32_t) swlval; 1931 1932 #ifdef AMD_DEBUG0 1933 kprintf("XferredLen=%8x,NotYetXferLen=%8x,", 1934 pSRB->TotalXferredLen, swlval); 1935 #endif 1936 } 1937 if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) { 1938 #ifdef AMD_DEBUG0 1939 kprintf("RequestSense..................\n"); 1940 #endif 1941 RequestSense(amd, pSRB); 1942 return; 1943 } 1944 pcsio->scsi_status = SCSI_STATUS_CHECK_COND; 1945 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1946 goto ckc_e; 1947 } else if (status == SCSI_STATUS_QUEUE_FULL) { 1948 pSRB->AdaptStatus = 0; 1949 pSRB->TargetStatus = 0; 1950 pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL; 1951 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1952 goto ckc_e; 1953 } else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) { 1954 pSRB->AdaptStatus = H_SEL_TIMEOUT; 1955 pSRB->TargetStatus = 0; 1956 1957 pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT; 1958 pccb->ccb_h.status = CAM_SEL_TIMEOUT; 1959 } else if (status == SCSI_STATUS_BUSY) { 1960 #ifdef AMD_DEBUG0 1961 kprintf("DC390: target busy at %s %d\n", 1962 __FILE__, __LINE__); 1963 #endif 1964 pcsio->scsi_status = SCSI_STATUS_BUSY; 1965 pccb->ccb_h.status = CAM_SCSI_BUSY; 1966 } else if (status == SCSI_STATUS_RESERV_CONFLICT) { 1967 #ifdef AMD_DEBUG0 1968 kprintf("DC390: target reserved at %s %d\n", 1969 __FILE__, __LINE__); 1970 #endif 1971 pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 1972 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */ 1973 } else { 1974 pSRB->AdaptStatus = 0; 1975 #ifdef AMD_DEBUG0 1976 kprintf("DC390: driver stuffup at %s %d\n", 1977 __FILE__, __LINE__); 1978 #endif 1979 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1980 } 1981 } else { 1982 status = pSRB->AdaptStatus; 1983 if (status & H_OVER_UNDER_RUN) { 1984 pSRB->TargetStatus = 0; 1985 1986 pccb->ccb_h.status = CAM_DATA_RUN_ERR; 1987 } else if (pSRB->SRBStatus & PARITY_ERROR) { 1988 #ifdef AMD_DEBUG0 1989 kprintf("DC390: driver stuffup %s %d\n", 1990 __FILE__, __LINE__); 1991 #endif 1992 /* Driver failed to perform operation */ 1993 pccb->ccb_h.status = CAM_UNCOR_PARITY; 1994 } else { /* No error */ 1995 pSRB->AdaptStatus = 0; 1996 pSRB->TargetStatus = 0; 1997 pcsio->resid = 0; 1998 /* there is no error, (sense is invalid) */ 1999 } 2000 } 2001 ckc_e: 2002 crit_enter(); 2003 if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2004 /* CAM request not yet complete =>device_Q frozen */ 2005 xpt_freeze_devq(pccb->ccb_h.path, 1); 2006 pccb->ccb_h.status |= CAM_DEV_QFRZN; 2007 } 2008 TAILQ_REMOVE(&amd->running_srbs, pSRB, links); 2009 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links); 2010 amdrunwaiting(amd); 2011 crit_exit(); 2012 xpt_done(pccb); 2013 2014 } 2015 2016 static void 2017 amd_ResetSCSIBus(struct amd_softc * amd) 2018 { 2019 crit_enter(); 2020 amd->ACBFlag |= RESET_DEV; 2021 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD); 2022 amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD); 2023 crit_exit(); 2024 return; 2025 } 2026 2027 static void 2028 amd_ScsiRstDetect(struct amd_softc * amd) 2029 { 2030 u_int32_t wlval; 2031 2032 #ifdef AMD_DEBUG0 2033 kprintf("amd_ScsiRstDetect \n"); 2034 #endif 2035 2036 wlval = 1000; 2037 while (--wlval) { /* delay 1 sec */ 2038 DELAY(1000); 2039 } 2040 crit_enter(); 2041 2042 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD); 2043 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 2044 2045 if (amd->ACBFlag & RESET_DEV) { 2046 amd->ACBFlag |= RESET_DONE; 2047 } else { 2048 amd->ACBFlag |= RESET_DETECT; 2049 ResetDevParam(amd); 2050 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, 2051 AMD_TAG_WILDCARD, &amd->running_srbs, 2052 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET); 2053 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, 2054 AMD_TAG_WILDCARD, &amd->waiting_srbs, 2055 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET); 2056 amd->active_srb = NULL; 2057 amd->ACBFlag = 0; 2058 amdrunwaiting(amd); 2059 } 2060 crit_exit(); 2061 return; 2062 } 2063 2064 static void 2065 RequestSense(struct amd_softc *amd, struct amd_srb *pSRB) 2066 { 2067 union ccb *pccb; 2068 struct ccb_scsiio *pcsio; 2069 2070 pccb = pSRB->pccb; 2071 pcsio = &pccb->csio; 2072 2073 pSRB->SRBFlag |= AUTO_REQSENSE; 2074 pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0])); 2075 pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4])); 2076 pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount; 2077 pSRB->Segment1[1] = pSRB->TotalXferredLen; 2078 2079 pSRB->AdaptStatus = 0; 2080 pSRB->TargetStatus = 0; 2081 2082 pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data); 2083 pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len; 2084 2085 pSRB->pSGlist = &pSRB->Segmentx; 2086 pSRB->SGcount = 1; 2087 pSRB->SGIndex = 0; 2088 2089 *((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003; 2090 pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5; 2091 *((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len; 2092 pSRB->ScsiCmdLen = 6; 2093 2094 pSRB->TotalXferredLen = 0; 2095 pSRB->SGToBeXferLen = 0; 2096 if (amdstart(amd, pSRB) != 0) { 2097 TAILQ_REMOVE(&amd->running_srbs, pSRB, links); 2098 TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links); 2099 } 2100 } 2101 2102 static void 2103 amd_InvalidCmd(struct amd_softc * amd) 2104 { 2105 struct amd_srb *srb; 2106 2107 srb = amd->active_srb; 2108 if (srb->SRBState & (SRB_START|SRB_MSGOUT)) 2109 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); 2110 } 2111 2112 void 2113 amd_linkSRB(struct amd_softc *amd) 2114 { 2115 u_int16_t count, i; 2116 struct amd_srb *psrb; 2117 int error; 2118 2119 count = amd->SRBCount; 2120 2121 for (i = 0; i < count; i++) { 2122 psrb = (struct amd_srb *)&amd->SRB_array[i]; 2123 psrb->TagNumber = i; 2124 2125 /* 2126 * Create the dmamap. This is no longer optional! 2127 */ 2128 error = bus_dmamap_create(amd->buffer_dmat, 0, &psrb->dmamap); 2129 if (error) { 2130 device_printf(amd->dev, "Error %d creating buffer " 2131 "dmamap!\n", error); 2132 break; 2133 } 2134 TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links); 2135 } 2136 } 2137 2138 void 2139 amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval) 2140 { 2141 if (mode == ENABLE_CE) { 2142 *regval = 0xc0; 2143 } else { 2144 *regval = 0x80; 2145 } 2146 pci_write_config(amd->dev, *regval, 0, /*bytes*/1); 2147 if (mode == DISABLE_CE) { 2148 pci_write_config(amd->dev, *regval, 0, /*bytes*/1); 2149 } 2150 DELAY(160); 2151 } 2152 2153 void 2154 amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry) 2155 { 2156 u_int bval; 2157 2158 bval = 0; 2159 if (Carry) { 2160 bval = 0x40; 2161 *regval = 0x80; 2162 pci_write_config(amd->dev, *regval, bval, /*bytes*/1); 2163 } 2164 DELAY(160); 2165 bval |= 0x80; 2166 pci_write_config(amd->dev, *regval, bval, /*bytes*/1); 2167 DELAY(160); 2168 pci_write_config(amd->dev, *regval, 0, /*bytes*/1); 2169 DELAY(160); 2170 } 2171 2172 static int 2173 amd_EEpromInDO(struct amd_softc *amd) 2174 { 2175 pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1); 2176 DELAY(160); 2177 pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1); 2178 DELAY(160); 2179 if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22) 2180 return (1); 2181 return (0); 2182 } 2183 2184 static u_int16_t 2185 EEpromGetData1(struct amd_softc *amd) 2186 { 2187 u_int i; 2188 u_int carryFlag; 2189 u_int16_t wval; 2190 2191 wval = 0; 2192 for (i = 0; i < 16; i++) { 2193 wval <<= 1; 2194 carryFlag = amd_EEpromInDO(amd); 2195 wval |= carryFlag; 2196 } 2197 return (wval); 2198 } 2199 2200 static void 2201 amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd) 2202 { 2203 u_int i, j; 2204 int carryFlag; 2205 2206 carryFlag = 1; 2207 j = 0x80; 2208 for (i = 0; i < 9; i++) { 2209 amd_EEpromOutDI(amd, regval, carryFlag); 2210 carryFlag = (EEpromCmd & j) ? 1 : 0; 2211 j >>= 1; 2212 } 2213 } 2214 2215 static void 2216 amd_ReadEEprom(struct amd_softc *amd) 2217 { 2218 int regval; 2219 u_int i; 2220 u_int16_t *ptr; 2221 u_int8_t cmd; 2222 2223 ptr = (u_int16_t *)&amd->eepromBuf[0]; 2224 cmd = EEPROM_READ; 2225 for (i = 0; i < 0x40; i++) { 2226 amd_EnDisableCE(amd, ENABLE_CE, ®val); 2227 amd_Prepare(amd, ®val, cmd); 2228 *ptr = EEpromGetData1(amd); 2229 ptr++; 2230 cmd++; 2231 amd_EnDisableCE(amd, DISABLE_CE, ®val); 2232 } 2233 } 2234 2235 static void 2236 amd_load_defaults(struct amd_softc *amd) 2237 { 2238 int target; 2239 2240 bzero(&amd->eepromBuf, sizeof amd->eepromBuf); 2241 for (target = 0; target < MAX_SCSI_ID; target++) 2242 amd->eepromBuf[target << 2] = 2243 (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK); 2244 amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7; 2245 amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G; 2246 amd->eepromBuf[EE_TAG_CMD_NUM] = 4; 2247 } 2248 2249 static void 2250 amd_load_eeprom_or_defaults(struct amd_softc *amd) 2251 { 2252 u_int16_t wval, *ptr; 2253 u_int8_t i; 2254 2255 amd_ReadEEprom(amd); 2256 wval = 0; 2257 ptr = (u_int16_t *) & amd->eepromBuf[0]; 2258 for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++) 2259 wval += *ptr; 2260 2261 if (wval != EE_CHECKSUM) { 2262 if (bootverbose) 2263 kprintf("amd%d: SEEPROM data unavailable. " 2264 "Using default device parameters.\n", 2265 amd->unit); 2266 amd_load_defaults(amd); 2267 } 2268 } 2269 2270 /* 2271 ********************************************************************** 2272 * Function : static int amd_init (struct Scsi_Host *host) 2273 * Purpose : initialize the internal structures for a given SCSI host 2274 * Inputs : host - pointer to this host adapter's structure/ 2275 ********************************************************************** 2276 */ 2277 static int 2278 amd_init(device_t dev) 2279 { 2280 struct amd_softc *amd = device_get_softc(dev); 2281 struct resource *iores; 2282 int i, rid; 2283 u_int bval; 2284 2285 rid = PCI_BASE_ADDR0; 2286 iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1, 2287 RF_ACTIVE); 2288 if (iores == NULL) { 2289 if (bootverbose) 2290 kprintf("amd_init: bus_alloc_resource failure!\n"); 2291 return ENXIO; 2292 } 2293 amd->tag = rman_get_bustag(iores); 2294 amd->bsh = rman_get_bushandle(iores); 2295 2296 /* DMA tag for mapping buffers into device visible space. */ 2297 if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1, 2298 /*boundary*/0, 2299 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 2300 /*highaddr*/BUS_SPACE_MAXADDR, 2301 /*filter*/NULL, /*filterarg*/NULL, 2302 /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG, 2303 /*maxsegsz*/AMD_MAXTRANSFER_SIZE, 2304 /*flags*/BUS_DMA_ALLOCNOW, 2305 &amd->buffer_dmat) != 0) { 2306 if (bootverbose) 2307 kprintf("amd_init: bus_dma_tag_create failure!\n"); 2308 return ENXIO; 2309 } 2310 TAILQ_INIT(&amd->free_srbs); 2311 TAILQ_INIT(&amd->running_srbs); 2312 TAILQ_INIT(&amd->waiting_srbs); 2313 amd->last_phase = SCSI_BUS_FREE; 2314 amd->dev = dev; 2315 amd->unit = device_get_unit(dev); 2316 amd->SRBCount = MAX_SRB_CNT; 2317 amd->status = 0; 2318 amd_load_eeprom_or_defaults(amd); 2319 amd->max_id = 7; 2320 if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) { 2321 amd->max_lun = 7; 2322 } else { 2323 amd->max_lun = 0; 2324 } 2325 amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID]; 2326 amd->HostID_Bit = (1 << amd->AdaptSCSIID); 2327 amd->AdaptSCSILUN = 0; 2328 /* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */ 2329 amd->ACBFlag = 0; 2330 amd->Gmode2 = amd->eepromBuf[EE_MODE2]; 2331 amd_linkSRB(amd); 2332 for (i = 0; i <= amd->max_id; i++) { 2333 2334 if (amd->AdaptSCSIID != i) { 2335 struct amd_target_info *tinfo; 2336 PEEprom prom; 2337 2338 tinfo = &amd->tinfo[i]; 2339 prom = (PEEprom)&amd->eepromBuf[i << 2]; 2340 if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) { 2341 tinfo->disc_tag |= AMD_USR_DISCENB; 2342 if ((prom->EE_MODE1 & TAG_QUEUING) != 0) 2343 tinfo->disc_tag |= AMD_USR_TAGENB; 2344 } 2345 if ((prom->EE_MODE1 & SYNC_NEGO) != 0) { 2346 tinfo->user.period = 2347 eeprom_period[prom->EE_SPEED]; 2348 tinfo->user.offset = AMD_MAX_SYNC_OFFSET; 2349 } 2350 tinfo->CtrlR1 = amd->AdaptSCSIID; 2351 if ((prom->EE_MODE1 & PARITY_CHK) != 0) 2352 tinfo->CtrlR1 |= PARITY_ERR_REPO; 2353 tinfo->CtrlR3 = FAST_CLK; 2354 tinfo->CtrlR4 = EATER_25NS; 2355 if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0) 2356 tinfo->CtrlR4 |= NEGATE_REQACKDATA; 2357 } 2358 } 2359 amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */ 2360 /* Conversion factor = 0 , 40MHz clock */ 2361 amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ); 2362 /* NOP cmd - clear command register */ 2363 amd_write8(amd, SCSICMDREG, NOP_CMD); 2364 amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD); 2365 amd_write8(amd, CNTLREG3, FAST_CLK); 2366 bval = EATER_25NS; 2367 if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) { 2368 bval |= NEGATE_REQACKDATA; 2369 } 2370 amd_write8(amd, CNTLREG4, bval); 2371 2372 /* Disable SCSI bus reset interrupt */ 2373 amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST); 2374 2375 return 0; 2376 } 2377 2378 /* 2379 * attach and init a host adapter 2380 */ 2381 static int 2382 amd_attach(device_t dev) 2383 { 2384 struct cam_devq *devq; /* Device Queue to use for this SIM */ 2385 u_int8_t intstat; 2386 struct amd_softc *amd = device_get_softc(dev); 2387 int unit = device_get_unit(dev); 2388 int rid; 2389 void *ih; 2390 struct resource *irqres; 2391 2392 if (amd_init(dev)) { 2393 if (bootverbose) 2394 kprintf("amd_attach: amd_init failure!\n"); 2395 return ENXIO; 2396 } 2397 2398 /* Reset Pending INT */ 2399 intstat = amd_read8(amd, INTSTATREG); 2400 2401 /* After setting up the adapter, map our interrupt */ 2402 rid = 0; 2403 irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 2404 RF_SHAREABLE | RF_ACTIVE); 2405 if (irqres == NULL || 2406 bus_setup_intr(dev, irqres, 0, amd_intr, amd, &ih, NULL) 2407 ) { 2408 if (bootverbose) 2409 kprintf("amd%d: unable to register interrupt handler!\n", 2410 unit); 2411 return ENXIO; 2412 } 2413 2414 /* 2415 * Now let the CAM generic SCSI layer find the SCSI devices on 2416 * the bus * start queue to reset to the idle loop. * 2417 * Create device queue of SIM(s) * (MAX_START_JOB - 1) : 2418 * max_sim_transactions 2419 */ 2420 devq = cam_simq_alloc(MAX_START_JOB); 2421 if (devq == NULL) { 2422 if (bootverbose) 2423 kprintf("amd_attach: cam_simq_alloc failure!\n"); 2424 return ENXIO; 2425 } 2426 2427 amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd", 2428 amd, amd->unit, &sim_mplock, 1, 2429 MAX_TAGS_CMD_QUEUE, devq); 2430 cam_simq_release(devq); 2431 if (amd->psim == NULL) { 2432 if (bootverbose) 2433 kprintf("amd_attach: cam_sim_alloc failure!\n"); 2434 return ENXIO; 2435 } 2436 2437 if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) { 2438 cam_sim_free(amd->psim); 2439 if (bootverbose) 2440 kprintf("amd_attach: xpt_bus_register failure!\n"); 2441 return ENXIO; 2442 } 2443 2444 if (xpt_create_path(&amd->ppath, /* periph */ NULL, 2445 cam_sim_path(amd->psim), CAM_TARGET_WILDCARD, 2446 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2447 xpt_bus_deregister(cam_sim_path(amd->psim)); 2448 cam_sim_free(amd->psim); 2449 if (bootverbose) 2450 kprintf("amd_attach: xpt_create_path failure!\n"); 2451 return ENXIO; 2452 } 2453 2454 return 0; 2455 } 2456 2457 static int 2458 amd_probe(device_t dev) 2459 { 2460 if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) { 2461 device_set_desc(dev, 2462 "Tekram DC390(T)/AMD53c974 SCSI Host Adapter"); 2463 return 0; 2464 } 2465 return ENXIO; 2466 } 2467 2468 static device_method_t amd_methods[] = { 2469 /* Device interface */ 2470 DEVMETHOD(device_probe, amd_probe), 2471 DEVMETHOD(device_attach, amd_attach), 2472 { 0, 0 } 2473 }; 2474 2475 static driver_t amd_driver = { 2476 "amd", amd_methods, sizeof(struct amd_softc) 2477 }; 2478 2479 static devclass_t amd_devclass; 2480 DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, NULL, NULL); 2481