1 /* 2 * Copyright (c) 1997 by Simon Shapiro 3 * All Rights Reserved 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * $FreeBSD: src/sys/dev/dpt/dpt_scsi.c,v 1.28.2.3 2003/01/31 02:47:10 grog Exp $ 30 */ 31 32 /* 33 * dpt_scsi.c: SCSI dependant code for the DPT driver 34 * 35 * credits: Assisted by Mike Neuffer in the early low level DPT code 36 * Thanx to Mark Salyzyn of DPT for his assistance. 
37 * Special thanx to Justin Gibbs for invaluable help in 38 * making this driver look and work like a FreeBSD component. 39 * Last but not least, many thanx to UCB and the FreeBSD 40 * team for creating and maintaining such a wonderful O/S. 41 * 42 * TODO: * Add ISA probe code. 43 * * Add driver-level RAID-0. This will allow interoperability with 44 * NiceTry, M$-Doze, Win-Dog, Slowlaris, etc., in recognizing RAID 45 * arrays that span controllers (Wow!). 46 */ 47 48 #define _DPT_C_ 49 50 #include "opt_dpt.h" 51 #include <sys/param.h> 52 #include <sys/systm.h> 53 #include <sys/eventhandler.h> 54 #include <sys/malloc.h> 55 #include <sys/kernel.h> 56 #include <sys/bus.h> 57 #include <sys/thread2.h> 58 59 #include <machine/clock.h> 60 61 #include <bus/cam/cam.h> 62 #include <bus/cam/cam_ccb.h> 63 #include <bus/cam/cam_sim.h> 64 #include <bus/cam/cam_xpt_sim.h> 65 #include <bus/cam/cam_debug.h> 66 #include <bus/cam/scsi/scsi_all.h> 67 #include <bus/cam/scsi/scsi_message.h> 68 69 #include <vm/vm.h> 70 #include <vm/pmap.h> 71 72 #include "dpt.h" 73 74 /* dpt_isa.c and dpt_pci.c need this in a central place */ 75 int dpt_controllers_present; 76 77 u_long dpt_unit; /* Next unit number to use */ 78 79 /* The linked list of softc structures */ 80 struct dpt_softc_list dpt_softcs = TAILQ_HEAD_INITIALIZER(dpt_softcs); 81 82 #define microtime_now dpt_time_now() 83 84 #define dpt_inl(dpt, port) \ 85 bus_space_read_4((dpt)->tag, (dpt)->bsh, port) 86 #define dpt_inb(dpt, port) \ 87 bus_space_read_1((dpt)->tag, (dpt)->bsh, port) 88 #define dpt_outl(dpt, port, value) \ 89 bus_space_write_4((dpt)->tag, (dpt)->bsh, port, value) 90 #define dpt_outb(dpt, port, value) \ 91 bus_space_write_1((dpt)->tag, (dpt)->bsh, port, value) 92 93 /* 94 * These will have to be setup by parameters passed at boot/load time. For 95 * perfromance reasons, we make them constants for the time being. 
96 */ 97 #define dpt_min_segs DPT_MAX_SEGS 98 #define dpt_max_segs DPT_MAX_SEGS 99 100 /* Definitions for our use of the SIM private CCB area */ 101 #define ccb_dccb_ptr spriv_ptr0 102 #define ccb_dpt_ptr spriv_ptr1 103 104 /* ================= Private Inline Function declarations ===================*/ 105 static __inline int dpt_just_reset(dpt_softc_t * dpt); 106 static __inline int dpt_raid_busy(dpt_softc_t * dpt); 107 static __inline int dpt_pio_wait (u_int32_t, u_int, u_int, u_int); 108 static __inline int dpt_wait(dpt_softc_t *dpt, u_int bits, 109 u_int state); 110 static __inline struct dpt_ccb* dptgetccb(struct dpt_softc *dpt); 111 static __inline void dptfreeccb(struct dpt_softc *dpt, 112 struct dpt_ccb *dccb); 113 static __inline bus_addr_t dptccbvtop(struct dpt_softc *dpt, 114 struct dpt_ccb *dccb); 115 116 static __inline int dpt_send_immediate(dpt_softc_t *dpt, 117 eata_ccb_t *cmd_block, 118 u_int32_t cmd_busaddr, 119 u_int retries, 120 u_int ifc, u_int code, 121 u_int code2); 122 123 /* ==================== Private Function declarations =======================*/ 124 static void dptmapmem(void *arg, bus_dma_segment_t *segs, 125 int nseg, int error); 126 127 static struct sg_map_node* 128 dptallocsgmap(struct dpt_softc *dpt); 129 130 static int dptallocccbs(dpt_softc_t *dpt); 131 132 static int dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb, 133 u_int32_t dccb_busaddr, u_int size, 134 u_int page, u_int target, int extent); 135 static void dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb, 136 u_int32_t dccb_busaddr, 137 u_int8_t *buff); 138 139 static void dpt_poll(struct cam_sim *sim); 140 141 static void dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs, 142 int nseg, int error); 143 144 static void dpt_action(struct cam_sim *sim, union ccb *ccb); 145 146 static int dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd, 147 u_int32_t cmd_busaddr, 148 u_int command, u_int retries, 149 u_int ifc, u_int code, 150 u_int code2); 151 static void 
dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb, 152 union ccb *ccb, u_int hba_stat, 153 u_int scsi_stat, u_int32_t resid); 154 155 static void dpttimeout(void *arg); 156 static void dptshutdown(void *arg, int howto); 157 158 /* ================= Private Inline Function definitions ====================*/ 159 static __inline int 160 dpt_just_reset(dpt_softc_t * dpt) 161 { 162 if ((dpt_inb(dpt, 2) == 'D') 163 && (dpt_inb(dpt, 3) == 'P') 164 && (dpt_inb(dpt, 4) == 'T') 165 && (dpt_inb(dpt, 5) == 'H')) 166 return (1); 167 else 168 return (0); 169 } 170 171 static __inline int 172 dpt_raid_busy(dpt_softc_t * dpt) 173 { 174 if ((dpt_inb(dpt, 0) == 'D') 175 && (dpt_inb(dpt, 1) == 'P') 176 && (dpt_inb(dpt, 2) == 'T')) 177 return (1); 178 else 179 return (0); 180 } 181 182 static __inline int 183 dpt_pio_wait (u_int32_t base, u_int reg, u_int bits, u_int state) 184 { 185 int i; 186 u_int c; 187 188 for (i = 0; i < 20000; i++) { /* wait 20ms for not busy */ 189 c = inb(base + reg) & bits; 190 if (!(c == state)) 191 return (0); 192 else 193 DELAY(50); 194 } 195 return (-1); 196 } 197 198 static __inline int 199 dpt_wait(dpt_softc_t *dpt, u_int bits, u_int state) 200 { 201 int i; 202 u_int c; 203 204 for (i = 0; i < 20000; i++) { /* wait 20ms for not busy */ 205 c = dpt_inb(dpt, HA_RSTATUS) & bits; 206 if (c == state) 207 return (0); 208 else 209 DELAY(50); 210 } 211 return (-1); 212 } 213 214 static __inline struct dpt_ccb* 215 dptgetccb(struct dpt_softc *dpt) 216 { 217 struct dpt_ccb* dccb; 218 219 crit_enter(); 220 if ((dccb = SLIST_FIRST(&dpt->free_dccb_list)) != NULL) { 221 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links); 222 dpt->free_dccbs--; 223 } else if (dpt->total_dccbs < dpt->max_dccbs) { 224 dptallocccbs(dpt); 225 dccb = SLIST_FIRST(&dpt->free_dccb_list); 226 if (dccb == NULL) 227 kprintf("dpt%d: Can't malloc DCCB\n", dpt->unit); 228 else { 229 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links); 230 dpt->free_dccbs--; 231 } 232 } 233 crit_exit(); 234 235 return 
(dccb); 236 } 237 238 static __inline void 239 dptfreeccb(struct dpt_softc *dpt, struct dpt_ccb *dccb) 240 { 241 crit_enter(); 242 if ((dccb->state & DCCB_ACTIVE) != 0) 243 LIST_REMOVE(&dccb->ccb->ccb_h, sim_links.le); 244 if ((dccb->state & DCCB_RELEASE_SIMQ) != 0) 245 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 246 else if (dpt->resource_shortage != 0 247 && (dccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { 248 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 249 dpt->resource_shortage = FALSE; 250 } 251 dccb->state = DCCB_FREE; 252 SLIST_INSERT_HEAD(&dpt->free_dccb_list, dccb, links); 253 ++dpt->free_dccbs; 254 crit_exit(); 255 } 256 257 static __inline bus_addr_t 258 dptccbvtop(struct dpt_softc *dpt, struct dpt_ccb *dccb) 259 { 260 return (dpt->dpt_ccb_busbase 261 + (u_int32_t)((caddr_t)dccb - (caddr_t)dpt->dpt_dccbs)); 262 } 263 264 static __inline struct dpt_ccb * 265 dptccbptov(struct dpt_softc *dpt, bus_addr_t busaddr) 266 { 267 return (dpt->dpt_dccbs 268 + ((struct dpt_ccb *)busaddr 269 - (struct dpt_ccb *)dpt->dpt_ccb_busbase)); 270 } 271 272 /* 273 * Send a command for immediate execution by the DPT 274 * See above function for IMPORTANT notes. 
275 */ 276 static __inline int 277 dpt_send_immediate(dpt_softc_t *dpt, eata_ccb_t *cmd_block, 278 u_int32_t cmd_busaddr, u_int retries, 279 u_int ifc, u_int code, u_int code2) 280 { 281 return (dpt_send_eata_command(dpt, cmd_block, cmd_busaddr, 282 EATA_CMD_IMMEDIATE, retries, ifc, 283 code, code2)); 284 } 285 286 287 /* ===================== Private Function definitions =======================*/ 288 static void 289 dptmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error) 290 { 291 bus_addr_t *busaddrp; 292 293 busaddrp = (bus_addr_t *)arg; 294 *busaddrp = segs->ds_addr; 295 } 296 297 static struct sg_map_node * 298 dptallocsgmap(struct dpt_softc *dpt) 299 { 300 struct sg_map_node *sg_map; 301 302 sg_map = kmalloc(sizeof(*sg_map), M_DEVBUF, M_INTWAIT); 303 304 /* Allocate S/G space for the next batch of CCBS */ 305 if (bus_dmamem_alloc(dpt->sg_dmat, (void *)&sg_map->sg_vaddr, 306 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { 307 kfree(sg_map, M_DEVBUF); 308 return (NULL); 309 } 310 311 (void)bus_dmamap_load(dpt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, 312 PAGE_SIZE, dptmapmem, &sg_map->sg_physaddr, 313 /*flags*/0); 314 315 SLIST_INSERT_HEAD(&dpt->sg_maps, sg_map, links); 316 317 return (sg_map); 318 } 319 320 /* 321 * Allocate another chunk of CCB's. Return count of entries added. 322 * Assumed to be called at splcam(). 323 */ 324 static int 325 dptallocccbs(dpt_softc_t *dpt) 326 { 327 struct dpt_ccb *next_ccb; 328 struct sg_map_node *sg_map; 329 bus_addr_t physaddr; 330 dpt_sg_t *segs; 331 int newcount; 332 int i; 333 334 next_ccb = &dpt->dpt_dccbs[dpt->total_dccbs]; 335 336 if (next_ccb == dpt->dpt_dccbs) { 337 /* 338 * First time through. Re-use the S/G 339 * space we allocated for initialization 340 * CCBS. 
341 */ 342 sg_map = SLIST_FIRST(&dpt->sg_maps); 343 } else { 344 sg_map = dptallocsgmap(dpt); 345 } 346 347 if (sg_map == NULL) 348 return (0); 349 350 segs = sg_map->sg_vaddr; 351 physaddr = sg_map->sg_physaddr; 352 353 newcount = (PAGE_SIZE / (dpt->sgsize * sizeof(dpt_sg_t))); 354 for (i = 0; dpt->total_dccbs < dpt->max_dccbs && i < newcount; i++) { 355 int error; 356 357 error = bus_dmamap_create(dpt->buffer_dmat, /*flags*/0, 358 &next_ccb->dmamap); 359 if (error != 0) 360 break; 361 next_ccb->sg_list = segs; 362 next_ccb->sg_busaddr = htonl(physaddr); 363 next_ccb->eata_ccb.cp_dataDMA = htonl(physaddr); 364 next_ccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr); 365 next_ccb->eata_ccb.cp_reqDMA = 366 htonl(dptccbvtop(dpt, next_ccb) 367 + offsetof(struct dpt_ccb, sense_data)); 368 next_ccb->eata_ccb.cp_busaddr = dpt->dpt_ccb_busend; 369 next_ccb->state = DCCB_FREE; 370 next_ccb->tag = dpt->total_dccbs; 371 SLIST_INSERT_HEAD(&dpt->free_dccb_list, next_ccb, links); 372 segs += dpt->sgsize; 373 physaddr += (dpt->sgsize * sizeof(dpt_sg_t)); 374 dpt->dpt_ccb_busend += sizeof(*next_ccb); 375 next_ccb++; 376 dpt->total_dccbs++; 377 } 378 return (i); 379 } 380 381 dpt_conf_t * 382 dpt_pio_get_conf (u_int32_t base) 383 { 384 static dpt_conf_t * conf; 385 u_int16_t * p; 386 int i; 387 388 /* 389 * Allocate a dpt_conf_t 390 */ 391 if (conf == NULL) 392 conf = kmalloc(sizeof(dpt_conf_t), M_DEVBUF, M_INTWAIT); 393 394 /* 395 * If we have one, clean it up. 396 */ 397 bzero(conf, sizeof(dpt_conf_t)); 398 399 /* 400 * Reset the controller. 401 */ 402 outb((base + HA_WCOMMAND), EATA_CMD_RESET); 403 404 /* 405 * Wait for the controller to become ready. 406 * For some reason there can be -no- delays after calling reset 407 * before we wait on ready status. 
408 */ 409 if (dpt_pio_wait(base, HA_RSTATUS, HA_SBUSY, 0)) { 410 kprintf("dpt: timeout waiting for controller to become ready\n"); 411 return (NULL); 412 } 413 414 if (dpt_pio_wait(base, HA_RAUXSTAT, HA_ABUSY, 0)) { 415 kprintf("dpt: timetout waiting for adapter ready.\n"); 416 return (NULL); 417 } 418 419 /* 420 * Send the PIO_READ_CONFIG command. 421 */ 422 outb((base + HA_WCOMMAND), EATA_CMD_PIO_READ_CONFIG); 423 424 /* 425 * Read the data into the struct. 426 */ 427 p = (u_int16_t *)conf; 428 for (i = 0; i < (sizeof(dpt_conf_t) / 2); i++) { 429 430 if (dpt_pio_wait(base, HA_RSTATUS, HA_SDRQ, 0)) { 431 kprintf("dpt: timeout in data read.\n"); 432 return (NULL); 433 } 434 435 (*p) = inw(base + HA_RDATA); 436 p++; 437 } 438 439 if (inb(base + HA_RSTATUS) & HA_SERROR) { 440 kprintf("dpt: error reading configuration data.\n"); 441 return (NULL); 442 } 443 444 #define BE_EATA_SIGNATURE 0x45415441 445 #define LE_EATA_SIGNATURE 0x41544145 446 447 /* 448 * Test to see if we have a valid card. 449 */ 450 if ((conf->signature == BE_EATA_SIGNATURE) || 451 (conf->signature == LE_EATA_SIGNATURE)) { 452 453 while (inb(base + HA_RSTATUS) & HA_SDRQ) { 454 inw(base + HA_RDATA); 455 } 456 457 return (conf); 458 } 459 return (NULL); 460 } 461 462 /* 463 * Read a configuration page into the supplied dpt_cont_t buffer. 
464 */ 465 static int 466 dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr, 467 u_int size, u_int page, u_int target, int extent) 468 { 469 eata_ccb_t *cp; 470 471 u_int8_t status; 472 473 int ndx; 474 int result; 475 476 cp = &dccb->eata_ccb; 477 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp)); 478 479 cp->Interpret = 1; 480 cp->DataIn = 1; 481 cp->Auto_Req_Sen = 1; 482 cp->reqlen = sizeof(struct scsi_sense_data); 483 484 cp->cp_id = target; 485 cp->cp_LUN = 0; /* In the EATA packet */ 486 cp->cp_lun = 0; /* In the SCSI command */ 487 488 cp->cp_scsi_cmd = INQUIRY; 489 cp->cp_len = size; 490 491 cp->cp_extent = extent; 492 493 cp->cp_page = page; 494 cp->cp_channel = 0; /* DNC, Interpret mode is set */ 495 cp->cp_identify = 1; 496 cp->cp_datalen = htonl(size); 497 498 crit_enter(); 499 500 /* 501 * This could be a simple for loop, but we suspected the compiler To 502 * have optimized it a bit too much. Wait for the controller to 503 * become ready 504 */ 505 while (((status = dpt_inb(dpt, HA_RSTATUS)) != (HA_SREADY | HA_SSC) 506 && (status != (HA_SREADY | HA_SSC | HA_SERROR)) 507 && (status != (HA_SDRDY | HA_SERROR | HA_SDRQ))) 508 || (dpt_wait(dpt, HA_SBUSY, 0))) { 509 510 /* 511 * RAID Drives still Spinning up? (This should only occur if 512 * the DPT controller is in a NON PC (PCI?) platform). 513 */ 514 if (dpt_raid_busy(dpt)) { 515 kprintf("dpt%d WARNING: Get_conf() RSUS failed.\n", 516 dpt->unit); 517 crit_exit(); 518 return (0); 519 } 520 } 521 522 DptStat_Reset_BUSY(dpt->sp); 523 524 /* 525 * XXXX We might want to do something more clever than aborting at 526 * this point, like resetting (rebooting) the controller and trying 527 * again. 
528 */ 529 if ((result = dpt_send_eata_command(dpt, cp, dccb_busaddr, 530 EATA_CMD_DMA_SEND_CP, 531 10000, 0, 0, 0)) != 0) { 532 kprintf("dpt%d WARNING: Get_conf() failed (%d) to send " 533 "EATA_CMD_DMA_READ_CONFIG\n", 534 dpt->unit, result); 535 crit_exit(); 536 return (0); 537 } 538 /* Wait for two seconds for a response. This can be slow */ 539 for (ndx = 0; 540 (ndx < 20000) 541 && !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ); 542 ndx++) { 543 DELAY(50); 544 } 545 546 /* Grab the status and clear interrupts */ 547 status = dpt_inb(dpt, HA_RSTATUS); 548 549 crit_exit(); 550 551 /* 552 * Check the status carefully. Return only if the 553 * command was successful. 554 */ 555 if (((status & HA_SERROR) == 0) 556 && (dpt->sp->hba_stat == 0) 557 && (dpt->sp->scsi_stat == 0) 558 && (dpt->sp->residue_len == 0)) 559 return (0); 560 561 if (dpt->sp->scsi_stat == SCSI_STATUS_CHECK_COND) 562 return (0); 563 564 return (1); 565 } 566 567 /* Detect Cache parameters and size */ 568 static void 569 dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr, 570 u_int8_t *buff) 571 { 572 eata_ccb_t *cp; 573 u_int8_t *param; 574 int bytes; 575 int result; 576 int ndx; 577 u_int8_t status; 578 579 /* 580 * Default setting, for best perfromance.. 581 * This is what virtually all cards default to.. 582 */ 583 dpt->cache_type = DPT_CACHE_WRITEBACK; 584 dpt->cache_size = 0; 585 586 cp = &dccb->eata_ccb; 587 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(dpt->sp)); 588 bzero(buff, 512); 589 590 /* Setup the command structure */ 591 cp->Interpret = 1; 592 cp->DataIn = 1; 593 cp->Auto_Req_Sen = 1; 594 cp->reqlen = sizeof(struct scsi_sense_data); 595 596 cp->cp_id = 0; /* who cares? The HBA will interpret.. 
*/ 597 cp->cp_LUN = 0; /* In the EATA packet */ 598 cp->cp_lun = 0; /* In the SCSI command */ 599 cp->cp_channel = 0; 600 601 cp->cp_scsi_cmd = EATA_CMD_DMA_SEND_CP; 602 cp->cp_len = 56; 603 604 cp->cp_extent = 0; 605 cp->cp_page = 0; 606 cp->cp_identify = 1; 607 cp->cp_dispri = 1; 608 609 /* 610 * Build the EATA Command Packet structure 611 * for a Log Sense Command. 612 */ 613 cp->cp_cdb[0] = 0x4d; 614 cp->cp_cdb[1] = 0x0; 615 cp->cp_cdb[2] = 0x40 | 0x33; 616 cp->cp_cdb[7] = 1; 617 618 cp->cp_datalen = htonl(512); 619 620 crit_enter(); 621 result = dpt_send_eata_command(dpt, cp, dccb_busaddr, 622 EATA_CMD_DMA_SEND_CP, 623 10000, 0, 0, 0); 624 if (result != 0) { 625 kprintf("dpt%d WARNING: detect_cache() failed (%d) to send " 626 "EATA_CMD_DMA_SEND_CP\n", dpt->unit, result); 627 crit_exit(); 628 return; 629 } 630 /* Wait for two seconds for a response. This can be slow... */ 631 for (ndx = 0; 632 (ndx < 20000) && 633 !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ); 634 ndx++) { 635 DELAY(50); 636 } 637 638 /* Grab the status and clear interrupts */ 639 status = dpt_inb(dpt, HA_RSTATUS); 640 crit_exit(); 641 642 /* 643 * Sanity check 644 */ 645 if (buff[0] != 0x33) { 646 return; 647 } 648 bytes = DPT_HCP_LENGTH(buff); 649 param = DPT_HCP_FIRST(buff); 650 651 if (DPT_HCP_CODE(param) != 1) { 652 /* 653 * DPT Log Page layout error 654 */ 655 kprintf("dpt%d: NOTICE: Log Page (1) layout error\n", 656 dpt->unit); 657 return; 658 } 659 if (!(param[4] & 0x4)) { 660 dpt->cache_type = DPT_NO_CACHE; 661 return; 662 } 663 while (DPT_HCP_CODE(param) != 6) { 664 param = DPT_HCP_NEXT(param); 665 if ((param < buff) 666 || (param >= &buff[bytes])) { 667 return; 668 } 669 } 670 671 if (param[4] & 0x2) { 672 /* 673 * Cache disabled 674 */ 675 dpt->cache_type = DPT_NO_CACHE; 676 return; 677 } 678 679 if (param[4] & 0x4) { 680 dpt->cache_type = DPT_CACHE_WRITETHROUGH; 681 } 682 683 /* XXX This isn't correct. This log parameter only has two bytes.... 
 */
#if 0
	dpt->cache_size = param[5]
			| (param[6] << 8)
			| (param[7] << 16)
			| (param[8] << 24);
#endif
}

/* CAM poll entry point: service the controller without interrupts. */
static void
dpt_poll(struct cam_sim *sim)
{
	dpt_intr(cam_sim_softc(sim));
}

/*
 * bus_dmamap_load() callback for a queued SCSI I/O: copy the DMA
 * segments into the dccb's S/G list (big-endian, per EATA), sync the
 * map, then (further below) hand the EATA CCB to the controller.
 * On mapping error the CAM ccb is failed and the dccb recycled.
 */
static void
dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 dpt_ccb *dccb;
	union	 ccb *ccb;
	struct	 dpt_softc *dpt;

	dccb = (struct dpt_ccb *)arg;
	ccb = dccb->ccb;
	dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;

	if (error != 0) {
		/* EFBIG (too many segments) is the only "expected" error */
		if (error != EFBIG)
			kprintf("dpt%d: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", dpt->unit, error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		dptfreeccb(dpt, dccb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		dpt_sg_t *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		sg = dccb->sg_list;
		while (dm_segs < end_seg) {
			/* S/G entries are big-endian on the wire */
			sg->seg_len = htonl(dm_segs->ds_len);
			sg->seg_addr = htonl(dm_segs->ds_addr);
			sg++;
			dm_segs++;
		}

		if (nseg > 1) {
			/* Scatter mode: point the HBA at the S/G list */
			dccb->eata_ccb.scatter = 1;
			dccb->eata_ccb.cp_dataDMA = dccb->sg_busaddr;
			dccb->eata_ccb.cp_datalen =
			    htonl(nseg * sizeof(dpt_sg_t));
		} else {
			/* Single segment: direct DMA, no S/G indirection */
			dccb->eata_ccb.cp_dataDMA = dccb->sg_list[0].seg_addr;
			dccb->eata_ccb.cp_datalen = dccb->sg_list[0].seg_len;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);

	} else {
		dccb->eata_ccb.cp_dataDMA = 0;
		dccb->eata_ccb.cp_datalen = 0;
	}

	crit_enter();

	/*
	 * Last time we need to check if this CCB needs to
	 * be aborted.
765 */ 766 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 767 if (nseg != 0) 768 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap); 769 dptfreeccb(dpt, dccb); 770 xpt_done(ccb); 771 crit_exit(); 772 return; 773 } 774 775 dccb->state |= DCCB_ACTIVE; 776 ccb->ccb_h.status |= CAM_SIM_QUEUED; 777 LIST_INSERT_HEAD(&dpt->pending_ccb_list, &ccb->ccb_h, sim_links.le); 778 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000, 779 dpttimeout, dccb); 780 if (dpt_send_eata_command(dpt, &dccb->eata_ccb, 781 dccb->eata_ccb.cp_busaddr, 782 EATA_CMD_DMA_SEND_CP, 0, 0, 0, 0) != 0) { 783 ccb->ccb_h.status = CAM_NO_HBA; /* HBA dead or just busy?? */ 784 if (nseg != 0) 785 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap); 786 dptfreeccb(dpt, dccb); 787 xpt_done(ccb); 788 } 789 790 crit_exit(); 791 } 792 793 static void 794 dpt_action(struct cam_sim *sim, union ccb *ccb) 795 { 796 struct dpt_softc *dpt; 797 798 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("dpt_action\n")); 799 800 dpt = (struct dpt_softc *)cam_sim_softc(sim); 801 802 if ((dpt->state & DPT_HA_SHUTDOWN_ACTIVE) != 0) { 803 xpt_print_path(ccb->ccb_h.path); 804 kprintf("controller is shutdown. 
Aborting CCB.\n"); 805 ccb->ccb_h.status = CAM_NO_HBA; 806 xpt_done(ccb); 807 return; 808 } 809 810 switch (ccb->ccb_h.func_code) { 811 /* Common cases first */ 812 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 813 { 814 struct ccb_scsiio *csio; 815 struct ccb_hdr *ccbh; 816 struct dpt_ccb *dccb; 817 struct eata_ccb *eccb; 818 819 csio = &ccb->csio; 820 ccbh = &ccb->ccb_h; 821 /* Max CDB length is 12 bytes */ 822 if (csio->cdb_len > 12) { 823 ccb->ccb_h.status = CAM_REQ_INVALID; 824 xpt_done(ccb); 825 return; 826 } 827 if ((dccb = dptgetccb(dpt)) == NULL) { 828 crit_enter(); 829 dpt->resource_shortage = 1; 830 crit_exit(); 831 xpt_freeze_simq(sim, /*count*/1); 832 ccb->ccb_h.status = CAM_REQUEUE_REQ; 833 xpt_done(ccb); 834 return; 835 } 836 eccb = &dccb->eata_ccb; 837 838 /* Link dccb and ccb so we can find one from the other */ 839 dccb->ccb = ccb; 840 ccb->ccb_h.ccb_dccb_ptr = dccb; 841 ccb->ccb_h.ccb_dpt_ptr = dpt; 842 843 /* 844 * Explicitly set all flags so that the compiler can 845 * be smart about setting them. 846 */ 847 eccb->SCSI_Reset = 0; 848 eccb->HBA_Init = 0; 849 eccb->Auto_Req_Sen = (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) 850 ? 0 : 1; 851 eccb->scatter = 0; 852 eccb->Quick = 0; 853 eccb->Interpret = 854 ccb->ccb_h.target_id == dpt->hostid[cam_sim_bus(sim)] 855 ? 1 : 0; 856 eccb->DataOut = (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0; 857 eccb->DataIn = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0; 858 eccb->reqlen = csio->sense_len; 859 eccb->cp_id = ccb->ccb_h.target_id; 860 eccb->cp_channel = cam_sim_bus(sim); 861 eccb->cp_LUN = ccb->ccb_h.target_lun; 862 eccb->cp_luntar = 0; 863 eccb->cp_dispri = (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) 864 ? 
0 : 1; 865 eccb->cp_identify = 1; 866 867 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 868 && csio->tag_action != CAM_TAG_ACTION_NONE) { 869 eccb->cp_msg[0] = csio->tag_action; 870 eccb->cp_msg[1] = dccb->tag; 871 } else { 872 eccb->cp_msg[0] = 0; 873 eccb->cp_msg[1] = 0; 874 } 875 eccb->cp_msg[2] = 0; 876 877 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 878 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { 879 bcopy(csio->cdb_io.cdb_ptr, 880 eccb->cp_cdb, csio->cdb_len); 881 } else { 882 /* I guess I could map it in... */ 883 ccb->ccb_h.status = CAM_REQ_INVALID; 884 dptfreeccb(dpt, dccb); 885 xpt_done(ccb); 886 return; 887 } 888 } else { 889 bcopy(csio->cdb_io.cdb_bytes, 890 eccb->cp_cdb, csio->cdb_len); 891 } 892 /* 893 * If we have any data to send with this command, 894 * map it into bus space. 895 */ 896 /* Only use S/G if there is a transfer */ 897 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 898 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 899 /* 900 * We've been given a pointer 901 * to a single buffer. 902 */ 903 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 904 int error; 905 906 crit_enter(); 907 error = 908 bus_dmamap_load(dpt->buffer_dmat, 909 dccb->dmamap, 910 csio->data_ptr, 911 csio->dxfer_len, 912 dptexecuteccb, 913 dccb, /*flags*/0); 914 if (error == EINPROGRESS) { 915 /* 916 * So as to maintain ordering, 917 * freeze the controller queue 918 * until our mapping is 919 * returned. 
920 */ 921 xpt_freeze_simq(sim, 1); 922 dccb->state |= CAM_RELEASE_SIMQ; 923 } 924 crit_exit(); 925 } else { 926 struct bus_dma_segment seg; 927 928 /* Pointer to physical buffer */ 929 seg.ds_addr = 930 (bus_addr_t)csio->data_ptr; 931 seg.ds_len = csio->dxfer_len; 932 dptexecuteccb(dccb, &seg, 1, 0); 933 } 934 } else { 935 struct bus_dma_segment *segs; 936 937 if ((ccbh->flags & CAM_DATA_PHYS) != 0) 938 panic("dpt_action - Physical " 939 "segment pointers " 940 "unsupported"); 941 942 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) 943 panic("dpt_action - Virtual " 944 "segment addresses " 945 "unsupported"); 946 947 /* Just use the segments provided */ 948 segs = (struct bus_dma_segment *)csio->data_ptr; 949 dptexecuteccb(dccb, segs, csio->sglist_cnt, 0); 950 } 951 } else { 952 /* 953 * XXX JGibbs. 954 * Does it want them both on or both off? 955 * CAM_DIR_NONE is both on, so this code can 956 * be removed if this is also what the DPT 957 * exptects. 958 */ 959 eccb->DataOut = 0; 960 eccb->DataIn = 0; 961 dptexecuteccb(dccb, NULL, 0, 0); 962 } 963 break; 964 } 965 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 966 case XPT_ABORT: /* Abort the specified CCB */ 967 /* XXX Implement */ 968 ccb->ccb_h.status = CAM_REQ_INVALID; 969 xpt_done(ccb); 970 break; 971 case XPT_SET_TRAN_SETTINGS: 972 { 973 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 974 xpt_done(ccb); 975 break; 976 } 977 case XPT_GET_TRAN_SETTINGS: 978 /* Get default/user set transfer settings for the target */ 979 { 980 struct ccb_trans_settings *cts = &ccb->cts; 981 struct ccb_trans_settings_scsi *scsi = 982 &cts->proto_specific.scsi; 983 struct ccb_trans_settings_spi *spi = 984 &cts->xport_specific.spi; 985 986 cts->protocol = PROTO_SCSI; 987 cts->protocol_version = SCSI_REV_2; 988 cts->transport = XPORT_SPI; 989 cts->transport_version = 2; 990 991 if (cts->type == CTS_TYPE_USER_SETTINGS) { 992 spi->flags = CTS_SPI_FLAGS_DISC_ENB; 993 spi->bus_width = (dpt->max_id > 7) 994 ? 
MSG_EXT_WDTR_BUS_8_BIT 995 : MSG_EXT_WDTR_BUS_16_BIT; 996 spi->sync_period = 25; /* 10MHz */ 997 if (spi->sync_period != 0) 998 spi->sync_offset = 15; 999 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 1000 1001 spi->valid = CTS_SPI_VALID_SYNC_RATE 1002 | CTS_SPI_VALID_SYNC_OFFSET 1003 | CTS_SPI_VALID_BUS_WIDTH 1004 | CTS_SPI_VALID_DISC; 1005 scsi->valid = CTS_SCSI_VALID_TQ; 1006 ccb->ccb_h.status = CAM_REQ_CMP; 1007 } else { 1008 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1009 } 1010 xpt_done(ccb); 1011 break; 1012 } 1013 case XPT_CALC_GEOMETRY: 1014 { 1015 struct ccb_calc_geometry *ccg; 1016 u_int32_t size_mb; 1017 u_int32_t secs_per_cylinder; 1018 int extended; 1019 1020 /* 1021 * XXX Use Adaptec translation until I find out how to 1022 * get this information from the card. 1023 */ 1024 ccg = &ccb->ccg; 1025 size_mb = ccg->volume_size 1026 / ((1024L * 1024L) / ccg->block_size); 1027 extended = 1; 1028 1029 if (size_mb > 1024 && extended) { 1030 ccg->heads = 255; 1031 ccg->secs_per_track = 63; 1032 } else { 1033 ccg->heads = 64; 1034 ccg->secs_per_track = 32; 1035 } 1036 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 1037 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 1038 ccb->ccb_h.status = CAM_REQ_CMP; 1039 xpt_done(ccb); 1040 break; 1041 } 1042 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 1043 { 1044 /* XXX Implement */ 1045 ccb->ccb_h.status = CAM_REQ_CMP; 1046 xpt_done(ccb); 1047 break; 1048 } 1049 case XPT_TERM_IO: /* Terminate the I/O process */ 1050 /* XXX Implement */ 1051 ccb->ccb_h.status = CAM_REQ_INVALID; 1052 xpt_done(ccb); 1053 break; 1054 case XPT_PATH_INQ: /* Path routing inquiry */ 1055 { 1056 struct ccb_pathinq *cpi = &ccb->cpi; 1057 1058 cpi->version_num = 1; 1059 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; 1060 if (dpt->max_id > 7) 1061 cpi->hba_inquiry |= PI_WIDE_16; 1062 cpi->target_sprt = 0; 1063 cpi->hba_misc = 0; 1064 cpi->hba_eng_cnt = 0; 1065 cpi->max_target = dpt->max_id; 1066 cpi->max_lun = dpt->max_lun; 1067 
		cpi->initiator_id = dpt->hostid[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "DPT", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

/*
 * This routine will try to send an EATA command to the DPT HBA.
 * It will, by default, try 20,000 times, waiting 50us between tries.
 * It returns 0 on success and 1 on failure.
 * It is assumed to be called at splcam().
 */
static int
dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
		      u_int32_t cmd_busaddr, u_int command, u_int retries,
		      u_int ifc, u_int code, u_int code2)
{
	u_int	loop;

	if (!retries)
		retries = 20000;

	/*
	 * I hate this polling nonsense. Wish there was a way to tell the DPT
	 * to go get commands at its own pace,  or to interrupt when ready.
	 * In the mean time we will measure how many itterations it really
	 * takes.
	 */
	for (loop = 0; loop < retries; loop++) {
		if ((dpt_inb(dpt, HA_RAUXSTAT) & HA_ABUSY) == 0)
			break;
		else
			DELAY(50);
	}

	if (loop < retries) {
#ifdef DPT_MEASURE_PERFORMANCE
		if (loop > dpt->performance.max_eata_tries)
			dpt->performance.max_eata_tries = loop;

		if (loop < dpt->performance.min_eata_tries)
			dpt->performance.min_eata_tries = loop;
#endif
	} else {
#ifdef DPT_MEASURE_PERFORMANCE
		++dpt->performance.command_too_busy;
#endif
		return (1);
	}

	/* The controller is alive, advance the wedge timer */
#ifdef DPT_RESET_HBA
	dpt->last_contact = microtime_now;
#endif

	if (cmd_block == NULL)
		cmd_busaddr = 0;
#if (BYTE_ORDER == BIG_ENDIAN)
	else {
		/*
		 * NOTE(review): every term below is masked with 0xFF, so
		 * only the low-order byte of each shift survives -- this
		 * cannot be a correct 32-bit byte swap.  It is compiled
		 * out on little-endian machines; verify against a real
		 * big-endian target before relying on it.
		 */
		cmd_busaddr = ((cmd_busaddr >> 24) & 0xFF)
			    | ((cmd_busaddr >> 16) & 0xFF)
			    | ((cmd_busaddr >> 8) & 0xFF)
			    | (cmd_busaddr & 0xFF);
	}
#endif
	/* And now the address */
	dpt_outl(dpt, HA_WDMAADDR, cmd_busaddr);

	if (command == EATA_CMD_IMMEDIATE) {
		if (cmd_block == NULL) {
			dpt_outb(dpt, HA_WCODE2, code2);
			dpt_outb(dpt, HA_WCODE, code);
		}
		dpt_outb(dpt, HA_WIFC, ifc);
	}
	/* Writing the command register kicks off execution */
	dpt_outb(dpt, HA_WCOMMAND, command);

	return (0);
}


/* ==================== Exported Function definitions =======================*/
/*
 * Allocate and minimally initialize the softc for a newly probed HBA
 * and link it onto the global dpt_softcs list.  dpt_free() is the
 * matching teardown.
 */
dpt_softc_t *
dpt_alloc(device_t dev, bus_space_tag_t tag, bus_space_handle_t bsh)
{
	dpt_softc_t	*dpt = device_get_softc(dev);
	int    i;

	bzero(dpt, sizeof(dpt_softc_t));
	dpt->tag = tag;
	dpt->bsh = bsh;
	dpt->unit = device_get_unit(dev);
	SLIST_INIT(&dpt->free_dccb_list);
	LIST_INIT(&dpt->pending_ccb_list);
	TAILQ_INSERT_TAIL(&dpt_softcs, dpt, links);
	for (i = 0; i < MAX_CHANNELS; i++)
		dpt->resetlevel[i] = DPT_HA_OK;

#ifdef DPT_MEASURE_PERFORMANCE
	dpt_reset_performance(dpt);
#endif /* 
DPT_MEASURE_PERFORMANCE */
	return (dpt);
}

/*
 * Tear down the resources acquired by dpt_alloc()/dpt_init(), in reverse
 * order of construction.  init_level records how far initialization got;
 * every case deliberately falls through to the levels below it.
 */
void
dpt_free(struct dpt_softc *dpt)
{
	switch (dpt->init_level) {
	default:
	case 5:
		bus_dmamap_unload(dpt->dccb_dmat, dpt->dccb_dmamap);
		/* FALLTHROUGH */
	case 4:
		bus_dmamem_free(dpt->dccb_dmat, dpt->dpt_dccbs,
				dpt->dccb_dmamap);
		bus_dmamap_destroy(dpt->dccb_dmat, dpt->dccb_dmamap);
		/* FALLTHROUGH */
	case 3:
		bus_dma_tag_destroy(dpt->dccb_dmat);
		/* FALLTHROUGH */
	case 2:
		bus_dma_tag_destroy(dpt->buffer_dmat);
		/* FALLTHROUGH */
	case 1:
	{
		struct sg_map_node *sg_map;

		/* Release every S/G chunk allocated by dptallocsgmap(). */
		while ((sg_map = SLIST_FIRST(&dpt->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&dpt->sg_maps, links);
			bus_dmamap_unload(dpt->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(dpt->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			kfree(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(dpt->sg_dmat);
	}
		/* FALLTHROUGH */
	case 0:
		break;
	}
	TAILQ_REMOVE(&dpt_softcs, dpt, links);
}

/*
 * Widths of the fixed-size (space padded) inquiry string fields, in the
 * order they are laid out in dpt_inq_t; used below to strip trailing blanks.
 */
static u_int8_t string_sizes[] =
{
	sizeof(((dpt_inq_t*)NULL)->vendor),
	sizeof(((dpt_inq_t*)NULL)->modelNum),
	sizeof(((dpt_inq_t*)NULL)->firmware),
	sizeof(((dpt_inq_t*)NULL)->protocol),
};

/*
 * Bring the controller up: query its EATA configuration and inquiry data,
 * size and create the DMA tags, allocate the CCB array and status packet,
 * and print an attach banner.  Returns 0 on success, non-zero on failure.
 */
int
dpt_init(struct dpt_softc *dpt)
{
	dpt_conf_t  conf;
	struct	    sg_map_node *sg_map;
	dpt_ccb_t  *dccb;
	u_int8_t   *strp;
	int	    index;
	int	    i;
	int	    retval;

	dpt->init_level = 0;
	SLIST_INIT(&dpt->sg_maps);

#ifdef DPT_RESET_BOARD
	kprintf("dpt%d: resetting HBA\n", dpt->unit);
	dpt_outb(dpt, HA_WCOMMAND, EATA_CMD_RESET);
	DELAY(750000);
	/* XXX Shouldn't we poll a status register or something??? */
#endif
	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(dpt->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &dpt->sg_dmat) != 0) {
		goto error_exit;
	}

	dpt->init_level++;

	/*
	 * We allocate our DPT ccbs as a contiguous array of bus dma'able
	 * memory.  To get the allocation size, we need to know how many
	 * ccbs the card supports.  This requires a ccb.  We solve this
	 * chicken and egg problem by allocating some re-usable S/G space
	 * up front, and treating it as our status packet, CCB, and target
	 * memory space for these commands.
	 */
	sg_map = dptallocsgmap(dpt);
	if (sg_map == NULL)
		goto error_exit;

	/* Carve the S/G chunk into: status packet | bootstrap CCB | data. */
	dpt->sp = (volatile dpt_sp_t *)sg_map->sg_vaddr;
	dccb = (struct dpt_ccb *)(uintptr_t)(volatile void *)&dpt->sp[1];
	bzero(dccb, sizeof(*dccb));
	dpt->sp_physaddr = sg_map->sg_physaddr;
	dccb->eata_ccb.cp_dataDMA =
	    htonl(sg_map->sg_physaddr + sizeof(dpt_sp_t) + sizeof(*dccb));
	dccb->eata_ccb.cp_busaddr = ~0;
	dccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
	dccb->eata_ccb.cp_reqDMA = htonl(dpt->sp_physaddr + sizeof(*dccb)
					 + offsetof(struct dpt_ccb, sense_data));

	/* Okay.  Fetch our config */
	bzero(&dccb[1], sizeof(conf));		/* data area */
	retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
			      sizeof(conf), 0xc1, 7, 1);

	if (retval != 0) {
		kprintf("dpt%d: Failed to get board configuration\n", dpt->unit);
		return (retval);
	}
	bcopy(&dccb[1], &conf, sizeof(conf));

	/* Reuse the same data area for the board inquiry information. */
	bzero(&dccb[1], sizeof(dpt->board_data));
	retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
			      sizeof(dpt->board_data), 0, conf.scsi_id0, 0);
	if (retval != 0) {
		kprintf("dpt%d: Failed to get inquiry information\n", dpt->unit);
		return (retval);
	}
	bcopy(&dccb[1], &dpt->board_data, sizeof(dpt->board_data));

	dpt_detect_cache(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
			 (u_int8_t *)&dccb[1]);

	/* The status packet length encodes the EATA revision. */
	switch (ntohl(conf.splen)) {
	case DPT_EATA_REVA:
		dpt->EATA_revision = 'a';
		break;
	case DPT_EATA_REVB:
		dpt->EATA_revision = 'b';
		break;
	case DPT_EATA_REVC:
		dpt->EATA_revision = 'c';
		break;
	case DPT_EATA_REVZ:
		dpt->EATA_revision = 'z';
		break;
	default:
		dpt->EATA_revision = '?';
	}

	dpt->max_id	 = conf.MAX_ID;
	dpt->max_lun	 = conf.MAX_LUN;
	dpt->irq	 = conf.IRQ;
	dpt->dma_channel = (8 - conf.DMA_channel) & 7;
	dpt->channels	 = conf.MAX_CHAN + 1;
	dpt->state	|= DPT_HA_OK;
	if (conf.SECOND)
		dpt->primary = FALSE;
	else
		dpt->primary = TRUE;

	dpt->more_support = conf.MORE_support;

	/* Firmware revisions 07G0 and later support immediate commands. */
	if (strncmp(dpt->board_data.firmware, "07G0", 4) >= 0)
		dpt->immediate_support = 1;
	else
		dpt->immediate_support = 0;

	dpt->broken_INQUIRY = FALSE;

	dpt->cplen = ntohl(conf.cplen);
	dpt->cppadlen = ntohs(conf.cppadlen);
	dpt->max_dccbs = ntohs(conf.queuesiz);

	if (dpt->max_dccbs > 256) {
		kprintf("dpt%d: Max CCBs reduced from %d to "
		       "256 due to tag algorithm\n", dpt->unit, dpt->max_dccbs);
		dpt->max_dccbs = 256;
	}

	dpt->hostid[0] = conf.scsi_id0;
	dpt->hostid[1] = conf.scsi_id1;
	dpt->hostid[2] = conf.scsi_id2;

	if (conf.SG_64K)
		dpt->sgsize = 8192;
	else
		dpt->sgsize = ntohs(conf.SGsiz);

	/* We can only get 64k buffers, so don't bother to waste space. */
	if (dpt->sgsize < 17 || dpt->sgsize > 32)
		dpt->sgsize = 32;

	if (dpt->sgsize > dpt_max_segs)
		dpt->sgsize = dpt_max_segs;

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(dpt->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/dpt->sgsize,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &dpt->buffer_dmat) != 0) {
		kprintf("dpt: bus_dma_tag_create(...,dpt->buffer_dmat) failed\n");
		goto error_exit;
	}

	dpt->init_level++;

	/* DMA tag for our ccb structures and interrupt status packet */
	if (bus_dma_tag_create(dpt->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       (dpt->max_dccbs * sizeof(struct dpt_ccb))
			       + sizeof(dpt_sp_t),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &dpt->dccb_dmat) != 0) {
		kprintf("dpt: bus_dma_tag_create(...,dpt->dccb_dmat) failed\n");
		goto error_exit;
	}

	dpt->init_level++;

	/* Allocation for our ccbs and interrupt status packet */
	if (bus_dmamem_alloc(dpt->dccb_dmat, (void *)&dpt->dpt_dccbs,
			     BUS_DMA_NOWAIT, &dpt->dccb_dmamap) != 0) {
		kprintf("dpt: bus_dmamem_alloc(dpt->dccb_dmat,...) failed\n");
		goto error_exit;
	}

	dpt->init_level++;

	/* And permanently map them */
	bus_dmamap_load(dpt->dccb_dmat, dpt->dccb_dmamap,
			dpt->dpt_dccbs,
			(dpt->max_dccbs * sizeof(struct dpt_ccb))
			+ sizeof(dpt_sp_t),
			dptmapmem, &dpt->dpt_ccb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(dpt->dpt_dccbs,
	      (dpt->max_dccbs * sizeof(struct dpt_ccb)) + sizeof(dpt_sp_t));

	dpt->dpt_ccb_busend = dpt->dpt_ccb_busbase;

	/* The real status packet lives just past the CCB array. */
	dpt->sp = (dpt_sp_t*)&dpt->dpt_dccbs[dpt->max_dccbs];
	dpt->sp_physaddr = dpt->dpt_ccb_busbase
			 + (dpt->max_dccbs * sizeof(dpt_ccb_t));
	dpt->init_level++;

	/* Allocate our first batch of ccbs */
	if (dptallocccbs(dpt) == 0) {
		kprintf("dpt: dptallocccbs(dpt) == 0\n");
		return (2);
	}

	/* Prepare for Target Mode */
	dpt->target_mode_enabled = 1;

	/* Nuke excess spaces from inquiry information */
	strp = dpt->board_data.vendor;
	for (i = 0; i < sizeof(string_sizes); i++) {
		index = string_sizes[i] - 1;
		while (index && (strp[index] == ' '))
			strp[index--] = '\0';
		strp += string_sizes[i];
	}

	kprintf("dpt%d: %.8s %.16s FW Rev. %.4s, ",
	       dpt->unit, dpt->board_data.vendor,
	       dpt->board_data.modelNum, dpt->board_data.firmware);

	kprintf("%d channel%s, ", dpt->channels, dpt->channels > 1 ? "s" : "");

	if (dpt->cache_type != DPT_NO_CACHE
	 && dpt->cache_size != 0) {
		kprintf("%s Cache, ",
		       dpt->cache_type == DPT_CACHE_WRITETHROUGH
		     ? "Write-Through" : "Write-Back");
	}

	kprintf("%d CCBs\n", dpt->max_dccbs);
	return (0);

error_exit:
	return (1);
}

/*
 * Register one CAM SIM and wildcard path per channel.  Returns the number
 * of channels successfully attached (0 means total failure).
 */
int
dpt_attach(dpt_softc_t *dpt)
{
	struct cam_devq *devq;
	int i;

	/*
	 * Create the device queue for our SIM.
*/
	devq = cam_simq_alloc(dpt->max_dccbs);
	if (devq == NULL)
		return (0);

	for (i = 0; i < dpt->channels; i++) {
		/*
		 * Construct our SIM entry
		 */
		dpt->sims[i] = cam_sim_alloc(dpt_action, dpt_poll, "dpt",
					     dpt, dpt->unit, &sim_mplock,
					     /*untagged*/2,
					     /*tagged*/dpt->max_dccbs, devq);
		if (xpt_bus_register(dpt->sims[i], i) != CAM_SUCCESS) {
			cam_sim_free(dpt->sims[i]);
			break;
		}

		if (xpt_create_path(&dpt->paths[i], /*periph*/NULL,
				    cam_sim_path(dpt->sims[i]),
				    CAM_TARGET_WILDCARD,
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
			cam_sim_free(dpt->sims[i]);
			break;
		}

	}
	/* Drop our reference; each registered SIM holds its own. */
	cam_simq_release(devq);
	if (i > 0)
		EVENTHANDLER_REGISTER(shutdown_post_sync, dptshutdown,
				      dpt, SHUTDOWN_PRI_DRIVER);
	return (i);
}


/*
 * This is the interrupt handler for the DPT driver.
 * Drains every completion the HBA has posted: validates the status packet's
 * CCB bus address, reads HA_RSTATUS (which acknowledges the interrupt),
 * syncs/unloads the data map and completes the CCB back to CAM.
 */
void
dpt_intr(void *arg)
{
	dpt_softc_t    *dpt;
	dpt_ccb_t      *dccb;
	union ccb      *ccb;
	u_int		status;
	u_int		aux_status;
	u_int		hba_stat;
	u_int		scsi_stat;
	u_int32_t	residue_len;	/* Number of bytes not transferred */

	dpt = (dpt_softc_t *)arg;

	/* First order of business is to check if this interrupt is for us */
	while (((aux_status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ) != 0) {

		/*
		 * What we want to do now, is to capture the status, all of it,
		 * move it where it belongs, wake up whoever sleeps waiting to
		 * process this result, and get out of here.
		 */
		if (dpt->sp->ccb_busaddr < dpt->dpt_ccb_busbase
		 || dpt->sp->ccb_busaddr >= dpt->dpt_ccb_busend) {
			kprintf("Encountered bogus status packet\n");
			/* Read of HA_RSTATUS acknowledges the interrupt. */
			status = dpt_inb(dpt, HA_RSTATUS);
			return;
		}

		dccb = dptccbptov(dpt, dpt->sp->ccb_busaddr);

		/* Poison the address so a stale repost is caught above. */
		dpt->sp->ccb_busaddr = ~0;

		/* Ignore status packets with EOC not set */
		if (dpt->sp->EOC == 0) {
			kprintf("dpt%d ERROR: Request %d received with "
			       "clear EOC.\n Marking as LOST.\n",
			       dpt->unit, dccb->transaction_id);

#ifdef DPT_HANDLE_TIMEOUTS
			dccb->state |= DPT_CCB_STATE_MARKED_LOST;
#endif
			/* This CLEARS the interrupt! */
			status = dpt_inb(dpt, HA_RSTATUS);
			continue;
		}
		dpt->sp->EOC = 0;

		/*
		 * Double buffer the status information so the hardware can
		 * work on updating the status packet while we decipher the
		 * one we were just interrupted for.
		 * According to Mark Salyzyn, we only need few pieces of it.
		 */
		hba_stat = dpt->sp->hba_stat;
		scsi_stat = dpt->sp->scsi_stat;
		residue_len = dpt->sp->residue_len;

		/* Clear interrupts, check for error */
		if ((status = dpt_inb(dpt, HA_RSTATUS)) & HA_SERROR) {
			/*
			 * Error Condition. Check for magic cookie. Exit
			 * this test on earliest sign of non-reset condition
			 */

			/* Check that this is not a board reset interrupt */
			if (dpt_just_reset(dpt)) {
				kprintf("dpt%d: HBA rebooted.\n"
				       " All transactions should be "
				       "resubmitted\n",
				       dpt->unit);

				kprintf("dpt%d: >>---->> This is incomplete, "
				       "fix me.... <<----<<", dpt->unit);
				panic("DPT Rebooted");

			}
		}
		/* Process CCB */
		ccb = dccb->ccb;
		callout_stop(&ccb->ccb_h.timeout_ch);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
			bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
		}

		/* Common Case inline... */
		if (hba_stat == HA_NO_ERROR) {
			ccb->csio.scsi_status = scsi_stat;
			ccb->ccb_h.status = 0;
			switch (scsi_stat) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				/* Autosense data was collected by the HBA. */
				bcopy(&dccb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				/* XXX Freeze DevQ */
				break;
			}
			ccb->csio.resid = residue_len;
			dptfreeccb(dpt, dccb);
			xpt_done(ccb);
		} else {
			dptprocesserror(dpt, dccb, ccb, hba_stat, scsi_stat,
					residue_len);
		}
	}
}

/*
 * Translate a non-zero EATA hba_stat into the corresponding CAM status,
 * then free the CCB and complete it back to CAM.
 */
static void
dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb, union ccb *ccb,
		u_int hba_stat, u_int scsi_stat, u_int32_t resid)
{
	ccb->csio.resid = resid;
	switch (hba_stat) {
	case HA_ERR_SEL_TO:
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		break;
	case HA_ERR_CMD_TO:
		ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		break;
	case HA_SCSIBUS_RESET:
	case HA_HBA_POWER_UP:	/* Similar effect to a bus reset??? */
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case HA_CP_ABORTED:
	case HA_CP_RESET:	/* XXX ??? */
	case HA_CP_ABORT_NA:	/* XXX ??? */
	case HA_CP_RESET_NA:	/* XXX ???
*/
		/* Only override a status nobody has set yet. */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	case HA_PCI_PARITY:
	case HA_PCI_MABORT:
	case HA_PCI_TABORT:
	case HA_PCI_STABORT:
	case HA_BUS_PARITY:
	case HA_PARITY_ERR:
	case HA_ECC_ERR:
		ccb->ccb_h.status = CAM_UNCOR_PARITY;
		break;
	case HA_UNX_MSGRJCT:
		ccb->ccb_h.status = CAM_MSG_REJECT_REC;
		break;
	case HA_UNX_BUSPHASE:
		ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
		break;
	case HA_UNX_BUS_FREE:
		ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
		break;
	case HA_SCSI_HUNG:
	case HA_RESET_STUCK:
		/*
		 * Dead???  Can the controller get unstuck
		 * from these conditions
		 */
		ccb->ccb_h.status = CAM_NO_HBA;
		break;
	case HA_RSENSE_FAIL:
		ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
		break;
	default:
		kprintf("dpt%d: Undocumented Error %x\n", dpt->unit, hba_stat);
		kprintf("Please mail this message to shimon@simon-shapiro.org\n");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}
	dptfreeccb(dpt, dccb);
	xpt_done(ccb);
}

/*
 * Per-command timeout handler (scheduled when the command was started).
 * First drains any completions the HBA has pending (a lost interrupt may
 * have left this command already done); otherwise aborts just this CCB.
 */
static void
dpttimeout(void *arg)
{
	struct dpt_ccb	 *dccb;
	union  ccb	 *ccb;
	struct dpt_softc *dpt;

	dccb = (struct dpt_ccb *)arg;
	ccb = dccb->ccb;
	dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
	xpt_print_path(ccb->ccb_h.path);
	kprintf("CCB %p - timed out\n", (void *)dccb);

	crit_enter();

	/*
	 * Try to clear any pending jobs.  FreeBSD will lose interrupts,
	 * leaving the controller suspended, and commands timed-out.
	 * By calling the interrupt handler, any command thus stuck will be
	 * completed.
	 */
	dpt_intr(dpt);

	if ((dccb->state & DCCB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		kprintf("CCB %p - timed out CCB already completed\n",
		       (void *)dccb);
		crit_exit();
		return;
	}

	/* Abort this particular command.  Leave all others running */
	dpt_send_immediate(dpt, &dccb->eata_ccb, dccb->eata_ccb.cp_busaddr,
			   /*retries*/20000, EATA_SPECIFIC_ABORT, 0, 0);
	ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	crit_exit();
}

/*
 * Shutdown the controller and ensure that the cache is completely flushed.
 * Called from the shutdown_final event after all disk access has completed.
 */
static void
dptshutdown(void *arg, int howto)
{
	dpt_softc_t *dpt;

	dpt = (dpt_softc_t *)arg;

	kprintf("dpt%d: Shutting down (mode %x) HBA. Please wait...\n",
	       dpt->unit, howto);

	/*
	 * What we do for a shutdown, is give the DPT early power loss warning
	 */
	dpt_send_immediate(dpt, NULL, 0, EATA_POWER_OFF_WARN, 0, 0, 0);
	/* Give the HBA five seconds to flush its cache. */
	DELAY(1000 * 1000 * 5);
	kprintf("dpt%d: Controller was warned of shutdown and is now "
	       "disabled\n", dpt->unit);
}

/*============================================================================*/

#if 0
#ifdef DPT_RESET_HBA

/*
**	Function name : dpt_reset_hba
**
**	Description : Reset the HBA and properly discard all pending work
**	Input : Softc
**	Output : Nothing
**
**	NOTE(review): this block is compiled out (#if 0) and has bit-rotted:
**	the dpt_send_eata_command() calls below pass 7 arguments while the
**	live prototype takes 8 (cmd_busaddr is missing) — fix before reviving.
*/
static void
dpt_reset_hba(dpt_softc_t *dpt)
{
	eata_ccb_t     *ccb;
	dpt_ccb_t	dccb, *dccbp;
	int		result;
	struct scsi_xfer *xs;

	/* Prepare a control block.  The SCSI command part is immaterial */
	dccb.xs = NULL;
	dccb.flags = 0;
	dccb.state = DPT_CCB_STATE_NEW;
	dccb.std_callback = NULL;
	dccb.wrbuff_callback = NULL;

	ccb = &dccb.eata_ccb;
	ccb->CP_OpCode = EATA_CMD_RESET;
	ccb->SCSI_Reset = 0;
	ccb->HBA_Init = 1;
	ccb->Auto_Req_Sen = 1;
	ccb->cp_id = 0;		/* Should be ignored */
	ccb->DataIn = 1;
	ccb->DataOut = 0;
	ccb->Interpret = 1;
	ccb->reqlen = htonl(sizeof(struct scsi_sense_data));
	ccb->cp_statDMA = htonl(vtophys(&ccb->cp_statDMA));
	ccb->cp_reqDMA = htonl(vtophys(&ccb->cp_reqDMA));
	ccb->cp_viraddr = (u_int32_t) & ccb;

	ccb->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
	ccb->cp_scsi_cmd = 0;	/* Should be ignored */

	/* Lock up the submitted queue.  We are very persistent here */
	crit_enter();
	while (dpt->queue_status & DPT_SUBMITTED_QUEUE_ACTIVE) {
		DELAY(100);
	}

	dpt->queue_status |= DPT_SUBMITTED_QUEUE_ACTIVE;
	crit_exit();

	/* Send the RESET message */
	if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
					    EATA_CMD_RESET, 0, 0, 0, 0)) != 0) {
		kprintf("dpt%d: Failed to send the RESET message.\n"
		       " Trying cold boot (ouch!)\n", dpt->unit);


		if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
						    EATA_COLD_BOOT, 0, 0,
						    0, 0)) != 0) {
			panic("dpt%d: Faild to cold boot the HBA\n",
			      dpt->unit);
		}
#ifdef DPT_MEASURE_PERFORMANCE
		dpt->performance.cold_boots++;
#endif /* DPT_MEASURE_PERFORMANCE */
	}

#ifdef DPT_MEASURE_PERFORMANCE
	dpt->performance.warm_starts++;
#endif /* DPT_MEASURE_PERFORMANCE */

	kprintf("dpt%d: Aborting pending requests. O/S should re-submit\n",
	       dpt->unit);

	while ((dccbp = TAILQ_FIRST(&dpt->completed_ccbs)) != NULL) {
		struct scsi_xfer *xs = dccbp->xs;

		/* Not all transactions have xs structs */
		if (xs != NULL) {
			/* Tell the kernel proper this did not complete well */
			xs->error |= XS_SELTIMEOUT;
			xs->flags |= SCSI_ITSDONE;
			scsi_done(xs);
		}

		dpt_Qremove_submitted(dpt, dccbp);

		/* Remember, Callbacks are NOT in the standard queue */
		if (dccbp->std_callback != NULL) {
			(dccbp->std_callback)(dpt, dccbp->eata_ccb.cp_channel,
					      dccbp);
		} else {
			crit_enter();
			dpt_Qpush_free(dpt, dccbp);
			crit_exit();
		}
	}

	kprintf("dpt%d: reset done aborting all pending commands\n", dpt->unit);
	dpt->queue_status &= ~DPT_SUBMITTED_QUEUE_ACTIVE;
}

#endif /* DPT_RESET_HBA */

/*
 * Build a Command Block for target mode READ/WRITE BUFFER,
 * with the ``sync'' bit ON.
 *
 * Although the length and offset are 24 bit fields in the command, they cannot
 * exceed 8192 bytes, so we take them as short integers and check their range.
 * If they are senseless, we round them to zero offset, maximum length and
 * complain.
1875 */ 1876 1877 static void 1878 dpt_target_ccb(dpt_softc_t * dpt, int bus, u_int8_t target, u_int8_t lun, 1879 dpt_ccb_t * ccb, int mode, u_int8_t command, 1880 u_int16_t length, u_int16_t offset) 1881 { 1882 eata_ccb_t *cp; 1883 1884 if ((length + offset) > DPT_MAX_TARGET_MODE_BUFFER_SIZE) { 1885 kprintf("dpt%d: Length of %d, and offset of %d are wrong\n", 1886 dpt->unit, length, offset); 1887 length = DPT_MAX_TARGET_MODE_BUFFER_SIZE; 1888 offset = 0; 1889 } 1890 ccb->xs = NULL; 1891 ccb->flags = 0; 1892 ccb->state = DPT_CCB_STATE_NEW; 1893 ccb->std_callback = (ccb_callback) dpt_target_done; 1894 ccb->wrbuff_callback = NULL; 1895 1896 cp = &ccb->eata_ccb; 1897 cp->CP_OpCode = EATA_CMD_DMA_SEND_CP; 1898 cp->SCSI_Reset = 0; 1899 cp->HBA_Init = 0; 1900 cp->Auto_Req_Sen = 1; 1901 cp->cp_id = target; 1902 cp->DataIn = 1; 1903 cp->DataOut = 0; 1904 cp->Interpret = 0; 1905 cp->reqlen = htonl(sizeof(struct scsi_sense_data)); 1906 cp->cp_statDMA = htonl(vtophys(&cp->cp_statDMA)); 1907 cp->cp_reqDMA = htonl(vtophys(&cp->cp_reqDMA)); 1908 cp->cp_viraddr = (u_int32_t) & ccb; 1909 1910 cp->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO; 1911 1912 cp->cp_scsi_cmd = command; 1913 cp->cp_cdb[1] = (u_int8_t) (mode & SCSI_TM_MODE_MASK); 1914 cp->cp_lun = lun; /* Order is important here! */ 1915 cp->cp_cdb[2] = 0x00; /* Buffer Id, only 1 :-( */ 1916 cp->cp_cdb[3] = (length >> 16) & 0xFF; /* Buffer offset MSB */ 1917 cp->cp_cdb[4] = (length >> 8) & 0xFF; 1918 cp->cp_cdb[5] = length & 0xFF; 1919 cp->cp_cdb[6] = (length >> 16) & 0xFF; /* Length MSB */ 1920 cp->cp_cdb[7] = (length >> 8) & 0xFF; 1921 cp->cp_cdb[8] = length & 0xFF; /* Length LSB */ 1922 cp->cp_cdb[9] = 0; /* No sync, no match bits */ 1923 1924 /* 1925 * This could be optimized to live in dpt_register_buffer. 
1926 * We keep it here, just in case the kernel decides to reallocate pages 1927 */ 1928 if (dpt_scatter_gather(dpt, ccb, DPT_RW_BUFFER_SIZE, 1929 dpt->rw_buffer[bus][target][lun])) { 1930 kprintf("dpt%d: Failed to setup Scatter/Gather for " 1931 "Target-Mode buffer\n", dpt->unit); 1932 } 1933 } 1934 1935 /* Setup a target mode READ command */ 1936 1937 static void 1938 dpt_set_target(int redo, dpt_softc_t * dpt, 1939 u_int8_t bus, u_int8_t target, u_int8_t lun, int mode, 1940 u_int16_t length, u_int16_t offset, dpt_ccb_t * ccb) 1941 { 1942 if (dpt->target_mode_enabled) { 1943 crit_enter(); 1944 1945 if (!redo) 1946 dpt_target_ccb(dpt, bus, target, lun, ccb, mode, 1947 SCSI_TM_READ_BUFFER, length, offset); 1948 1949 ccb->transaction_id = ++dpt->commands_processed; 1950 1951 #ifdef DPT_MEASURE_PERFORMANCE 1952 dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++; 1953 ccb->command_started = microtime_now; 1954 #endif 1955 dpt_Qadd_waiting(dpt, ccb); 1956 dpt_sched_queue(dpt); 1957 1958 crit_exit(); 1959 } else { 1960 kprintf("dpt%d: Target Mode Request, but Target Mode is OFF\n", 1961 dpt->unit); 1962 } 1963 } 1964 1965 /* 1966 * Schedule a buffer to be sent to another target. 1967 * The work will be scheduled and the callback provided will be called when 1968 * the work is actually done. 1969 * 1970 * Please NOTE: ``Anyone'' can send a buffer, but only registered clients 1971 * get notified of receipt of buffers. 1972 */ 1973 1974 int 1975 dpt_send_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun, 1976 u_int8_t mode, u_int16_t length, u_int16_t offset, void *data, 1977 buff_wr_done callback) 1978 { 1979 dpt_softc_t *dpt; 1980 dpt_ccb_t *ccb = NULL; 1981 1982 /* This is an external call. 
Be a bit paranoid */
	for (dpt = TAILQ_FIRST(&dpt_softc_list);
	     dpt != NULL;
	     dpt = TAILQ_NEXT(dpt, links)) {
		/*
		 * NOTE(review): the list head defined at the top of this file
		 * is named `dpt_softcs'; `&dpt_softc_list' here looks
		 * inconsistent — confirm which identifier actually exists.
		 */
		if (dpt->unit == unit)
			goto valid_unit;
	}

	return (INVALID_UNIT);

valid_unit:

	if (dpt->target_mode_enabled) {
		if ((channel >= dpt->channels) || (target > dpt->max_id) ||
		    (lun > dpt->max_lun)) {
			return (INVALID_SENDER);
		}
		if ((dpt->rw_buffer[channel][target][lun] == NULL) ||
		    (dpt->buffer_receiver[channel][target][lun] == NULL))
			return (NOT_REGISTERED);

		crit_enter();
		/* Process the free list */
		if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
			kprintf("dpt%d ERROR: Cannot allocate any more free CCB's.\n"
			       " Please try later\n",
			       dpt->unit);
			crit_exit();
			return (NO_RESOURCES);
		}
		/* Now grab the newest CCB */
		if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
			crit_exit();
			panic("dpt%d: Got a NULL CCB from pop_free()\n", dpt->unit);
		}
		crit_exit();

		/*
		 * NOTE(review): bcopy(src, dst, len) — as written this copies
		 * FROM the registered rw_buffer INTO the caller's `data',
		 * while "sending a buffer" would be expected to copy the
		 * caller's data into rw_buffer.  Confirm intended direction.
		 */
		bcopy(dpt->rw_buffer[channel][target][lun] + offset, data, length);
		dpt_target_ccb(dpt, channel, target, lun, ccb, mode,
			       SCSI_TM_WRITE_BUFFER,
			       length, offset);
		ccb->std_callback = (ccb_callback) callback; /* Potential trouble */

		crit_enter();
		ccb->transaction_id = ++dpt->commands_processed;

#ifdef DPT_MEASURE_PERFORMANCE
		dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
		ccb->command_started = microtime_now;
#endif
		dpt_Qadd_waiting(dpt, ccb);
		dpt_sched_queue(dpt);

		crit_exit();
		return (0);
	}
	return (DRIVER_DOWN);
}

/*
 * Completion handler for the persistent target-mode CCBs built by
 * dpt_target_ccb().  Dispatches READ BUFFER completions to the registered
 * receiver callback (or wakes a sleeping user command), and WRITE BUFFER
 * completions to the per-ccb wrbuff_callback.
 */
static void
dpt_target_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
{
	eata_ccb_t *cp;

	cp = &ccb->eata_ccb;

	/*
	 * Remove the CCB from the waiting queue.
	 * We do NOT put it back on the free, etc., queues as it is a special
	 * ccb, owned by the dpt_softc of this unit.
	 */
	crit_enter();
	dpt_Qremove_completed(dpt, ccb);
	crit_exit();

/* Shorthand accessors for the nexus and CDB-encoded offset/length fields. */
#define br_channel           (ccb->eata_ccb.cp_channel)
#define br_target            (ccb->eata_ccb.cp_id)
#define br_lun               (ccb->eata_ccb.cp_LUN)
#define br_index	     [br_channel][br_target][br_lun]
#define read_buffer_callback (dpt->buffer_receiver br_index )
#define read_buffer	     (dpt->rw_buffer[br_channel][br_target][br_lun])
#define cb(offset)           (ccb->eata_ccb.cp_cdb[offset])
#define br_offset            ((cb(3) << 16) | (cb(4) << 8) | cb(5))
#define br_length            ((cb(6) << 16) | (cb(7) << 8) | cb(8))

	/* Different reasons for being here, you know... */
	switch (ccb->eata_ccb.cp_scsi_cmd) {
	case SCSI_TM_READ_BUFFER:
		if (read_buffer_callback != NULL) {
			/* This is a buffer generated by a kernel process */
			read_buffer_callback(dpt->unit, br_channel,
					     br_target, br_lun,
					     read_buffer,
					     br_offset, br_length);
		} else {
			/*
			 * This is a buffer waited for by a user (sleeping)
			 * command
			 */
			wakeup(ccb);
		}

		/* We ALWAYS re-issue the same command; args are don't-care */
		dpt_set_target(1, 0, 0, 0, 0, 0, 0, 0, 0);
		break;

	case SCSI_TM_WRITE_BUFFER:
		(ccb->wrbuff_callback) (dpt->unit, br_channel, br_target,
					br_offset, br_length,
					br_lun, ccb->status_packet.hba_stat);
		break;
	default:
		kprintf("dpt%d: %s is an unsupported command for target mode\n",
		       dpt->unit, scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd));
	}
	crit_enter();
	dpt->target_ccb[br_channel][br_target][br_lun] = NULL;
	dpt_Qpush_free(dpt, ccb);
	crit_exit();
}


/*
 * Use this function to register a client for a buffer read target operation.
* The function you register will be called every time a buffer is received
 * by the target mode code.
 *
 * op == REGISTER_BUFFER installs `callback' for the nexus, allocates the
 * receive buffer and kicks off the persistent READ BUFFER command;
 * otherwise the nexus is deregistered and its buffer freed.
 */
dpt_rb_t
dpt_register_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
		    u_int8_t mode, u_int16_t length, u_int16_t offset,
		    dpt_rec_buff callback, dpt_rb_op_t op)
{
	dpt_softc_t *dpt;
	dpt_ccb_t *ccb = NULL;

	/*
	 * NOTE(review): see dpt_send_buffer() — `&dpt_softc_list' vs the
	 * head object `dpt_softcs' declared at the top of this file.
	 */
	for (dpt = TAILQ_FIRST(&dpt_softc_list);
	     dpt != NULL;
	     dpt = TAILQ_NEXT(dpt, links)) {
		if (dpt->unit == unit)
			goto valid_unit;
	}

	return (INVALID_UNIT);

valid_unit:

	if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE)
		return (DRIVER_DOWN);

	if ((channel > (dpt->channels - 1)) || (target > (dpt->max_id - 1)) ||
	    (lun > (dpt->max_lun - 1)))
		return (INVALID_SENDER);

	if (dpt->buffer_receiver[channel][target][lun] == NULL) {
		if (op == REGISTER_BUFFER) {
			/* Assign the requested callback */
			dpt->buffer_receiver[channel][target][lun] = callback;
			/* Get a CCB */
			crit_enter();

			/* Process the free list */
			if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
				kprintf("dpt%d ERROR: Cannot allocate any more free CCB's.\n"
				       " Please try later\n",
				       dpt->unit);
				crit_exit();
				return (NO_RESOURCES);
			}
			/* Now grab the newest CCB */
			if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
				crit_exit();
				panic("dpt%d: Got a NULL CCB from pop_free()\n",
				      dpt->unit);
			}
			crit_exit();

			/* Clean up the leftover of the previous tenant */
			ccb->status = DPT_CCB_STATE_NEW;
			dpt->target_ccb[channel][target][lun] = ccb;

			dpt->rw_buffer[channel][target][lun] =
				kmalloc(DPT_RW_BUFFER_SIZE, M_DEVBUF, M_INTWAIT);
			dpt_set_target(0, dpt, channel, target, lun, mode,
				       length, offset, ccb);
			return (SUCCESSFULLY_REGISTERED);
		} else
			return (NOT_REGISTERED);
	} else {
		if (op == REGISTER_BUFFER) {
			if (dpt->buffer_receiver[channel][target][lun] == callback)
				return (ALREADY_REGISTERED);
			else
				return (REGISTERED_TO_ANOTHER);
		} else {
			if (dpt->buffer_receiver[channel][target][lun] == callback) {
				dpt->buffer_receiver[channel][target][lun] = NULL;
				crit_enter();
				/*
				 * NOTE(review): `ccb' is still NULL on this
				 * path (it is only assigned in the register
				 * branch); the CCB that should be released
				 * is dpt->target_ccb[channel][target][lun].
				 * Confirm dpt_Qpush_free() tolerates NULL.
				 */
				dpt_Qpush_free(dpt, ccb);
				crit_exit();
				kfree(dpt->rw_buffer[channel][target][lun], M_DEVBUF);
				return (SUCCESSFULLY_REGISTERED);
			} else
				return (INVALID_CALLBACK);
		}

	}
}

/* Return the state of the blinking DPT LED's */
u_int8_t
dpt_blinking_led(dpt_softc_t * dpt)
{
	int	  ndx;
	u_int32_t state;
	u_int32_t previous;
	u_int8_t  result;

	crit_enter();

	result = 0;

	/*
	 * Intended to re-read the register until two consecutive reads agree
	 * (at most 10 tries).  NOTE(review): since state and previous both
	 * start at 0, the condition `state != previous' is false on entry and
	 * the loop body never executes, so `state' stays 0 — confirm whether
	 * the guard was meant to be a do/while or `previous' seeded to ~0.
	 */
	for (ndx = 0, state = 0, previous = 0;
	     (ndx < 10) && (state != previous);
	     ndx++) {
		previous = state;
		state = dpt_inl(dpt, 1);
	}

	if ((state == previous) && (state == DPT_BLINK_INDICATOR))
		result = dpt_inb(dpt, 5);

	crit_exit();
	return (result);
}

/*
 * Execute a command which did not come from the kernel's SCSI layer.
2219 * The only way to map user commands to bus and target is to comply with the 2220 * standard DPT wire-down scheme: 2221 */ 2222 int 2223 dpt_user_cmd(dpt_softc_t * dpt, eata_pt_t * user_cmd, 2224 caddr_t cmdarg, int minor_no) 2225 { 2226 dpt_ccb_t *ccb; 2227 void *data; 2228 int channel, target, lun; 2229 int huh; 2230 int result; 2231 int submitted; 2232 size_t contigsize = 0; 2233 2234 data = NULL; 2235 channel = minor2hba(minor_no); 2236 target = minor2target(minor_no); 2237 lun = minor2lun(minor_no); 2238 2239 if ((channel > (dpt->channels - 1)) 2240 || (target > dpt->max_id) 2241 || (lun > dpt->max_lun)) 2242 return (ENXIO); 2243 2244 if (target == dpt->sc_scsi_link[channel].adapter_targ) { 2245 /* This one is for the controller itself */ 2246 if ((user_cmd->eataID[0] != 'E') 2247 || (user_cmd->eataID[1] != 'A') 2248 || (user_cmd->eataID[2] != 'T') 2249 || (user_cmd->eataID[3] != 'A')) { 2250 return (ENXIO); 2251 } 2252 } 2253 /* Get a DPT CCB, so we can prepare a command */ 2254 crit_enter(); 2255 2256 /* Process the free list */ 2257 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) { 2258 kprintf("dpt%d ERROR: Cannot allocate any more free CCB's.\n" 2259 " Please try later\n", 2260 dpt->unit); 2261 crit_exit(); 2262 return (EFAULT); 2263 } 2264 /* Now grab the newest CCB */ 2265 if ((ccb = dpt_Qpop_free(dpt)) == NULL) { 2266 crit_exit(); 2267 panic("dpt%d: Got a NULL CCB from pop_free()\n", dpt->unit); 2268 } else { 2269 crit_exit(); 2270 /* Clean up the leftover of the previous tenant */ 2271 ccb->status = DPT_CCB_STATE_NEW; 2272 } 2273 2274 bcopy((caddr_t) & user_cmd->command_packet, (caddr_t) & ccb->eata_ccb, 2275 sizeof(eata_ccb_t)); 2276 2277 /* We do not want to do user specified scatter/gather. Why?? 
*/ 2278 if (ccb->eata_ccb.scatter == 1) 2279 return (EINVAL); 2280 2281 ccb->eata_ccb.Auto_Req_Sen = 1; 2282 ccb->eata_ccb.reqlen = htonl(sizeof(struct scsi_sense_data)); 2283 ccb->eata_ccb.cp_datalen = htonl(sizeof(ccb->eata_ccb.cp_datalen)); 2284 ccb->eata_ccb.cp_dataDMA = htonl(vtophys(ccb->eata_ccb.cp_dataDMA)); 2285 ccb->eata_ccb.cp_statDMA = htonl(vtophys(&ccb->eata_ccb.cp_statDMA)); 2286 ccb->eata_ccb.cp_reqDMA = htonl(vtophys(&ccb->eata_ccb.cp_reqDMA)); 2287 ccb->eata_ccb.cp_viraddr = (u_int32_t) & ccb; 2288 2289 if (ccb->eata_ccb.DataIn || ccb->eata_ccb.DataOut) { 2290 /* Data I/O is involved in this command. Alocate buffer */ 2291 if (ccb->eata_ccb.cp_datalen > PAGE_SIZE) { 2292 contigsize = ccb->eata_ccb.cp_datalen; 2293 data = contigmalloc(ccb->eata_ccb.cp_datalen, 2294 M_TEMP, M_WAITOK, 0, ~0, 2295 ccb->eata_ccb.cp_datalen, 2296 0x10000); 2297 } else { 2298 data = kmalloc(ccb->eata_ccb.cp_datalen, M_TEMP, 2299 M_WAITOK); 2300 } 2301 2302 if (data == NULL) { 2303 kprintf("dpt%d: Cannot allocate %d bytes " 2304 "for EATA command\n", dpt->unit, 2305 ccb->eata_ccb.cp_datalen); 2306 return (EFAULT); 2307 } 2308 #define usr_cmd_DMA (caddr_t)user_cmd->command_packet.cp_dataDMA 2309 if (ccb->eata_ccb.DataIn == 1) { 2310 if (copyin(usr_cmd_DMA, 2311 data, ccb->eata_ccb.cp_datalen) == -1) 2312 return (EFAULT); 2313 } 2314 } else { 2315 /* No data I/O involved here. Make sure the DPT knows that */ 2316 ccb->eata_ccb.cp_datalen = 0; 2317 data = NULL; 2318 } 2319 2320 if (ccb->eata_ccb.FWNEST == 1) 2321 ccb->eata_ccb.FWNEST = 0; 2322 2323 if (ccb->eata_ccb.cp_datalen != 0) { 2324 if (dpt_scatter_gather(dpt, ccb, ccb->eata_ccb.cp_datalen, 2325 data) != 0) { 2326 if (data != NULL) { 2327 if (contigsize) 2328 contigfree(data, contigsize, M_TEMP); 2329 else 2330 kfree(data, M_TEMP); 2331 } 2332 return (EFAULT); 2333 } 2334 } 2335 /** 2336 * We are required to quiet a SCSI bus. 
2337 * since we do not queue comands on a bus basis, 2338 * we wait for ALL commands on a controller to complete. 2339 * In the mean time, sched_queue() will not schedule new commands. 2340 */ 2341 if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD) 2342 && (ccb->eata_ccb.cp_cdb[2] == BUS_QUIET)) { 2343 /* We wait for ALL traffic for this HBa to subside */ 2344 crit_enter(); 2345 dpt->state |= DPT_HA_QUIET; 2346 crit_exit(); 2347 2348 while ((submitted = dpt->submitted_ccbs_count) != 0) { 2349 huh = tsleep((void *) dpt, PCATCH, "dptqt", 100 * hz); 2350 switch (huh) { 2351 case 0: 2352 /* Wakeup call received */ 2353 break; 2354 case EWOULDBLOCK: 2355 /* Timer Expired */ 2356 break; 2357 default: 2358 /* anything else */ 2359 break; 2360 } 2361 } 2362 } 2363 /* Resume normal operation */ 2364 if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD) 2365 && (ccb->eata_ccb.cp_cdb[2] == BUS_UNQUIET)) { 2366 crit_enter(); 2367 dpt->state &= ~DPT_HA_QUIET; 2368 crit_exit(); 2369 } 2370 /** 2371 * Schedule the command and submit it. 
2372 * We bypass dpt_sched_queue, as it will block on DPT_HA_QUIET 2373 */ 2374 ccb->xs = NULL; 2375 ccb->flags = 0; 2376 ccb->eata_ccb.Auto_Req_Sen = 1; /* We always want this feature */ 2377 2378 ccb->transaction_id = ++dpt->commands_processed; 2379 ccb->std_callback = (ccb_callback) dpt_user_cmd_done; 2380 ccb->result = (u_int32_t) & cmdarg; 2381 ccb->data = data; 2382 2383 #ifdef DPT_MEASURE_PERFORMANCE 2384 ++dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]; 2385 ccb->command_started = microtime_now; 2386 #endif 2387 crit_enter(); 2388 dpt_Qadd_waiting(dpt, ccb); 2389 crit_exit(); 2390 2391 dpt_sched_queue(dpt); 2392 2393 /* Wait for the command to complete */ 2394 (void) tsleep((void *) ccb, PCATCH, "dptucw", 100 * hz); 2395 2396 /* Free allocated memory */ 2397 if (data != NULL) { 2398 if (contigsize) 2399 contigfree(data, contigsize, M_TEMP); 2400 else 2401 kfree(data, M_TEMP); 2402 } 2403 2404 return (0); 2405 } 2406 2407 static void 2408 dpt_user_cmd_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb) 2409 { 2410 u_int32_t result; 2411 caddr_t cmd_arg; 2412 2413 crit_enter(); 2414 2415 /** 2416 * If Auto Request Sense is on, copyout the sense struct 2417 */ 2418 #define usr_pckt_DMA (caddr_t)(intptr_t)ntohl(ccb->eata_ccb.cp_reqDMA) 2419 #define usr_pckt_len ntohl(ccb->eata_ccb.cp_datalen) 2420 if (ccb->eata_ccb.Auto_Req_Sen == 1) { 2421 if (copyout((caddr_t) & ccb->sense_data, usr_pckt_DMA, 2422 sizeof(struct scsi_sense_data))) { 2423 ccb->result = EFAULT; 2424 dpt_Qpush_free(dpt, ccb); 2425 crit_exit(); 2426 wakeup(ccb); 2427 return; 2428 } 2429 } 2430 /* If DataIn is on, copyout the data */ 2431 if ((ccb->eata_ccb.DataIn == 1) 2432 && (ccb->status_packet.hba_stat == HA_NO_ERROR)) { 2433 if (copyout(ccb->data, usr_pckt_DMA, usr_pckt_len)) { 2434 dpt_Qpush_free(dpt, ccb); 2435 ccb->result = EFAULT; 2436 2437 crit_exit(); 2438 wakeup(ccb); 2439 return; 2440 } 2441 } 2442 /* Copyout the status */ 2443 result = ccb->status_packet.hba_stat; 2444 
cmd_arg = (caddr_t) ccb->result; 2445 2446 if (copyout((caddr_t) & result, cmd_arg, sizeof(result))) { 2447 dpt_Qpush_free(dpt, ccb); 2448 ccb->result = EFAULT; 2449 crit_exit(); 2450 wakeup(ccb); 2451 return; 2452 } 2453 /* Put the CCB back in the freelist */ 2454 ccb->state |= DPT_CCB_STATE_COMPLETED; 2455 dpt_Qpush_free(dpt, ccb); 2456 2457 /* Free allocated memory */ 2458 crit_exit(); 2459 return; 2460 } 2461 2462 #ifdef DPT_HANDLE_TIMEOUTS 2463 /** 2464 * This function walks down the SUBMITTED queue. 2465 * Every request that is too old gets aborted and marked. 2466 * Since the DPT will complete (interrupt) immediately (what does that mean?), 2467 * We just walk the list, aborting old commands and marking them as such. 2468 * The dpt_complete function will get rid of the that were interrupted in the 2469 * normal manner. 2470 * 2471 * This function needs to run at splcam(), as it interacts with the submitted 2472 * queue, as well as the completed and free queues. Just like dpt_intr() does. 2473 * To run it at any ISPL other than that of dpt_intr(), will mean that dpt_intr 2474 * willbe able to pre-empt it, grab a transaction in progress (towards 2475 * destruction) and operate on it. The state of this transaction will be not 2476 * very clear. 2477 * The only other option, is to lock it only as long as necessary but have 2478 * dpt_intr() spin-wait on it. In a UP environment this makes no sense and in 2479 * a SMP environment, the advantage is dubvious for a function that runs once 2480 * every ten seconds for few microseconds and, on systems with healthy 2481 * hardware, does not do anything anyway. 
2482 */ 2483 2484 static void 2485 dpt_handle_timeouts(dpt_softc_t * dpt) 2486 { 2487 dpt_ccb_t *ccb; 2488 2489 crit_enter(); 2490 2491 if (dpt->state & DPT_HA_TIMEOUTS_ACTIVE) { 2492 kprintf("dpt%d WARNING: Timeout Handling Collision\n", 2493 dpt->unit); 2494 crit_exit(); 2495 return; 2496 } 2497 dpt->state |= DPT_HA_TIMEOUTS_ACTIVE; 2498 2499 /* Loop through the entire submitted queue, looking for lost souls */ 2500 for (ccb = TAILQ_FIRST(&dpt->submitted_ccbs); 2501 ccb != NULL; 2502 ccb = TAILQ_NEXT(ccb, links)) { 2503 struct scsi_xfer *xs; 2504 u_int32_t age, max_age; 2505 2506 xs = ccb->xs; 2507 age = dpt_time_delta(ccb->command_started, microtime_now); 2508 2509 #define TenSec 10000000 2510 2511 if (xs == NULL) { /* Local, non-kernel call */ 2512 max_age = TenSec; 2513 } else { 2514 max_age = (((xs->timeout * (dpt->submitted_ccbs_count 2515 + DPT_TIMEOUT_FACTOR)) 2516 > TenSec) 2517 ? (xs->timeout * (dpt->submitted_ccbs_count 2518 + DPT_TIMEOUT_FACTOR)) 2519 : TenSec); 2520 } 2521 2522 /* 2523 * If a transaction is marked lost and is TWICE as old as we 2524 * care, then, and only then do we destroy it! 
2525 */ 2526 if (ccb->state & DPT_CCB_STATE_MARKED_LOST) { 2527 /* Remember who is next */ 2528 if (age > (max_age * 2)) { 2529 dpt_Qremove_submitted(dpt, ccb); 2530 ccb->state &= ~DPT_CCB_STATE_MARKED_LOST; 2531 ccb->state |= DPT_CCB_STATE_ABORTED; 2532 #define cmd_name scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd) 2533 if (ccb->retries++ > DPT_RETRIES) { 2534 kprintf("dpt%d ERROR: Destroying stale " 2535 "%d (%s)\n" 2536 " on " 2537 "c%db%dt%du%d (%d/%d)\n", 2538 dpt->unit, ccb->transaction_id, 2539 cmd_name, 2540 dpt->unit, 2541 ccb->eata_ccb.cp_channel, 2542 ccb->eata_ccb.cp_id, 2543 ccb->eata_ccb.cp_LUN, age, 2544 ccb->retries); 2545 #define send_ccb &ccb->eata_ccb 2546 #define ESA EATA_SPECIFIC_ABORT 2547 (void) dpt_send_immediate(dpt, 2548 send_ccb, 2549 ESA, 2550 0, 0); 2551 dpt_Qpush_free(dpt, ccb); 2552 2553 /* The SCSI layer should re-try */ 2554 xs->error |= XS_TIMEOUT; 2555 xs->flags |= SCSI_ITSDONE; 2556 scsi_done(xs); 2557 } else { 2558 kprintf("dpt%d ERROR: Stale %d (%s) on " 2559 "c%db%dt%du%d (%d)\n" 2560 " gets another " 2561 "chance(%d/%d)\n", 2562 dpt->unit, ccb->transaction_id, 2563 cmd_name, 2564 dpt->unit, 2565 ccb->eata_ccb.cp_channel, 2566 ccb->eata_ccb.cp_id, 2567 ccb->eata_ccb.cp_LUN, 2568 age, ccb->retries, DPT_RETRIES); 2569 2570 dpt_Qpush_waiting(dpt, ccb); 2571 dpt_sched_queue(dpt); 2572 } 2573 } 2574 } else { 2575 /* 2576 * This is a transaction that is not to be destroyed 2577 * (yet) But it is too old for our liking. We wait as 2578 * long as the upper layer thinks. Not really, we 2579 * multiply that by the number of commands in the 2580 * submitted queue + 1. 
2581 */ 2582 if (!(ccb->state & DPT_CCB_STATE_MARKED_LOST) && 2583 (age != ~0) && (age > max_age)) { 2584 kprintf("dpt%d ERROR: Marking %d (%s) on " 2585 "c%db%dt%du%d \n" 2586 " as late after %dusec\n", 2587 dpt->unit, ccb->transaction_id, 2588 cmd_name, 2589 dpt->unit, ccb->eata_ccb.cp_channel, 2590 ccb->eata_ccb.cp_id, 2591 ccb->eata_ccb.cp_LUN, age); 2592 ccb->state |= DPT_CCB_STATE_MARKED_LOST; 2593 } 2594 } 2595 } 2596 2597 dpt->state &= ~DPT_HA_TIMEOUTS_ACTIVE; 2598 crit_exit(); 2599 } 2600 2601 #endif /* DPT_HANDLE_TIMEOUTS */ 2602 2603 #endif 2604