1 /* 2 * Copyright (c) 1997 by Simon Shapiro 3 * All Rights Reserved 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 /* 31 * dpt_scsi.c: SCSI dependant code for the DPT driver 32 * 33 * credits: Assisted by Mike Neuffer in the early low level DPT code 34 * Thanx to Mark Salyzyn of DPT for his assistance. 35 * Special thanx to Justin Gibbs for invaluable help in 36 * making this driver look and work like a FreeBSD component. 
 * Last but not least, many thanx to UCB and the FreeBSD
 * team for creating and maintaining such a wonderful O/S.
 *
 * TODO:     * Add ISA probe code.
 *	     * Add driver-level RAID-0. This will allow interoperability with
 *	       NiceTry, M$-Doze, Win-Dog, Slowlaris, etc., in recognizing RAID
 *	       arrays that span controllers (Wow!).
 */

#ident "$FreeBSD: src/sys/dev/dpt/dpt_scsi.c,v 1.28.2.3 2003/01/31 02:47:10 grog Exp $"
#ident "$DragonFly: src/sys/dev/raid/dpt/dpt_scsi.c,v 1.16 2007/12/23 07:00:57 pavalos Exp $"

#define _DPT_C_

#include "opt_dpt.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>
#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include "dpt.h"

/* dpt_isa.c, dpt_eisa.c, and dpt_pci.c need this in a central place */
int dpt_controllers_present;

u_long dpt_unit;	/* Next unit number to use */

/* The linked list of softc structures */
struct dpt_softc_list dpt_softcs = TAILQ_HEAD_INITIALIZER(dpt_softcs);

/* Timestamp source for the HBA wedge detector (used under DPT_RESET_HBA) */
#define microtime_now dpt_time_now()

/* Controller register accessors; all HBA I/O funnels through bus_space */
#define dpt_inl(dpt, port)				\
	bus_space_read_4((dpt)->tag, (dpt)->bsh, port)
#define dpt_inb(dpt, port)				\
	bus_space_read_1((dpt)->tag, (dpt)->bsh, port)
#define dpt_outl(dpt, port, value)			\
	bus_space_write_4((dpt)->tag, (dpt)->bsh, port, value)
#define dpt_outb(dpt, port, value)			\
	bus_space_write_1((dpt)->tag, (dpt)->bsh, port, value)

/*
 * These will have to be setup by parameters passed at boot/load time. For
 * performance reasons, we make them constants for the time being.
 */
#define dpt_min_segs	DPT_MAX_SEGS
#define dpt_max_segs	DPT_MAX_SEGS

/* Definitions for our use of the SIM private CCB area */
#define ccb_dccb_ptr	spriv_ptr0
#define ccb_dpt_ptr	spriv_ptr1

/* ================= Private Inline Function declarations ===================*/
static __inline int		dpt_just_reset(dpt_softc_t * dpt);
static __inline int		dpt_raid_busy(dpt_softc_t * dpt);
static __inline int		dpt_pio_wait (u_int32_t, u_int, u_int, u_int);
static __inline int		dpt_wait(dpt_softc_t *dpt, u_int bits,
					 u_int state);
static __inline struct dpt_ccb* dptgetccb(struct dpt_softc *dpt);
static __inline void		dptfreeccb(struct dpt_softc *dpt,
					   struct dpt_ccb *dccb);
static __inline u_int32_t	dptccbvtop(struct dpt_softc *dpt,
					   struct dpt_ccb *dccb);

static __inline int		dpt_send_immediate(dpt_softc_t *dpt,
						   eata_ccb_t *cmd_block,
						   u_int32_t cmd_busaddr,
						   u_int retries,
						   u_int ifc, u_int code,
						   u_int code2);

/* ==================== Private Function declarations =======================*/
static void		dptmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);

static struct sg_map_node*
			dptallocsgmap(struct dpt_softc *dpt);

static int		dptallocccbs(dpt_softc_t *dpt);

static int		dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb,
				     u_int32_t dccb_busaddr, u_int size,
				     u_int page, u_int target, int extent);
static void		dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb,
					 u_int32_t dccb_busaddr,
					 u_int8_t *buff);

static void		dpt_poll(struct cam_sim *sim);

static void		dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);

static void		dpt_action(struct cam_sim *sim, union ccb *ccb);

static int		dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd,
					      u_int32_t cmd_busaddr,
					      u_int command, u_int retries,
					      u_int ifc, u_int code,
					      u_int code2);
static void		dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb,
					union ccb *ccb, u_int hba_stat,
					u_int scsi_stat, u_int32_t resid);

static void		dpttimeout(void *arg);
static void		dptshutdown(void *arg, int howto);

/* ================= Private Inline Function definitions ====================*/
/*
 * Returns 1 if the status registers still spell out "DPTH", meaning the
 * board has just come out of reset and has not started operating yet.
 */
static __inline int
dpt_just_reset(dpt_softc_t * dpt)
{
	if ((dpt_inb(dpt, 2) == 'D')
	 && (dpt_inb(dpt, 3) == 'P')
	 && (dpt_inb(dpt, 4) == 'T')
	 && (dpt_inb(dpt, 5) == 'H'))
		return (1);
	else
		return (0);
}

/*
 * Returns 1 if the registers read back "DPT", which the firmware reports
 * while its RAID members are still spinning up.
 */
static __inline int
dpt_raid_busy(dpt_softc_t * dpt)
{
	if ((dpt_inb(dpt, 0) == 'D')
	 && (dpt_inb(dpt, 1) == 'P')
	 && (dpt_inb(dpt, 2) == 'T'))
		return (1);
	else
		return (0);
}

/*
 * Poll a register (via direct port I/O, used before bus_space is set up).
 * Returns 0 on success, -1 on timeout (20000 iterations x 50us).
 *
 * NOTE(review): success here is `!(c == state)' -- the masked bits must
 * DIFFER from `state' -- whereas the sibling dpt_wait() below succeeds when
 * the bits EQUAL `state'.  This looks inverted relative to the "wait for
 * not busy" comment; confirm against the EATA PIO protocol before changing.
 */
static __inline int
dpt_pio_wait (u_int32_t base, u_int reg, u_int bits, u_int state)
{
	int   i;
	u_int c;

	for (i = 0; i < 20000; i++) {	/* wait 20ms for not busy */
		c = inb(base + reg) & bits;
		if (!(c == state))
			return (0);
		else
			DELAY(50);
	}
	return (-1);
}

/*
 * Poll the HBA status register until the masked bits equal `state'.
 * Returns 0 on success, -1 on timeout (20000 iterations x 50us).
 */
static __inline int
dpt_wait(dpt_softc_t *dpt, u_int bits, u_int state)
{
	int   i;
	u_int c;

	for (i = 0; i < 20000; i++) {	/* wait 20ms for not busy */
		c = dpt_inb(dpt, HA_RSTATUS) & bits;
		if (c == state)
			return (0);
		else
			DELAY(50);
	}
	return (-1);
}

/*
 * Pop a driver CCB off the free list, growing the pool on demand up to
 * max_dccbs.  Returns NULL if the pool is exhausted and cannot grow.
 * The free list is protected by a critical section.
 */
static __inline struct dpt_ccb*
dptgetccb(struct dpt_softc *dpt)
{
	struct	dpt_ccb* dccb;

	crit_enter();
	if ((dccb = SLIST_FIRST(&dpt->free_dccb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
		dpt->free_dccbs--;
	} else if (dpt->total_dccbs < dpt->max_dccbs) {
		/* Pool not yet at its ceiling; allocate another batch */
		dptallocccbs(dpt);
		dccb = SLIST_FIRST(&dpt->free_dccb_list);
		if (dccb == NULL)
			kprintf("dpt%d: Can't malloc DCCB\n", dpt->unit);
		else {
			SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
			dpt->free_dccbs--;
		}
	}
	crit_exit();

	return (dccb);
}

/*
 * Return a driver CCB to the free list.  If the SIM queue was frozen for
 * this CCB (or we are under resource shortage), ask CAM to release it by
 * setting CAM_RELEASE_SIMQ in the owning CCB's status.
 */
static __inline void
dptfreeccb(struct dpt_softc *dpt, struct dpt_ccb *dccb)
{
	crit_enter();
	if ((dccb->state & DCCB_ACTIVE) != 0)
		LIST_REMOVE(&dccb->ccb->ccb_h, sim_links.le);
	if ((dccb->state & DCCB_RELEASE_SIMQ) != 0)
		dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if (dpt->resource_shortage != 0
	 && (dccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		dpt->resource_shortage = FALSE;
	}
	dccb->state = DCCB_FREE;
	SLIST_INSERT_HEAD(&dpt->free_dccb_list, dccb, links);
	++dpt->free_dccbs;
	crit_exit();
}

/*
 * Translate a driver CCB's kernel virtual address to the bus address the
 * HBA uses, relying on the CCB array being one contiguous DMA mapping.
 */
static __inline u_int32_t
dptccbvtop(struct dpt_softc *dpt, struct dpt_ccb *dccb)
{
	return (dpt->dpt_ccb_busbase
	      + (u_int32_t)((caddr_t)dccb - (caddr_t)dpt->dpt_dccbs));
}

/* Inverse of dptccbvtop(): bus address back to kernel virtual CCB pointer */
static __inline struct dpt_ccb *
dptccbptov(struct dpt_softc *dpt, u_int32_t busaddr)
{
	return (dpt->dpt_dccbs
	     +  ((struct dpt_ccb *)busaddr
	      - (struct dpt_ccb *)dpt->dpt_ccb_busbase));
}

/*
 * Send a command for immediate execution by the DPT
 * See above function for IMPORTANT notes.
 */
static __inline int
dpt_send_immediate(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
		   u_int32_t cmd_busaddr, u_int retries,
		   u_int ifc, u_int code, u_int code2)
{
	return (dpt_send_eata_command(dpt, cmd_block, cmd_busaddr,
				      EATA_CMD_IMMEDIATE, retries, ifc,
				      code, code2));
}


/* ===================== Private Function definitions =======================*/
/*
 * bus_dmamap_load() callback: stash the (single) segment's bus address in
 * the caller-supplied bus_addr_t.
 */
static void
dptmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

/*
 * Allocate and DMA-map one page of scatter/gather space, link it onto the
 * softc's sg_maps list, and return it.  Returns NULL on allocation failure.
 */
static struct sg_map_node *
dptallocsgmap(struct dpt_softc *dpt)
{
	struct sg_map_node *sg_map;

	sg_map = kmalloc(sizeof(*sg_map), M_DEVBUF, M_INTWAIT);

	/* Allocate S/G space for the next batch of CCBS */
	if (bus_dmamem_alloc(dpt->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		kfree(sg_map, M_DEVBUF);
		return (NULL);
	}

	(void)bus_dmamap_load(dpt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			      PAGE_SIZE, dptmapmem, &sg_map->sg_physaddr,
			      /*flags*/0);

	SLIST_INSERT_HEAD(&dpt->sg_maps, sg_map, links);

	return (sg_map);
}

/*
 * Allocate another chunk of CCB's.  Return count of entries added.
 * Assumed to be called at splcam().
 */
static int
dptallocccbs(dpt_softc_t *dpt)
{
	struct dpt_ccb *next_ccb;
	struct sg_map_node *sg_map;
	bus_addr_t physaddr;
	dpt_sg_t *segs;
	int newcount;
	int i;

	next_ccb = &dpt->dpt_dccbs[dpt->total_dccbs];

	if (next_ccb == dpt->dpt_dccbs) {
		/*
		 * First time through.  Re-use the S/G
		 * space we allocated for initialization
		 * CCBS.
		 */
		sg_map = SLIST_FIRST(&dpt->sg_maps);
	} else {
		sg_map = dptallocsgmap(dpt);
	}

	if (sg_map == NULL)
		return (0);

	segs = sg_map->sg_vaddr;
	physaddr = sg_map->sg_physaddr;

	/* One page of S/G space is carved into per-CCB S/G lists */
	newcount = (PAGE_SIZE / (dpt->sgsize * sizeof(dpt_sg_t)));
	for (i = 0; dpt->total_dccbs < dpt->max_dccbs && i < newcount; i++) {
		int error;

		error = bus_dmamap_create(dpt->buffer_dmat, /*flags*/0,
					  &next_ccb->dmamap);
		if (error != 0)
			break;
		/* The HBA reads these fields, so they are kept big-endian */
		next_ccb->sg_list = segs;
		next_ccb->sg_busaddr = htonl(physaddr);
		next_ccb->eata_ccb.cp_dataDMA = htonl(physaddr);
		next_ccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
		next_ccb->eata_ccb.cp_reqDMA =
		    htonl(dptccbvtop(dpt, next_ccb)
			+ offsetof(struct dpt_ccb, sense_data));
		next_ccb->eata_ccb.cp_busaddr = dpt->dpt_ccb_busend;
		next_ccb->state = DCCB_FREE;
		next_ccb->tag = dpt->total_dccbs;
		SLIST_INSERT_HEAD(&dpt->free_dccb_list, next_ccb, links);
		segs += dpt->sgsize;
		physaddr += (dpt->sgsize * sizeof(dpt_sg_t));
		dpt->dpt_ccb_busend += sizeof(*next_ccb);
		next_ccb++;
		dpt->total_dccbs++;
	}
	return (i);
}

/*
 * Read the EATA configuration block from a controller at `base' using
 * programmed I/O (no DMA set up yet).  Returns a pointer to a statically
 * cached dpt_conf_t on success, NULL on any failure.
 */
dpt_conf_t *
dpt_pio_get_conf (u_int32_t base)
{
	static dpt_conf_t *conf;
	u_int16_t  *p;
	int	    i;

	/*
	 * Allocate a dpt_conf_t
	 */
	if (conf == NULL)
		conf = kmalloc(sizeof(dpt_conf_t), M_DEVBUF, M_INTWAIT);

	/*
	 * If we have one, clean it up.
	 */
	bzero(conf, sizeof(dpt_conf_t));

	/*
	 * Reset the controller.
	 */
	outb((base + HA_WCOMMAND), EATA_CMD_RESET);

	/*
	 * Wait for the controller to become ready.
	 * For some reason there can be -no- delays after calling reset
	 * before we wait on ready status.
	 */
	if (dpt_pio_wait(base, HA_RSTATUS, HA_SBUSY, 0)) {
		kprintf("dpt: timeout waiting for controller to become ready\n");
		return (NULL);
	}

	if (dpt_pio_wait(base, HA_RAUXSTAT, HA_ABUSY, 0)) {
		kprintf("dpt: timetout waiting for adapter ready.\n");
		return (NULL);
	}

	/*
	 * Send the PIO_READ_CONFIG command.
	 */
	outb((base + HA_WCOMMAND), EATA_CMD_PIO_READ_CONFIG);

	/*
	 * Read the data into the struct.
	 */
	p = (u_int16_t *)conf;
	for (i = 0; i < (sizeof(dpt_conf_t) / 2); i++) {

		if (dpt_pio_wait(base, HA_RSTATUS, HA_SDRQ, 0)) {
			kprintf("dpt: timeout in data read.\n");
			return (NULL);
		}

		(*p) = inw(base + HA_RDATA);
		p++;
	}

	if (inb(base + HA_RSTATUS) & HA_SERROR) {
		kprintf("dpt: error reading configuration data.\n");
		return (NULL);
	}

/* "EATA" in big- and little-endian byte order, respectively */
#define BE_EATA_SIGNATURE	0x45415441
#define LE_EATA_SIGNATURE	0x41544145

	/*
	 * Test to see if we have a valid card.
	 */
	if ((conf->signature == BE_EATA_SIGNATURE) ||
	    (conf->signature == LE_EATA_SIGNATURE)) {

		/* Drain any data the controller still wants to hand us */
		while (inb(base + HA_RSTATUS) & HA_SDRQ) {
			inw(base + HA_RDATA);
		}

		return (conf);
	}
	return (NULL);
}

/*
 * Read a configuration page into the supplied dpt_cont_t buffer.
465 */ 466 static int 467 dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr, 468 u_int size, u_int page, u_int target, int extent) 469 { 470 eata_ccb_t *cp; 471 472 u_int8_t status; 473 474 int ndx; 475 int result; 476 477 cp = &dccb->eata_ccb; 478 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp)); 479 480 cp->Interpret = 1; 481 cp->DataIn = 1; 482 cp->Auto_Req_Sen = 1; 483 cp->reqlen = sizeof(struct scsi_sense_data); 484 485 cp->cp_id = target; 486 cp->cp_LUN = 0; /* In the EATA packet */ 487 cp->cp_lun = 0; /* In the SCSI command */ 488 489 cp->cp_scsi_cmd = INQUIRY; 490 cp->cp_len = size; 491 492 cp->cp_extent = extent; 493 494 cp->cp_page = page; 495 cp->cp_channel = 0; /* DNC, Interpret mode is set */ 496 cp->cp_identify = 1; 497 cp->cp_datalen = htonl(size); 498 499 crit_enter(); 500 501 /* 502 * This could be a simple for loop, but we suspected the compiler To 503 * have optimized it a bit too much. Wait for the controller to 504 * become ready 505 */ 506 while (((status = dpt_inb(dpt, HA_RSTATUS)) != (HA_SREADY | HA_SSC) 507 && (status != (HA_SREADY | HA_SSC | HA_SERROR)) 508 && (status != (HA_SDRDY | HA_SERROR | HA_SDRQ))) 509 || (dpt_wait(dpt, HA_SBUSY, 0))) { 510 511 /* 512 * RAID Drives still Spinning up? (This should only occur if 513 * the DPT controller is in a NON PC (PCI?) platform). 514 */ 515 if (dpt_raid_busy(dpt)) { 516 kprintf("dpt%d WARNING: Get_conf() RSUS failed.\n", 517 dpt->unit); 518 crit_exit(); 519 return (0); 520 } 521 } 522 523 DptStat_Reset_BUSY(dpt->sp); 524 525 /* 526 * XXXX We might want to do something more clever than aborting at 527 * this point, like resetting (rebooting) the controller and trying 528 * again. 
529 */ 530 if ((result = dpt_send_eata_command(dpt, cp, dccb_busaddr, 531 EATA_CMD_DMA_SEND_CP, 532 10000, 0, 0, 0)) != 0) { 533 kprintf("dpt%d WARNING: Get_conf() failed (%d) to send " 534 "EATA_CMD_DMA_READ_CONFIG\n", 535 dpt->unit, result); 536 crit_exit(); 537 return (0); 538 } 539 /* Wait for two seconds for a response. This can be slow */ 540 for (ndx = 0; 541 (ndx < 20000) 542 && !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ); 543 ndx++) { 544 DELAY(50); 545 } 546 547 /* Grab the status and clear interrupts */ 548 status = dpt_inb(dpt, HA_RSTATUS); 549 550 crit_exit(); 551 552 /* 553 * Check the status carefully. Return only if the 554 * command was successful. 555 */ 556 if (((status & HA_SERROR) == 0) 557 && (dpt->sp->hba_stat == 0) 558 && (dpt->sp->scsi_stat == 0) 559 && (dpt->sp->residue_len == 0)) 560 return (0); 561 562 if (dpt->sp->scsi_stat == SCSI_STATUS_CHECK_COND) 563 return (0); 564 565 return (1); 566 } 567 568 /* Detect Cache parameters and size */ 569 static void 570 dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr, 571 u_int8_t *buff) 572 { 573 eata_ccb_t *cp; 574 u_int8_t *param; 575 int bytes; 576 int result; 577 int ndx; 578 u_int8_t status; 579 580 /* 581 * Default setting, for best perfromance.. 582 * This is what virtually all cards default to.. 583 */ 584 dpt->cache_type = DPT_CACHE_WRITEBACK; 585 dpt->cache_size = 0; 586 587 cp = &dccb->eata_ccb; 588 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(dpt->sp)); 589 bzero(buff, 512); 590 591 /* Setup the command structure */ 592 cp->Interpret = 1; 593 cp->DataIn = 1; 594 cp->Auto_Req_Sen = 1; 595 cp->reqlen = sizeof(struct scsi_sense_data); 596 597 cp->cp_id = 0; /* who cares? The HBA will interpret.. 
*/ 598 cp->cp_LUN = 0; /* In the EATA packet */ 599 cp->cp_lun = 0; /* In the SCSI command */ 600 cp->cp_channel = 0; 601 602 cp->cp_scsi_cmd = EATA_CMD_DMA_SEND_CP; 603 cp->cp_len = 56; 604 605 cp->cp_extent = 0; 606 cp->cp_page = 0; 607 cp->cp_identify = 1; 608 cp->cp_dispri = 1; 609 610 /* 611 * Build the EATA Command Packet structure 612 * for a Log Sense Command. 613 */ 614 cp->cp_cdb[0] = 0x4d; 615 cp->cp_cdb[1] = 0x0; 616 cp->cp_cdb[2] = 0x40 | 0x33; 617 cp->cp_cdb[7] = 1; 618 619 cp->cp_datalen = htonl(512); 620 621 crit_enter(); 622 result = dpt_send_eata_command(dpt, cp, dccb_busaddr, 623 EATA_CMD_DMA_SEND_CP, 624 10000, 0, 0, 0); 625 if (result != 0) { 626 kprintf("dpt%d WARNING: detect_cache() failed (%d) to send " 627 "EATA_CMD_DMA_SEND_CP\n", dpt->unit, result); 628 crit_exit(); 629 return; 630 } 631 /* Wait for two seconds for a response. This can be slow... */ 632 for (ndx = 0; 633 (ndx < 20000) && 634 !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ); 635 ndx++) { 636 DELAY(50); 637 } 638 639 /* Grab the status and clear interrupts */ 640 status = dpt_inb(dpt, HA_RSTATUS); 641 crit_exit(); 642 643 /* 644 * Sanity check 645 */ 646 if (buff[0] != 0x33) { 647 return; 648 } 649 bytes = DPT_HCP_LENGTH(buff); 650 param = DPT_HCP_FIRST(buff); 651 652 if (DPT_HCP_CODE(param) != 1) { 653 /* 654 * DPT Log Page layout error 655 */ 656 kprintf("dpt%d: NOTICE: Log Page (1) layout error\n", 657 dpt->unit); 658 return; 659 } 660 if (!(param[4] & 0x4)) { 661 dpt->cache_type = DPT_NO_CACHE; 662 return; 663 } 664 while (DPT_HCP_CODE(param) != 6) { 665 param = DPT_HCP_NEXT(param); 666 if ((param < buff) 667 || (param >= &buff[bytes])) { 668 return; 669 } 670 } 671 672 if (param[4] & 0x2) { 673 /* 674 * Cache disabled 675 */ 676 dpt->cache_type = DPT_NO_CACHE; 677 return; 678 } 679 680 if (param[4] & 0x4) { 681 dpt->cache_type = DPT_CACHE_WRITETHROUGH; 682 } 683 684 /* XXX This isn't correct. This log parameter only has two bytes.... 
*/ 685 #if 0 686 dpt->cache_size = param[5] 687 | (param[6] << 8) 688 | (param[7] << 16) 689 | (param[8] << 24); 690 #endif 691 } 692 693 static void 694 dpt_poll(struct cam_sim *sim) 695 { 696 dpt_intr(cam_sim_softc(sim)); 697 } 698 699 static void 700 dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 701 { 702 struct dpt_ccb *dccb; 703 union ccb *ccb; 704 struct dpt_softc *dpt; 705 706 dccb = (struct dpt_ccb *)arg; 707 ccb = dccb->ccb; 708 dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr; 709 710 if (error != 0) { 711 if (error != EFBIG) 712 kprintf("dpt%d: Unexpected error 0x%x returned from " 713 "bus_dmamap_load\n", dpt->unit, error); 714 if (ccb->ccb_h.status == CAM_REQ_INPROG) { 715 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 716 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; 717 } 718 dptfreeccb(dpt, dccb); 719 xpt_done(ccb); 720 return; 721 } 722 723 if (nseg != 0) { 724 dpt_sg_t *sg; 725 bus_dma_segment_t *end_seg; 726 bus_dmasync_op_t op; 727 728 end_seg = dm_segs + nseg; 729 730 /* Copy the segments into our SG list */ 731 sg = dccb->sg_list; 732 while (dm_segs < end_seg) { 733 sg->seg_len = htonl(dm_segs->ds_len); 734 sg->seg_addr = htonl(dm_segs->ds_addr); 735 sg++; 736 dm_segs++; 737 } 738 739 if (nseg > 1) { 740 dccb->eata_ccb.scatter = 1; 741 dccb->eata_ccb.cp_dataDMA = dccb->sg_busaddr; 742 dccb->eata_ccb.cp_datalen = 743 htonl(nseg * sizeof(dpt_sg_t)); 744 } else { 745 dccb->eata_ccb.cp_dataDMA = dccb->sg_list[0].seg_addr; 746 dccb->eata_ccb.cp_datalen = dccb->sg_list[0].seg_len; 747 } 748 749 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 750 op = BUS_DMASYNC_PREREAD; 751 else 752 op = BUS_DMASYNC_PREWRITE; 753 754 bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op); 755 756 } else { 757 dccb->eata_ccb.cp_dataDMA = 0; 758 dccb->eata_ccb.cp_datalen = 0; 759 } 760 761 crit_enter(); 762 763 /* 764 * Last time we need to check if this CCB needs to 765 * be aborted. 
766 */ 767 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 768 if (nseg != 0) 769 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap); 770 dptfreeccb(dpt, dccb); 771 xpt_done(ccb); 772 crit_exit(); 773 return; 774 } 775 776 dccb->state |= DCCB_ACTIVE; 777 ccb->ccb_h.status |= CAM_SIM_QUEUED; 778 LIST_INSERT_HEAD(&dpt->pending_ccb_list, &ccb->ccb_h, sim_links.le); 779 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000, 780 dpttimeout, dccb); 781 if (dpt_send_eata_command(dpt, &dccb->eata_ccb, 782 dccb->eata_ccb.cp_busaddr, 783 EATA_CMD_DMA_SEND_CP, 0, 0, 0, 0) != 0) { 784 ccb->ccb_h.status = CAM_NO_HBA; /* HBA dead or just busy?? */ 785 if (nseg != 0) 786 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap); 787 dptfreeccb(dpt, dccb); 788 xpt_done(ccb); 789 } 790 791 crit_exit(); 792 } 793 794 static void 795 dpt_action(struct cam_sim *sim, union ccb *ccb) 796 { 797 struct dpt_softc *dpt; 798 799 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("dpt_action\n")); 800 801 dpt = (struct dpt_softc *)cam_sim_softc(sim); 802 803 if ((dpt->state & DPT_HA_SHUTDOWN_ACTIVE) != 0) { 804 xpt_print_path(ccb->ccb_h.path); 805 kprintf("controller is shutdown. 
Aborting CCB.\n"); 806 ccb->ccb_h.status = CAM_NO_HBA; 807 xpt_done(ccb); 808 return; 809 } 810 811 switch (ccb->ccb_h.func_code) { 812 /* Common cases first */ 813 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 814 { 815 struct ccb_scsiio *csio; 816 struct ccb_hdr *ccbh; 817 struct dpt_ccb *dccb; 818 struct eata_ccb *eccb; 819 820 csio = &ccb->csio; 821 ccbh = &ccb->ccb_h; 822 /* Max CDB length is 12 bytes */ 823 if (csio->cdb_len > 12) { 824 ccb->ccb_h.status = CAM_REQ_INVALID; 825 xpt_done(ccb); 826 return; 827 } 828 if ((dccb = dptgetccb(dpt)) == NULL) { 829 crit_enter(); 830 dpt->resource_shortage = 1; 831 crit_exit(); 832 xpt_freeze_simq(sim, /*count*/1); 833 ccb->ccb_h.status = CAM_REQUEUE_REQ; 834 xpt_done(ccb); 835 return; 836 } 837 eccb = &dccb->eata_ccb; 838 839 /* Link dccb and ccb so we can find one from the other */ 840 dccb->ccb = ccb; 841 ccb->ccb_h.ccb_dccb_ptr = dccb; 842 ccb->ccb_h.ccb_dpt_ptr = dpt; 843 844 /* 845 * Explicitly set all flags so that the compiler can 846 * be smart about setting them. 847 */ 848 eccb->SCSI_Reset = 0; 849 eccb->HBA_Init = 0; 850 eccb->Auto_Req_Sen = (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) 851 ? 0 : 1; 852 eccb->scatter = 0; 853 eccb->Quick = 0; 854 eccb->Interpret = 855 ccb->ccb_h.target_id == dpt->hostid[cam_sim_bus(sim)] 856 ? 1 : 0; 857 eccb->DataOut = (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0; 858 eccb->DataIn = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0; 859 eccb->reqlen = csio->sense_len; 860 eccb->cp_id = ccb->ccb_h.target_id; 861 eccb->cp_channel = cam_sim_bus(sim); 862 eccb->cp_LUN = ccb->ccb_h.target_lun; 863 eccb->cp_luntar = 0; 864 eccb->cp_dispri = (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) 865 ? 
0 : 1; 866 eccb->cp_identify = 1; 867 868 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 869 && csio->tag_action != CAM_TAG_ACTION_NONE) { 870 eccb->cp_msg[0] = csio->tag_action; 871 eccb->cp_msg[1] = dccb->tag; 872 } else { 873 eccb->cp_msg[0] = 0; 874 eccb->cp_msg[1] = 0; 875 } 876 eccb->cp_msg[2] = 0; 877 878 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 879 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { 880 bcopy(csio->cdb_io.cdb_ptr, 881 eccb->cp_cdb, csio->cdb_len); 882 } else { 883 /* I guess I could map it in... */ 884 ccb->ccb_h.status = CAM_REQ_INVALID; 885 dptfreeccb(dpt, dccb); 886 xpt_done(ccb); 887 return; 888 } 889 } else { 890 bcopy(csio->cdb_io.cdb_bytes, 891 eccb->cp_cdb, csio->cdb_len); 892 } 893 /* 894 * If we have any data to send with this command, 895 * map it into bus space. 896 */ 897 /* Only use S/G if there is a transfer */ 898 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 899 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 900 /* 901 * We've been given a pointer 902 * to a single buffer. 903 */ 904 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 905 int error; 906 907 crit_enter(); 908 error = 909 bus_dmamap_load(dpt->buffer_dmat, 910 dccb->dmamap, 911 csio->data_ptr, 912 csio->dxfer_len, 913 dptexecuteccb, 914 dccb, /*flags*/0); 915 if (error == EINPROGRESS) { 916 /* 917 * So as to maintain ordering, 918 * freeze the controller queue 919 * until our mapping is 920 * returned. 
921 */ 922 xpt_freeze_simq(sim, 1); 923 dccb->state |= CAM_RELEASE_SIMQ; 924 } 925 crit_exit(); 926 } else { 927 struct bus_dma_segment seg; 928 929 /* Pointer to physical buffer */ 930 seg.ds_addr = 931 (bus_addr_t)csio->data_ptr; 932 seg.ds_len = csio->dxfer_len; 933 dptexecuteccb(dccb, &seg, 1, 0); 934 } 935 } else { 936 struct bus_dma_segment *segs; 937 938 if ((ccbh->flags & CAM_DATA_PHYS) != 0) 939 panic("dpt_action - Physical " 940 "segment pointers " 941 "unsupported"); 942 943 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) 944 panic("dpt_action - Virtual " 945 "segment addresses " 946 "unsupported"); 947 948 /* Just use the segments provided */ 949 segs = (struct bus_dma_segment *)csio->data_ptr; 950 dptexecuteccb(dccb, segs, csio->sglist_cnt, 0); 951 } 952 } else { 953 /* 954 * XXX JGibbs. 955 * Does it want them both on or both off? 956 * CAM_DIR_NONE is both on, so this code can 957 * be removed if this is also what the DPT 958 * exptects. 959 */ 960 eccb->DataOut = 0; 961 eccb->DataIn = 0; 962 dptexecuteccb(dccb, NULL, 0, 0); 963 } 964 break; 965 } 966 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 967 case XPT_ABORT: /* Abort the specified CCB */ 968 /* XXX Implement */ 969 ccb->ccb_h.status = CAM_REQ_INVALID; 970 xpt_done(ccb); 971 break; 972 case XPT_SET_TRAN_SETTINGS: 973 { 974 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 975 xpt_done(ccb); 976 break; 977 } 978 case XPT_GET_TRAN_SETTINGS: 979 /* Get default/user set transfer settings for the target */ 980 { 981 struct ccb_trans_settings *cts = &ccb->cts; 982 #ifdef CAM_NEW_TRAN_CODE 983 struct ccb_trans_settings_scsi *scsi = 984 &cts->proto_specific.scsi; 985 struct ccb_trans_settings_spi *spi = 986 &cts->xport_specific.spi; 987 988 cts->protocol = PROTO_SCSI; 989 cts->protocol_version = SCSI_REV_2; 990 cts->transport = XPORT_SPI; 991 cts->transport_version = 2; 992 993 if (cts->type == CTS_TYPE_USER_SETTINGS) { 994 spi->flags = CTS_SPI_FLAGS_DISC_ENB; 995 spi->bus_width = (dpt->max_id > 7) 
996 ? MSG_EXT_WDTR_BUS_8_BIT 997 : MSG_EXT_WDTR_BUS_16_BIT; 998 spi->sync_period = 25; /* 10MHz */ 999 if (spi->sync_period != 0) 1000 spi->sync_offset = 15; 1001 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 1002 1003 spi->valid = CTS_SPI_VALID_SYNC_RATE 1004 | CTS_SPI_VALID_SYNC_OFFSET 1005 | CTS_SPI_VALID_SYNC_RATE 1006 | CTS_SPI_VALID_BUS_WIDTH 1007 | CTS_SPI_VALID_DISC; 1008 scsi->valid = CTS_SCSI_VALID_TQ; 1009 ccb->ccb_h.status = CAM_REQ_CMP; 1010 } else { 1011 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1012 } 1013 #else 1014 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 1015 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB; 1016 cts->bus_width = (dpt->max_id > 7) 1017 ? MSG_EXT_WDTR_BUS_8_BIT 1018 : MSG_EXT_WDTR_BUS_16_BIT; 1019 cts->sync_period = 25; /* 10MHz */ 1020 1021 if (cts->sync_period != 0) 1022 cts->sync_offset = 15; 1023 1024 cts->valid = CCB_TRANS_SYNC_RATE_VALID 1025 | CCB_TRANS_SYNC_OFFSET_VALID 1026 | CCB_TRANS_BUS_WIDTH_VALID 1027 | CCB_TRANS_DISC_VALID 1028 | CCB_TRANS_TQ_VALID; 1029 ccb->ccb_h.status = CAM_REQ_CMP; 1030 } else { 1031 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1032 } 1033 #endif 1034 xpt_done(ccb); 1035 break; 1036 } 1037 case XPT_CALC_GEOMETRY: 1038 { 1039 struct ccb_calc_geometry *ccg; 1040 u_int32_t size_mb; 1041 u_int32_t secs_per_cylinder; 1042 int extended; 1043 1044 /* 1045 * XXX Use Adaptec translation until I find out how to 1046 * get this information from the card. 
1047 */ 1048 ccg = &ccb->ccg; 1049 size_mb = ccg->volume_size 1050 / ((1024L * 1024L) / ccg->block_size); 1051 extended = 1; 1052 1053 if (size_mb > 1024 && extended) { 1054 ccg->heads = 255; 1055 ccg->secs_per_track = 63; 1056 } else { 1057 ccg->heads = 64; 1058 ccg->secs_per_track = 32; 1059 } 1060 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 1061 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 1062 ccb->ccb_h.status = CAM_REQ_CMP; 1063 xpt_done(ccb); 1064 break; 1065 } 1066 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 1067 { 1068 /* XXX Implement */ 1069 ccb->ccb_h.status = CAM_REQ_CMP; 1070 xpt_done(ccb); 1071 break; 1072 } 1073 case XPT_TERM_IO: /* Terminate the I/O process */ 1074 /* XXX Implement */ 1075 ccb->ccb_h.status = CAM_REQ_INVALID; 1076 xpt_done(ccb); 1077 break; 1078 case XPT_PATH_INQ: /* Path routing inquiry */ 1079 { 1080 struct ccb_pathinq *cpi = &ccb->cpi; 1081 1082 cpi->version_num = 1; 1083 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; 1084 if (dpt->max_id > 7) 1085 cpi->hba_inquiry |= PI_WIDE_16; 1086 cpi->target_sprt = 0; 1087 cpi->hba_misc = 0; 1088 cpi->hba_eng_cnt = 0; 1089 cpi->max_target = dpt->max_id; 1090 cpi->max_lun = dpt->max_lun; 1091 cpi->initiator_id = dpt->hostid[cam_sim_bus(sim)]; 1092 cpi->bus_id = cam_sim_bus(sim); 1093 cpi->base_transfer_speed = 3300; 1094 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 1095 strncpy(cpi->hba_vid, "DPT", HBA_IDLEN); 1096 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 1097 cpi->unit_number = cam_sim_unit(sim); 1098 #ifdef CAM_NEW_TRAN_CODE 1099 cpi->transport = XPORT_SPI; 1100 cpi->transport_version = 2; 1101 cpi->protocol = PROTO_SCSI; 1102 cpi->protocol_version = SCSI_REV_2; 1103 #endif 1104 cpi->ccb_h.status = CAM_REQ_CMP; 1105 xpt_done(ccb); 1106 break; 1107 } 1108 default: 1109 ccb->ccb_h.status = CAM_REQ_INVALID; 1110 xpt_done(ccb); 1111 break; 1112 } 1113 } 1114 1115 /* 1116 * This routine will try to send an EATA command to the DPT HBA. 
1117 * It will, by default, try 20,000 times, waiting 50us between tries. 1118 * It returns 0 on success and 1 on failure. 1119 * It is assumed to be called at splcam(). 1120 */ 1121 static int 1122 dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd_block, 1123 u_int32_t cmd_busaddr, u_int command, u_int retries, 1124 u_int ifc, u_int code, u_int code2) 1125 { 1126 u_int loop; 1127 1128 if (!retries) 1129 retries = 20000; 1130 1131 /* 1132 * I hate this polling nonsense. Wish there was a way to tell the DPT 1133 * to go get commands at its own pace, or to interrupt when ready. 1134 * In the mean time we will measure how many itterations it really 1135 * takes. 1136 */ 1137 for (loop = 0; loop < retries; loop++) { 1138 if ((dpt_inb(dpt, HA_RAUXSTAT) & HA_ABUSY) == 0) 1139 break; 1140 else 1141 DELAY(50); 1142 } 1143 1144 if (loop < retries) { 1145 #ifdef DPT_MEASURE_PERFORMANCE 1146 if (loop > dpt->performance.max_eata_tries) 1147 dpt->performance.max_eata_tries = loop; 1148 1149 if (loop < dpt->performance.min_eata_tries) 1150 dpt->performance.min_eata_tries = loop; 1151 #endif 1152 } else { 1153 #ifdef DPT_MEASURE_PERFORMANCE 1154 ++dpt->performance.command_too_busy; 1155 #endif 1156 return (1); 1157 } 1158 1159 /* The controller is alive, advance the wedge timer */ 1160 #ifdef DPT_RESET_HBA 1161 dpt->last_contact = microtime_now; 1162 #endif 1163 1164 if (cmd_block == NULL) 1165 cmd_busaddr = 0; 1166 #if (BYTE_ORDER == BIG_ENDIAN) 1167 else { 1168 cmd_busaddr = ((cmd_busaddr >> 24) & 0xFF) 1169 | ((cmd_busaddr >> 16) & 0xFF) 1170 | ((cmd_busaddr >> 8) & 0xFF) 1171 | (cmd_busaddr & 0xFF); 1172 } 1173 #endif 1174 /* And now the address */ 1175 dpt_outl(dpt, HA_WDMAADDR, cmd_busaddr); 1176 1177 if (command == EATA_CMD_IMMEDIATE) { 1178 if (cmd_block == NULL) { 1179 dpt_outb(dpt, HA_WCODE2, code2); 1180 dpt_outb(dpt, HA_WCODE, code); 1181 } 1182 dpt_outb(dpt, HA_WIFC, ifc); 1183 } 1184 dpt_outb(dpt, HA_WCOMMAND, command); 1185 1186 return (0); 1187 } 1188 1189 
/* ==================== Exported Function definitions =======================*/

/*
 * Allocate and zero the softc for a newly probed controller, hook it onto
 * the global dpt_softcs list, and initialize its queues and per-channel
 * reset levels.  Returns the (bus-provided) softc.
 */
dpt_softc_t *
dpt_alloc(device_t dev, bus_space_tag_t tag, bus_space_handle_t bsh)
{
	dpt_softc_t	*dpt = device_get_softc(dev);
	int    i;

	bzero(dpt, sizeof(dpt_softc_t));
	dpt->tag = tag;
	dpt->bsh = bsh;
	dpt->unit = device_get_unit(dev);
	SLIST_INIT(&dpt->free_dccb_list);
	LIST_INIT(&dpt->pending_ccb_list);
	TAILQ_INSERT_TAIL(&dpt_softcs, dpt, links);
	for (i = 0; i < MAX_CHANNELS; i++)
		dpt->resetlevel[i] = DPT_HA_OK;

#ifdef DPT_MEASURE_PERFORMANCE
	dpt_reset_performance(dpt);
#endif /* DPT_MEASURE_PERFORMANCE */
	return (dpt);
}

/*
 * Tear down everything dpt_init() built, in reverse order.  init_level
 * records how far dpt_init() got; each case deliberately falls through
 * to undo the earlier stages as well.
 */
void
dpt_free(struct dpt_softc *dpt)
{
	switch (dpt->init_level) {
	default:
	case 5:
		bus_dmamap_unload(dpt->dccb_dmat, dpt->dccb_dmamap);
		/* FALLTHROUGH */
	case 4:
		bus_dmamem_free(dpt->dccb_dmat, dpt->dpt_dccbs,
				dpt->dccb_dmamap);
		bus_dmamap_destroy(dpt->dccb_dmat, dpt->dccb_dmamap);
		/* FALLTHROUGH */
	case 3:
		bus_dma_tag_destroy(dpt->dccb_dmat);
		/* FALLTHROUGH */
	case 2:
		bus_dma_tag_destroy(dpt->buffer_dmat);
		/* FALLTHROUGH */
	case 1:
	{
		struct sg_map_node *sg_map;

		/* Release every S/G chunk allocated by dptallocsgmap(). */
		while ((sg_map = SLIST_FIRST(&dpt->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&dpt->sg_maps, links);
			bus_dmamap_unload(dpt->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(dpt->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			kfree(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(dpt->sg_dmat);
	}
		/* FALLTHROUGH */
	case 0:
		break;
	}
	TAILQ_REMOVE(&dpt_softcs, dpt, links);
}

/*
 * Widths of the four space-padded inquiry strings in dpt_inq_t, in the
 * order they are laid out in memory; used below to trim trailing blanks.
 */
static u_int8_t string_sizes[] =
{
	sizeof(((dpt_inq_t*)NULL)->vendor),
	sizeof(((dpt_inq_t*)NULL)->modelNum),
	sizeof(((dpt_inq_t*)NULL)->firmware),
	sizeof(((dpt_inq_t*)NULL)->protocol),
};

/*
 * Bring the HBA to an operational state: create the DMA tags, fetch the
 * board configuration and inquiry data, size the CCB pool, and allocate
 * and map the CCB/status-packet memory.  Returns 0 on success, non-zero
 * on failure (init_level tells dpt_free() how much to undo).
 */
int
dpt_init(struct dpt_softc *dpt)
{
	dpt_conf_t  conf;
	struct	    sg_map_node *sg_map;
	dpt_ccb_t  *dccb;
	u_int8_t   *strp;
	int	    index;
	int	    i;
	int	    retval;

	dpt->init_level = 0;
	SLIST_INIT(&dpt->sg_maps);

#ifdef DPT_RESET_BOARD
	kprintf("dpt%d: resetting HBA\n", dpt->unit);
	dpt_outb(dpt, HA_WCOMMAND, EATA_CMD_RESET);
	DELAY(750000);
	/* XXX Shouldn't we poll a status register or something??? */
#endif
	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(dpt->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &dpt->sg_dmat) != 0) {
		goto error_exit;
	}

	dpt->init_level++;

	/*
	 * We allocate our DPT ccbs as a contiguous array of bus dma'able
	 * memory.  To get the allocation size, we need to know how many
	 * ccbs the card supports.  This requires a ccb.  We solve this
	 * chicken and egg problem by allocating some re-usable S/G space
	 * up front, and treating it as our status packet, CCB, and target
	 * memory space for these commands.
	 */
	sg_map = dptallocsgmap(dpt);
	if (sg_map == NULL)
		goto error_exit;

	/* Carve the bootstrap chunk: status packet, then one CCB, then data. */
	dpt->sp = (volatile dpt_sp_t *)sg_map->sg_vaddr;
	dccb = (struct dpt_ccb *)(uintptr_t)(volatile void *)&dpt->sp[1];
	bzero(dccb, sizeof(*dccb));
	dpt->sp_physaddr = sg_map->sg_physaddr;
	dccb->eata_ccb.cp_dataDMA =
	    htonl(sg_map->sg_physaddr + sizeof(dpt_sp_t) + sizeof(*dccb));
	dccb->eata_ccb.cp_busaddr = ~0;
	dccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
	dccb->eata_ccb.cp_reqDMA = htonl(dpt->sp_physaddr + sizeof(*dccb)
				       + offsetof(struct dpt_ccb, sense_data));

	/* Okay.  Fetch our config */
	bzero(&dccb[1], sizeof(conf));		/* data area */
	retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
			      sizeof(conf), 0xc1, 7, 1);

	if (retval != 0) {
		kprintf("dpt%d: Failed to get board configuration\n", dpt->unit);
		return (retval);
	}
	bcopy(&dccb[1], &conf, sizeof(conf));

	bzero(&dccb[1], sizeof(dpt->board_data));
	retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
			      sizeof(dpt->board_data), 0, conf.scsi_id0, 0);
	if (retval != 0) {
		kprintf("dpt%d: Failed to get inquiry information\n", dpt->unit);
		return (retval);
	}
	bcopy(&dccb[1], &dpt->board_data, sizeof(dpt->board_data));

	dpt_detect_cache(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
			 (u_int8_t *)&dccb[1]);

	/* The status-packet length encodes the EATA revision. */
	switch (ntohl(conf.splen)) {
	case DPT_EATA_REVA:
		dpt->EATA_revision = 'a';
		break;
	case DPT_EATA_REVB:
		dpt->EATA_revision = 'b';
		break;
	case DPT_EATA_REVC:
		dpt->EATA_revision = 'c';
		break;
	case DPT_EATA_REVZ:
		dpt->EATA_revision = 'z';
		break;
	default:
		dpt->EATA_revision = '?';
	}

	dpt->max_id = conf.MAX_ID;
	dpt->max_lun = conf.MAX_LUN;
	dpt->irq = conf.IRQ;
	dpt->dma_channel = (8 - conf.DMA_channel) & 7;
	dpt->channels = conf.MAX_CHAN + 1;
	dpt->state |= DPT_HA_OK;
	if (conf.SECOND)
		dpt->primary = FALSE;
	else
		dpt->primary = TRUE;

	dpt->more_support = conf.MORE_support;

	/* Firmware 07G0 and later support immediate commands. */
	if (strncmp(dpt->board_data.firmware, "07G0", 4) >= 0)
		dpt->immediate_support = 1;
	else
		dpt->immediate_support = 0;

	dpt->broken_INQUIRY = FALSE;

	dpt->cplen = ntohl(conf.cplen);
	dpt->cppadlen = ntohs(conf.cppadlen);
	dpt->max_dccbs = ntohs(conf.queuesiz);

	if (dpt->max_dccbs > 256) {
		kprintf("dpt%d: Max CCBs reduced from %d to "
			"256 due to tag algorithm\n", dpt->unit,
			dpt->max_dccbs);
		dpt->max_dccbs = 256;
	}

	dpt->hostid[0] = conf.scsi_id0;
	dpt->hostid[1] = conf.scsi_id1;
	dpt->hostid[2] = conf.scsi_id2;

	if (conf.SG_64K)
		dpt->sgsize = 8192;
	else
		dpt->sgsize = ntohs(conf.SGsiz);

	/* We can only get 64k buffers, so don't bother to waste space. */
	if (dpt->sgsize < 17 || dpt->sgsize > 32)
		dpt->sgsize = 32;

	if (dpt->sgsize > dpt_max_segs)
		dpt->sgsize = dpt_max_segs;

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(dpt->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/dpt->sgsize,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &dpt->buffer_dmat) != 0) {
		kprintf("dpt: bus_dma_tag_create(...,dpt->buffer_dmat) failed\n");
		goto error_exit;
	}

	dpt->init_level++;

	/* DMA tag for our ccb structures and interrupt status packet */
	if (bus_dma_tag_create(dpt->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       (dpt->max_dccbs * sizeof(struct dpt_ccb))
			       + sizeof(dpt_sp_t),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &dpt->dccb_dmat) != 0) {
		kprintf("dpt: bus_dma_tag_create(...,dpt->dccb_dmat) failed\n");
		goto error_exit;
	}

	dpt->init_level++;

	/* Allocation for our ccbs and interrupt status packet */
	if (bus_dmamem_alloc(dpt->dccb_dmat, (void **)&dpt->dpt_dccbs,
			     BUS_DMA_NOWAIT, &dpt->dccb_dmamap) != 0) {
		kprintf("dpt: bus_dmamem_alloc(dpt->dccb_dmat,...) failed\n");
		goto error_exit;
	}

	dpt->init_level++;

	/* And permanently map them */
	bus_dmamap_load(dpt->dccb_dmat, dpt->dccb_dmamap,
			dpt->dpt_dccbs,
			(dpt->max_dccbs * sizeof(struct dpt_ccb))
			+ sizeof(dpt_sp_t),
			dptmapmem, &dpt->dpt_ccb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(dpt->dpt_dccbs,
	      (dpt->max_dccbs * sizeof(struct dpt_ccb)) + sizeof(dpt_sp_t));

	dpt->dpt_ccb_busend = dpt->dpt_ccb_busbase;

	/* The real status packet lives right after the CCB array. */
	dpt->sp = (dpt_sp_t*)&dpt->dpt_dccbs[dpt->max_dccbs];
	dpt->sp_physaddr = dpt->dpt_ccb_busbase
			 + (dpt->max_dccbs * sizeof(dpt_ccb_t));
	dpt->init_level++;

	/* Allocate our first batch of ccbs */
	if (dptallocccbs(dpt) == 0) {
		kprintf("dpt: dptallocccbs(dpt) == 0\n");
		return (2);
	}

	/* Prepare for Target Mode */
	dpt->target_mode_enabled = 1;

	/* Nuke excess spaces from inquiry information */
	strp = dpt->board_data.vendor;
	for (i = 0; i < sizeof(string_sizes); i++) {
		index = string_sizes[i] - 1;
		while (index && (strp[index] == ' '))
			strp[index--] = '\0';
		strp += string_sizes[i];
	}

	kprintf("dpt%d: %.8s %.16s FW Rev. %.4s, ",
	       dpt->unit, dpt->board_data.vendor,
	       dpt->board_data.modelNum, dpt->board_data.firmware);

	kprintf("%d channel%s, ", dpt->channels, dpt->channels > 1 ? "s" : "");

	if (dpt->cache_type != DPT_NO_CACHE
	 && dpt->cache_size != 0) {
		kprintf("%s Cache, ",
		       dpt->cache_type == DPT_CACHE_WRITETHROUGH
		     ? "Write-Through" : "Write-Back");
	}

	kprintf("%d CCBs\n", dpt->max_dccbs);
	return (0);

error_exit:
	return (1);
}

/*
 * Register a SIM and wildcard path with CAM for each channel on the HBA.
 * Returns the number of channels successfully attached.
 */
int
dpt_attach(dpt_softc_t *dpt)
{
	struct cam_devq *devq;
	int i;

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(dpt->max_dccbs);
	if (devq == NULL)
		return (0);

	for (i = 0; i < dpt->channels; i++) {
		/*
		 * Construct our SIM entry
		 */
		dpt->sims[i] = cam_sim_alloc(dpt_action, dpt_poll, "dpt",
					     dpt, dpt->unit, /*untagged*/2,
					     /*tagged*/dpt->max_dccbs, devq);
		if (xpt_bus_register(dpt->sims[i], i) != CAM_SUCCESS) {
			cam_sim_free(dpt->sims[i]);
			break;
		}

		if (xpt_create_path(&dpt->paths[i], /*periph*/NULL,
				    cam_sim_path(dpt->sims[i]),
				    CAM_TARGET_WILDCARD,
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
			cam_sim_free(dpt->sims[i]);
			break;
		}

	}
	cam_simq_release(devq);
	/* Register the shutdown hook only if at least one bus attached. */
	if (i > 0)
		EVENTHANDLER_REGISTER(shutdown_post_sync, dptshutdown,
				      dpt, SHUTDOWN_PRI_DRIVER);
	return (i);
}


/*
 * This is the interrupt handler for the DPT driver.
 * Drains every completed status packet the HBA has posted, completing or
 * error-processing the corresponding CCBs.  Note: reading HA_RSTATUS is
 * what acknowledges (clears) the interrupt, so the reads of that register
 * below are load-bearing even where the value is not otherwise used.
 */
void
dpt_intr(void *arg)
{
	dpt_softc_t    *dpt;
	dpt_ccb_t      *dccb;
	union ccb      *ccb;
	u_int		status;
	u_int		aux_status;
	u_int		hba_stat;
	u_int		scsi_stat;
	u_int32_t	residue_len;	/* Number of bytes not transferred */

	dpt = (dpt_softc_t *)arg;

	/* First order of business is to check if this interrupt is for us */
	while (((aux_status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ) != 0) {

		/*
		 * What we want to do now, is to capture the status, all of it,
		 * move it where it belongs, wake up whoever sleeps waiting to
		 * process this result, and get out of here.
		 */
		if (dpt->sp->ccb_busaddr < dpt->dpt_ccb_busbase
		 || dpt->sp->ccb_busaddr >= dpt->dpt_ccb_busend) {
			kprintf("Encountered bogus status packet\n");
			/* Read clears the interrupt before bailing out. */
			status = dpt_inb(dpt, HA_RSTATUS);
			return;
		}

		dccb = dptccbptov(dpt, dpt->sp->ccb_busaddr);

		/* Poison the busaddr so a stale packet cannot match again. */
		dpt->sp->ccb_busaddr = ~0;

		/* Ignore status packets with EOC not set */
		if (dpt->sp->EOC == 0) {
			kprintf("dpt%d ERROR: Request %d received with "
			       "clear EOC.\n Marking as LOST.\n",
			       dpt->unit, dccb->transaction_id);

#ifdef DPT_HANDLE_TIMEOUTS
			dccb->state |= DPT_CCB_STATE_MARKED_LOST;
#endif
			/* This CLEARS the interrupt! */
			status = dpt_inb(dpt, HA_RSTATUS);
			continue;
		}
		dpt->sp->EOC = 0;

		/*
		 * Double buffer the status information so the hardware can
		 * work on updating the status packet while we decifer the
		 * one we were just interrupted for.
		 * According to Mark Salyzyn, we only need few pieces of it.
		 */
		hba_stat = dpt->sp->hba_stat;
		scsi_stat = dpt->sp->scsi_stat;
		residue_len = dpt->sp->residue_len;

		/* Clear interrupts, check for error */
		if ((status = dpt_inb(dpt, HA_RSTATUS)) & HA_SERROR) {
			/*
			 * Error Condition. Check for magic cookie. Exit
			 * this test on earliest sign of non-reset condition
			 */

			/* Check that this is not a board reset interrupt */
			if (dpt_just_reset(dpt)) {
				kprintf("dpt%d: HBA rebooted.\n"
				       " All transactions should be "
				       "resubmitted\n",
				       dpt->unit);

				kprintf("dpt%d: >>---->> This is incomplete, "
				       "fix me.... <<----<<", dpt->unit);
				panic("DPT Rebooted");

			}
		}
		/* Process CCB */
		ccb = dccb->ccb;
		callout_stop(&ccb->ccb_h.timeout_ch);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
			bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
		}

		/* Common Case inline... */
		if (hba_stat == HA_NO_ERROR) {
			ccb->csio.scsi_status = scsi_stat;
			ccb->ccb_h.status = 0;
			switch (scsi_stat) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				bcopy(&dccb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				/* XXX Freeze DevQ */
				break;
			}
			ccb->csio.resid = residue_len;
			dptfreeccb(dpt, dccb);
			xpt_done(ccb);
		} else {
			dptprocesserror(dpt, dccb, ccb, hba_stat, scsi_stat,
					residue_len);
		}
	}
}

/*
 * Translate a non-zero controller (hba_stat) error into the matching CAM
 * status, then free the CCB and complete the request.
 */
static void
dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb, union ccb *ccb,
		u_int hba_stat, u_int scsi_stat, u_int32_t resid)
{
	ccb->csio.resid = resid;
	switch (hba_stat) {
	case HA_ERR_SEL_TO:
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		break;
	case HA_ERR_CMD_TO:
		ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		break;
	case HA_SCSIBUS_RESET:
	case HA_HBA_POWER_UP:	/* Similar effect to a bus reset??? */
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case HA_CP_ABORTED:
	case HA_CP_RESET:	/* XXX ??? */
	case HA_CP_ABORT_NA:	/* XXX ??? */
	case HA_CP_RESET_NA:	/* XXX ???
				 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	case HA_PCI_PARITY:
	case HA_PCI_MABORT:
	case HA_PCI_TABORT:
	case HA_PCI_STABORT:
	case HA_BUS_PARITY:
	case HA_PARITY_ERR:
	case HA_ECC_ERR:
		ccb->ccb_h.status = CAM_UNCOR_PARITY;
		break;
	case HA_UNX_MSGRJCT:
		ccb->ccb_h.status = CAM_MSG_REJECT_REC;
		break;
	case HA_UNX_BUSPHASE:
		ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
		break;
	case HA_UNX_BUS_FREE:
		ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
		break;
	case HA_SCSI_HUNG:
	case HA_RESET_STUCK:
		/*
		 * Dead??? Can the controller get unstuck
		 * from these conditions
		 */
		ccb->ccb_h.status = CAM_NO_HBA;
		break;
	case HA_RSENSE_FAIL:
		ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
		break;
	default:
		kprintf("dpt%d: Undocumented Error %x\n", dpt->unit, hba_stat);
		kprintf("Please mail this message to shimon@simon-shapiro.org\n");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}
	dptfreeccb(dpt, dccb);
	xpt_done(ccb);
}

/*
 * Callout handler for a CCB that never completed.  Runs the interrupt
 * handler once to sweep up any finished-but-unacknowledged work, and if
 * the CCB is still active sends the HBA a specific-abort for it.
 */
static void
dpttimeout(void *arg)
{
	struct dpt_ccb	 *dccb;
	union  ccb	 *ccb;
	struct dpt_softc *dpt;

	dccb = (struct dpt_ccb *)arg;
	ccb = dccb->ccb;
	dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
	xpt_print_path(ccb->ccb_h.path);
	kprintf("CCB %p - timed out\n", (void *)dccb);

	crit_enter();

	/*
	 * Try to clear any pending jobs.  FreeBSD will lose interrupts,
	 * leaving the controller suspended, and commands timed-out.
	 * By calling the interrupt handler, any command thus stuck will be
	 * completed.
	 */
	dpt_intr(dpt);

	if ((dccb->state & DCCB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		kprintf("CCB %p - timed out CCB already completed\n",
		       (void *)dccb);
		crit_exit();
		return;
	}

	/* Abort this particular command.  Leave all others running */
	dpt_send_immediate(dpt, &dccb->eata_ccb, dccb->eata_ccb.cp_busaddr,
			   /*retries*/20000, EATA_SPECIFIC_ABORT, 0, 0);
	ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	crit_exit();
}

/*
 * Shutdown the controller and ensure that the cache is completely flushed.
 * Called from the shutdown_final event after all disk access has completed.
 */
static void
dptshutdown(void *arg, int howto)
{
	dpt_softc_t *dpt;

	dpt = (dpt_softc_t *)arg;

	kprintf("dpt%d: Shutting down (mode %x) HBA. Please wait...\n",
	       dpt->unit, howto);

	/*
	 * What we do for a shutdown, is give the DPT early power loss warning
	 */
	dpt_send_immediate(dpt, NULL, 0, EATA_POWER_OFF_WARN, 0, 0, 0);
	DELAY(1000 * 1000 * 5);
	kprintf("dpt%d: Controller was warned of shutdown and is now "
	       "disabled\n", dpt->unit);
}

/*============================================================================*/

/* NOTE: everything below up to the matching #endif is compiled out. */
#if 0
#ifdef DPT_RESET_HBA

/*
**	Function name : dpt_reset_hba
**
**	Description : Reset the HBA and properly discard all pending work
**	Input : Softc
**	Output : Nothing
*/
static void
dpt_reset_hba(dpt_softc_t *dpt)
{
	eata_ccb_t       *ccb;
	dpt_ccb_t         dccb, *dccbp;
	int               result;
	struct scsi_xfer *xs;

	/* Prepare a control block.
The SCSI command part is immaterial */
	dccb.xs = NULL;
	dccb.flags = 0;
	dccb.state = DPT_CCB_STATE_NEW;
	dccb.std_callback = NULL;
	dccb.wrbuff_callback = NULL;

	ccb = &dccb.eata_ccb;
	ccb->CP_OpCode = EATA_CMD_RESET;
	ccb->SCSI_Reset = 0;
	ccb->HBA_Init = 1;
	ccb->Auto_Req_Sen = 1;
	ccb->cp_id = 0;		/* Should be ignored */
	ccb->DataIn = 1;
	ccb->DataOut = 0;
	ccb->Interpret = 1;
	ccb->reqlen = htonl(sizeof(struct scsi_sense_data));
	ccb->cp_statDMA = htonl(vtophys(&ccb->cp_statDMA));
	ccb->cp_reqDMA = htonl(vtophys(&ccb->cp_reqDMA));
	/* NOTE(review): takes the address of the local pointer, not the CCB;
	 * pre-existing oddity in this dead (#if 0) code. */
	ccb->cp_viraddr = (u_int32_t) & ccb;

	ccb->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
	ccb->cp_scsi_cmd = 0;	/* Should be ignored */

	/* Lock up the submitted queue.  We are very persistant here */
	crit_enter();
	while (dpt->queue_status & DPT_SUBMITTED_QUEUE_ACTIVE) {
		DELAY(100);
	}

	dpt->queue_status |= DPT_SUBMITTED_QUEUE_ACTIVE;
	crit_exit();

	/* Send the RESET message */
	if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
					    EATA_CMD_RESET, 0, 0, 0, 0)) != 0) {
		kprintf("dpt%d: Failed to send the RESET message.\n"
		       " Trying cold boot (ouch!)\n", dpt->unit);


		if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
						    EATA_COLD_BOOT, 0, 0,
						    0, 0)) != 0) {
			panic("dpt%d: Faild to cold boot the HBA\n",
			      dpt->unit);
		}
#ifdef DPT_MEASURE_PERFORMANCE
		dpt->performance.cold_boots++;
#endif /* DPT_MEASURE_PERFORMANCE */
	}

#ifdef DPT_MEASURE_PERFORMANCE
	dpt->performance.warm_starts++;
#endif /* DPT_MEASURE_PERFORMANCE */

	kprintf("dpt%d: Aborting pending requests. O/S should re-submit\n",
	       dpt->unit);

	/* Fail every CCB still on the completed queue back to the kernel. */
	while ((dccbp = TAILQ_FIRST(&dpt->completed_ccbs)) != NULL) {
		struct scsi_xfer *xs = dccbp->xs;

		/* Not all transactions have xs structs */
		if (xs != NULL) {
			/* Tell the kernel proper this did not complete well */
			xs->error |= XS_SELTIMEOUT;
			xs->flags |= SCSI_ITSDONE;
			scsi_done(xs);
		}

		dpt_Qremove_submitted(dpt, dccbp);

		/* Remember, Callbacks are NOT in the standard queue */
		if (dccbp->std_callback != NULL) {
			(dccbp->std_callback)(dpt, dccbp->eata_ccb.cp_channel,
					       dccbp);
		} else {
			crit_enter();
			dpt_Qpush_free(dpt, dccbp);
			crit_exit();
		}
	}

	kprintf("dpt%d: reset done aborting all pending commands\n", dpt->unit);
	dpt->queue_status &= ~DPT_SUBMITTED_QUEUE_ACTIVE;
}

#endif /* DPT_RESET_HBA */

/*
 * Build a Command Block for target mode READ/WRITE BUFFER,
 * with the ``sync'' bit ON.
 *
 * Although the length and offset are 24 bit fields in the command, they cannot
 * exceed 8192 bytes, so we take them as short integers and check their range.
 * If they are senseless, we round them to zero offset, maximum length and
 * complain.
1900 */ 1901 1902 static void 1903 dpt_target_ccb(dpt_softc_t * dpt, int bus, u_int8_t target, u_int8_t lun, 1904 dpt_ccb_t * ccb, int mode, u_int8_t command, 1905 u_int16_t length, u_int16_t offset) 1906 { 1907 eata_ccb_t *cp; 1908 1909 if ((length + offset) > DPT_MAX_TARGET_MODE_BUFFER_SIZE) { 1910 kprintf("dpt%d: Length of %d, and offset of %d are wrong\n", 1911 dpt->unit, length, offset); 1912 length = DPT_MAX_TARGET_MODE_BUFFER_SIZE; 1913 offset = 0; 1914 } 1915 ccb->xs = NULL; 1916 ccb->flags = 0; 1917 ccb->state = DPT_CCB_STATE_NEW; 1918 ccb->std_callback = (ccb_callback) dpt_target_done; 1919 ccb->wrbuff_callback = NULL; 1920 1921 cp = &ccb->eata_ccb; 1922 cp->CP_OpCode = EATA_CMD_DMA_SEND_CP; 1923 cp->SCSI_Reset = 0; 1924 cp->HBA_Init = 0; 1925 cp->Auto_Req_Sen = 1; 1926 cp->cp_id = target; 1927 cp->DataIn = 1; 1928 cp->DataOut = 0; 1929 cp->Interpret = 0; 1930 cp->reqlen = htonl(sizeof(struct scsi_sense_data)); 1931 cp->cp_statDMA = htonl(vtophys(&cp->cp_statDMA)); 1932 cp->cp_reqDMA = htonl(vtophys(&cp->cp_reqDMA)); 1933 cp->cp_viraddr = (u_int32_t) & ccb; 1934 1935 cp->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO; 1936 1937 cp->cp_scsi_cmd = command; 1938 cp->cp_cdb[1] = (u_int8_t) (mode & SCSI_TM_MODE_MASK); 1939 cp->cp_lun = lun; /* Order is important here! */ 1940 cp->cp_cdb[2] = 0x00; /* Buffer Id, only 1 :-( */ 1941 cp->cp_cdb[3] = (length >> 16) & 0xFF; /* Buffer offset MSB */ 1942 cp->cp_cdb[4] = (length >> 8) & 0xFF; 1943 cp->cp_cdb[5] = length & 0xFF; 1944 cp->cp_cdb[6] = (length >> 16) & 0xFF; /* Length MSB */ 1945 cp->cp_cdb[7] = (length >> 8) & 0xFF; 1946 cp->cp_cdb[8] = length & 0xFF; /* Length LSB */ 1947 cp->cp_cdb[9] = 0; /* No sync, no match bits */ 1948 1949 /* 1950 * This could be optimized to live in dpt_register_buffer. 
1951 * We keep it here, just in case the kernel decides to reallocate pages 1952 */ 1953 if (dpt_scatter_gather(dpt, ccb, DPT_RW_BUFFER_SIZE, 1954 dpt->rw_buffer[bus][target][lun])) { 1955 kprintf("dpt%d: Failed to setup Scatter/Gather for " 1956 "Target-Mode buffer\n", dpt->unit); 1957 } 1958 } 1959 1960 /* Setup a target mode READ command */ 1961 1962 static void 1963 dpt_set_target(int redo, dpt_softc_t * dpt, 1964 u_int8_t bus, u_int8_t target, u_int8_t lun, int mode, 1965 u_int16_t length, u_int16_t offset, dpt_ccb_t * ccb) 1966 { 1967 if (dpt->target_mode_enabled) { 1968 crit_enter(); 1969 1970 if (!redo) 1971 dpt_target_ccb(dpt, bus, target, lun, ccb, mode, 1972 SCSI_TM_READ_BUFFER, length, offset); 1973 1974 ccb->transaction_id = ++dpt->commands_processed; 1975 1976 #ifdef DPT_MEASURE_PERFORMANCE 1977 dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++; 1978 ccb->command_started = microtime_now; 1979 #endif 1980 dpt_Qadd_waiting(dpt, ccb); 1981 dpt_sched_queue(dpt); 1982 1983 crit_exit(); 1984 } else { 1985 kprintf("dpt%d: Target Mode Request, but Target Mode is OFF\n", 1986 dpt->unit); 1987 } 1988 } 1989 1990 /* 1991 * Schedule a buffer to be sent to another target. 1992 * The work will be scheduled and the callback provided will be called when 1993 * the work is actually done. 1994 * 1995 * Please NOTE: ``Anyone'' can send a buffer, but only registered clients 1996 * get notified of receipt of buffers. 1997 */ 1998 1999 int 2000 dpt_send_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun, 2001 u_int8_t mode, u_int16_t length, u_int16_t offset, void *data, 2002 buff_wr_done callback) 2003 { 2004 dpt_softc_t *dpt; 2005 dpt_ccb_t *ccb = NULL; 2006 2007 /* This is an external call. 
Be a bit paranoid */
	for (dpt = TAILQ_FIRST(&dpt_softc_list);
	     dpt != NULL;
	     dpt = TAILQ_NEXT(dpt, links)) {
		if (dpt->unit == unit)
			goto valid_unit;
	}

	return (INVALID_UNIT);

valid_unit:

	if (dpt->target_mode_enabled) {
		if ((channel >= dpt->channels) || (target > dpt->max_id) ||
		    (lun > dpt->max_lun)) {
			return (INVALID_SENDER);
		}
		if ((dpt->rw_buffer[channel][target][lun] == NULL) ||
		    (dpt->buffer_receiver[channel][target][lun] == NULL))
			return (NOT_REGISTERED);

		crit_enter();
		/* Process the free list */
		if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
			kprintf("dpt%d ERROR: Cannot allocate any more free CCB's.\n"
			       " Please try later\n",
			       dpt->unit);
			crit_exit();
			return (NO_RESOURCES);
		}
		/* Now grab the newest CCB */
		if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
			crit_exit();
			panic("dpt%d: Got a NULL CCB from pop_free()\n", dpt->unit);
		}
		crit_exit();

		/*
		 * NOTE(review): this copies FROM the target-mode buffer INTO
		 * the caller's `data', yet the function is documented as
		 * sending a buffer to another target — the direction looks
		 * inverted.  Confirm intended semantics before changing.
		 */
		bcopy(dpt->rw_buffer[channel][target][lun] + offset, data, length);
		dpt_target_ccb(dpt, channel, target, lun, ccb, mode,
			       SCSI_TM_WRITE_BUFFER,
			       length, offset);
		ccb->std_callback = (ccb_callback) callback; /* Potential trouble */

		crit_enter();
		ccb->transaction_id = ++dpt->commands_processed;

#ifdef DPT_MEASURE_PERFORMANCE
		dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
		ccb->command_started = microtime_now;
#endif
		dpt_Qadd_waiting(dpt, ccb);
		dpt_sched_queue(dpt);

		crit_exit();
		return (0);
	}
	return (DRIVER_DOWN);
}

/*
 * Completion handler for target-mode READ/WRITE BUFFER CCBs.  Notifies
 * the registered receiver (or wakes a sleeping user command), re-issues
 * the standing READ BUFFER, and parks the CCB back on the free queue.
 */
static void
dpt_target_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
{
	eata_ccb_t *cp;

	cp = &ccb->eata_ccb;

	/*
	 * Remove the CCB from the waiting queue.
	 * We do NOT put it back on the free, etc., queues as it is a special
	 * ccb, owned by the dpt_softc of this unit.
	 */
	crit_enter();
	dpt_Qremove_completed(dpt, ccb);
	crit_exit();

	/* Convenience accessors for the nexus and CDB-encoded offset/length. */
#define br_channel           (ccb->eata_ccb.cp_channel)
#define br_target            (ccb->eata_ccb.cp_id)
#define br_lun               (ccb->eata_ccb.cp_LUN)
#define br_index	     [br_channel][br_target][br_lun]
#define read_buffer_callback (dpt->buffer_receiver br_index )
#define read_buffer	     (dpt->rw_buffer[br_channel][br_target][br_lun])
#define cb(offset)           (ccb->eata_ccb.cp_cdb[offset])
#define br_offset            ((cb(3) << 16) | (cb(4) << 8) | cb(5))
#define br_length            ((cb(6) << 16) | (cb(7) << 8) | cb(8))

	/* Different reasons for being here, you know... */
	switch (ccb->eata_ccb.cp_scsi_cmd) {
	case SCSI_TM_READ_BUFFER:
		if (read_buffer_callback != NULL) {
			/* This is a buffer generated by a kernel process */
			read_buffer_callback(dpt->unit, br_channel,
					     br_target, br_lun,
					     read_buffer,
					     br_offset, br_length);
		} else {
			/*
			 * This is a buffer waited for by a user (sleeping)
			 * command
			 */
			wakeup(ccb);
		}

		/* We ALWAYS re-issue the same command; args are don't-care */
		dpt_set_target(1, 0, 0, 0, 0, 0, 0, 0, 0);
		break;

	case SCSI_TM_WRITE_BUFFER:
		(ccb->wrbuff_callback) (dpt->unit, br_channel, br_target,
					br_offset, br_length,
					br_lun, ccb->status_packet.hba_stat);
		break;
	default:
		kprintf("dpt%d: %s is an unsupported command for target mode\n",
		       dpt->unit, scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd));
	}
	crit_enter();
	dpt->target_ccb[br_channel][br_target][br_lun] = NULL;
	dpt_Qpush_free(dpt, ccb);
	crit_exit();
}


/*
 * Use this function to register a client for a buffer read target operation.
2131 * The function you register will be called every time a buffer is received 2132 * by the target mode code. 2133 */ 2134 dpt_rb_t 2135 dpt_register_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun, 2136 u_int8_t mode, u_int16_t length, u_int16_t offset, 2137 dpt_rec_buff callback, dpt_rb_op_t op) 2138 { 2139 dpt_softc_t *dpt; 2140 dpt_ccb_t *ccb = NULL; 2141 2142 for (dpt = TAILQ_FIRST(&dpt_softc_list); 2143 dpt != NULL; 2144 dpt = TAILQ_NEXT(dpt, links)) { 2145 if (dpt->unit == unit) 2146 goto valid_unit; 2147 } 2148 2149 return (INVALID_UNIT); 2150 2151 valid_unit: 2152 2153 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE) 2154 return (DRIVER_DOWN); 2155 2156 if ((channel > (dpt->channels - 1)) || (target > (dpt->max_id - 1)) || 2157 (lun > (dpt->max_lun - 1))) 2158 return (INVALID_SENDER); 2159 2160 if (dpt->buffer_receiver[channel][target][lun] == NULL) { 2161 if (op == REGISTER_BUFFER) { 2162 /* Assign the requested callback */ 2163 dpt->buffer_receiver[channel][target][lun] = callback; 2164 /* Get a CCB */ 2165 crit_enter(); 2166 2167 /* Process the free list */ 2168 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) { 2169 kprintf("dpt%d ERROR: Cannot allocate any more free CCB's.\n" 2170 " Please try later\n", 2171 dpt->unit); 2172 crit_exit(); 2173 return (NO_RESOURCES); 2174 } 2175 /* Now grab the newest CCB */ 2176 if ((ccb = dpt_Qpop_free(dpt)) == NULL) { 2177 crit_exit(); 2178 panic("dpt%d: Got a NULL CCB from pop_free()\n", 2179 dpt->unit); 2180 } 2181 crit_exit(); 2182 2183 /* Clean up the leftover of the previous tenant */ 2184 ccb->status = DPT_CCB_STATE_NEW; 2185 dpt->target_ccb[channel][target][lun] = ccb; 2186 2187 dpt->rw_buffer[channel][target][lun] = 2188 kmalloc(DPT_RW_BUFFER_SIZE, M_DEVBUF, M_INTWAIT); 2189 dpt_set_target(0, dpt, channel, target, lun, mode, 2190 length, offset, ccb); 2191 return (SUCCESSFULLY_REGISTERED); 2192 } else 2193 return (NOT_REGISTERED); 2194 } else { 2195 if (op == REGISTER_BUFFER) { 2196 
if (dpt->buffer_receiver[channel][target][lun] == callback) 2197 return (ALREADY_REGISTERED); 2198 else 2199 return (REGISTERED_TO_ANOTHER); 2200 } else { 2201 if (dpt->buffer_receiver[channel][target][lun] == callback) { 2202 dpt->buffer_receiver[channel][target][lun] = NULL; 2203 crit_enter(); 2204 dpt_Qpush_free(dpt, ccb); 2205 crit_exit(); 2206 kfree(dpt->rw_buffer[channel][target][lun], M_DEVBUF); 2207 return (SUCCESSFULLY_REGISTERED); 2208 } else 2209 return (INVALID_CALLBACK); 2210 } 2211 2212 } 2213 } 2214 2215 /* Return the state of the blinking DPT LED's */ 2216 u_int8_t 2217 dpt_blinking_led(dpt_softc_t * dpt) 2218 { 2219 int ndx; 2220 u_int32_t state; 2221 u_int32_t previous; 2222 u_int8_t result; 2223 2224 crit_enter(); 2225 2226 result = 0; 2227 2228 for (ndx = 0, state = 0, previous = 0; 2229 (ndx < 10) && (state != previous); 2230 ndx++) { 2231 previous = state; 2232 state = dpt_inl(dpt, 1); 2233 } 2234 2235 if ((state == previous) && (state == DPT_BLINK_INDICATOR)) 2236 result = dpt_inb(dpt, 5); 2237 2238 crit_exit(); 2239 return (result); 2240 } 2241 2242 /* 2243 * Execute a command which did not come from the kernel's SCSI layer. 
 * The only way to map user commands to bus and target is to comply with the
 * standard DPT wire-down scheme:
 */
int
dpt_user_cmd(dpt_softc_t * dpt, eata_pt_t * user_cmd,
	     caddr_t cmdarg, int minor_no)
{
	dpt_ccb_t *ccb;
	void	  *data;
	int	   channel, target, lun;
	int	   huh;
	int	   result;
	int	   submitted;

	data = NULL;
	channel = minor2hba(minor_no);
	target = minor2target(minor_no);
	lun = minor2lun(minor_no);

	if ((channel > (dpt->channels - 1))
	 || (target > dpt->max_id)
	 || (lun > dpt->max_lun))
		return (ENXIO);

	if (target == dpt->sc_scsi_link[channel].adapter_targ) {
		/* This one is for the controller itself */
		if ((user_cmd->eataID[0] != 'E')
		 || (user_cmd->eataID[1] != 'A')
		 || (user_cmd->eataID[2] != 'T')
		 || (user_cmd->eataID[3] != 'A')) {
			return (ENXIO);
		}
	}
	/* Get a DPT CCB, so we can prepare a command */
	crit_enter();

	/* Process the free list */
	if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
		kprintf("dpt%d ERROR: Cannot allocate any more free CCB's.\n"
		       " Please try later\n",
		       dpt->unit);
		crit_exit();
		return (EFAULT);
	}
	/* Now grab the newest CCB */
	if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
		crit_exit();
		panic("dpt%d: Got a NULL CCB from pop_free()\n", dpt->unit);
	} else {
		crit_exit();
		/* Clean up the leftover of the previous tenant */
		ccb->status = DPT_CCB_STATE_NEW;
	}

	bcopy((caddr_t) & user_cmd->command_packet, (caddr_t) & ccb->eata_ccb,
	      sizeof(eata_ccb_t));

	/* We do not want to do user specified scatter/gather.  Why??
	 */
	/* NOTE(review): the popped CCB is not returned to the free queue on
	 * this (or the later EFAULT) error returns — apparent CCB leak. */
	if (ccb->eata_ccb.scatter == 1)
		return (EINVAL);

	ccb->eata_ccb.Auto_Req_Sen = 1;
	ccb->eata_ccb.reqlen = htonl(sizeof(struct scsi_sense_data));
	/* NOTE(review): sizeof(cp_datalen) here looks like it was meant to be
	 * the user-supplied data length — confirm against the ioctl users. */
	ccb->eata_ccb.cp_datalen = htonl(sizeof(ccb->eata_ccb.cp_datalen));
	ccb->eata_ccb.cp_dataDMA = htonl(vtophys(ccb->eata_ccb.cp_dataDMA));
	ccb->eata_ccb.cp_statDMA = htonl(vtophys(&ccb->eata_ccb.cp_statDMA));
	ccb->eata_ccb.cp_reqDMA = htonl(vtophys(&ccb->eata_ccb.cp_reqDMA));
	ccb->eata_ccb.cp_viraddr = (u_int32_t) & ccb;

	if (ccb->eata_ccb.DataIn || ccb->eata_ccb.DataOut) {
		/* Data I/O is involved in this command.  Alocate buffer */
		if (ccb->eata_ccb.cp_datalen > PAGE_SIZE) {
			data = contigmalloc(ccb->eata_ccb.cp_datalen,
					    M_TEMP, M_WAITOK, 0, ~0,
					    ccb->eata_ccb.cp_datalen,
					    0x10000);
		} else {
			data = kmalloc(ccb->eata_ccb.cp_datalen, M_TEMP,
				       M_WAITOK);
		}

		if (data == NULL) {
			kprintf("dpt%d: Cannot allocate %d bytes "
			       "for EATA command\n", dpt->unit,
			       ccb->eata_ccb.cp_datalen);
			return (EFAULT);
		}
#define usr_cmd_DMA (caddr_t)user_cmd->command_packet.cp_dataDMA
		if (ccb->eata_ccb.DataIn == 1) {
			/* NOTE(review): copyin() returns an errno value
			 * (e.g. EFAULT), not -1; this check likely never
			 * fires, and `data' leaks on the error path. */
			if (copyin(usr_cmd_DMA,
				   data, ccb->eata_ccb.cp_datalen) == -1)
				return (EFAULT);
		}
	} else {
		/* No data I/O involved here.  Make sure the DPT knows that */
		ccb->eata_ccb.cp_datalen = 0;
		data = NULL;
	}

	if (ccb->eata_ccb.FWNEST == 1)
		ccb->eata_ccb.FWNEST = 0;

	if (ccb->eata_ccb.cp_datalen != 0) {
		if (dpt_scatter_gather(dpt, ccb, ccb->eata_ccb.cp_datalen,
				       data) != 0) {
			if (data != NULL)
				kfree(data, M_TEMP);
			return (EFAULT);
		}
	}
	/**
	 * We are required to quiet a SCSI bus.
	 * since we do not queue comands on a bus basis,
	 * we wait for ALL commands on a controller to complete.
	 * In the mean time, sched_queue() will not schedule new commands.
	 */
	if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
	 && (ccb->eata_ccb.cp_cdb[2] == BUS_QUIET)) {
		/* We wait for ALL traffic for this HBa to subside */
		crit_enter();
		dpt->state |= DPT_HA_QUIET;
		crit_exit();

		while ((submitted = dpt->submitted_ccbs_count) != 0) {
			huh = tsleep((void *) dpt, PCATCH, "dptqt", 100 * hz);
			switch (huh) {
			case 0:
				/* Wakeup call received */
				break;
			case EWOULDBLOCK:
				/* Timer Expired */
				break;
			default:
				/* anything else */
				break;
			}
		}
	}
	/* Resume normal operation */
	if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
	 && (ccb->eata_ccb.cp_cdb[2] == BUS_UNQUIET)) {
		crit_enter();
		dpt->state &= ~DPT_HA_QUIET;
		crit_exit();
	}
	/**
	 * Schedule the command and submit it.
	 * We bypass dpt_sched_queue, as it will block on DPT_HA_QUIET
	 */
	ccb->xs = NULL;
	ccb->flags = 0;
	ccb->eata_ccb.Auto_Req_Sen = 1;	/* We always want this feature */

	ccb->transaction_id = ++dpt->commands_processed;
	ccb->std_callback = (ccb_callback) dpt_user_cmd_done;
	/* NOTE(review): stores the address of the local parameter cell, not
	 * the caller's pointer value — presumably `cmdarg' was intended. */
	ccb->result = (u_int32_t) & cmdarg;
	ccb->data = data;

#ifdef DPT_MEASURE_PERFORMANCE
	++dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd];
	ccb->command_started = microtime_now;
#endif
	crit_enter();
	dpt_Qadd_waiting(dpt, ccb);
	crit_exit();

	dpt_sched_queue(dpt);

	/* Wait for the command to complete */
	(void) tsleep((void *) ccb, PCATCH, "dptucw", 100 * hz);

	/* Free allocated memory */
	if (data != NULL)
		kfree(data, M_TEMP);

	return (0);
}

/*
 * Completion callback for dpt_user_cmd(): copies results back out to the
 * sleeping ioctl caller.  (Definition continues beyond this point.)
 */
static void
dpt_user_cmd_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
{
	u_int32_t result;
	caddr_t   cmd_arg;

	crit_enter();

	/**
	 * If Auto Request Sense is on, copyout the sense struct
	 */
#define usr_pckt_DMA
(caddr_t)(intptr_t)ntohl(ccb->eata_ccb.cp_reqDMA) 2434 #define usr_pckt_len ntohl(ccb->eata_ccb.cp_datalen) 2435 if (ccb->eata_ccb.Auto_Req_Sen == 1) { 2436 if (copyout((caddr_t) & ccb->sense_data, usr_pckt_DMA, 2437 sizeof(struct scsi_sense_data))) { 2438 ccb->result = EFAULT; 2439 dpt_Qpush_free(dpt, ccb); 2440 crit_exit(); 2441 wakeup(ccb); 2442 return; 2443 } 2444 } 2445 /* If DataIn is on, copyout the data */ 2446 if ((ccb->eata_ccb.DataIn == 1) 2447 && (ccb->status_packet.hba_stat == HA_NO_ERROR)) { 2448 if (copyout(ccb->data, usr_pckt_DMA, usr_pckt_len)) { 2449 dpt_Qpush_free(dpt, ccb); 2450 ccb->result = EFAULT; 2451 2452 crit_exit(); 2453 wakeup(ccb); 2454 return; 2455 } 2456 } 2457 /* Copyout the status */ 2458 result = ccb->status_packet.hba_stat; 2459 cmd_arg = (caddr_t) ccb->result; 2460 2461 if (copyout((caddr_t) & result, cmd_arg, sizeof(result))) { 2462 dpt_Qpush_free(dpt, ccb); 2463 ccb->result = EFAULT; 2464 crit_exit(); 2465 wakeup(ccb); 2466 return; 2467 } 2468 /* Put the CCB back in the freelist */ 2469 ccb->state |= DPT_CCB_STATE_COMPLETED; 2470 dpt_Qpush_free(dpt, ccb); 2471 2472 /* Free allocated memory */ 2473 crit_exit(); 2474 return; 2475 } 2476 2477 #ifdef DPT_HANDLE_TIMEOUTS 2478 /** 2479 * This function walks down the SUBMITTED queue. 2480 * Every request that is too old gets aborted and marked. 2481 * Since the DPT will complete (interrupt) immediately (what does that mean?), 2482 * We just walk the list, aborting old commands and marking them as such. 2483 * The dpt_complete function will get rid of the that were interrupted in the 2484 * normal manner. 2485 * 2486 * This function needs to run at splcam(), as it interacts with the submitted 2487 * queue, as well as the completed and free queues. Just like dpt_intr() does. 2488 * To run it at any ISPL other than that of dpt_intr(), will mean that dpt_intr 2489 * willbe able to pre-empt it, grab a transaction in progress (towards 2490 * destruction) and operate on it. 
The state of this transaction will be not 2491 * very clear. 2492 * The only other option, is to lock it only as long as necessary but have 2493 * dpt_intr() spin-wait on it. In a UP environment this makes no sense and in 2494 * a SMP environment, the advantage is dubvious for a function that runs once 2495 * every ten seconds for few microseconds and, on systems with healthy 2496 * hardware, does not do anything anyway. 2497 */ 2498 2499 static void 2500 dpt_handle_timeouts(dpt_softc_t * dpt) 2501 { 2502 dpt_ccb_t *ccb; 2503 2504 crit_enter(); 2505 2506 if (dpt->state & DPT_HA_TIMEOUTS_ACTIVE) { 2507 kprintf("dpt%d WARNING: Timeout Handling Collision\n", 2508 dpt->unit); 2509 crit_exit(); 2510 return; 2511 } 2512 dpt->state |= DPT_HA_TIMEOUTS_ACTIVE; 2513 2514 /* Loop through the entire submitted queue, looking for lost souls */ 2515 for (ccb = TAILQ_FIRST(&dpt->submitted_ccbs); 2516 ccb != NULL; 2517 ccb = TAILQ_NEXT(ccb, links)) { 2518 struct scsi_xfer *xs; 2519 u_int32_t age, max_age; 2520 2521 xs = ccb->xs; 2522 age = dpt_time_delta(ccb->command_started, microtime_now); 2523 2524 #define TenSec 10000000 2525 2526 if (xs == NULL) { /* Local, non-kernel call */ 2527 max_age = TenSec; 2528 } else { 2529 max_age = (((xs->timeout * (dpt->submitted_ccbs_count 2530 + DPT_TIMEOUT_FACTOR)) 2531 > TenSec) 2532 ? (xs->timeout * (dpt->submitted_ccbs_count 2533 + DPT_TIMEOUT_FACTOR)) 2534 : TenSec); 2535 } 2536 2537 /* 2538 * If a transaction is marked lost and is TWICE as old as we 2539 * care, then, and only then do we destroy it! 
2540 */ 2541 if (ccb->state & DPT_CCB_STATE_MARKED_LOST) { 2542 /* Remember who is next */ 2543 if (age > (max_age * 2)) { 2544 dpt_Qremove_submitted(dpt, ccb); 2545 ccb->state &= ~DPT_CCB_STATE_MARKED_LOST; 2546 ccb->state |= DPT_CCB_STATE_ABORTED; 2547 #define cmd_name scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd) 2548 if (ccb->retries++ > DPT_RETRIES) { 2549 kprintf("dpt%d ERROR: Destroying stale " 2550 "%d (%s)\n" 2551 " on " 2552 "c%db%dt%du%d (%d/%d)\n", 2553 dpt->unit, ccb->transaction_id, 2554 cmd_name, 2555 dpt->unit, 2556 ccb->eata_ccb.cp_channel, 2557 ccb->eata_ccb.cp_id, 2558 ccb->eata_ccb.cp_LUN, age, 2559 ccb->retries); 2560 #define send_ccb &ccb->eata_ccb 2561 #define ESA EATA_SPECIFIC_ABORT 2562 (void) dpt_send_immediate(dpt, 2563 send_ccb, 2564 ESA, 2565 0, 0); 2566 dpt_Qpush_free(dpt, ccb); 2567 2568 /* The SCSI layer should re-try */ 2569 xs->error |= XS_TIMEOUT; 2570 xs->flags |= SCSI_ITSDONE; 2571 scsi_done(xs); 2572 } else { 2573 kprintf("dpt%d ERROR: Stale %d (%s) on " 2574 "c%db%dt%du%d (%d)\n" 2575 " gets another " 2576 "chance(%d/%d)\n", 2577 dpt->unit, ccb->transaction_id, 2578 cmd_name, 2579 dpt->unit, 2580 ccb->eata_ccb.cp_channel, 2581 ccb->eata_ccb.cp_id, 2582 ccb->eata_ccb.cp_LUN, 2583 age, ccb->retries, DPT_RETRIES); 2584 2585 dpt_Qpush_waiting(dpt, ccb); 2586 dpt_sched_queue(dpt); 2587 } 2588 } 2589 } else { 2590 /* 2591 * This is a transaction that is not to be destroyed 2592 * (yet) But it is too old for our liking. We wait as 2593 * long as the upper layer thinks. Not really, we 2594 * multiply that by the number of commands in the 2595 * submitted queue + 1. 
2596 */ 2597 if (!(ccb->state & DPT_CCB_STATE_MARKED_LOST) && 2598 (age != ~0) && (age > max_age)) { 2599 kprintf("dpt%d ERROR: Marking %d (%s) on " 2600 "c%db%dt%du%d \n" 2601 " as late after %dusec\n", 2602 dpt->unit, ccb->transaction_id, 2603 cmd_name, 2604 dpt->unit, ccb->eata_ccb.cp_channel, 2605 ccb->eata_ccb.cp_id, 2606 ccb->eata_ccb.cp_LUN, age); 2607 ccb->state |= DPT_CCB_STATE_MARKED_LOST; 2608 } 2609 } 2610 } 2611 2612 dpt->state &= ~DPT_HA_TIMEOUTS_ACTIVE; 2613 crit_exit(); 2614 } 2615 2616 #endif /* DPT_HANDLE_TIMEOUTS */ 2617 2618 #endif 2619