1 /*- 2 * Copyright (c) 2000, 2001 Michael Smith 3 * Copyright (c) 2000 BSDi 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
26 * 27 * $FreeBSD: src/sys/dev/mly/mly.c,v 1.3.2.3 2001/03/05 20:17:24 msmith Exp $ 28 * $DragonFly: src/sys/dev/raid/mly/mly.c,v 1.17 2006/12/22 23:26:24 swildner Exp $ 29 */ 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/malloc.h> 34 #include <sys/kernel.h> 35 #include <sys/bus.h> 36 #include <sys/conf.h> 37 #include <sys/device.h> 38 #include <sys/ctype.h> 39 #include <sys/ioccom.h> 40 #include <sys/stat.h> 41 #include <sys/rman.h> 42 #include <sys/thread2.h> 43 44 #include <bus/cam/scsi/scsi_all.h> 45 46 #include "mlyreg.h" 47 #include "mlyio.h" 48 #include "mlyvar.h" 49 #define MLY_DEFINE_TABLES 50 #include "mly_tables.h" 51 52 static int mly_get_controllerinfo(struct mly_softc *sc); 53 static void mly_scan_devices(struct mly_softc *sc); 54 static void mly_rescan_btl(struct mly_softc *sc, int bus, int target); 55 static void mly_complete_rescan(struct mly_command *mc); 56 static int mly_get_eventstatus(struct mly_softc *sc); 57 static int mly_enable_mmbox(struct mly_softc *sc); 58 static int mly_flush(struct mly_softc *sc); 59 static int mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, 60 size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length); 61 static void mly_fetch_event(struct mly_softc *sc); 62 static void mly_complete_event(struct mly_command *mc); 63 static void mly_process_event(struct mly_softc *sc, struct mly_event *me); 64 static void mly_periodic(void *data); 65 66 static int mly_immediate_command(struct mly_command *mc); 67 static int mly_start(struct mly_command *mc); 68 static void mly_complete(void *context, int pending); 69 70 static void mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); 71 static int mly_alloc_commands(struct mly_softc *sc); 72 static void mly_map_command(struct mly_command *mc); 73 static void mly_unmap_command(struct mly_command *mc); 74 75 static int mly_fwhandshake(struct mly_softc *sc); 76 77 static void 
mly_describe_controller(struct mly_softc *sc); 78 #ifdef MLY_DEBUG 79 static void mly_printstate(struct mly_softc *sc); 80 static void mly_print_command(struct mly_command *mc); 81 static void mly_print_packet(struct mly_command *mc); 82 static void mly_panic(struct mly_softc *sc, char *reason); 83 #endif 84 void mly_print_controller(int controller); 85 86 static d_open_t mly_user_open; 87 static d_close_t mly_user_close; 88 static d_ioctl_t mly_user_ioctl; 89 static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc); 90 static int mly_user_health(struct mly_softc *sc, struct mly_user_health *uh); 91 92 #define MLY_CDEV_MAJOR 158 93 94 static struct dev_ops mly_ops = { 95 { "mly", MLY_CDEV_MAJOR, 0 }, 96 .d_open = mly_user_open, 97 .d_close = mly_user_close, 98 .d_ioctl = mly_user_ioctl, 99 }; 100 101 /******************************************************************************** 102 ******************************************************************************** 103 Device Interface 104 ******************************************************************************** 105 ********************************************************************************/ 106 107 /******************************************************************************** 108 * Initialise the controller and softc 109 */ 110 int 111 mly_attach(struct mly_softc *sc) 112 { 113 int error; 114 115 debug_called(1); 116 117 callout_init(&sc->mly_periodic); 118 119 /* 120 * Initialise per-controller queues. 121 */ 122 mly_initq_free(sc); 123 mly_initq_ready(sc); 124 mly_initq_busy(sc); 125 mly_initq_complete(sc); 126 127 #if defined(__FreeBSD__) && __FreeBSD_version >= 500005 128 /* 129 * Initialise command-completion task. 
130 */ 131 TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc); 132 #endif 133 134 /* disable interrupts before we start talking to the controller */ 135 MLY_MASK_INTERRUPTS(sc); 136 137 /* 138 * Wait for the controller to come ready, handshake with the firmware if required. 139 * This is typically only necessary on platforms where the controller BIOS does not 140 * run. 141 */ 142 if ((error = mly_fwhandshake(sc))) 143 return(error); 144 145 /* 146 * Allocate command buffers 147 */ 148 if ((error = mly_alloc_commands(sc))) 149 return(error); 150 151 /* 152 * Obtain controller feature information 153 */ 154 if ((error = mly_get_controllerinfo(sc))) 155 return(error); 156 157 /* 158 * Get the current event counter for health purposes, populate the initial 159 * health status buffer. 160 */ 161 if ((error = mly_get_eventstatus(sc))) 162 return(error); 163 164 /* 165 * Enable memory-mailbox mode 166 */ 167 if ((error = mly_enable_mmbox(sc))) 168 return(error); 169 170 /* 171 * Attach to CAM. 172 */ 173 if ((error = mly_cam_attach(sc))) 174 return(error); 175 176 /* 177 * Print a little information about the controller 178 */ 179 mly_describe_controller(sc); 180 181 /* 182 * Mark all attached devices for rescan 183 */ 184 mly_scan_devices(sc); 185 186 /* 187 * Instigate the first status poll immediately. Rescan completions won't 188 * happen until interrupts are enabled, which should still be before 189 * the SCSI subsystem gets to us. (XXX assuming CAM and interrupt-driven 190 * discovery here...) 191 */ 192 mly_periodic((void *)sc); 193 194 /* 195 * Create the control device. 
196 */ 197 dev_ops_add(&mly_ops, -1, device_get_unit(sc->mly_dev)); 198 sc->mly_dev_t = make_dev(&mly_ops, device_get_unit(sc->mly_dev), 199 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, 200 "mly%d", device_get_unit(sc->mly_dev)); 201 sc->mly_dev_t->si_drv1 = sc; 202 203 /* enable interrupts now */ 204 MLY_UNMASK_INTERRUPTS(sc); 205 206 return(0); 207 } 208 209 /******************************************************************************** 210 * Bring the controller to a state where it can be safely left alone. 211 */ 212 void 213 mly_detach(struct mly_softc *sc) 214 { 215 216 debug_called(1); 217 218 /* kill the periodic event */ 219 callout_stop(&sc->mly_periodic); 220 221 sc->mly_state |= MLY_STATE_SUSPEND; 222 223 /* flush controller */ 224 mly_printf(sc, "flushing cache..."); 225 kprintf("%s\n", mly_flush(sc) ? "failed" : "done"); 226 227 MLY_MASK_INTERRUPTS(sc); 228 } 229 230 /******************************************************************************** 231 ******************************************************************************** 232 Command Wrappers 233 ******************************************************************************** 234 ********************************************************************************/ 235 236 /******************************************************************************** 237 * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc. 
238 */ 239 static int 240 mly_get_controllerinfo(struct mly_softc *sc) 241 { 242 struct mly_command_ioctl mci; 243 u_int8_t status; 244 int error; 245 246 debug_called(1); 247 248 if (sc->mly_controllerinfo != NULL) 249 kfree(sc->mly_controllerinfo, M_DEVBUF); 250 251 /* build the getcontrollerinfo ioctl and send it */ 252 bzero(&mci, sizeof(mci)); 253 sc->mly_controllerinfo = NULL; 254 mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO; 255 if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo, sizeof(*sc->mly_controllerinfo), 256 &status, NULL, NULL))) 257 return(error); 258 if (status != 0) 259 return(EIO); 260 261 if (sc->mly_controllerparam != NULL) 262 kfree(sc->mly_controllerparam, M_DEVBUF); 263 264 /* build the getcontrollerparameter ioctl and send it */ 265 bzero(&mci, sizeof(mci)); 266 sc->mly_controllerparam = NULL; 267 mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER; 268 if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam, sizeof(*sc->mly_controllerparam), 269 &status, NULL, NULL))) 270 return(error); 271 if (status != 0) 272 return(EIO); 273 274 return(0); 275 } 276 277 /******************************************************************************** 278 * Schedule all possible devices for a rescan. 279 * 280 */ 281 static void 282 mly_scan_devices(struct mly_softc *sc) 283 { 284 int bus, target, nchn; 285 286 debug_called(1); 287 288 /* 289 * Clear any previous BTL information. 290 */ 291 bzero(&sc->mly_btl, sizeof(sc->mly_btl)); 292 293 /* 294 * Mark all devices as requiring a rescan, and let the early periodic scan collect them. 
295 */ 296 nchn = sc->mly_controllerinfo->physical_channels_present + 297 sc->mly_controllerinfo->virtual_channels_present; 298 for (bus = 0; bus < nchn; bus++) 299 for (target = 0; target < MLY_MAX_TARGETS; target++) 300 sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN; 301 302 } 303 304 /******************************************************************************** 305 * Rescan a device, possibly as a consequence of getting an event which suggests 306 * that it may have changed. 307 */ 308 static void 309 mly_rescan_btl(struct mly_softc *sc, int bus, int target) 310 { 311 struct mly_command *mc; 312 struct mly_command_ioctl *mci; 313 314 debug_called(2); 315 316 /* get a command */ 317 mc = NULL; 318 if (mly_alloc_command(sc, &mc)) 319 return; /* we'll be retried soon */ 320 321 /* set up the data buffer */ 322 mc->mc_data = kmalloc(sizeof(union mly_devinfo), M_DEVBUF, M_INTWAIT | M_ZERO); 323 mc->mc_flags |= MLY_CMD_DATAIN; 324 mc->mc_complete = mly_complete_rescan; 325 326 sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN; 327 328 /* 329 * Build the ioctl. 330 * 331 * At this point we are committed to sending this request, as it 332 * will be the only one constructed for this particular update. 
333 */ 334 mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl; 335 mci->opcode = MDACMD_IOCTL; 336 mci->addr.phys.controller = 0; 337 mci->timeout.value = 30; 338 mci->timeout.scale = MLY_TIMEOUT_SECONDS; 339 if (bus >= sc->mly_controllerinfo->physical_channels_present) { 340 mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid); 341 mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID; 342 mci->addr.log.logdev = ((bus - sc->mly_controllerinfo->physical_channels_present) * MLY_MAX_TARGETS) 343 + target; 344 debug(2, "logical device %d", mci->addr.log.logdev); 345 } else { 346 mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid); 347 mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID; 348 mci->addr.phys.lun = 0; 349 mci->addr.phys.target = target; 350 mci->addr.phys.channel = bus; 351 debug(2, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target); 352 } 353 354 /* 355 * Use the ready queue to get this command dispatched. 356 */ 357 mly_enqueue_ready(mc); 358 mly_startio(sc); 359 } 360 361 /******************************************************************************** 362 * Handle the completion of a rescan operation 363 */ 364 static void 365 mly_complete_rescan(struct mly_command *mc) 366 { 367 struct mly_softc *sc = mc->mc_sc; 368 struct mly_ioctl_getlogdevinfovalid *ldi; 369 struct mly_ioctl_getphysdevinfovalid *pdi; 370 int bus, target; 371 372 debug_called(2); 373 374 /* iff the command completed OK, we should use the result to update our data */ 375 if (mc->mc_status == 0) { 376 if (mc->mc_length == sizeof(*ldi)) { 377 ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data; 378 bus = MLY_LOGDEV_BUS(sc, ldi->logical_device_number); 379 target = MLY_LOGDEV_TARGET(ldi->logical_device_number); 380 sc->mly_btl[bus][target].mb_flags = MLY_BTL_LOGICAL; /* clears all other flags */ 381 sc->mly_btl[bus][target].mb_type = ldi->raid_level; 382 sc->mly_btl[bus][target].mb_state = ldi->state; 383 debug(2, "BTL 
rescan for %d returns %s, %s", ldi->logical_device_number, 384 mly_describe_code(mly_table_device_type, ldi->raid_level), 385 mly_describe_code(mly_table_device_state, ldi->state)); 386 } else if (mc->mc_length == sizeof(*pdi)) { 387 pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data; 388 bus = pdi->channel; 389 target = pdi->target; 390 sc->mly_btl[bus][target].mb_flags = MLY_BTL_PHYSICAL; /* clears all other flags */ 391 sc->mly_btl[bus][target].mb_type = MLY_DEVICE_TYPE_PHYSICAL; 392 sc->mly_btl[bus][target].mb_state = pdi->state; 393 sc->mly_btl[bus][target].mb_speed = pdi->speed; 394 sc->mly_btl[bus][target].mb_width = pdi->width; 395 if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED) 396 sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED; 397 debug(2, "BTL rescan for %d:%d returns %s", bus, target, 398 mly_describe_code(mly_table_device_state, pdi->state)); 399 } else { 400 mly_printf(sc, "BTL rescan result corrupted\n"); 401 } 402 } else { 403 /* 404 * A request sent for a device beyond the last device present will fail. 405 * We don't care about this, so we do nothing about it. 406 */ 407 } 408 kfree(mc->mc_data, M_DEVBUF); 409 mly_release_command(mc); 410 } 411 412 /******************************************************************************** 413 * Get the current health status and set the 'next event' counter to suit. 
414 */ 415 static int 416 mly_get_eventstatus(struct mly_softc *sc) 417 { 418 struct mly_command_ioctl mci; 419 struct mly_health_status *mh; 420 u_int8_t status; 421 int error; 422 423 /* build the gethealthstatus ioctl and send it */ 424 bzero(&mci, sizeof(mci)); 425 mh = NULL; 426 mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS; 427 428 if ((error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL))) 429 return(error); 430 if (status != 0) 431 return(EIO); 432 433 /* get the event counter */ 434 sc->mly_event_change = mh->change_counter; 435 sc->mly_event_waiting = mh->next_event; 436 sc->mly_event_counter = mh->next_event; 437 438 /* save the health status into the memory mailbox */ 439 bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh)); 440 441 debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event); 442 443 kfree(mh, M_DEVBUF); 444 return(0); 445 } 446 447 /******************************************************************************** 448 * Enable the memory mailbox mode. 
449 */ 450 static int 451 mly_enable_mmbox(struct mly_softc *sc) 452 { 453 struct mly_command_ioctl mci; 454 u_int8_t *sp, status; 455 int error; 456 457 debug_called(1); 458 459 /* build the ioctl and send it */ 460 bzero(&mci, sizeof(mci)); 461 mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX; 462 /* set buffer addresses */ 463 mci.param.setmemorymailbox.command_mailbox_physaddr = 464 sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command); 465 mci.param.setmemorymailbox.status_mailbox_physaddr = 466 sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status); 467 mci.param.setmemorymailbox.health_buffer_physaddr = 468 sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health); 469 470 /* set buffer sizes - abuse of data_size field is revolting */ 471 sp = (u_int8_t *)&mci.data_size; 472 sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024); 473 sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024; 474 mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024; 475 476 debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d", sc->mly_mmbox, 477 mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0], 478 mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1], 479 mci.param.setmemorymailbox.health_buffer_physaddr, 480 mci.param.setmemorymailbox.health_buffer_size); 481 482 if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL))) 483 return(error); 484 if (status != 0) 485 return(EIO); 486 sc->mly_state |= MLY_STATE_MMBOX_ACTIVE; 487 debug(1, "memory mailbox active"); 488 return(0); 489 } 490 491 /******************************************************************************** 492 * Flush all pending I/O from the controller. 
493 */ 494 static int 495 mly_flush(struct mly_softc *sc) 496 { 497 struct mly_command_ioctl mci; 498 u_int8_t status; 499 int error; 500 501 debug_called(1); 502 503 /* build the ioctl */ 504 bzero(&mci, sizeof(mci)); 505 mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA; 506 mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER; 507 508 /* pass it off to the controller */ 509 if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL))) 510 return(error); 511 512 return((status == 0) ? 0 : EIO); 513 } 514 515 /******************************************************************************** 516 * Perform an ioctl command. 517 * 518 * If (data) is not NULL, the command requires data transfer. If (*data) is NULL 519 * the command requires data transfer from the controller, and we will allocate 520 * a buffer for it. If (*data) is not NULL, the command requires data transfer 521 * to the controller. 522 * 523 * XXX passing in the whole ioctl structure is ugly. Better ideas? 524 * 525 * XXX we don't even try to handle the case where datasize > 4k. We should. 
526 */ 527 static int 528 mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize, 529 u_int8_t *status, void *sense_buffer, size_t *sense_length) 530 { 531 struct mly_command *mc; 532 struct mly_command_ioctl *mci; 533 int error; 534 535 debug_called(1); 536 537 mc = NULL; 538 if (mly_alloc_command(sc, &mc)) { 539 error = ENOMEM; 540 goto out; 541 } 542 543 /* copy the ioctl structure, but save some important fields and then fixup */ 544 mci = &mc->mc_packet->ioctl; 545 ioctl->sense_buffer_address = mci->sense_buffer_address; 546 ioctl->maximum_sense_size = mci->maximum_sense_size; 547 *mci = *ioctl; 548 mci->opcode = MDACMD_IOCTL; 549 mci->timeout.value = 30; 550 mci->timeout.scale = MLY_TIMEOUT_SECONDS; 551 552 /* handle the data buffer */ 553 if (data != NULL) { 554 if (*data == NULL) { 555 /* allocate data buffer */ 556 mc->mc_data = kmalloc(datasize, M_DEVBUF, M_INTWAIT); 557 mc->mc_flags |= MLY_CMD_DATAIN; 558 } else { 559 mc->mc_data = *data; 560 mc->mc_flags |= MLY_CMD_DATAOUT; 561 } 562 mc->mc_length = datasize; 563 mc->mc_packet->generic.data_size = datasize; 564 } 565 566 /* run the command */ 567 if ((error = mly_immediate_command(mc))) 568 goto out; 569 570 /* clean up and return any data */ 571 *status = mc->mc_status; 572 if ((mc->mc_sense > 0) && (sense_buffer != NULL)) { 573 bcopy(mc->mc_packet, sense_buffer, mc->mc_sense); 574 *sense_length = mc->mc_sense; 575 goto out; 576 } 577 578 /* should we return a data pointer? */ 579 if ((data != NULL) && (*data == NULL)) 580 *data = mc->mc_data; 581 582 /* command completed OK */ 583 error = 0; 584 585 out: 586 if (mc != NULL) { 587 /* do we need to free a data buffer we allocated? */ 588 if (error && (mc->mc_data != NULL) && (*data == NULL)) 589 kfree(mc->mc_data, M_DEVBUF); 590 mly_release_command(mc); 591 } 592 return(error); 593 } 594 595 /******************************************************************************** 596 * Fetch one event from the controller. 
597 */ 598 static void 599 mly_fetch_event(struct mly_softc *sc) 600 { 601 struct mly_command *mc; 602 struct mly_command_ioctl *mci; 603 u_int32_t event; 604 605 debug_called(2); 606 607 /* get a command */ 608 mc = NULL; 609 if (mly_alloc_command(sc, &mc)) 610 return; /* we'll get retried the next time a command completes */ 611 612 /* set up the data buffer */ 613 mc->mc_data = kmalloc(sizeof(struct mly_event), M_DEVBUF, M_INTWAIT|M_ZERO); 614 mc->mc_length = sizeof(struct mly_event); 615 mc->mc_flags |= MLY_CMD_DATAIN; 616 mc->mc_complete = mly_complete_event; 617 618 /* 619 * Get an event number to fetch. It's possible that we've raced with another 620 * context for the last event, in which case there will be no more events. 621 */ 622 crit_enter(); 623 if (sc->mly_event_counter == sc->mly_event_waiting) { 624 mly_release_command(mc); 625 crit_exit(); 626 return; 627 } 628 event = sc->mly_event_counter++; 629 crit_exit(); 630 631 /* 632 * Build the ioctl. 633 * 634 * At this point we are committed to sending this request, as it 635 * will be the only one constructed for this particular event number. 636 */ 637 mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl; 638 mci->opcode = MDACMD_IOCTL; 639 mci->data_size = sizeof(struct mly_event); 640 mci->addr.phys.lun = (event >> 16) & 0xff; 641 mci->addr.phys.target = (event >> 24) & 0xff; 642 mci->addr.phys.channel = 0; 643 mci->addr.phys.controller = 0; 644 mci->timeout.value = 30; 645 mci->timeout.scale = MLY_TIMEOUT_SECONDS; 646 mci->sub_ioctl = MDACIOCTL_GETEVENT; 647 mci->param.getevent.sequence_number_low = event & 0xffff; 648 649 debug(2, "fetch event %u", event); 650 651 /* 652 * Use the ready queue to get this command dispatched. 653 */ 654 mly_enqueue_ready(mc); 655 mly_startio(sc); 656 } 657 658 /******************************************************************************** 659 * Handle the completion of an event poll. 
660 * 661 * Note that we don't actually have to instigate another poll; the completion of 662 * this command will trigger that if there are any more events to poll for. 663 */ 664 static void 665 mly_complete_event(struct mly_command *mc) 666 { 667 struct mly_softc *sc = mc->mc_sc; 668 struct mly_event *me = (struct mly_event *)mc->mc_data; 669 670 debug_called(2); 671 672 /* 673 * If the event was successfully fetched, process it. 674 */ 675 if (mc->mc_status == SCSI_STATUS_OK) { 676 mly_process_event(sc, me); 677 kfree(me, M_DEVBUF); 678 } 679 mly_release_command(mc); 680 } 681 682 /******************************************************************************** 683 * Process a controller event. 684 */ 685 static void 686 mly_process_event(struct mly_softc *sc, struct mly_event *me) 687 { 688 struct scsi_sense_data *ssd = (struct scsi_sense_data *)&me->sense[0]; 689 char *fp, *tp; 690 int bus, target, event, class, action; 691 692 /* 693 * Errors can be reported using vendor-unique sense data. In this case, the 694 * event code will be 0x1c (Request sense data present), the sense key will 695 * be 0x09 (vendor specific), the MSB of the ASC will be set, and the 696 * actual event code will be a 16-bit value comprised of the ASCQ (low byte) 697 * and low seven bits of the ASC (low seven bits of the high byte). 698 */ 699 if ((me->code == 0x1c) && 700 ((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) && 701 (ssd->add_sense_code & 0x80)) { 702 event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual; 703 } else { 704 event = me->code; 705 } 706 707 /* look up event, get codes */ 708 fp = mly_describe_code(mly_table_event, event); 709 710 debug(2, "Event %d code 0x%x", me->sequence_number, me->code); 711 712 /* quiet event? */ 713 class = fp[0]; 714 if (isupper(class) && bootverbose) 715 class = tolower(class); 716 717 /* get action code, text string */ 718 action = fp[1]; 719 tp = &fp[2]; 720 721 /* 722 * Print some information about the event. 
723 * 724 * This code uses a table derived from the corresponding portion of the Linux 725 * driver, and thus the parser is very similar. 726 */ 727 switch(class) { 728 case 'p': /* error on physical device */ 729 mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp); 730 if (action == 'r') 731 sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN; 732 break; 733 case 'l': /* error on logical unit */ 734 case 'm': /* message about logical unit */ 735 bus = MLY_LOGDEV_BUS(sc, me->lun); 736 target = MLY_LOGDEV_TARGET(me->lun); 737 mly_name_device(sc, bus, target); 738 mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp); 739 if (action == 'r') 740 sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN; 741 break; 742 break; 743 case 's': /* report of sense data */ 744 if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) || 745 (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) && 746 (ssd->add_sense_code == 0x04) && 747 ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02)))) 748 break; /* ignore NO_SENSE or NOT_READY in one case */ 749 750 mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp); 751 mly_printf(sc, " sense key %d asc %02x ascq %02x\n", 752 ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual); 753 mly_printf(sc, " info %4D csi %4D\n", ssd->info, "", ssd->cmd_spec_info, ""); 754 if (action == 'r') 755 sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN; 756 break; 757 case 'e': 758 mly_printf(sc, tp, me->target, me->lun); 759 break; 760 case 'c': 761 mly_printf(sc, "controller %s\n", tp); 762 break; 763 case '?': 764 mly_printf(sc, "%s - %d\n", tp, me->code); 765 break; 766 default: /* probably a 'noisy' event being ignored */ 767 break; 768 } 769 } 770 771 /******************************************************************************** 772 * Perform periodic activities. 
773 */ 774 static void 775 mly_periodic(void *data) 776 { 777 struct mly_softc *sc = (struct mly_softc *)data; 778 int nchn, bus, target; 779 780 debug_called(2); 781 782 /* 783 * Scan devices. 784 */ 785 nchn = sc->mly_controllerinfo->physical_channels_present + 786 sc->mly_controllerinfo->virtual_channels_present; 787 for (bus = 0; bus < nchn; bus++) { 788 for (target = 0; target < MLY_MAX_TARGETS; target++) { 789 790 /* ignore the controller in this scan */ 791 if (target == sc->mly_controllerparam->initiator_id) 792 continue; 793 794 /* perform device rescan? */ 795 if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN) 796 mly_rescan_btl(sc, bus, target); 797 } 798 } 799 800 callout_reset(&sc->mly_periodic, hz, mly_periodic, sc); 801 } 802 803 /******************************************************************************** 804 ******************************************************************************** 805 Command Processing 806 ******************************************************************************** 807 ********************************************************************************/ 808 809 /******************************************************************************** 810 * Run a command and wait for it to complete. 
811 * 812 */ 813 static int 814 mly_immediate_command(struct mly_command *mc) 815 { 816 struct mly_softc *sc = mc->mc_sc; 817 int error; 818 819 debug_called(2); 820 821 /* spinning at splcam is ugly, but we're only used during controller init */ 822 crit_enter(); 823 if ((error = mly_start(mc))) 824 return(error); 825 826 if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) { 827 /* sleep on the command */ 828 while(!(mc->mc_flags & MLY_CMD_COMPLETE)) { 829 tsleep(mc, 0, "mlywait", 0); 830 } 831 } else { 832 /* spin and collect status while we do */ 833 while(!(mc->mc_flags & MLY_CMD_COMPLETE)) { 834 mly_done(mc->mc_sc); 835 } 836 } 837 crit_exit(); 838 return(0); 839 } 840 841 /******************************************************************************** 842 * Start as much queued I/O as possible on the controller 843 */ 844 void 845 mly_startio(struct mly_softc *sc) 846 { 847 struct mly_command *mc; 848 849 debug_called(2); 850 851 for (;;) { 852 853 /* try for a ready command */ 854 mc = mly_dequeue_ready(sc); 855 856 /* try to build a command from a queued ccb */ 857 if (!mc) 858 mly_cam_command(sc, &mc); 859 860 /* no command == nothing to do */ 861 if (!mc) 862 break; 863 864 /* try to post the command */ 865 if (mly_start(mc)) { 866 /* controller busy, or no resources - defer for later */ 867 mly_requeue_ready(mc); 868 break; 869 } 870 } 871 } 872 873 /******************************************************************************** 874 * Deliver a command to the controller; allocate controller resources at the 875 * last moment. 876 */ 877 static int 878 mly_start(struct mly_command *mc) 879 { 880 struct mly_softc *sc = mc->mc_sc; 881 union mly_command_packet *pkt; 882 883 debug_called(2); 884 885 /* 886 * Set the command up for delivery to the controller. 887 */ 888 mly_map_command(mc); 889 mc->mc_packet->generic.command_id = mc->mc_slot; 890 891 crit_enter(); 892 893 /* 894 * Do we have to use the hardware mailbox? 
 */
    if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) {
	/*
	 * Check to see if the controller is ready for us.
	 */
	if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) {
	    crit_exit();
	    return(EBUSY);
	}
	mc->mc_flags |= MLY_CMD_BUSY;

	/*
	 * It's ready, send the command.
	 */
	MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys);
	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT);

    } else {	/* use memory-mailbox mode */

	pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index];

	/* check to see if the next index is free yet */
	if (pkt->mmbox.flag != 0) {
	    crit_exit();
	    return(EBUSY);
	}
	mc->mc_flags |= MLY_CMD_BUSY;

	/* copy in new command */
	bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data));
	/* barrier to ensure completion of previous write before we write the flag */
	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle? */
	/* copy flag last */
	pkt->mmbox.flag = mc->mc_packet->mmbox.flag;
	/* barrier to ensure completion of previous write before we notify the controller */
	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle */

	/* signal controller, update index */
	MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT);
	sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS;
    }

    mly_enqueue_busy(mc);
    crit_exit();
    return(0);
}

/********************************************************************************
 * Pick up command status from the controller, schedule a completion event
 *
 * Drains completions from both the hardware mailbox and (when active) the
 * memory mailbox, moving finished commands from the busy queue to the
 * complete queue.  Actual completion processing is deferred to
 * mly_complete(), either directly or via the taskqueue.
 */
void
mly_done(struct mly_softc *sc)
{
    struct mly_command		*mc;
    union mly_status_packet	*sp;
    u_int16_t			slot;
    int				worked;		/* nonzero if we collected any status */

    crit_enter();
    worked = 0;

    /* pick up hardware-mailbox commands */
    if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) {
	slot = MLY_GET_REG2(sc, sc->mly_status_mailbox);
	/* NOTE(review): only the upper bound is checked; assumes the
	 * controller never reports a slot below MLY_SLOT_START — confirm */
	if (slot < MLY_SLOT_MAX) {
	    mc = &sc->mly_command[slot - MLY_SLOT_START];
	    mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2);
	    mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3);
	    mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4);
	    mly_remove_busy(mc);
	    mc->mc_flags &= ~MLY_CMD_BUSY;
	    mly_enqueue_complete(mc);
	    worked = 1;
	} else {
	    /* slot 0xffff may mean "extremely bogus command" */
	    mly_printf(sc, "got HM completion for illegal slot %u\n", slot);
	}
	/* unconditionally acknowledge status */
	MLY_SET_REG(sc, sc->mly_odbr, MLY_HM_STSREADY);
	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
    }

    /* pick up memory-mailbox commands */
    if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) {
	for (;;) {
	    sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index];

	    /* check for more status */
	    if (sp->mmbox.flag == 0)
		break;

	    /* get slot number */
	    slot = sp->status.command_id;
	    if (slot < MLY_SLOT_MAX) {
		mc = &sc->mly_command[slot - MLY_SLOT_START];
		mc->mc_status = sp->status.status;
		mc->mc_sense = sp->status.sense_length;
		mc->mc_resid = sp->status.residue;
		mly_remove_busy(mc);
		mc->mc_flags &= ~MLY_CMD_BUSY;
		mly_enqueue_complete(mc);
		worked = 1;
	    } else {
		/* slot 0xffff may mean "extremely bogus command" */
		mly_printf(sc, "got AM completion for illegal slot %u at %d\n",
			   slot, sc->mly_mmbox_status_index);
	    }

	    /* clear and move to next index */
	    sp->mmbox.flag = 0;
	    sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS;
	}
	/* acknowledge that we have collected status value(s) */
	MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY);
    }

    crit_exit();
    if (worked) {
#if defined(__FreeBSD__) && __FreeBSD_version >= 500005
	if (sc->mly_state & MLY_STATE_INTERRUPTS_ON)
	    taskqueue_enqueue(taskqueue_swi, &sc->mly_task_complete);
	else
#endif
	    mly_complete(sc, 0);
    }
}

/********************************************************************************
 * Process completed commands
 */
static void
mly_complete(void *context, int pending)
{
    struct mly_softc	*sc = (struct mly_softc *)context;
    struct mly_command	*mc;
    void	        (* mc_complete)(struct mly_command *mc);

    debug_called(2);

    /*
     * Spin pulling commands off the completed queue and processing them.
     */
    while ((mc = mly_dequeue_complete(sc)) != NULL) {
	/*
	 * Free controller resources, mark command complete.
	 *
	 * Note that as soon as we mark the command complete, it may be freed
	 * out from under us, so we need to save the mc_complete field in
	 * order to later avoid dereferencing mc.  (We would not expect to
	 * have a polling/sleeping consumer with mc_complete != NULL).
	 */
	mly_unmap_command(mc);
	mc_complete = mc->mc_complete;
	mc->mc_flags |= MLY_CMD_COMPLETE;

	/*
	 * Call completion handler or wake up sleeping consumer.
	 */
	if (mc_complete != NULL) {
	    mc_complete(mc);
	} else {
	    wakeup(mc);
	}
    }

    /*
     * We may have freed up controller resources which would allow us
     * to push more commands onto the controller, so we check here.
     */
    mly_startio(sc);

    /*
     * The controller may have updated the health status information,
     * so check for it here.
     *
     * Note that we only check for health status after a completed command.  It
     * might be wise to ping the controller occasionally if it's been idle for
     * a while just to check up on it.  While a filesystem is mounted, or I/O is
     * active this isn't really an issue.
1076 */ 1077 if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) { 1078 sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter; 1079 debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change, 1080 sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event); 1081 sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event; 1082 1083 /* wake up anyone that might be interested in this */ 1084 wakeup(&sc->mly_event_change); 1085 } 1086 if (sc->mly_event_counter != sc->mly_event_waiting) 1087 mly_fetch_event(sc); 1088 } 1089 1090 /******************************************************************************** 1091 ******************************************************************************** 1092 Command Buffer Management 1093 ******************************************************************************** 1094 ********************************************************************************/ 1095 1096 /******************************************************************************** 1097 * Allocate a command. 1098 */ 1099 int 1100 mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp) 1101 { 1102 struct mly_command *mc; 1103 1104 debug_called(3); 1105 1106 if ((mc = mly_dequeue_free(sc)) == NULL) 1107 return(ENOMEM); 1108 1109 *mcp = mc; 1110 return(0); 1111 } 1112 1113 /******************************************************************************** 1114 * Release a command back to the freelist. 1115 */ 1116 void 1117 mly_release_command(struct mly_command *mc) 1118 { 1119 debug_called(3); 1120 1121 /* 1122 * Fill in parts of the command that may cause confusion if 1123 * a consumer doesn't when we are later allocated. 1124 */ 1125 mc->mc_data = NULL; 1126 mc->mc_flags = 0; 1127 mc->mc_complete = NULL; 1128 mc->mc_private = NULL; 1129 1130 /* 1131 * By default, we set up to overwrite the command packet with 1132 * sense information. 
1133 */ 1134 mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys; 1135 mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet); 1136 1137 mly_enqueue_free(mc); 1138 } 1139 1140 /******************************************************************************** 1141 * Map helper for command allocation. 1142 */ 1143 static void 1144 mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1145 { 1146 struct mly_softc *sc = (struct mly_softc *)arg 1147 1148 debug_called(2); 1149 1150 sc->mly_packetphys = segs[0].ds_addr; 1151 } 1152 1153 /******************************************************************************** 1154 * Allocate and initialise command and packet structures. 1155 */ 1156 static int 1157 mly_alloc_commands(struct mly_softc *sc) 1158 { 1159 struct mly_command *mc; 1160 int i; 1161 1162 /* 1163 * Allocate enough space for all the command packets in one chunk and 1164 * map them permanently into controller-visible space. 1165 */ 1166 if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&sc->mly_packet, 1167 BUS_DMA_NOWAIT, &sc->mly_packetmap)) { 1168 return(ENOMEM); 1169 } 1170 bus_dmamap_load(sc->mly_packet_dmat, sc->mly_packetmap, sc->mly_packet, 1171 MLY_MAXCOMMANDS * sizeof(union mly_command_packet), 1172 mly_alloc_commands_map, sc, 0); 1173 1174 for (i = 0; i < MLY_MAXCOMMANDS; i++) { 1175 mc = &sc->mly_command[i]; 1176 bzero(mc, sizeof(*mc)); 1177 mc->mc_sc = sc; 1178 mc->mc_slot = MLY_SLOT_START + i; 1179 mc->mc_packet = sc->mly_packet + i; 1180 mc->mc_packetphys = sc->mly_packetphys + (i * sizeof(union mly_command_packet)); 1181 if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap)) 1182 mly_release_command(mc); 1183 } 1184 return(0); 1185 } 1186 1187 /******************************************************************************** 1188 * Command-mapping helper function - populate this command's s/g table 1189 * with the s/g entries for its data. 
1190 */ 1191 static void 1192 mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1193 { 1194 struct mly_command *mc = (struct mly_command *)arg; 1195 struct mly_softc *sc = mc->mc_sc; 1196 struct mly_command_generic *gen = &(mc->mc_packet->generic); 1197 struct mly_sg_entry *sg; 1198 int i, tabofs; 1199 1200 debug_called(3); 1201 1202 /* can we use the transfer structure directly? */ 1203 if (nseg <= 2) { 1204 sg = &gen->transfer.direct.sg[0]; 1205 gen->command_control.extended_sg_table = 0; 1206 } else { 1207 tabofs = ((mc->mc_slot - MLY_SLOT_START) * MLY_MAXSGENTRIES); 1208 sg = sc->mly_sg_table + tabofs; 1209 gen->transfer.indirect.entries[0] = nseg; 1210 gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry)); 1211 gen->command_control.extended_sg_table = 1; 1212 } 1213 1214 /* copy the s/g table */ 1215 for (i = 0; i < nseg; i++) { 1216 sg[i].physaddr = segs[i].ds_addr; 1217 sg[i].length = segs[i].ds_len; 1218 } 1219 1220 } 1221 1222 #if 0 1223 /******************************************************************************** 1224 * Command-mapping helper function - save the cdb's physical address. 1225 * 1226 * We don't support 'large' SCSI commands at this time, so this is unused. 1227 */ 1228 static void 1229 mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1230 { 1231 struct mly_command *mc = (struct mly_command *)arg; 1232 1233 debug_called(3); 1234 1235 /* XXX can we safely assume that a CDB will never cross a page boundary? 
*/ 1236 if ((segs[0].ds_addr % PAGE_SIZE) > 1237 ((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE)) 1238 panic("cdb crosses page boundary"); 1239 1240 /* fix up fields in the command packet */ 1241 mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr; 1242 } 1243 #endif 1244 1245 /******************************************************************************** 1246 * Map a command into controller-visible space 1247 */ 1248 static void 1249 mly_map_command(struct mly_command *mc) 1250 { 1251 struct mly_softc *sc = mc->mc_sc; 1252 1253 debug_called(2); 1254 1255 /* don't map more than once */ 1256 if (mc->mc_flags & MLY_CMD_MAPPED) 1257 return; 1258 1259 /* does the command have a data buffer? */ 1260 if (mc->mc_data != NULL) 1261 bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length, 1262 mly_map_command_sg, mc, 0); 1263 1264 if (mc->mc_flags & MLY_CMD_DATAIN) 1265 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD); 1266 if (mc->mc_flags & MLY_CMD_DATAOUT) 1267 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE); 1268 1269 mc->mc_flags |= MLY_CMD_MAPPED; 1270 } 1271 1272 /******************************************************************************** 1273 * Unmap a command from controller-visible space 1274 */ 1275 static void 1276 mly_unmap_command(struct mly_command *mc) 1277 { 1278 struct mly_softc *sc = mc->mc_sc; 1279 1280 debug_called(2); 1281 1282 if (!(mc->mc_flags & MLY_CMD_MAPPED)) 1283 return; 1284 1285 if (mc->mc_flags & MLY_CMD_DATAIN) 1286 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD); 1287 if (mc->mc_flags & MLY_CMD_DATAOUT) 1288 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE); 1289 1290 /* does the command have a data buffer? 
*/ 1291 if (mc->mc_data != NULL) 1292 bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap); 1293 1294 mc->mc_flags &= ~MLY_CMD_MAPPED; 1295 } 1296 1297 /******************************************************************************** 1298 ******************************************************************************** 1299 Hardware Control 1300 ******************************************************************************** 1301 ********************************************************************************/ 1302 1303 /******************************************************************************** 1304 * Handshake with the firmware while the card is being initialised. 1305 */ 1306 static int 1307 mly_fwhandshake(struct mly_softc *sc) 1308 { 1309 u_int8_t error, param0, param1; 1310 int spinup = 0; 1311 1312 debug_called(1); 1313 1314 /* set HM_STSACK and let the firmware initialise */ 1315 MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK); 1316 DELAY(1000); /* too short? */ 1317 1318 /* if HM_STSACK is still true, the controller is initialising */ 1319 if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) 1320 return(0); 1321 mly_printf(sc, "controller initialisation started\n"); 1322 1323 /* spin waiting for initialisation to finish, or for a message to be delivered */ 1324 while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) { 1325 /* check for a message */ 1326 if (MLY_ERROR_VALID(sc)) { 1327 error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY; 1328 param0 = MLY_GET_REG(sc, sc->mly_command_mailbox); 1329 param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1); 1330 1331 switch(error) { 1332 case MLY_MSG_SPINUP: 1333 if (!spinup) { 1334 mly_printf(sc, "drive spinup in progress\n"); 1335 spinup = 1; /* only print this once (should print drive being spun?) 
*/ 1336 } 1337 break; 1338 case MLY_MSG_RACE_RECOVERY_FAIL: 1339 mly_printf(sc, "mirror race recovery failed, one or more drives offline\n"); 1340 break; 1341 case MLY_MSG_RACE_IN_PROGRESS: 1342 mly_printf(sc, "mirror race recovery in progress\n"); 1343 break; 1344 case MLY_MSG_RACE_ON_CRITICAL: 1345 mly_printf(sc, "mirror race recovery on a critical drive\n"); 1346 break; 1347 case MLY_MSG_PARITY_ERROR: 1348 mly_printf(sc, "FATAL MEMORY PARITY ERROR\n"); 1349 return(ENXIO); 1350 default: 1351 mly_printf(sc, "unknown initialisation code 0x%x\n", error); 1352 } 1353 } 1354 } 1355 return(0); 1356 } 1357 1358 /******************************************************************************** 1359 ******************************************************************************** 1360 Debugging and Diagnostics 1361 ******************************************************************************** 1362 ********************************************************************************/ 1363 1364 /******************************************************************************** 1365 * Print some information about the controller. 1366 */ 1367 static void 1368 mly_describe_controller(struct mly_softc *sc) 1369 { 1370 struct mly_ioctl_getcontrollerinfo *mi = sc->mly_controllerinfo; 1371 1372 mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n", 1373 mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "", 1374 mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build, /* XXX turn encoding? 
 */
	       mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
	       mi->memory_size);

    if (bootverbose) {
	mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n",
		   mly_describe_code(mly_table_oemname, mi->oem_information),
		   mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type,
		   mi->interface_speed, mi->interface_width, mi->interface_name);
	mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n",
		   mi->memory_size, mi->memory_speed, mi->memory_width,
		   mly_describe_code(mly_table_memorytype, mi->memory_type),
		   mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "",
		   mi->cache_size);
	mly_printf(sc, "CPU: %s @ %dMHZ\n",
		   mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed);
	if (mi->l2cache_size != 0)
	    mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size);
	if (mi->exmemory_size != 0)
	    mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n",
		       mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width,
		       mly_describe_code(mly_table_memorytype, mi->exmemory_type),
		       mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": "");
	mly_printf(sc, "battery backup %s\n", mi->bbu_present ? "present" : "not installed");
	mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n",
		   mi->maximum_block_count, mi->maximum_sg_entries);
	mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n",
		   mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline);
	mly_printf(sc, "physical devices present %d\n",
		   mi->physical_devices_present);
	mly_printf(sc, "physical disks present/offline %d/%d\n",
		   mi->physical_disks_present, mi->physical_disks_offline);
	mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n",
		   mi->physical_channels_present, mi->physical_channels_present == 1 ? "" : "s",
		   mi->virtual_channels_present, mi->virtual_channels_present == 1 ? "" : "s",
		   mi->virtual_channels_possible);
	mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands);
	mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n",
		   mi->flash_size, mi->flash_age, mi->flash_maximum_age);
    }
}

#ifdef MLY_DEBUG
/********************************************************************************
 * Print some controller state
 *
 * Dumps the doorbell, mask/status and mailbox registers, both raw and with
 * the register offsets used to fetch them.
 */
static void
mly_printstate(struct mly_softc *sc)
{
    mly_printf(sc, "IDBR %02x ODBR %02x ERROR %02x (%x %x %x)\n",
	       MLY_GET_REG(sc, sc->mly_idbr),
	       MLY_GET_REG(sc, sc->mly_odbr),
	       MLY_GET_REG(sc, sc->mly_error_status),
	       sc->mly_idbr,
	       sc->mly_odbr,
	       sc->mly_error_status);
    mly_printf(sc, "IMASK %02x ISTATUS %02x\n",
	       MLY_GET_REG(sc, sc->mly_interrupt_mask),
	       MLY_GET_REG(sc, sc->mly_interrupt_status));
    mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n",
	       MLY_GET_REG(sc, sc->mly_command_mailbox),
	       MLY_GET_REG(sc, sc->mly_command_mailbox + 1),
	       MLY_GET_REG(sc, sc->mly_command_mailbox + 2),
	       MLY_GET_REG(sc, sc->mly_command_mailbox + 3),
	       MLY_GET_REG(sc, sc->mly_command_mailbox + 4),
	       MLY_GET_REG(sc, sc->mly_command_mailbox + 5),
	       MLY_GET_REG(sc, sc->mly_command_mailbox + 6),
	       MLY_GET_REG(sc, sc->mly_command_mailbox + 7));
    mly_printf(sc, "STATUS %02x %02x %02x %02x %02x %02x %02x %02x\n",
	       MLY_GET_REG(sc, sc->mly_status_mailbox),
	       MLY_GET_REG(sc, sc->mly_status_mailbox + 1),
	       MLY_GET_REG(sc, sc->mly_status_mailbox + 2),
	       MLY_GET_REG(sc, sc->mly_status_mailbox + 3),
	       MLY_GET_REG(sc, sc->mly_status_mailbox + 4),
	       MLY_GET_REG(sc, sc->mly_status_mailbox + 5),
	       MLY_GET_REG(sc, sc->mly_status_mailbox + 6),
	       MLY_GET_REG(sc, sc->mly_status_mailbox + 7));
    mly_printf(sc, " %04x %08x\n",
	       MLY_GET_REG2(sc, sc->mly_status_mailbox),
	       MLY_GET_REG4(sc, sc->mly_status_mailbox + 4));
}

/* hook for calling mly_printstate from DDB on the first controller */
struct mly_softc	*mly_softc0 = NULL;
void
mly_printstate0(void)
{
    if (mly_softc0 != NULL)
	mly_printstate(mly_softc0);
}

/********************************************************************************
 * Print a command
 */
static void
mly_print_command(struct mly_command *mc)
{
    struct mly_softc	*sc = mc->mc_sc;

    mly_printf(sc, "COMMAND @ %p\n", mc);
    mly_printf(sc, " slot %d\n", mc->mc_slot);
    mly_printf(sc, " status 0x%x\n", mc->mc_status);
    mly_printf(sc, " sense len %d\n", mc->mc_sense);
    mly_printf(sc, " resid %d\n", mc->mc_resid);
    mly_printf(sc, " packet %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys);
    if (mc->mc_packet != NULL)
	mly_print_packet(mc);
    mly_printf(sc, " data %p/%d\n", mc->mc_data, mc->mc_length);
    mly_printf(sc, " flags %b\n", mc->mc_flags, "\20\1busy\2complete\3slotted\4mapped\5datain\6dataout\n");
    mly_printf(sc, " complete %p\n", mc->mc_complete);
    mly_printf(sc, " private %p\n", mc->mc_private);
}

/********************************************************************************
 * Print a command packet
 *
 * Decodes the packet according to its opcode (and sub-ioctl for MDACMD_IOCTL)
 * and finally prints the s/g information for commands that transfer data.
 */
static void
mly_print_packet(struct mly_command *mc)
{
    struct mly_softc			*sc = mc->mc_sc;
    struct mly_command_generic		*ge = (struct mly_command_generic *)mc->mc_packet;
    struct mly_command_scsi_small	*ss = (struct mly_command_scsi_small *)mc->mc_packet;
    struct mly_command_scsi_large	*sl = (struct mly_command_scsi_large *)mc->mc_packet;
    struct mly_command_ioctl		*io = (struct mly_command_ioctl *)mc->mc_packet;
    int					transfer;	/* does this opcode move data? */

    mly_printf(sc, " command_id %d\n", ge->command_id);
    mly_printf(sc, " opcode %d\n", ge->opcode);
    mly_printf(sc, " command_control fua %d dpo %d est %d dd %s nas %d ddis %d\n",
	       ge->command_control.force_unit_access,
	       ge->command_control.disable_page_out,
	       ge->command_control.extended_sg_table,
	       (ge->command_control.data_direction == MLY_CCB_WRITE) ? "WRITE" : "READ",
	       ge->command_control.no_auto_sense,
	       ge->command_control.disable_disconnect);
    mly_printf(sc, " data_size %d\n", ge->data_size);
    mly_printf(sc, " sense_buffer_address 0x%llx\n", ge->sense_buffer_address);
    mly_printf(sc, " lun %d\n", ge->addr.phys.lun);
    mly_printf(sc, " target %d\n", ge->addr.phys.target);
    mly_printf(sc, " channel %d\n", ge->addr.phys.channel);
    mly_printf(sc, " logical device %d\n", ge->addr.log.logdev);
    mly_printf(sc, " controller %d\n", ge->addr.phys.controller);
    mly_printf(sc, " timeout %d %s\n",
	       ge->timeout.value,
	       (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" :
	       ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours"));
    mly_printf(sc, " maximum_sense_size %d\n", ge->maximum_sense_size);
    switch(ge->opcode) {
    case MDACMD_SCSIPT:
    case MDACMD_SCSI:
	mly_printf(sc, " cdb length %d\n", ss->cdb_length);
	mly_printf(sc, " cdb %*D\n", ss->cdb_length, ss->cdb, " ");
	transfer = 1;
	break;
    case MDACMD_SCSILC:
    case MDACMD_SCSILCPT:
	mly_printf(sc, " cdb length %d\n", sl->cdb_length);
	mly_printf(sc, " cdb 0x%llx\n", sl->cdb_physaddr);
	transfer = 1;
	break;
    case MDACMD_IOCTL:
	mly_printf(sc, " sub_ioctl 0x%x\n", io->sub_ioctl);
	switch(io->sub_ioctl) {
	case MDACIOCTL_SETMEMORYMAILBOX:
	    mly_printf(sc, " health_buffer_size %d\n",
		       io->param.setmemorymailbox.health_buffer_size);
	    mly_printf(sc, " health_buffer_phys 0x%llx\n",
		       io->param.setmemorymailbox.health_buffer_physaddr);
	    mly_printf(sc, " command_mailbox 0x%llx\n",
		       io->param.setmemorymailbox.command_mailbox_physaddr);
	    mly_printf(sc, " status_mailbox 0x%llx\n",
		       io->param.setmemorymailbox.status_mailbox_physaddr);
	    transfer = 0;
	    break;

	case MDACIOCTL_SETREALTIMECLOCK:
	case MDACIOCTL_GETHEALTHSTATUS:
	case MDACIOCTL_GETCONTROLLERINFO:
	case MDACIOCTL_GETLOGDEVINFOVALID:
	case MDACIOCTL_GETPHYSDEVINFOVALID:
	case MDACIOCTL_GETPHYSDEVSTATISTICS:
	case MDACIOCTL_GETLOGDEVSTATISTICS:
	case MDACIOCTL_GETCONTROLLERSTATISTICS:
	case MDACIOCTL_GETBDT_FOR_SYSDRIVE:
	case MDACIOCTL_CREATENEWCONF:
	case MDACIOCTL_ADDNEWCONF:
	case MDACIOCTL_GETDEVCONFINFO:
	case MDACIOCTL_GETFREESPACELIST:
	case MDACIOCTL_MORE:
	case MDACIOCTL_SETPHYSDEVPARAMETER:
	case MDACIOCTL_GETPHYSDEVPARAMETER:
	case MDACIOCTL_GETLOGDEVPARAMETER:
	case MDACIOCTL_SETLOGDEVPARAMETER:
	    mly_printf(sc, " param %10D\n", io->param.data.param, " ");
	    transfer = 1;
	    break;

	case MDACIOCTL_GETEVENT:
	    mly_printf(sc, " event %d\n",
		       io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16));
	    transfer = 1;
	    break;

	case MDACIOCTL_SETRAIDDEVSTATE:
	    mly_printf(sc, " state %d\n", io->param.setraiddevstate.state);
	    transfer = 0;
	    break;

	case MDACIOCTL_XLATEPHYSDEVTORAIDDEV:
	    mly_printf(sc, " raid_device %d\n", io->param.xlatephysdevtoraiddev.raid_device);
	    mly_printf(sc, " controller %d\n", io->param.xlatephysdevtoraiddev.controller);
	    mly_printf(sc, " channel %d\n", io->param.xlatephysdevtoraiddev.channel);
	    mly_printf(sc, " target %d\n", io->param.xlatephysdevtoraiddev.target);
	    mly_printf(sc, " lun %d\n", io->param.xlatephysdevtoraiddev.lun);
	    transfer = 0;
	    break;

	case MDACIOCTL_GETGROUPCONFINFO:
	    mly_printf(sc, " group %d\n", io->param.getgroupconfinfo.group);
	    transfer = 1;
	    break;

	case MDACIOCTL_GET_SUBSYSTEM_DATA:
	case MDACIOCTL_SET_SUBSYSTEM_DATA:
	case MDACIOCTL_STARTDISOCVERY:
	case MDACIOCTL_INITPHYSDEVSTART:
	case MDACIOCTL_INITPHYSDEVSTOP:
	case MDACIOCTL_INITRAIDDEVSTART:
	case MDACIOCTL_INITRAIDDEVSTOP:
	case MDACIOCTL_REBUILDRAIDDEVSTART:
	case MDACIOCTL_REBUILDRAIDDEVSTOP:
	case MDACIOCTL_MAKECONSISTENTDATASTART:
	case MDACIOCTL_MAKECONSISTENTDATASTOP:
	case MDACIOCTL_CONSISTENCYCHECKSTART:
	case MDACIOCTL_CONSISTENCYCHECKSTOP:
	case MDACIOCTL_RESETDEVICE:
	case MDACIOCTL_FLUSHDEVICEDATA:
	case MDACIOCTL_PAUSEDEVICE:
	case MDACIOCTL_UNPAUSEDEVICE:
	case MDACIOCTL_LOCATEDEVICE:
	case MDACIOCTL_SETMASTERSLAVEMODE:
	case MDACIOCTL_DELETERAIDDEV:
	case MDACIOCTL_REPLACEINTERNALDEV:
	case MDACIOCTL_CLEARCONF:
	case MDACIOCTL_GETCONTROLLERPARAMETER:
	case MDACIOCTL_SETCONTRLLERPARAMETER:
	case MDACIOCTL_CLEARCONFSUSPMODE:
	case MDACIOCTL_STOREIMAGE:
	case MDACIOCTL_READIMAGE:
	case MDACIOCTL_FLASHIMAGES:
	case MDACIOCTL_RENAMERAIDDEV:
	default:			/* no idea what to print */
	    transfer = 0;
	    break;
	}
	break;

    case MDACMD_IOCTLCHECK:
    case MDACMD_MEMCOPY:
    default:
	transfer = 0;
	break;	/* print nothing */
    }
    if (transfer) {
	if (ge->command_control.extended_sg_table) {
	    mly_printf(sc, " sg table 0x%llx/%d\n",
		       ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]);
	} else {
	    mly_printf(sc, " 0000 0x%llx/%lld\n",
		       ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length);
	    mly_printf(sc, " 0001 0x%llx/%lld\n",
		       ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length);
	}
    }
}

/********************************************************************************
 * Panic in a slightly informative fashion
 */
static void
mly_panic(struct mly_softc *sc, char *reason)
{
    mly_printstate(sc);
    panic(reason);
}
#endif

/********************************************************************************
 * Print queue statistics, callable from DDB.
1662 */ 1663 void 1664 mly_print_controller(int controller) 1665 { 1666 struct mly_softc *sc; 1667 1668 if ((sc = devclass_get_softc(devclass_find("mly"), controller)) == NULL) { 1669 kprintf("mly: controller %d invalid\n", controller); 1670 } else { 1671 device_printf(sc->mly_dev, "queue curr max\n"); 1672 device_printf(sc->mly_dev, "free %04d/%04d\n", 1673 sc->mly_qstat[MLYQ_FREE].q_length, sc->mly_qstat[MLYQ_FREE].q_max); 1674 device_printf(sc->mly_dev, "ready %04d/%04d\n", 1675 sc->mly_qstat[MLYQ_READY].q_length, sc->mly_qstat[MLYQ_READY].q_max); 1676 device_printf(sc->mly_dev, "busy %04d/%04d\n", 1677 sc->mly_qstat[MLYQ_BUSY].q_length, sc->mly_qstat[MLYQ_BUSY].q_max); 1678 device_printf(sc->mly_dev, "complete %04d/%04d\n", 1679 sc->mly_qstat[MLYQ_COMPLETE].q_length, sc->mly_qstat[MLYQ_COMPLETE].q_max); 1680 } 1681 } 1682 1683 1684 /******************************************************************************** 1685 ******************************************************************************** 1686 Control device interface 1687 ******************************************************************************** 1688 ********************************************************************************/ 1689 1690 /******************************************************************************** 1691 * Accept an open operation on the control device. 1692 */ 1693 static int 1694 mly_user_open(struct dev_open_args *ap) 1695 { 1696 cdev_t dev = ap->a_head.a_dev; 1697 int unit = minor(dev); 1698 struct mly_softc *sc = devclass_get_softc(devclass_find("mly"), unit); 1699 1700 sc->mly_state |= MLY_STATE_OPEN; 1701 return(0); 1702 } 1703 1704 /******************************************************************************** 1705 * Accept the last close on the control device. 
1706 */ 1707 static int 1708 mly_user_close(struct dev_close_args *ap) 1709 { 1710 cdev_t dev = ap->a_head.a_dev; 1711 int unit = minor(dev); 1712 struct mly_softc *sc = devclass_get_softc(devclass_find("mly"), unit); 1713 1714 sc->mly_state &= ~MLY_STATE_OPEN; 1715 return (0); 1716 } 1717 1718 /******************************************************************************** 1719 * Handle controller-specific control operations. 1720 */ 1721 static int 1722 mly_user_ioctl(struct dev_ioctl_args *ap) 1723 { 1724 cdev_t dev = ap->a_head.a_dev; 1725 struct mly_softc *sc = (struct mly_softc *)dev->si_drv1; 1726 struct mly_user_command *uc = (struct mly_user_command *)ap->a_data; 1727 struct mly_user_health *uh = (struct mly_user_health *)ap->a_data; 1728 1729 switch(ap->a_cmd) { 1730 case MLYIO_COMMAND: 1731 return(mly_user_command(sc, uc)); 1732 case MLYIO_HEALTH: 1733 return(mly_user_health(sc, uh)); 1734 default: 1735 return(ENOIOCTL); 1736 } 1737 } 1738 1739 /******************************************************************************** 1740 * Execute a command passed in from userspace. 1741 * 1742 * The control structure contains the actual command for the controller, as well 1743 * as the user-space data pointer and data size, and an optional sense buffer 1744 * size/pointer. On completion, the data size is adjusted to the command 1745 * residual, and the sense buffer size to the size of the returned sense data. 1746 * 1747 */ 1748 static int 1749 mly_user_command(struct mly_softc *sc, struct mly_user_command *uc) 1750 { 1751 struct mly_command *mc; 1752 int error; 1753 1754 /* allocate a command */ 1755 if (mly_alloc_command(sc, &mc)) { 1756 error = ENOMEM; 1757 goto out; /* XXX Linux version will wait for a command */ 1758 } 1759 1760 /* handle data size/direction */ 1761 mc->mc_length = (uc->DataTransferLength >= 0) ? 
uc->DataTransferLength : -uc->DataTransferLength; 1762 if (mc->mc_length > 0) 1763 mc->mc_data = kmalloc(mc->mc_length, M_DEVBUF, M_INTWAIT); 1764 if (uc->DataTransferLength > 0) { 1765 mc->mc_flags |= MLY_CMD_DATAIN; 1766 bzero(mc->mc_data, mc->mc_length); 1767 } 1768 if (uc->DataTransferLength < 0) { 1769 mc->mc_flags |= MLY_CMD_DATAOUT; 1770 if ((error = copyin(uc->DataTransferBuffer, mc->mc_data, mc->mc_length)) != 0) 1771 goto out; 1772 } 1773 1774 /* copy the controller command */ 1775 bcopy(&uc->CommandMailbox, mc->mc_packet, sizeof(uc->CommandMailbox)); 1776 1777 /* clear command completion handler so that we get woken up */ 1778 mc->mc_complete = NULL; 1779 1780 /* execute the command */ 1781 crit_enter(); 1782 mly_requeue_ready(mc); 1783 mly_startio(sc); 1784 while (!(mc->mc_flags & MLY_CMD_COMPLETE)) 1785 tsleep(mc, 0, "mlyioctl", 0); 1786 crit_exit(); 1787 1788 /* return the data to userspace */ 1789 if (uc->DataTransferLength > 0) 1790 if ((error = copyout(mc->mc_data, uc->DataTransferBuffer, mc->mc_length)) != 0) 1791 goto out; 1792 1793 /* return the sense buffer to userspace */ 1794 if ((uc->RequestSenseLength > 0) && (mc->mc_sense > 0)) { 1795 if ((error = copyout(mc->mc_packet, uc->RequestSenseBuffer, 1796 min(uc->RequestSenseLength, mc->mc_sense))) != 0) 1797 goto out; 1798 } 1799 1800 /* return command results to userspace (caller will copy out) */ 1801 uc->DataTransferLength = mc->mc_resid; 1802 uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense); 1803 uc->CommandStatus = mc->mc_status; 1804 error = 0; 1805 1806 out: 1807 if (mc->mc_data != NULL) 1808 kfree(mc->mc_data, M_DEVBUF); 1809 if (mc != NULL) 1810 mly_release_command(mc); 1811 return(error); 1812 } 1813 1814 /******************************************************************************** 1815 * Return health status to userspace. 
If the health change index in the user 1816 * structure does not match that currently exported by the controller, we 1817 * return the current status immediately. Otherwise, we block until either 1818 * interrupted or new status is delivered. 1819 */ 1820 static int 1821 mly_user_health(struct mly_softc *sc, struct mly_user_health *uh) 1822 { 1823 struct mly_health_status mh; 1824 int error; 1825 1826 /* fetch the current health status from userspace */ 1827 if ((error = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh))) != 0) 1828 return(error); 1829 1830 /* spin waiting for a status update */ 1831 crit_enter(); 1832 error = EWOULDBLOCK; 1833 while ((error != 0) && (sc->mly_event_change == mh.change_counter)) 1834 error = tsleep(&sc->mly_event_change, PCATCH, "mlyhealth", 0); 1835 crit_exit(); 1836 1837 /* copy the controller's health status buffer out (there is a race here if it changes again) */ 1838 error = copyout(&sc->mly_mmbox->mmm_health.status, uh->HealthStatusBuffer, 1839 sizeof(uh->HealthStatusBuffer)); 1840 return(error); 1841 } 1842