1 /*- 2 * Copyright (c) 2008 Yahoo!, Inc. 3 * All rights reserved. 4 * Written by: John Baldwin <jhb@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. Neither the name of the author nor the names of any co-contributors 15 * may be used to endorse or promote products derived from this software 16 * without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 * 30 * LSI MPT-Fusion Host Adapter FreeBSD userland interface 31 */ 32 /*- 33 * Copyright (c) 2011 LSI Corp. 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without 37 * modification, are permitted provided that the following conditions 38 * are met: 39 * 1. 
Redistributions of source code must retain the above copyright 40 * notice, this list of conditions and the following disclaimer. 41 * 2. Redistributions in binary form must reproduce the above copyright 42 * notice, this list of conditions and the following disclaimer in the 43 * documentation and/or other materials provided with the distribution. 44 * 45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 55 * SUCH DAMAGE. 
 *
 * LSI MPT-Fusion Host Adapter FreeBSD
 *
 * $FreeBSD: src/sys/dev/mps/mps_user.c,v 1.10 2012/01/26 18:17:21 ken Exp $
 */

/* TODO Move headers to mpsvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/ioccom.h>
#include <sys/endian.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/proc.h>
#include <sys/sysent.h>

#include <sys/rman.h>
#include <sys/device.h>

#include <bus/cam/cam.h>
#include <bus/cam/scsi/scsi_all.h>

#include <dev/raid/mps/mpi/mpi2_type.h>
#include <dev/raid/mps/mpi/mpi2.h>
#include <dev/raid/mps/mpi/mpi2_ioc.h>
#include <dev/raid/mps/mpi/mpi2_cnfg.h>
#include <dev/raid/mps/mpi/mpi2_init.h>
#include <dev/raid/mps/mpi/mpi2_tool.h>
#include <dev/raid/mps/mps_ioctl.h>
#include <dev/raid/mps/mpsvar.h>
#include <dev/raid/mps/mps_table.h>
#include <dev/raid/mps/mps_sas.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

/* Character-device entry points for the per-adapter /dev/mps%d node. */
static d_open_t		mps_open;
static d_close_t	mps_close;
static d_ioctl_t	mps_ioctl_devsw;

static struct dev_ops mps_ops = {
	{ "mps", 0, D_MPSAFE },
	.d_open =	mps_open,
	.d_close =	mps_close,
	.d_ioctl =	mps_ioctl_devsw,
};

/*
 * Per-MPI-function "pre" handlers: each validates a user-supplied request
 * of that function type and prepares the command's SGL before it is issued.
 */
typedef int (mps_user_f)(struct mps_command *, struct mps_usr_command *);
static mps_user_f	mpi_pre_ioc_facts;
static mps_user_f	mpi_pre_port_facts;
static mps_user_f	mpi_pre_fw_download;
static mps_user_f	mpi_pre_fw_upload;
static mps_user_f	mpi_pre_sata_passthrough;
static mps_user_f	mpi_pre_smp_passthrough;
static mps_user_f	mpi_pre_config;
static mps_user_f	mpi_pre_sas_io_unit_control;

static int mps_user_read_cfg_header(struct mps_softc *,
				    struct mps_cfg_page_req *);
static int mps_user_read_cfg_page(struct mps_softc *,
				  struct mps_cfg_page_req *, void *);
static int mps_user_read_extcfg_header(struct mps_softc *,
				     struct mps_ext_cfg_page_req *);
static int mps_user_read_extcfg_page(struct mps_softc *,
				     struct mps_ext_cfg_page_req *, void *);
static int mps_user_write_cfg_page(struct mps_softc *,
				   struct mps_cfg_page_req *, void *);
static int mps_user_setup_request(struct mps_command *,
				  struct mps_usr_command *);
static int mps_user_command(struct mps_softc *, struct mps_usr_command *);

static int mps_user_pass_thru(struct mps_softc *sc, mps_pass_thru_t *data);
static void mps_user_get_adapter_data(struct mps_softc *sc,
    mps_adapter_data_t *data);
static void mps_user_read_pci_info(struct mps_softc *sc,
    mps_pci_info_t *data);
static uint8_t mps_get_fw_diag_buffer_number(struct mps_softc *sc,
    uint32_t unique_id);
static int mps_post_fw_diag_buffer(struct mps_softc *sc,
    mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
static int mps_release_fw_diag_buffer(struct mps_softc *sc,
    mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type);
static int mps_diag_register(struct mps_softc *sc,
    mps_fw_diag_register_t *diag_register, uint32_t *return_code);
static int mps_diag_unregister(struct mps_softc *sc,
    mps_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
static int mps_diag_query(struct mps_softc *sc, mps_fw_diag_query_t *diag_query,
    uint32_t *return_code);
static int mps_diag_read_buffer(struct mps_softc *sc,
    mps_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
    uint32_t *return_code);
static int mps_diag_release(struct mps_softc *sc,
    mps_fw_diag_release_t *diag_release, uint32_t *return_code);
static int mps_do_diag_action(struct mps_softc *sc, uint32_t action,
    uint8_t *diag_action, uint32_t length, uint32_t *return_code);
static int mps_user_diag_action(struct mps_softc *sc, mps_diag_action_t *data);
static void mps_user_event_query(struct mps_softc *sc, mps_event_query_t *data);
static void mps_user_event_enable(struct mps_softc *sc,
    mps_event_enable_t *data);
static int mps_user_event_report(struct mps_softc *sc,
    mps_event_report_t *data);
static int mps_user_reg_access(struct mps_softc *sc, mps_reg_access_t *data);
static int mps_user_btdh(struct mps_softc *sc, mps_btdh_mapping_t *data);

static MALLOC_DEFINE(M_MPSUSER, "mps_user", "Buffers for mps(4) ioctls");

/* Macros from compat/freebsd32/freebsd32.h */
#define PTRIN(v)	(void *)(uintptr_t)(v)
#define PTROUT(v)	(uint32_t)(uintptr_t)(v)

#define CP(src,dst,fld) do { (dst).fld = (src).fld; } while (0)
#define PTRIN_CP(src,dst,fld)				\
	do { (dst).fld = PTRIN((src).fld); } while (0)
#define PTROUT_CP(src,dst,fld) \
	do { (dst).fld = PTROUT((src).fld); } while (0)

/*
 * Create the /dev/mps%d control device for this adapter instance.
 * Returns 0 on success or ENOMEM if the device node cannot be created.
 */
int
mps_attach_user(struct mps_softc *sc)
{
	int unit;

	unit = device_get_unit(sc->mps_dev);
	sc->mps_cdev = make_dev(&mps_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "mps%d", unit);
	if (sc->mps_cdev == NULL) {
		return (ENOMEM);
	}
	/* Stash the softc so the cdev entry points can find it. */
	sc->mps_cdev->si_drv1 = sc;
	return (0);
}

/* Tear down the control device created by mps_attach_user(). */
void
mps_detach_user(struct mps_softc *sc)
{

	/* XXX: do a purge of pending requests?
	 */
	if (sc->mps_cdev != NULL)
		destroy_dev(sc->mps_cdev);
}

/* Open of the control node: no per-open state to set up. */
static int
mps_open(struct dev_open_args *ap)
{

	return (0);
}

/* Close of the control node: no per-open state to tear down. */
static int
mps_close(struct dev_close_args *ap)
{

	return (0);
}

/*
 * Fetch the config-page header for the page named in page_req, so the
 * caller can learn the page length before reading the page itself.
 * Returns ETIMEDOUT if the request never completes; otherwise 0, with
 * page_req->ioc_status carrying the IOC completion status and, on
 * success, page_req->header filled in.
 */
static int
mps_user_read_cfg_header(struct mps_softc *sc,
    struct mps_cfg_page_req *page_req)
{
	MPI2_CONFIG_PAGE_HEADER *hdr;
	struct mps_config_params params;
	int	    error;

	hdr = &params.hdr.Struct;
	params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	params.page_address = le32toh(page_req->page_address);
	hdr->PageVersion = 0;
	hdr->PageLength = 0;
	hdr->PageNumber = page_req->header.PageNumber;
	hdr->PageType = page_req->header.PageType;
	params.buffer = NULL;
	params.length = 0;
	params.callback = NULL;

	if ((error = mps_read_config_page(sc, &params)) != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mps_printf(sc, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(params.status);
	if ((page_req->ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		bcopy(hdr, &page_req->header, sizeof(page_req->header));
	}

	return (0);
}

/*
 * Read the current values of a config page.  buf carries the page
 * header in and receives the page contents on completion.
 */
static int
mps_user_read_cfg_page(struct mps_softc *sc, struct mps_cfg_page_req *page_req,
    void *buf)
{
	MPI2_CONFIG_PAGE_HEADER *reqhdr, *hdr;
	struct mps_config_params params;
	int	      error;

	reqhdr = buf;
	hdr = &params.hdr.Struct;
	hdr->PageVersion = reqhdr->PageVersion;
	hdr->PageLength = reqhdr->PageLength;
	hdr->PageNumber = reqhdr->PageNumber;
	hdr->PageType = reqhdr->PageType & MPI2_CONFIG_PAGETYPE_MASK;
	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.page_address = le32toh(page_req->page_address);
	params.buffer = buf;
	params.length = le32toh(page_req->len);
	params.callback = NULL;

	if ((error = mps_read_config_page(sc, &params)) != 0) {
		mps_printf(sc, "mps_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(params.status);
	return (0);
}

/* Extended-page analogue of mps_user_read_cfg_header(). */
static int
mps_user_read_extcfg_header(struct mps_softc *sc,
    struct mps_ext_cfg_page_req *ext_page_req)
{
	MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
	struct mps_config_params params;
	int	    error;

	hdr = &params.hdr.Ext;
	params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	hdr->PageVersion = ext_page_req->header.PageVersion;
	hdr->PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
	hdr->ExtPageLength = 0;
	hdr->PageNumber = ext_page_req->header.PageNumber;
	hdr->ExtPageType = ext_page_req->header.ExtPageType;
	params.page_address = le32toh(ext_page_req->page_address);
	if ((error = mps_read_config_page(sc, &params)) != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mps_printf(sc, "mps_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(params.status);
	if ((ext_page_req->ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		ext_page_req->header.PageVersion = hdr->PageVersion;
		ext_page_req->header.PageNumber = hdr->PageNumber;
		ext_page_req->header.PageType = hdr->PageType;
		ext_page_req->header.ExtPageLength = hdr->ExtPageLength;
		ext_page_req->header.ExtPageType = hdr->ExtPageType;
	}

	return (0);
}

/* Extended-page analogue of mps_user_read_cfg_page(). */
static int
mps_user_read_extcfg_page(struct mps_softc *sc,
    struct mps_ext_cfg_page_req *ext_page_req, void *buf)
{
	MPI2_CONFIG_EXTENDED_PAGE_HEADER *reqhdr, *hdr;
	struct mps_config_params params;
	int error;

	reqhdr = buf;
	hdr = &params.hdr.Ext;
	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.page_address = le32toh(ext_page_req->page_address);
	hdr->PageVersion = reqhdr->PageVersion;
	hdr->PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
	hdr->PageNumber = reqhdr->PageNumber;
	hdr->ExtPageType = reqhdr->ExtPageType;
	hdr->ExtPageLength = reqhdr->ExtPageLength;
	params.buffer = buf;
	params.length = le32toh(ext_page_req->len);
	params.callback = NULL;

	if ((error = mps_read_config_page(sc, &params)) != 0) {
		mps_printf(sc, "mps_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(params.status);
	return (0);
}

/*
 * Write the current values of a config page.  Only pages whose
 * attribute is CHANGEABLE or PERSISTENT may be written.
 */
static int
mps_user_write_cfg_page(struct mps_softc *sc,
    struct mps_cfg_page_req *page_req, void *buf)
{
	MPI2_CONFIG_PAGE_HEADER *reqhdr, *hdr;
	struct mps_config_params params;
	u_int	      hdr_attr;
	int	      error;

	reqhdr = buf;
	hdr =
&params.hdr.Struct;
	hdr_attr = reqhdr->PageType & MPI2_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI2_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI2_CONFIG_PAGEATTR_PERSISTENT) {
		mps_printf(sc, "page type 0x%x not changeable\n",
			reqhdr->PageType & MPI2_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	hdr->PageVersion = reqhdr->PageVersion;
	hdr->PageLength = reqhdr->PageLength;
	hdr->PageNumber = reqhdr->PageNumber;
	hdr->PageType = reqhdr->PageType;
	params.action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.page_address = le32toh(page_req->page_address);
	params.buffer = buf;
	params.length = le32toh(page_req->len);
	params.callback = NULL;

	if ((error = mps_write_config_page(sc, &params)) != 0) {
		mps_printf(sc, "mps_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(params.status);
	return (0);
}

/*
 * Point the command's SGL cursor at 'sge' (which must lie inside the
 * request frame 'req') and record how much of the request frame is
 * left for building SGEs.
 */
void
mpi_init_sge(struct mps_command *cm, void *req, void *sge)
{
	int off, space;

	space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4;
	off = (uintptr_t)sge - (uintptr_t)req;

	KASSERT(off < space, ("bad pointers %p %p, off %d, space %d",
		    req, sge, off, space));

	cm->cm_sge = sge;
	cm->cm_sglsize = space - off;
}

/*
 * Prepare the mps_command for an IOC_FACTS request.
 */
static int
mpi_pre_ioc_facts(struct mps_command *cm, struct mps_usr_command *cmd)
{
	MPI2_IOC_FACTS_REQUEST *req = (void *)cm->cm_req;
	MPI2_IOC_FACTS_REPLY *rpl;

	if (cmd->req_len != sizeof *req)
		return (EINVAL);
	if (cmd->rpl_len != sizeof *rpl)
		return (EINVAL);

	/* IOC_FACTS carries no data transfer, hence no SGL. */
	cm->cm_sge = NULL;
	cm->cm_sglsize = 0;
	return (0);
}

/*
 * Prepare the mps_command for a PORT_FACTS request.
 */
static int
mpi_pre_port_facts(struct mps_command *cm, struct mps_usr_command *cmd)
{
	MPI2_PORT_FACTS_REQUEST *req = (void *)cm->cm_req;
	MPI2_PORT_FACTS_REPLY *rpl;

	if (cmd->req_len != sizeof *req)
		return (EINVAL);
	if (cmd->rpl_len != sizeof *rpl)
		return (EINVAL);

	/* PORT_FACTS carries no data transfer, hence no SGL. */
	cm->cm_sge = NULL;
	cm->cm_sglsize = 0;
	return (0);
}

/*
 * Prepare the mps_command for a FW_DOWNLOAD request.
 */
static int
mpi_pre_fw_download(struct mps_command *cm, struct mps_usr_command *cmd)
{
	MPI2_FW_DOWNLOAD_REQUEST *req = (void *)cm->cm_req;
	MPI2_FW_DOWNLOAD_REPLY *rpl;
	MPI2_FW_DOWNLOAD_TCSGE tc;
	int error;

	/*
	 * This code assumes there is room in the request's SGL for
	 * the TransactionContext plus at least a SGL chain element.
	 */
	CTASSERT(sizeof req->SGL >= sizeof tc + MPS_SGC_SIZE);

	if (cmd->req_len != sizeof *req)
		return (EINVAL);
	if (cmd->rpl_len != sizeof *rpl)
		return (EINVAL);

	if (cmd->len == 0)
		return (EINVAL);

	error = copyin(cmd->buf, cm->cm_data, cmd->len);
	if (error != 0)
		return (error);

	mpi_init_sge(cm, req, &req->SGL);
	bzero(&tc, sizeof tc);

	/*
	 * For now, the F/W image must be provided in a single request.
	 */
	if ((req->MsgFlags & MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT) == 0)
		return (EINVAL);
	if (req->TotalImageSize != cmd->len)
		return (EINVAL);

	/*
	 * The value of the first two elements is specified in the
	 * Fusion-MPT Message Passing Interface document.
	 */
	tc.ContextSize = 0;
	tc.DetailsLength = 12;
	tc.ImageOffset = 0;
	tc.ImageSize = cmd->len;

	cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;

	return (mps_push_sge(cm, &tc, sizeof tc, 0));
}

/*
 * Prepare the mps_command for a FW_UPLOAD request.
 */
static int
mpi_pre_fw_upload(struct mps_command *cm, struct mps_usr_command *cmd)
{
	MPI2_FW_UPLOAD_REQUEST *req = (void *)cm->cm_req;
	MPI2_FW_UPLOAD_REPLY *rpl;
	MPI2_FW_UPLOAD_TCSGE tc;

	/*
	 * This code assumes there is room in the request's SGL for
	 * the TransactionContext plus at least a SGL chain element.
	 */
	CTASSERT(sizeof req->SGL >= sizeof tc + MPS_SGC_SIZE);

	if (cmd->req_len != sizeof *req)
		return (EINVAL);
	if (cmd->rpl_len != sizeof *rpl)
		return (EINVAL);

	mpi_init_sge(cm, req, &req->SGL);
	bzero(&tc, sizeof tc);

	/*
	 * The value of the first two elements is specified in the
	 * Fusion-MPT Message Passing Interface document.
	 */
	tc.ContextSize = 0;
	tc.DetailsLength = 12;
	/*
	 * XXX Is there any reason to fetch a partial image?  I.e. to
	 * set ImageOffset to something other than 0?
	 */
	tc.ImageOffset = 0;
	tc.ImageSize = cmd->len;

	cm->cm_flags |= MPS_CM_FLAGS_DATAIN;

	return (mps_push_sge(cm, &tc, sizeof tc, 0));
}

/*
 * Prepare the mps_command for a SATA_PASSTHROUGH request.
 */
static int
mpi_pre_sata_passthrough(struct mps_command *cm, struct mps_usr_command *cmd)
{
	MPI2_SATA_PASSTHROUGH_REQUEST *req = (void *)cm->cm_req;
	MPI2_SATA_PASSTHROUGH_REPLY *rpl;

	if (cmd->req_len != sizeof *req)
		return (EINVAL);
	if (cmd->rpl_len != sizeof *rpl)
		return (EINVAL);

	mpi_init_sge(cm, req, &req->SGL);
	return (0);
}

/*
 * Prepare the mps_command for a SMP_PASSTHROUGH request.
 */
static int
mpi_pre_smp_passthrough(struct mps_command *cm, struct mps_usr_command *cmd)
{
	MPI2_SMP_PASSTHROUGH_REQUEST *req = (void *)cm->cm_req;
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;

	if (cmd->req_len != sizeof *req)
		return (EINVAL);
	if (cmd->rpl_len != sizeof *rpl)
		return (EINVAL);

	mpi_init_sge(cm, req, &req->SGL);
	return (0);
}

/*
 * Prepare the mps_command for a CONFIG request.
 */
static int
mpi_pre_config(struct mps_command *cm, struct mps_usr_command *cmd)
{
	MPI2_CONFIG_REQUEST *req = (void *)cm->cm_req;
	MPI2_CONFIG_REPLY *rpl;

	if (cmd->req_len != sizeof *req)
		return (EINVAL);
	if (cmd->rpl_len != sizeof *rpl)
		return (EINVAL);

	mpi_init_sge(cm, req, &req->PageBufferSGE);
	return (0);
}

/*
 * Prepare the mps_command for a SAS_IO_UNIT_CONTROL request.
 */
static int
mpi_pre_sas_io_unit_control(struct mps_command *cm,
			     struct mps_usr_command *cmd)
{

	/* No data transfer for this function, hence no SGL. */
	cm->cm_sge = NULL;
	cm->cm_sglsize = 0;
	return (0);
}

/*
 * A set of functions to prepare an mps_command for the various
 * supported requests.
 */
struct mps_user_func {
	U8		Function;
	mps_user_f	*f_pre;
} mps_user_func_list[] = {
	{ MPI2_FUNCTION_IOC_FACTS,		mpi_pre_ioc_facts },
	{ MPI2_FUNCTION_PORT_FACTS,		mpi_pre_port_facts },
	{ MPI2_FUNCTION_FW_DOWNLOAD,		mpi_pre_fw_download },
	{ MPI2_FUNCTION_FW_UPLOAD,		mpi_pre_fw_upload },
	{ MPI2_FUNCTION_SATA_PASSTHROUGH,	mpi_pre_sata_passthrough },
	{ MPI2_FUNCTION_SMP_PASSTHROUGH,	mpi_pre_smp_passthrough},
	{ MPI2_FUNCTION_CONFIG,			mpi_pre_config},
	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL,	mpi_pre_sas_io_unit_control },
	{ 0xFF,					NULL } /* list end */
};

/*
 * Dispatch to the per-function "pre" handler matching the MPI function
 * in the user-supplied request header.  Returns EINVAL for functions
 * that have no handler (i.e. are not supported via this interface).
 */
static int
mps_user_setup_request(struct mps_command *cm, struct mps_usr_command *cmd)
{
	MPI2_REQUEST_HEADER *hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;
	struct mps_user_func *f;

	for (f = mps_user_func_list; f->f_pre != NULL; f++) {
		if (hdr->Function == f->Function)
			return (f->f_pre(cm, cmd));
	}
	return (EINVAL);
}

/*
 * Execute a user-supplied MPI command: copy in the request, run it
 * synchronously (60 second timeout), then copy the reply and any data
 * buffer back out to user space.
 */
static int
mps_user_command(struct mps_softc *sc, struct mps_usr_command *cmd)
{
	MPI2_REQUEST_HEADER *hdr;
	MPI2_DEFAULT_REPLY *rpl;
	void *buf = NULL;
	struct mps_command *cm = NULL;
	int err = 0;
	int sz;

	mps_lock(sc);
	cm = mps_alloc_command(sc);

	if (cm == NULL) {
		mps_printf(sc, "mps_user_command: no mps requests\n");
		err = ENOMEM;
		goto Ret;
	}
	mps_unlock(sc);

	hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;

	mps_dprint(sc, MPS_INFO, "mps_user_command: req %p %d rpl %p %d\n",
		    cmd->req, cmd->req_len, cmd->rpl, cmd->rpl_len );

	/* The request must fit within a single request frame. */
	if (cmd->req_len > (int)sc->facts->IOCRequestFrameSize * 4) {
		err = EINVAL;
		goto RetFreeUnlocked;
	}
	err = copyin(cmd->req, hdr, cmd->req_len);
	if (err != 0)
		goto RetFreeUnlocked;

	mps_dprint(sc, MPS_INFO, "mps_user_command: Function %02X "
	    "MsgFlags %02X\n", hdr->Function, hdr->MsgFlags );

	if (cmd->len > 0) {
		buf = kmalloc(cmd->len, M_MPSUSER, M_WAITOK|M_ZERO);
		cm->cm_data = buf;
		cm->cm_length = cmd->len;
	} else {
		cm->cm_data = NULL;
		cm->cm_length = 0;
	}

	cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE;
	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;

	/* Per-function validation and SGL setup. */
	err = mps_user_setup_request(cm, cmd);
	if (err != 0) {
		mps_printf(sc, "mps_user_command: unsupported function 0x%X\n",
		    hdr->Function );
		goto RetFreeUnlocked;
	}

	mps_lock(sc);
	err = mps_wait_command(sc, cm, 60);

	if (err) {
		mps_printf(sc, "%s: invalid request: error %d\n",
		    __func__, err);
		goto Ret;
	}

	rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
	if (rpl != NULL)
		sz = rpl->MsgLength * 4;
	else
		sz = 0;

	if (sz > cmd->rpl_len) {
		/* Truncate the reply rather than overrun the user buffer. */
		mps_printf(sc,
		    "mps_user_command: reply buffer too small %d required %d\n",
		    cmd->rpl_len, sz );
		err = EINVAL;
		sz = cmd->rpl_len;
	}

	mps_unlock(sc);
	copyout(rpl, cmd->rpl, sz);
	if (buf != NULL)
		copyout(buf, cmd->buf, cmd->len);
	mps_dprint(sc, MPS_INFO, "mps_user_command: reply size %d\n", sz );

RetFreeUnlocked:
	mps_lock(sc);
	if (cm != NULL)
		mps_free_command(sc, cm);
Ret:
	mps_unlock(sc);
	if (buf != NULL)
		kfree(buf, M_MPSUSER);
	return (err);
}

/*
 * Generic passthru ioctl: run a caller-built MPI request with optional
 * data-in/data-out buffers and return the reply (and, for SCSI IO,
 * sense data) to user space.
 */
static int
mps_user_pass_thru(struct mps_softc *sc, mps_pass_thru_t *data)
{
	MPI2_REQUEST_HEADER	*hdr, tmphdr;
	MPI2_DEFAULT_REPLY	*rpl;
	struct mps_command	*cm = NULL;
	int			err = 0, dir = 0, sz;
	uint8_t			function = 0;
	u_int			sense_len;

	/*
	 * Only allow one passthru command at a time.  Use the MPS_FLAGS_BUSY
	 * bit to denote that a passthru is being processed.
	 */
	mps_lock(sc);
	if (sc->mps_flags & MPS_FLAGS_BUSY) {
		mps_dprint(sc, MPS_INFO, "%s: Only one passthru command "
		    "allowed at a single time.", __func__);
		mps_unlock(sc);
		return (EBUSY);
	}
	sc->mps_flags |= MPS_FLAGS_BUSY;
	mps_unlock(sc);

	/*
	 * Do some validation on data direction.  Valid cases are:
	 *    1) DataSize is 0 and direction is NONE
	 *    2) DataSize is non-zero and one of:
	 *        a) direction is READ or
	 *        b) direction is WRITE or
	 *        c) direction is BOTH and DataOutSize is non-zero
	 * If valid and the direction is BOTH, change the direction to READ.
	 * if valid and the direction is not BOTH, make sure DataOutSize is 0.
	 */
	if (((data->DataSize == 0) &&
	    (data->DataDirection == MPS_PASS_THRU_DIRECTION_NONE)) ||
	    ((data->DataSize != 0) &&
	    ((data->DataDirection == MPS_PASS_THRU_DIRECTION_READ) ||
	    (data->DataDirection == MPS_PASS_THRU_DIRECTION_WRITE) ||
	    ((data->DataDirection == MPS_PASS_THRU_DIRECTION_BOTH) &&
	    (data->DataOutSize != 0))))) {
		if (data->DataDirection == MPS_PASS_THRU_DIRECTION_BOTH)
			data->DataDirection = MPS_PASS_THRU_DIRECTION_READ;
		else
			data->DataOutSize = 0;
	} else
		return (EINVAL);

	mps_dprint(sc, MPS_INFO, "%s: req 0x%jx %d rpl 0x%jx %d "
	    "data in 0x%jx %d data out 0x%jx %d data dir %d\n", __func__,
	    data->PtrRequest, data->RequestSize, data->PtrReply,
	    data->ReplySize, data->PtrData, data->DataSize,
	    data->PtrDataOut, data->DataOutSize, data->DataDirection);

	/*
	 * copy in the header so we know what we're dealing with before we
	 * commit to allocating a command for it.
	 */
	err = copyin(PTRIN(data->PtrRequest), &tmphdr, data->RequestSize);
	if (err != 0)
		goto RetFreeUnlocked;

	/* The request must fit within a single request frame. */
	if (data->RequestSize > (int)sc->facts->IOCRequestFrameSize * 4) {
		err = EINVAL;
		goto RetFreeUnlocked;
	}

	function = tmphdr.Function;
	mps_dprint(sc, MPS_INFO, "%s: Function %02X MsgFlags %02X\n", __func__,
	    function, tmphdr.MsgFlags);

	/*
	 * Handle a passthru TM request.
	 */
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		MPI2_SCSI_TASK_MANAGE_REQUEST	*task;

		mps_lock(sc);
		cm = mpssas_alloc_tm(sc);
		if (cm == NULL) {
			err = EINVAL;
			goto Ret;
		}

		/* Copy the header in.  Only a small fixup is needed. */
		task = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
		bcopy(&tmphdr, task, data->RequestSize);
		task->TaskMID = cm->cm_desc.Default.SMID;

		cm->cm_data = NULL;
		cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
		cm->cm_complete = NULL;
		cm->cm_complete_data = NULL;

		err = mps_wait_command(sc, cm, 30);

		if (err != 0) {
			err = EIO;
			mps_dprint(sc, MPS_FAULT, "%s: task management failed",
			    __func__);
		}
		/*
		 * Copy the reply data and sense data to user space.
		 */
		if (cm->cm_reply != NULL) {
			rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
			sz = rpl->MsgLength * 4;

			if (sz > data->ReplySize) {
				mps_printf(sc, "%s: reply buffer too small: %d, "
				    "required: %d\n", __func__, data->ReplySize, sz);
				err = EINVAL;
			} else {
				/* Drop the lock around the user copy. */
				mps_unlock(sc);
				copyout(cm->cm_reply, PTRIN(data->PtrReply),
				    data->ReplySize);
				mps_lock(sc);
			}
		}
		mpssas_free_tm(sc, cm);
		goto Ret;
	}

	mps_lock(sc);
	cm = mps_alloc_command(sc);

	if (cm == NULL) {
		mps_printf(sc, "%s: no mps requests\n", __func__);
		err = ENOMEM;
		goto Ret;
	}
	mps_unlock(sc);

	hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;
	bcopy(&tmphdr, hdr, data->RequestSize);

	/*
	 * Do some checking to make sure the IOCTL request contains a valid
	 * request.  Then set the SGL info.
	 */
	mpi_init_sge(cm, hdr, (void *)((uint8_t *)hdr + data->RequestSize));

	/*
	 * Set up for read, write or both.  From check above, DataOutSize will
	 * be 0 if direction is READ or WRITE, but it will have some non-zero
	 * value if the direction is BOTH.  So, just use the biggest size to get
	 * the cm_data buffer size.  If direction is BOTH, 2 SGLs need to be set
	 * up; the first is for the request and the second will contain the
	 * response data. cm_out_len needs to be set here and this will be used
	 * when the SGLs are set up.
	 */
	cm->cm_data = NULL;
	cm->cm_length = MAX(data->DataSize, data->DataOutSize);
	cm->cm_out_len = data->DataOutSize;
	cm->cm_flags = 0;
	if (cm->cm_length != 0) {
		cm->cm_data = kmalloc(cm->cm_length, M_MPSUSER, M_WAITOK |
		    M_ZERO);
		if (cm->cm_data == NULL) {
			mps_dprint(sc, MPS_FAULT, "%s: alloc failed for IOCTL "
			    "passthru length %d\n", __func__, cm->cm_length);
		} else {
			cm->cm_flags = MPS_CM_FLAGS_DATAIN;
			if (data->DataOutSize) {
				cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
				err = copyin(PTRIN(data->PtrDataOut),
				    cm->cm_data, data->DataOutSize);
			} else if (data->DataDirection ==
			    MPS_PASS_THRU_DIRECTION_WRITE) {
				cm->cm_flags = MPS_CM_FLAGS_DATAOUT;
				err = copyin(PTRIN(data->PtrData),
				    cm->cm_data, data->DataSize);
			}
			if (err != 0)
				mps_dprint(sc, MPS_FAULT, "%s: failed to copy "
				    "IOCTL data from user space\n", __func__);
		}
	}
	cm->cm_flags |= MPS_CM_FLAGS_SGE_SIMPLE;
	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;

	/*
	 * Set up Sense buffer and SGL offset for IO passthru.  SCSI IO request
	 * uses SCSI IO descriptor.
	 */
	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		MPI2_SCSI_IO_REQUEST	*scsi_io_req;

		scsi_io_req = (MPI2_SCSI_IO_REQUEST *)hdr;
		/*
		 * Put SGE for data and data_out buffer at the end of
		 * scsi_io_request message header (64 bytes in total).
		 * Following above SGEs, the residual space will be used by
		 * sense data.
		 */
		scsi_io_req->SenseBufferLength = (uint8_t)(data->RequestSize -
		    64);
		scsi_io_req->SenseBufferLowAddress = cm->cm_sense_busaddr;

		/*
		 * Set SGLOffset0 value.  This is the number of dwords that SGL
		 * is offset from the beginning of MPI2_SCSI_IO_REQUEST struct.
		 */
		scsi_io_req->SGLOffset0 = 24;

		/*
		 * Setup descriptor info.  RAID passthrough must use the
		 * default request descriptor which is already set, so if this
		 * is a SCSI IO request, change the descriptor to SCSI IO.
		 * Also, if this is a SCSI IO request, handle the reply in the
		 * mpssas_scsio_complete function.
		 */
		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
			cm->cm_desc.SCSIIO.RequestFlags =
			    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
			cm->cm_desc.SCSIIO.DevHandle = scsi_io_req->DevHandle;

			/*
			 * Make sure the DevHandle is not 0 because this is a
			 * likely error.
			 */
			if (scsi_io_req->DevHandle == 0) {
				err = EINVAL;
				goto RetFreeUnlocked;
			}
		}
	}

	mps_lock(sc);

	err = mps_wait_command(sc, cm, 30);

	if (err) {
		mps_printf(sc, "%s: invalid request: error %d\n", __func__,
		    err);
		mps_unlock(sc);
		goto RetFreeUnlocked;
	}

	/*
	 * Sync the DMA data, if any.  Then copy the data to user space.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);

		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) {
			mps_unlock(sc);
			err = copyout(cm->cm_data,
			    PTRIN(data->PtrData), data->DataSize);
			mps_lock(sc);
			if (err != 0)
				mps_dprint(sc, MPS_FAULT, "%s: failed to copy "
				    "IOCTL data to user space\n", __func__);
		}
	}

	/*
	 * Copy the reply data and sense data to user space.
1009 */ 1010 if (cm->cm_reply != NULL) { 1011 rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply; 1012 sz = rpl->MsgLength * 4; 1013 1014 if (sz > data->ReplySize) { 1015 mps_printf(sc, "%s: reply buffer too small: %d, " 1016 "required: %d\n", __func__, data->ReplySize, sz); 1017 err = EINVAL; 1018 } else { 1019 mps_unlock(sc); 1020 copyout(cm->cm_reply, PTRIN(data->PtrReply), 1021 data->ReplySize); 1022 mps_lock(sc); 1023 } 1024 1025 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) || 1026 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { 1027 if (((MPI2_SCSI_IO_REPLY *)rpl)->SCSIState & 1028 MPI2_SCSI_STATE_AUTOSENSE_VALID) { 1029 sense_len = 1030 MIN(((MPI2_SCSI_IO_REPLY *)rpl)->SenseCount, 1031 sizeof(struct scsi_sense_data)); 1032 mps_unlock(sc); 1033 copyout(cm->cm_sense, cm->cm_req + 64, sense_len); 1034 mps_lock(sc); 1035 } 1036 } 1037 } 1038 mps_unlock(sc); 1039 1040 RetFreeUnlocked: 1041 mps_lock(sc); 1042 1043 if (cm != NULL) { 1044 if (cm->cm_data) 1045 kfree(cm->cm_data, M_MPSUSER); 1046 mps_free_command(sc, cm); 1047 } 1048 Ret: 1049 sc->mps_flags &= ~MPS_FLAGS_BUSY; 1050 mps_unlock(sc); 1051 1052 return (err); 1053 } 1054 1055 static void 1056 mps_user_get_adapter_data(struct mps_softc *sc, mps_adapter_data_t *data) 1057 { 1058 Mpi2ConfigReply_t mpi_reply; 1059 Mpi2BiosPage3_t config_page; 1060 1061 /* 1062 * Use the PCI interface functions to get the Bus, Device, and Function 1063 * information. 1064 */ 1065 data->PciInformation.u.bits.BusNumber = pci_get_bus(sc->mps_dev); 1066 data->PciInformation.u.bits.DeviceNumber = pci_get_slot(sc->mps_dev); 1067 data->PciInformation.u.bits.FunctionNumber = 1068 pci_get_function(sc->mps_dev); 1069 1070 /* 1071 * Get the FW version that should already be saved in IOC Facts. 1072 */ 1073 data->MpiFirmwareVersion = sc->facts->FWVersion.Word; 1074 1075 /* 1076 * General device info. 
1077 */ 1078 data->AdapterType = MPSIOCTL_ADAPTER_TYPE_SAS2; 1079 if (sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) 1080 data->AdapterType = MPSIOCTL_ADAPTER_TYPE_SAS2_SSS6200; 1081 data->PCIDeviceHwId = pci_get_device(sc->mps_dev); 1082 data->PCIDeviceHwRev = pci_read_config(sc->mps_dev, PCIR_REVID, 1); 1083 data->SubSystemId = pci_get_subdevice(sc->mps_dev); 1084 data->SubsystemVendorId = pci_get_subvendor(sc->mps_dev); 1085 1086 /* 1087 * Get the driver version. 1088 */ 1089 strcpy((char *)&data->DriverVersion[0], MPS_DRIVER_VERSION); 1090 1091 /* 1092 * Need to get BIOS Config Page 3 for the BIOS Version. 1093 */ 1094 data->BiosVersion = 0; 1095 mps_lock(sc); 1096 if (mps_config_get_bios_pg3(sc, &mpi_reply, &config_page)) 1097 kprintf("%s: Error while retrieving BIOS Version\n", __func__); 1098 else 1099 data->BiosVersion = config_page.BiosVersion; 1100 mps_unlock(sc); 1101 } 1102 1103 static void 1104 mps_user_read_pci_info(struct mps_softc *sc, mps_pci_info_t *data) 1105 { 1106 int i; 1107 1108 /* 1109 * Use the PCI interface functions to get the Bus, Device, and Function 1110 * information. 1111 */ 1112 data->BusNumber = pci_get_bus(sc->mps_dev); 1113 data->DeviceNumber = pci_get_slot(sc->mps_dev); 1114 data->FunctionNumber = pci_get_function(sc->mps_dev); 1115 1116 /* 1117 * Now get the interrupt vector and the pci header. The vector can 1118 * only be 0 right now. The header is the first 256 bytes of config 1119 * space. 
1120 */ 1121 data->InterruptVector = 0; 1122 for (i = 0; i < sizeof (data->PciHeader); i++) { 1123 data->PciHeader[i] = pci_read_config(sc->mps_dev, i, 1); 1124 } 1125 } 1126 1127 static uint8_t 1128 mps_get_fw_diag_buffer_number(struct mps_softc *sc, uint32_t unique_id) 1129 { 1130 uint8_t index; 1131 1132 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) { 1133 if (sc->fw_diag_buffer_list[index].unique_id == unique_id) { 1134 return (index); 1135 } 1136 } 1137 1138 return (MPS_FW_DIAGNOSTIC_UID_NOT_FOUND); 1139 } 1140 1141 static int 1142 mps_post_fw_diag_buffer(struct mps_softc *sc, 1143 mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code) 1144 { 1145 MPI2_DIAG_BUFFER_POST_REQUEST *req; 1146 MPI2_DIAG_BUFFER_POST_REPLY *reply; 1147 struct mps_command *cm = NULL; 1148 int i, status; 1149 1150 /* 1151 * If buffer is not enabled, just leave. 1152 */ 1153 *return_code = MPS_FW_DIAG_ERROR_POST_FAILED; 1154 if (!pBuffer->enabled) { 1155 return (MPS_DIAG_FAILURE); 1156 } 1157 1158 /* 1159 * Clear some flags initially. 1160 */ 1161 pBuffer->force_release = FALSE; 1162 pBuffer->valid_data = FALSE; 1163 pBuffer->owned_by_firmware = FALSE; 1164 1165 /* 1166 * Get a command. 1167 */ 1168 cm = mps_alloc_command(sc); 1169 if (cm == NULL) { 1170 mps_printf(sc, "%s: no mps requests\n", __func__); 1171 return (MPS_DIAG_FAILURE); 1172 } 1173 1174 /* 1175 * Build the request for releasing the FW Diag Buffer and send it. 
1176 */ 1177 req = (MPI2_DIAG_BUFFER_POST_REQUEST *)cm->cm_req; 1178 req->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; 1179 req->BufferType = pBuffer->buffer_type; 1180 req->ExtendedType = pBuffer->extended_type; 1181 req->BufferLength = pBuffer->size; 1182 for (i = 0; i < (sizeof(req->ProductSpecific) / 4); i++) 1183 req->ProductSpecific[i] = pBuffer->product_specific[i]; 1184 mps_from_u64(sc->fw_diag_busaddr, &req->BufferAddress); 1185 cm->cm_data = NULL; 1186 cm->cm_length = 0; 1187 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1188 cm->cm_complete_data = NULL; 1189 1190 /* 1191 * Send command synchronously. 1192 */ 1193 status = mps_wait_command(sc, cm, 30); 1194 if (status) { 1195 mps_printf(sc, "%s: invalid request: error %d\n", __func__, 1196 status); 1197 status = MPS_DIAG_FAILURE; 1198 goto done; 1199 } 1200 1201 /* 1202 * Process POST reply. 1203 */ 1204 reply = (MPI2_DIAG_BUFFER_POST_REPLY *)cm->cm_reply; 1205 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) { 1206 status = MPS_DIAG_FAILURE; 1207 mps_dprint(sc, MPS_FAULT, "%s: post of FW Diag Buffer failed " 1208 "with IOCStatus = 0x%x, IOCLogInfo = 0x%x and " 1209 "TransferLength = 0x%x\n", __func__, reply->IOCStatus, 1210 reply->IOCLogInfo, reply->TransferLength); 1211 goto done; 1212 } 1213 1214 /* 1215 * Post was successful. 1216 */ 1217 pBuffer->valid_data = TRUE; 1218 pBuffer->owned_by_firmware = TRUE; 1219 *return_code = MPS_FW_DIAG_ERROR_SUCCESS; 1220 status = MPS_DIAG_SUCCESS; 1221 1222 done: 1223 mps_free_command(sc, cm); 1224 return (status); 1225 } 1226 1227 static int 1228 mps_release_fw_diag_buffer(struct mps_softc *sc, 1229 mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code, 1230 uint32_t diag_type) 1231 { 1232 MPI2_DIAG_RELEASE_REQUEST *req; 1233 MPI2_DIAG_RELEASE_REPLY *reply; 1234 struct mps_command *cm = NULL; 1235 int status; 1236 1237 /* 1238 * If buffer is not enabled, just leave. 
1239 */ 1240 *return_code = MPS_FW_DIAG_ERROR_RELEASE_FAILED; 1241 if (!pBuffer->enabled) { 1242 mps_dprint(sc, MPS_INFO, "%s: This buffer type is not supported " 1243 "by the IOC", __func__); 1244 return (MPS_DIAG_FAILURE); 1245 } 1246 1247 /* 1248 * Clear some flags initially. 1249 */ 1250 pBuffer->force_release = FALSE; 1251 pBuffer->valid_data = FALSE; 1252 pBuffer->owned_by_firmware = FALSE; 1253 1254 /* 1255 * Get a command. 1256 */ 1257 cm = mps_alloc_command(sc); 1258 if (cm == NULL) { 1259 mps_printf(sc, "%s: no mps requests\n", __func__); 1260 return (MPS_DIAG_FAILURE); 1261 } 1262 1263 /* 1264 * Build the request for releasing the FW Diag Buffer and send it. 1265 */ 1266 req = (MPI2_DIAG_RELEASE_REQUEST *)cm->cm_req; 1267 req->Function = MPI2_FUNCTION_DIAG_RELEASE; 1268 req->BufferType = pBuffer->buffer_type; 1269 cm->cm_data = NULL; 1270 cm->cm_length = 0; 1271 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1272 cm->cm_complete_data = NULL; 1273 1274 /* 1275 * Send command synchronously. 1276 */ 1277 status = mps_wait_command(sc, cm, 30); 1278 if (status) { 1279 mps_printf(sc, "%s: invalid request: error %d\n", __func__, 1280 status); 1281 status = MPS_DIAG_FAILURE; 1282 goto done; 1283 } 1284 1285 /* 1286 * Process RELEASE reply. 1287 */ 1288 reply = (MPI2_DIAG_RELEASE_REPLY *)cm->cm_reply; 1289 if ((reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) || 1290 pBuffer->owned_by_firmware) { 1291 status = MPS_DIAG_FAILURE; 1292 mps_dprint(sc, MPS_FAULT, "%s: release of FW Diag Buffer " 1293 "failed with IOCStatus = 0x%x and IOCLogInfo = 0x%x\n", 1294 __func__, reply->IOCStatus, reply->IOCLogInfo); 1295 goto done; 1296 } 1297 1298 /* 1299 * Release was successful. 1300 */ 1301 *return_code = MPS_FW_DIAG_ERROR_SUCCESS; 1302 status = MPS_DIAG_SUCCESS; 1303 1304 /* 1305 * If this was for an UNREGISTER diag type command, clear the unique ID. 
1306 */ 1307 if (diag_type == MPS_FW_DIAG_TYPE_UNREGISTER) { 1308 pBuffer->unique_id = MPS_FW_DIAG_INVALID_UID; 1309 } 1310 1311 done: 1312 return (status); 1313 } 1314 1315 static int 1316 mps_diag_register(struct mps_softc *sc, mps_fw_diag_register_t *diag_register, 1317 uint32_t *return_code) 1318 { 1319 mps_fw_diagnostic_buffer_t *pBuffer; 1320 uint8_t extended_type, buffer_type, i; 1321 uint32_t buffer_size; 1322 uint32_t unique_id; 1323 int status; 1324 1325 extended_type = diag_register->ExtendedType; 1326 buffer_type = diag_register->BufferType; 1327 buffer_size = diag_register->RequestedBufferSize; 1328 unique_id = diag_register->UniqueId; 1329 1330 /* 1331 * Check for valid buffer type 1332 */ 1333 if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) { 1334 *return_code = MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1335 return (MPS_DIAG_FAILURE); 1336 } 1337 1338 /* 1339 * Get the current buffer and look up the unique ID. The unique ID 1340 * should not be found. If it is, the ID is already in use. 1341 */ 1342 i = mps_get_fw_diag_buffer_number(sc, unique_id); 1343 pBuffer = &sc->fw_diag_buffer_list[buffer_type]; 1344 if (i != MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) { 1345 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1346 return (MPS_DIAG_FAILURE); 1347 } 1348 1349 /* 1350 * The buffer's unique ID should not be registered yet, and the given 1351 * unique ID cannot be 0. 1352 */ 1353 if ((pBuffer->unique_id != MPS_FW_DIAG_INVALID_UID) || 1354 (unique_id == MPS_FW_DIAG_INVALID_UID)) { 1355 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1356 return (MPS_DIAG_FAILURE); 1357 } 1358 1359 /* 1360 * If this buffer is already posted as immediate, just change owner. 1361 */ 1362 if (pBuffer->immediate && pBuffer->owned_by_firmware && 1363 (pBuffer->unique_id == MPS_FW_DIAG_INVALID_UID)) { 1364 pBuffer->immediate = FALSE; 1365 pBuffer->unique_id = unique_id; 1366 return (MPS_DIAG_SUCCESS); 1367 } 1368 1369 /* 1370 * Post a new buffer after checking if it's enabled. 
The DMA buffer 1371 * that is allocated will be contiguous (nsegments = 1). 1372 */ 1373 if (!pBuffer->enabled) { 1374 *return_code = MPS_FW_DIAG_ERROR_NO_BUFFER; 1375 return (MPS_DIAG_FAILURE); 1376 } 1377 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 1378 1, 0, /* algnmnt, boundary */ 1379 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 1380 BUS_SPACE_MAXADDR, /* highaddr */ 1381 NULL, NULL, /* filter, filterarg */ 1382 buffer_size, /* maxsize */ 1383 1, /* nsegments */ 1384 buffer_size, /* maxsegsize */ 1385 0, /* flags */ 1386 &sc->fw_diag_dmat)) { 1387 device_printf(sc->mps_dev, "Cannot allocate FW diag buffer DMA " 1388 "tag\n"); 1389 return (ENOMEM); 1390 } 1391 if (bus_dmamem_alloc(sc->fw_diag_dmat, (void **)&sc->fw_diag_buffer, 1392 BUS_DMA_NOWAIT, &sc->fw_diag_map)) { 1393 device_printf(sc->mps_dev, "Cannot allocate FW diag buffer " 1394 "memory\n"); 1395 return (ENOMEM); 1396 } 1397 bzero(sc->fw_diag_buffer, buffer_size); 1398 bus_dmamap_load(sc->fw_diag_dmat, sc->fw_diag_map, sc->fw_diag_buffer, 1399 buffer_size, mps_memaddr_cb, &sc->fw_diag_busaddr, 0); 1400 pBuffer->size = buffer_size; 1401 1402 /* 1403 * Copy the given info to the diag buffer and post the buffer. 1404 */ 1405 pBuffer->buffer_type = buffer_type; 1406 pBuffer->immediate = FALSE; 1407 if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) { 1408 for (i = 0; i < (sizeof (pBuffer->product_specific) / 4); 1409 i++) { 1410 pBuffer->product_specific[i] = 1411 diag_register->ProductSpecific[i]; 1412 } 1413 } 1414 pBuffer->extended_type = extended_type; 1415 pBuffer->unique_id = unique_id; 1416 status = mps_post_fw_diag_buffer(sc, pBuffer, return_code); 1417 1418 /* 1419 * In case there was a failure, free the DMA buffer. 
1420 */ 1421 if (status == MPS_DIAG_FAILURE) { 1422 if (sc->fw_diag_busaddr != 0) 1423 bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map); 1424 if (sc->fw_diag_buffer != NULL) 1425 bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer, 1426 sc->fw_diag_map); 1427 if (sc->fw_diag_dmat != NULL) 1428 bus_dma_tag_destroy(sc->fw_diag_dmat); 1429 } 1430 1431 return (status); 1432 } 1433 1434 static int 1435 mps_diag_unregister(struct mps_softc *sc, 1436 mps_fw_diag_unregister_t *diag_unregister, uint32_t *return_code) 1437 { 1438 mps_fw_diagnostic_buffer_t *pBuffer; 1439 uint8_t i; 1440 uint32_t unique_id; 1441 int status; 1442 1443 unique_id = diag_unregister->UniqueId; 1444 1445 /* 1446 * Get the current buffer and look up the unique ID. The unique ID 1447 * should be there. 1448 */ 1449 i = mps_get_fw_diag_buffer_number(sc, unique_id); 1450 if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) { 1451 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1452 return (MPS_DIAG_FAILURE); 1453 } 1454 1455 pBuffer = &sc->fw_diag_buffer_list[i]; 1456 1457 /* 1458 * Try to release the buffer from FW before freeing it. If release 1459 * fails, don't free the DMA buffer in case FW tries to access it 1460 * later. If buffer is not owned by firmware, can't release it. 1461 */ 1462 if (!pBuffer->owned_by_firmware) { 1463 status = MPS_DIAG_SUCCESS; 1464 } else { 1465 status = mps_release_fw_diag_buffer(sc, pBuffer, return_code, 1466 MPS_FW_DIAG_TYPE_UNREGISTER); 1467 } 1468 1469 /* 1470 * At this point, return the current status no matter what happens with 1471 * the DMA buffer. 
1472 */ 1473 pBuffer->unique_id = MPS_FW_DIAG_INVALID_UID; 1474 if (status == MPS_DIAG_SUCCESS) { 1475 if (sc->fw_diag_busaddr != 0) 1476 bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map); 1477 if (sc->fw_diag_buffer != NULL) 1478 bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer, 1479 sc->fw_diag_map); 1480 if (sc->fw_diag_dmat != NULL) 1481 bus_dma_tag_destroy(sc->fw_diag_dmat); 1482 } 1483 1484 return (status); 1485 } 1486 1487 static int 1488 mps_diag_query(struct mps_softc *sc, mps_fw_diag_query_t *diag_query, 1489 uint32_t *return_code) 1490 { 1491 mps_fw_diagnostic_buffer_t *pBuffer; 1492 uint8_t i; 1493 uint32_t unique_id; 1494 1495 unique_id = diag_query->UniqueId; 1496 1497 /* 1498 * If ID is valid, query on ID. 1499 * If ID is invalid, query on buffer type. 1500 */ 1501 if (unique_id == MPS_FW_DIAG_INVALID_UID) { 1502 i = diag_query->BufferType; 1503 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) { 1504 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1505 return (MPS_DIAG_FAILURE); 1506 } 1507 } else { 1508 i = mps_get_fw_diag_buffer_number(sc, unique_id); 1509 if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) { 1510 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1511 return (MPS_DIAG_FAILURE); 1512 } 1513 } 1514 1515 /* 1516 * Fill query structure with the diag buffer info. 
1517 */ 1518 pBuffer = &sc->fw_diag_buffer_list[i]; 1519 diag_query->BufferType = pBuffer->buffer_type; 1520 diag_query->ExtendedType = pBuffer->extended_type; 1521 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) { 1522 for (i = 0; i < (sizeof(diag_query->ProductSpecific) / 4); 1523 i++) { 1524 diag_query->ProductSpecific[i] = 1525 pBuffer->product_specific[i]; 1526 } 1527 } 1528 diag_query->TotalBufferSize = pBuffer->size; 1529 diag_query->DriverAddedBufferSize = 0; 1530 diag_query->UniqueId = pBuffer->unique_id; 1531 diag_query->ApplicationFlags = 0; 1532 diag_query->DiagnosticFlags = 0; 1533 1534 /* 1535 * Set/Clear application flags 1536 */ 1537 if (pBuffer->immediate) { 1538 diag_query->ApplicationFlags &= ~MPS_FW_DIAG_FLAG_APP_OWNED; 1539 } else { 1540 diag_query->ApplicationFlags |= MPS_FW_DIAG_FLAG_APP_OWNED; 1541 } 1542 if (pBuffer->valid_data || pBuffer->owned_by_firmware) { 1543 diag_query->ApplicationFlags |= MPS_FW_DIAG_FLAG_BUFFER_VALID; 1544 } else { 1545 diag_query->ApplicationFlags &= ~MPS_FW_DIAG_FLAG_BUFFER_VALID; 1546 } 1547 if (pBuffer->owned_by_firmware) { 1548 diag_query->ApplicationFlags |= 1549 MPS_FW_DIAG_FLAG_FW_BUFFER_ACCESS; 1550 } else { 1551 diag_query->ApplicationFlags &= 1552 ~MPS_FW_DIAG_FLAG_FW_BUFFER_ACCESS; 1553 } 1554 1555 return (MPS_DIAG_SUCCESS); 1556 } 1557 1558 static int 1559 mps_diag_read_buffer(struct mps_softc *sc, 1560 mps_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf, 1561 uint32_t *return_code) 1562 { 1563 mps_fw_diagnostic_buffer_t *pBuffer; 1564 uint8_t i, *pData; 1565 uint32_t unique_id; 1566 int status; 1567 1568 unique_id = diag_read_buffer->UniqueId; 1569 1570 /* 1571 * Get the current buffer and look up the unique ID. The unique ID 1572 * should be there. 
1573 */ 1574 i = mps_get_fw_diag_buffer_number(sc, unique_id); 1575 if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) { 1576 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1577 return (MPS_DIAG_FAILURE); 1578 } 1579 1580 pBuffer = &sc->fw_diag_buffer_list[i]; 1581 1582 /* 1583 * Make sure requested read is within limits 1584 */ 1585 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead > 1586 pBuffer->size) { 1587 *return_code = MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1588 return (MPS_DIAG_FAILURE); 1589 } 1590 1591 /* 1592 * Copy the requested data from DMA to the diag_read_buffer. The DMA 1593 * buffer that was allocated is one contiguous buffer. 1594 */ 1595 pData = (uint8_t *)(sc->fw_diag_buffer + 1596 diag_read_buffer->StartingOffset); 1597 if (copyout(pData, ioctl_buf, diag_read_buffer->BytesToRead) != 0) 1598 return (MPS_DIAG_FAILURE); 1599 diag_read_buffer->Status = 0; 1600 1601 /* 1602 * Set or clear the Force Release flag. 1603 */ 1604 if (pBuffer->force_release) { 1605 diag_read_buffer->Flags |= MPS_FW_DIAG_FLAG_FORCE_RELEASE; 1606 } else { 1607 diag_read_buffer->Flags &= ~MPS_FW_DIAG_FLAG_FORCE_RELEASE; 1608 } 1609 1610 /* 1611 * If buffer is to be reregistered, make sure it's not already owned by 1612 * firmware first. 1613 */ 1614 status = MPS_DIAG_SUCCESS; 1615 if (!pBuffer->owned_by_firmware) { 1616 if (diag_read_buffer->Flags & MPS_FW_DIAG_FLAG_REREGISTER) { 1617 status = mps_post_fw_diag_buffer(sc, pBuffer, 1618 return_code); 1619 } 1620 } 1621 1622 return (status); 1623 } 1624 1625 static int 1626 mps_diag_release(struct mps_softc *sc, mps_fw_diag_release_t *diag_release, 1627 uint32_t *return_code) 1628 { 1629 mps_fw_diagnostic_buffer_t *pBuffer; 1630 uint8_t i; 1631 uint32_t unique_id; 1632 int status; 1633 1634 unique_id = diag_release->UniqueId; 1635 1636 /* 1637 * Get the current buffer and look up the unique ID. The unique ID 1638 * should be there. 
1639 */ 1640 i = mps_get_fw_diag_buffer_number(sc, unique_id); 1641 if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) { 1642 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1643 return (MPS_DIAG_FAILURE); 1644 } 1645 1646 pBuffer = &sc->fw_diag_buffer_list[i]; 1647 1648 /* 1649 * If buffer is not owned by firmware, it's already been released. 1650 */ 1651 if (!pBuffer->owned_by_firmware) { 1652 *return_code = MPS_FW_DIAG_ERROR_ALREADY_RELEASED; 1653 return (MPS_DIAG_FAILURE); 1654 } 1655 1656 /* 1657 * Release the buffer. 1658 */ 1659 status = mps_release_fw_diag_buffer(sc, pBuffer, return_code, 1660 MPS_FW_DIAG_TYPE_RELEASE); 1661 return (status); 1662 } 1663 1664 static int 1665 mps_do_diag_action(struct mps_softc *sc, uint32_t action, uint8_t *diag_action, 1666 uint32_t length, uint32_t *return_code) 1667 { 1668 mps_fw_diag_register_t diag_register; 1669 mps_fw_diag_unregister_t diag_unregister; 1670 mps_fw_diag_query_t diag_query; 1671 mps_diag_read_buffer_t diag_read_buffer; 1672 mps_fw_diag_release_t diag_release; 1673 int status = MPS_DIAG_SUCCESS; 1674 uint32_t original_return_code; 1675 1676 original_return_code = *return_code; 1677 *return_code = MPS_FW_DIAG_ERROR_SUCCESS; 1678 1679 switch (action) { 1680 case MPS_FW_DIAG_TYPE_REGISTER: 1681 if (!length) { 1682 *return_code = 1683 MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1684 status = MPS_DIAG_FAILURE; 1685 break; 1686 } 1687 if (copyin(diag_action, &diag_register, 1688 sizeof(diag_register)) != 0) 1689 return (MPS_DIAG_FAILURE); 1690 status = mps_diag_register(sc, &diag_register, 1691 return_code); 1692 break; 1693 1694 case MPS_FW_DIAG_TYPE_UNREGISTER: 1695 if (length < sizeof(diag_unregister)) { 1696 *return_code = 1697 MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1698 status = MPS_DIAG_FAILURE; 1699 break; 1700 } 1701 if (copyin(diag_action, &diag_unregister, 1702 sizeof(diag_unregister)) != 0) 1703 return (MPS_DIAG_FAILURE); 1704 status = mps_diag_unregister(sc, &diag_unregister, 1705 return_code); 1706 break; 1707 1708 
case MPS_FW_DIAG_TYPE_QUERY: 1709 if (length < sizeof (diag_query)) { 1710 *return_code = 1711 MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1712 status = MPS_DIAG_FAILURE; 1713 break; 1714 } 1715 if (copyin(diag_action, &diag_query, sizeof(diag_query)) 1716 != 0) 1717 return (MPS_DIAG_FAILURE); 1718 status = mps_diag_query(sc, &diag_query, return_code); 1719 if (status == MPS_DIAG_SUCCESS) 1720 if (copyout(&diag_query, diag_action, 1721 sizeof (diag_query)) != 0) 1722 return (MPS_DIAG_FAILURE); 1723 break; 1724 1725 case MPS_FW_DIAG_TYPE_READ_BUFFER: 1726 if (copyin(diag_action, &diag_read_buffer, 1727 sizeof(diag_read_buffer)) != 0) 1728 return (MPS_DIAG_FAILURE); 1729 if (length < diag_read_buffer.BytesToRead) { 1730 *return_code = 1731 MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1732 status = MPS_DIAG_FAILURE; 1733 break; 1734 } 1735 status = mps_diag_read_buffer(sc, &diag_read_buffer, 1736 PTRIN(diag_read_buffer.PtrDataBuffer), 1737 return_code); 1738 if (status == MPS_DIAG_SUCCESS) { 1739 if (copyout(&diag_read_buffer, diag_action, 1740 sizeof(diag_read_buffer) - 1741 sizeof(diag_read_buffer.PtrDataBuffer)) != 1742 0) 1743 return (MPS_DIAG_FAILURE); 1744 } 1745 break; 1746 1747 case MPS_FW_DIAG_TYPE_RELEASE: 1748 if (length < sizeof(diag_release)) { 1749 *return_code = 1750 MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1751 status = MPS_DIAG_FAILURE; 1752 break; 1753 } 1754 if (copyin(diag_action, &diag_release, 1755 sizeof(diag_release)) != 0) 1756 return (MPS_DIAG_FAILURE); 1757 status = mps_diag_release(sc, &diag_release, 1758 return_code); 1759 break; 1760 1761 default: 1762 *return_code = MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1763 status = MPS_DIAG_FAILURE; 1764 break; 1765 } 1766 1767 if ((status == MPS_DIAG_FAILURE) && 1768 (original_return_code == MPS_FW_DIAG_NEW) && 1769 (*return_code != MPS_FW_DIAG_ERROR_SUCCESS)) 1770 status = MPS_DIAG_SUCCESS; 1771 1772 return (status); 1773 } 1774 1775 static int 1776 mps_user_diag_action(struct mps_softc *sc, mps_diag_action_t *data) 
1777 { 1778 int status; 1779 1780 /* 1781 * Only allow one diag action at one time. 1782 */ 1783 if (sc->mps_flags & MPS_FLAGS_BUSY) { 1784 mps_dprint(sc, MPS_INFO, "%s: Only one FW diag command " 1785 "allowed at a single time.", __func__); 1786 return (EBUSY); 1787 } 1788 sc->mps_flags |= MPS_FLAGS_BUSY; 1789 1790 /* 1791 * Send diag action request 1792 */ 1793 if (data->Action == MPS_FW_DIAG_TYPE_REGISTER || 1794 data->Action == MPS_FW_DIAG_TYPE_UNREGISTER || 1795 data->Action == MPS_FW_DIAG_TYPE_QUERY || 1796 data->Action == MPS_FW_DIAG_TYPE_READ_BUFFER || 1797 data->Action == MPS_FW_DIAG_TYPE_RELEASE) { 1798 status = mps_do_diag_action(sc, data->Action, 1799 PTRIN(data->PtrDiagAction), data->Length, 1800 &data->ReturnCode); 1801 } else 1802 status = EINVAL; 1803 1804 sc->mps_flags &= ~MPS_FLAGS_BUSY; 1805 return (status); 1806 } 1807 1808 /* 1809 * Copy the event recording mask and the event queue size out. For 1810 * clarification, the event recording mask (events_to_record) is not the same 1811 * thing as the event mask (event_mask). events_to_record has a bit set for 1812 * every event type that is to be recorded by the driver, and event_mask has a 1813 * bit cleared for every event that is allowed into the driver from the IOC. 1814 * They really have nothing to do with each other. 1815 */ 1816 static void 1817 mps_user_event_query(struct mps_softc *sc, mps_event_query_t *data) 1818 { 1819 uint8_t i; 1820 1821 mps_lock(sc); 1822 data->Entries = MPS_EVENT_QUEUE_SIZE; 1823 1824 for (i = 0; i < 4; i++) { 1825 data->Types[i] = sc->events_to_record[i]; 1826 } 1827 mps_unlock(sc); 1828 } 1829 1830 /* 1831 * Set the driver's event mask according to what's been given. See 1832 * mps_user_event_query for explanation of the event recording mask and the IOC 1833 * event mask. It's the app's responsibility to enable event logging by setting 1834 * the bits in events_to_record. Initially, no events will be logged. 
1835 */ 1836 static void 1837 mps_user_event_enable(struct mps_softc *sc, mps_event_enable_t *data) 1838 { 1839 uint8_t i; 1840 1841 mps_lock(sc); 1842 for (i = 0; i < 4; i++) { 1843 sc->events_to_record[i] = data->Types[i]; 1844 } 1845 mps_unlock(sc); 1846 } 1847 1848 /* 1849 * Copy out the events that have been recorded, up to the max events allowed. 1850 */ 1851 static int 1852 mps_user_event_report(struct mps_softc *sc, mps_event_report_t *data) 1853 { 1854 int status = 0; 1855 uint32_t size; 1856 1857 mps_lock(sc); 1858 size = data->Size; 1859 if ((size >= sizeof(sc->recorded_events)) && (status == 0)) { 1860 mps_unlock(sc); 1861 if (copyout((void *)sc->recorded_events, 1862 PTRIN(data->PtrEvents), size) != 0) 1863 status = EFAULT; 1864 mps_lock(sc); 1865 } else { 1866 /* 1867 * data->Size value is not large enough to copy event data. 1868 */ 1869 status = EFAULT; 1870 } 1871 1872 /* 1873 * Change size value to match the number of bytes that were copied. 1874 */ 1875 if (status == 0) 1876 data->Size = sizeof(sc->recorded_events); 1877 mps_unlock(sc); 1878 1879 return (status); 1880 } 1881 1882 /* 1883 * Record events into the driver from the IOC if they are not masked. 1884 */ 1885 void 1886 mpssas_record_event(struct mps_softc *sc, 1887 MPI2_EVENT_NOTIFICATION_REPLY *event_reply) 1888 { 1889 uint32_t event; 1890 int i, j; 1891 uint16_t event_data_len; 1892 boolean_t sendAEN = FALSE; 1893 1894 event = event_reply->Event; 1895 1896 /* 1897 * Generate a system event to let anyone who cares know that a 1898 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the 1899 * event mask is set to. 1900 */ 1901 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) { 1902 sendAEN = TRUE; 1903 } 1904 1905 /* 1906 * Record the event only if its corresponding bit is set in 1907 * events_to_record. event_index is the index into recorded_events and 1908 * event_number is the overall number of an event being recorded since 1909 * start-of-day. 
event_index will roll over; event_number will never 1910 * roll over. 1911 */ 1912 i = (uint8_t)(event / 32); 1913 j = (uint8_t)(event % 32); 1914 if ((i < 4) && ((1 << j) & sc->events_to_record[i])) { 1915 i = sc->event_index; 1916 sc->recorded_events[i].Type = event; 1917 sc->recorded_events[i].Number = ++sc->event_number; 1918 bzero(sc->recorded_events[i].Data, MPS_MAX_EVENT_DATA_LENGTH * 1919 4); 1920 event_data_len = event_reply->EventDataLength; 1921 1922 if (event_data_len > 0) { 1923 /* 1924 * Limit data to size in m_event entry 1925 */ 1926 if (event_data_len > MPS_MAX_EVENT_DATA_LENGTH) { 1927 event_data_len = MPS_MAX_EVENT_DATA_LENGTH; 1928 } 1929 for (j = 0; j < event_data_len; j++) { 1930 sc->recorded_events[i].Data[j] = 1931 event_reply->EventData[j]; 1932 } 1933 1934 /* 1935 * check for index wrap-around 1936 */ 1937 if (++i == MPS_EVENT_QUEUE_SIZE) { 1938 i = 0; 1939 } 1940 sc->event_index = (uint8_t)i; 1941 1942 /* 1943 * Set flag to send the event. 1944 */ 1945 sendAEN = TRUE; 1946 } 1947 } 1948 1949 /* 1950 * Generate a system event if flag is set to let anyone who cares know 1951 * that an event has occurred. 1952 */ 1953 if (sendAEN) { 1954 //SLM-how to send a system event (see kqueue, kevent) 1955 // (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS", 1956 // "SAS", NULL, NULL, DDI_NOSLEEP); 1957 } 1958 } 1959 1960 static int 1961 mps_user_reg_access(struct mps_softc *sc, mps_reg_access_t *data) 1962 { 1963 int status = 0; 1964 1965 switch (data->Command) { 1966 /* 1967 * IO access is not supported. 1968 */ 1969 case REG_IO_READ: 1970 case REG_IO_WRITE: 1971 mps_dprint(sc, MPS_INFO, "IO access is not supported. 
" 1972 "Use memory access."); 1973 status = EINVAL; 1974 break; 1975 1976 case REG_MEM_READ: 1977 data->RegData = mps_regread(sc, data->RegOffset); 1978 break; 1979 1980 case REG_MEM_WRITE: 1981 mps_regwrite(sc, data->RegOffset, data->RegData); 1982 break; 1983 1984 default: 1985 status = EINVAL; 1986 break; 1987 } 1988 1989 return (status); 1990 } 1991 1992 static int 1993 mps_user_btdh(struct mps_softc *sc, mps_btdh_mapping_t *data) 1994 { 1995 uint8_t bt2dh = FALSE; 1996 uint8_t dh2bt = FALSE; 1997 uint16_t dev_handle, bus, target; 1998 1999 bus = data->Bus; 2000 target = data->TargetID; 2001 dev_handle = data->DevHandle; 2002 2003 /* 2004 * When DevHandle is 0xFFFF and Bus/Target are not 0xFFFF, use Bus/ 2005 * Target to get DevHandle. When Bus/Target are 0xFFFF and DevHandle is 2006 * not 0xFFFF, use DevHandle to get Bus/Target. Anything else is 2007 * invalid. 2008 */ 2009 if ((bus == 0xFFFF) && (target == 0xFFFF) && (dev_handle != 0xFFFF)) 2010 dh2bt = TRUE; 2011 if ((dev_handle == 0xFFFF) && (bus != 0xFFFF) && (target != 0xFFFF)) 2012 bt2dh = TRUE; 2013 if (!dh2bt && !bt2dh) 2014 return (EINVAL); 2015 2016 /* 2017 * Only handle bus of 0. Make sure target is within range. 
2018 */ 2019 if (bt2dh) { 2020 if (bus != 0) 2021 return (EINVAL); 2022 2023 if (target > sc->max_devices) { 2024 mps_dprint(sc, MPS_FAULT, "Target ID is out of range " 2025 "for Bus/Target to DevHandle mapping."); 2026 return (EINVAL); 2027 } 2028 dev_handle = sc->mapping_table[target].dev_handle; 2029 if (dev_handle) 2030 data->DevHandle = dev_handle; 2031 } else { 2032 bus = 0; 2033 target = mps_mapping_get_sas_id_from_handle(sc, dev_handle); 2034 data->Bus = bus; 2035 data->TargetID = target; 2036 } 2037 2038 return (0); 2039 } 2040 2041 static int 2042 mps_ioctl(struct cdev *dev, u_long cmd, void *arg, int flag) 2043 { 2044 struct mps_softc *sc; 2045 struct mps_cfg_page_req *page_req; 2046 struct mps_ext_cfg_page_req *ext_page_req; 2047 void *mps_page; 2048 int error, reset_loop; 2049 2050 mps_page = NULL; 2051 sc = dev->si_drv1; 2052 page_req = arg; 2053 ext_page_req = arg; 2054 2055 switch (cmd) { 2056 case MPSIO_READ_CFG_HEADER: 2057 mps_lock(sc); 2058 error = mps_user_read_cfg_header(sc, page_req); 2059 mps_unlock(sc); 2060 break; 2061 case MPSIO_READ_CFG_PAGE: 2062 mps_page = kmalloc(page_req->len, M_MPSUSER, M_WAITOK | M_ZERO); 2063 error = copyin(page_req->buf, mps_page, 2064 sizeof(MPI2_CONFIG_PAGE_HEADER)); 2065 if (error) 2066 break; 2067 mps_lock(sc); 2068 error = mps_user_read_cfg_page(sc, page_req, mps_page); 2069 mps_unlock(sc); 2070 if (error) 2071 break; 2072 error = copyout(mps_page, page_req->buf, page_req->len); 2073 break; 2074 case MPSIO_READ_EXT_CFG_HEADER: 2075 mps_lock(sc); 2076 error = mps_user_read_extcfg_header(sc, ext_page_req); 2077 mps_unlock(sc); 2078 break; 2079 case MPSIO_READ_EXT_CFG_PAGE: 2080 mps_page = kmalloc(ext_page_req->len, M_MPSUSER, M_WAITOK|M_ZERO); 2081 error = copyin(ext_page_req->buf, mps_page, 2082 sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); 2083 if (error) 2084 break; 2085 mps_lock(sc); 2086 error = mps_user_read_extcfg_page(sc, ext_page_req, mps_page); 2087 mps_unlock(sc); 2088 if (error) 2089 break; 2090 error 
= copyout(mps_page, ext_page_req->buf, ext_page_req->len); 2091 break; 2092 case MPSIO_WRITE_CFG_PAGE: 2093 mps_page = kmalloc(page_req->len, M_MPSUSER, M_WAITOK|M_ZERO); 2094 error = copyin(page_req->buf, mps_page, page_req->len); 2095 if (error) 2096 break; 2097 mps_lock(sc); 2098 error = mps_user_write_cfg_page(sc, page_req, mps_page); 2099 mps_unlock(sc); 2100 break; 2101 case MPSIO_MPS_COMMAND: 2102 error = mps_user_command(sc, (struct mps_usr_command *)arg); 2103 break; 2104 case MPTIOCTL_PASS_THRU: 2105 /* 2106 * The user has requested to pass through a command to be 2107 * executed by the MPT firmware. Call our routine which does 2108 * this. Only allow one passthru IOCTL at one time. 2109 */ 2110 error = mps_user_pass_thru(sc, (mps_pass_thru_t *)arg); 2111 break; 2112 case MPTIOCTL_GET_ADAPTER_DATA: 2113 /* 2114 * The user has requested to read adapter data. Call our 2115 * routine which does this. 2116 */ 2117 error = 0; 2118 mps_user_get_adapter_data(sc, (mps_adapter_data_t *)arg); 2119 break; 2120 case MPTIOCTL_GET_PCI_INFO: 2121 /* 2122 * The user has requested to read pci info. Call 2123 * our routine which does this. 2124 */ 2125 mps_lock(sc); 2126 error = 0; 2127 mps_user_read_pci_info(sc, (mps_pci_info_t *)arg); 2128 mps_unlock(sc); 2129 break; 2130 case MPTIOCTL_RESET_ADAPTER: 2131 mps_lock(sc); 2132 sc->port_enable_complete = 0; 2133 error = mps_reinit(sc); 2134 mps_unlock(sc); 2135 /* 2136 * Wait no more than 5 minutes for Port Enable to complete 2137 */ 2138 for (reset_loop = 0; (reset_loop < MPS_DIAG_RESET_TIMEOUT) && 2139 (!sc->port_enable_complete); reset_loop++) { 2140 DELAY(1000); 2141 } 2142 if (reset_loop == MPS_DIAG_RESET_TIMEOUT) { 2143 kprintf("Port Enable did not complete after Diag " 2144 "Reset.\n"); 2145 } 2146 break; 2147 case MPTIOCTL_DIAG_ACTION: 2148 /* 2149 * The user has done a diag buffer action. Call our routine 2150 * which does this. Only allow one diag action at one time. 
2151 */ 2152 mps_lock(sc); 2153 error = mps_user_diag_action(sc, (mps_diag_action_t *)arg); 2154 mps_unlock(sc); 2155 break; 2156 case MPTIOCTL_EVENT_QUERY: 2157 /* 2158 * The user has done an event query. Call our routine which does 2159 * this. 2160 */ 2161 error = 0; 2162 mps_user_event_query(sc, (mps_event_query_t *)arg); 2163 break; 2164 case MPTIOCTL_EVENT_ENABLE: 2165 /* 2166 * The user has done an event enable. Call our routine which 2167 * does this. 2168 */ 2169 error = 0; 2170 mps_user_event_enable(sc, (mps_event_enable_t *)arg); 2171 break; 2172 case MPTIOCTL_EVENT_REPORT: 2173 /* 2174 * The user has done an event report. Call our routine which 2175 * does this. 2176 */ 2177 error = mps_user_event_report(sc, (mps_event_report_t *)arg); 2178 break; 2179 case MPTIOCTL_REG_ACCESS: 2180 /* 2181 * The user has requested register access. Call our routine 2182 * which does this. 2183 */ 2184 mps_lock(sc); 2185 error = mps_user_reg_access(sc, (mps_reg_access_t *)arg); 2186 mps_unlock(sc); 2187 break; 2188 case MPTIOCTL_BTDH_MAPPING: 2189 /* 2190 * The user has requested to translate a bus/target to a 2191 * DevHandle or a DevHandle to a bus/target. Call our routine 2192 * which does this. 
2193 */ 2194 error = mps_user_btdh(sc, (mps_btdh_mapping_t *)arg); 2195 break; 2196 default: 2197 error = ENOIOCTL; 2198 break; 2199 } 2200 2201 if (mps_page != NULL) 2202 kfree(mps_page, M_MPSUSER); 2203 2204 return (error); 2205 } 2206 2207 #ifdef COMPAT_FREEBSD32 2208 2209 struct mps_cfg_page_req32 { 2210 MPI2_CONFIG_PAGE_HEADER header; 2211 uint32_t page_address; 2212 uint32_t buf; 2213 int len; 2214 uint16_t ioc_status; 2215 }; 2216 2217 struct mps_ext_cfg_page_req32 { 2218 MPI2_CONFIG_EXTENDED_PAGE_HEADER header; 2219 uint32_t page_address; 2220 uint32_t buf; 2221 int len; 2222 uint16_t ioc_status; 2223 }; 2224 2225 struct mps_raid_action32 { 2226 uint8_t action; 2227 uint8_t volume_bus; 2228 uint8_t volume_id; 2229 uint8_t phys_disk_num; 2230 uint32_t action_data_word; 2231 uint32_t buf; 2232 int len; 2233 uint32_t volume_status; 2234 uint32_t action_data[4]; 2235 uint16_t action_status; 2236 uint16_t ioc_status; 2237 uint8_t write; 2238 }; 2239 2240 struct mps_usr_command32 { 2241 uint32_t req; 2242 uint32_t req_len; 2243 uint32_t rpl; 2244 uint32_t rpl_len; 2245 uint32_t buf; 2246 int len; 2247 uint32_t flags; 2248 }; 2249 2250 #define MPSIO_READ_CFG_HEADER32 _IOWR('M', 200, struct mps_cfg_page_req32) 2251 #define MPSIO_READ_CFG_PAGE32 _IOWR('M', 201, struct mps_cfg_page_req32) 2252 #define MPSIO_READ_EXT_CFG_HEADER32 _IOWR('M', 202, struct mps_ext_cfg_page_req32) 2253 #define MPSIO_READ_EXT_CFG_PAGE32 _IOWR('M', 203, struct mps_ext_cfg_page_req32) 2254 #define MPSIO_WRITE_CFG_PAGE32 _IOWR('M', 204, struct mps_cfg_page_req32) 2255 #define MPSIO_RAID_ACTION32 _IOWR('M', 205, struct mps_raid_action32) 2256 #define MPSIO_MPS_COMMAND32 _IOWR('M', 210, struct mps_usr_command32) 2257 2258 static int 2259 mps_ioctl32(struct cdev *dev, u_long cmd32, void *_arg, int flag, 2260 struct thread *td) 2261 { 2262 struct mps_cfg_page_req32 *page32 = _arg; 2263 struct mps_ext_cfg_page_req32 *ext32 = _arg; 2264 struct mps_raid_action32 *raid32 = _arg; 2265 struct 
mps_usr_command32 *user32 = _arg; 2266 union { 2267 struct mps_cfg_page_req page; 2268 struct mps_ext_cfg_page_req ext; 2269 struct mps_raid_action raid; 2270 struct mps_usr_command user; 2271 } arg; 2272 u_long cmd; 2273 int error; 2274 2275 switch (cmd32) { 2276 case MPSIO_READ_CFG_HEADER32: 2277 case MPSIO_READ_CFG_PAGE32: 2278 case MPSIO_WRITE_CFG_PAGE32: 2279 if (cmd32 == MPSIO_READ_CFG_HEADER32) 2280 cmd = MPSIO_READ_CFG_HEADER; 2281 else if (cmd32 == MPSIO_READ_CFG_PAGE32) 2282 cmd = MPSIO_READ_CFG_PAGE; 2283 else 2284 cmd = MPSIO_WRITE_CFG_PAGE; 2285 CP(*page32, arg.page, header); 2286 CP(*page32, arg.page, page_address); 2287 PTRIN_CP(*page32, arg.page, buf); 2288 CP(*page32, arg.page, len); 2289 CP(*page32, arg.page, ioc_status); 2290 break; 2291 2292 case MPSIO_READ_EXT_CFG_HEADER32: 2293 case MPSIO_READ_EXT_CFG_PAGE32: 2294 if (cmd32 == MPSIO_READ_EXT_CFG_HEADER32) 2295 cmd = MPSIO_READ_EXT_CFG_HEADER; 2296 else 2297 cmd = MPSIO_READ_EXT_CFG_PAGE; 2298 CP(*ext32, arg.ext, header); 2299 CP(*ext32, arg.ext, page_address); 2300 PTRIN_CP(*ext32, arg.ext, buf); 2301 CP(*ext32, arg.ext, len); 2302 CP(*ext32, arg.ext, ioc_status); 2303 break; 2304 2305 case MPSIO_RAID_ACTION32: 2306 cmd = MPSIO_RAID_ACTION; 2307 CP(*raid32, arg.raid, action); 2308 CP(*raid32, arg.raid, volume_bus); 2309 CP(*raid32, arg.raid, volume_id); 2310 CP(*raid32, arg.raid, phys_disk_num); 2311 CP(*raid32, arg.raid, action_data_word); 2312 PTRIN_CP(*raid32, arg.raid, buf); 2313 CP(*raid32, arg.raid, len); 2314 CP(*raid32, arg.raid, volume_status); 2315 bcopy(raid32->action_data, arg.raid.action_data, 2316 sizeof arg.raid.action_data); 2317 CP(*raid32, arg.raid, ioc_status); 2318 CP(*raid32, arg.raid, write); 2319 break; 2320 2321 case MPSIO_MPS_COMMAND32: 2322 cmd = MPSIO_MPS_COMMAND; 2323 PTRIN_CP(*user32, arg.user, req); 2324 CP(*user32, arg.user, req_len); 2325 PTRIN_CP(*user32, arg.user, rpl); 2326 CP(*user32, arg.user, rpl_len); 2327 PTRIN_CP(*user32, arg.user, buf); 2328 
CP(*user32, arg.user, len); 2329 CP(*user32, arg.user, flags); 2330 break; 2331 default: 2332 return (ENOIOCTL); 2333 } 2334 2335 error = mps_ioctl(dev, cmd, &arg, flag, td); 2336 if (error == 0 && (cmd32 & IOC_OUT) != 0) { 2337 switch (cmd32) { 2338 case MPSIO_READ_CFG_HEADER32: 2339 case MPSIO_READ_CFG_PAGE32: 2340 case MPSIO_WRITE_CFG_PAGE32: 2341 CP(arg.page, *page32, header); 2342 CP(arg.page, *page32, page_address); 2343 PTROUT_CP(arg.page, *page32, buf); 2344 CP(arg.page, *page32, len); 2345 CP(arg.page, *page32, ioc_status); 2346 break; 2347 2348 case MPSIO_READ_EXT_CFG_HEADER32: 2349 case MPSIO_READ_EXT_CFG_PAGE32: 2350 CP(arg.ext, *ext32, header); 2351 CP(arg.ext, *ext32, page_address); 2352 PTROUT_CP(arg.ext, *ext32, buf); 2353 CP(arg.ext, *ext32, len); 2354 CP(arg.ext, *ext32, ioc_status); 2355 break; 2356 2357 case MPSIO_RAID_ACTION32: 2358 CP(arg.raid, *raid32, action); 2359 CP(arg.raid, *raid32, volume_bus); 2360 CP(arg.raid, *raid32, volume_id); 2361 CP(arg.raid, *raid32, phys_disk_num); 2362 CP(arg.raid, *raid32, action_data_word); 2363 PTROUT_CP(arg.raid, *raid32, buf); 2364 CP(arg.raid, *raid32, len); 2365 CP(arg.raid, *raid32, volume_status); 2366 bcopy(arg.raid.action_data, raid32->action_data, 2367 sizeof arg.raid.action_data); 2368 CP(arg.raid, *raid32, ioc_status); 2369 CP(arg.raid, *raid32, write); 2370 break; 2371 2372 case MPSIO_MPS_COMMAND32: 2373 PTROUT_CP(arg.user, *user32, req); 2374 CP(arg.user, *user32, req_len); 2375 PTROUT_CP(arg.user, *user32, rpl); 2376 CP(arg.user, *user32, rpl_len); 2377 PTROUT_CP(arg.user, *user32, buf); 2378 CP(arg.user, *user32, len); 2379 CP(arg.user, *user32, flags); 2380 break; 2381 } 2382 } 2383 2384 return (error); 2385 } 2386 #endif /* COMPAT_FREEBSD32 */ 2387 2388 static int 2389 mps_ioctl_devsw(struct dev_ioctl_args *ap) 2390 { 2391 cdev_t dev = ap->a_head.a_dev; 2392 u_long com = ap->a_cmd; 2393 caddr_t arg = ap->a_data; 2394 int flag = ap->a_fflag; 2395 2396 #ifdef COMPAT_FREEBSD32 2397 if 
(SV_CURPROC_FLAG(SV_ILP32)) 2398 return (mps_ioctl32(dev, com, arg, flag, td)); 2399 #endif 2400 return (mps_ioctl(dev, com, arg, flag)); 2401 } 2402