1 /* $OpenBSD: mfii.c,v 1.89 2023/07/06 10:17:43 visa Exp $ */ 2 3 /* 4 * Copyright (c) 2012 David Gwynne <dlg@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "bio.h" 20 21 #include <sys/param.h> 22 #include <sys/systm.h> 23 #include <sys/malloc.h> 24 #include <sys/device.h> 25 #include <sys/dkio.h> 26 #include <sys/pool.h> 27 #include <sys/task.h> 28 #include <sys/atomic.h> 29 #include <sys/sensors.h> 30 #include <sys/rwlock.h> 31 #include <sys/syslog.h> 32 #include <sys/smr.h> 33 34 #include <dev/biovar.h> 35 #include <dev/pci/pcidevs.h> 36 #include <dev/pci/pcivar.h> 37 38 #include <machine/bus.h> 39 40 #include <scsi/scsi_all.h> 41 #include <scsi/scsi_disk.h> 42 #include <scsi/scsiconf.h> 43 44 #include <dev/ic/mfireg.h> 45 #include <dev/pci/mpiireg.h> 46 47 #define MFII_BAR 0x14 48 #define MFII_BAR_35 0x10 49 #define MFII_PCI_MEMSIZE 0x2000 /* 8k */ 50 51 #define MFII_OSTS_INTR_VALID 0x00000009 52 #define MFII_RPI 0x6c /* reply post host index */ 53 #define MFII_OSP2 0xb4 /* outbound scratch pad 2 */ 54 #define MFII_OSP3 0xb8 /* outbound scratch pad 3 */ 55 56 #define MFII_REQ_TYPE_SCSI MPII_REQ_DESCR_SCSI_IO 57 #define MFII_REQ_TYPE_LDIO (0x7 << 1) 58 #define MFII_REQ_TYPE_MFA (0x1 << 1) 59 #define MFII_REQ_TYPE_NO_LOCK (0x2 << 1) 60 #define MFII_REQ_TYPE_HI_PRI (0x6 << 1) 61 62 #define MFII_REQ_MFA(_a) htole64((_a) | MFII_REQ_TYPE_MFA) 63 64 #define MFII_FUNCTION_PASSTHRU_IO (0xf0) 65 #define MFII_FUNCTION_LDIO_REQUEST (0xf1) 66 67 #define MFII_MAX_CHAIN_UNIT 0x00400000 68 #define MFII_MAX_CHAIN_MASK 0x000003E0 69 #define MFII_MAX_CHAIN_SHIFT 5 70 71 #define MFII_256K_IO 128 72 #define MFII_1MB_IO (MFII_256K_IO * 4) 73 74 #define MFII_CHAIN_FRAME_MIN 1024 75 76 struct mfii_request_descr { 77 u_int8_t flags; 78 u_int8_t msix_index; 79 u_int16_t smid; 80 81 u_int16_t lmid; 82 u_int16_t dev_handle; 83 } __packed; 84 85 #define MFII_RAID_CTX_IO_TYPE_SYSPD (0x1 << 4) 86 #define MFII_RAID_CTX_TYPE_CUDA (0x2 << 4) 87 88 struct mfii_raid_context { 89 u_int8_t type_nseg; 90 u_int8_t _reserved1; 91 u_int16_t timeout_value; 92 93 u_int16_t reg_lock_flags; 94 #define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN (0x08) 95 #define MFII_RAID_CTX_RL_FLAGS_CPU0 (0x00) 96 #define MFII_RAID_CTX_RL_FLAGS_CPU1 (0x10) 97 #define MFII_RAID_CTX_RL_FLAGS_CUDA (0x80) 98 99 #define MFII_RAID_CTX_ROUTING_FLAGS_SQN (1 << 4) 100 #define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0 101 u_int16_t virtual_disk_target_id; 102 103 u_int64_t reg_lock_row_lba; 104 105 u_int32_t reg_lock_length; 106 107 u_int16_t next_lm_id; 108 u_int8_t ex_status; 109 u_int8_t status; 110 111 u_int8_t raid_flags; 112 u_int8_t num_sge; 113 u_int16_t config_seq_num; 114 115 u_int8_t span_arm; 116 u_int8_t _reserved3[3]; 117 } __packed; 118 119 struct mfii_sge { 120 u_int64_t sg_addr; 121 u_int32_t sg_len; 122 u_int16_t _reserved; 123 u_int8_t sg_next_chain_offset; 124 u_int8_t sg_flags; 125 } __packed; 126 127 #define MFII_SGE_ADDR_MASK (0x03) 128 #define MFII_SGE_ADDR_SYSTEM (0x00) 129 #define MFII_SGE_ADDR_IOCDDR (0x01) 130 #define MFII_SGE_ADDR_IOCPLB (0x02) 131 #define MFII_SGE_ADDR_IOCPLBNTA (0x03) 132 #define MFII_SGE_END_OF_LIST (0x40) 133 #define MFII_SGE_CHAIN_ELEMENT (0x80) 134 135 #define MFII_REQUEST_SIZE 256 136 137 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 138 139 #define MFII_MAX_ROW 32 140 #define MFII_MAX_ARRAY 128 141 142 struct mfii_array_map { 143 uint16_t mam_pd[MFII_MAX_ROW]; 144 } __packed; 145 146 struct mfii_dev_handle { 147 uint16_t mdh_cur_handle; 148 uint8_t mdh_valid; 149 uint8_t mdh_reserved; 150 uint16_t mdh_handle[2]; 151 } __packed; 152 153 struct mfii_ld_map { 154 uint32_t mlm_total_size; 155 uint32_t mlm_reserved1[5]; 156 uint32_t mlm_num_lds; 157 uint32_t mlm_reserved2; 158 uint8_t mlm_tgtid_to_ld[2 * MFI_MAX_LD]; 159 uint8_t mlm_pd_timeout; 160 uint8_t mlm_reserved3[7]; 161 struct mfii_array_map mlm_am[MFII_MAX_ARRAY]; 162 struct mfii_dev_handle mlm_dev_handle[MFI_MAX_PD]; 163 } __packed; 164 165 struct mfii_task_mgmt { 166 union { 167 uint8_t request[128]; 168 struct mpii_msg_scsi_task_request 169 mpii_request; 170 } __packed __aligned(8); 171 172 union { 173 uint8_t reply[128]; 174 uint32_t flags; 175 #define MFII_TASK_MGMT_FLAGS_LD (1 << 0) 176 #define MFII_TASK_MGMT_FLAGS_PD (1 << 1) 177 struct mpii_msg_scsi_task_reply 178 mpii_reply; 179 } __packed __aligned(8); 180 } __packed __aligned(8); 181 182 struct mfii_dmamem { 183 bus_dmamap_t mdm_map; 184 bus_dma_segment_t mdm_seg; 185 size_t mdm_size; 186 caddr_t mdm_kva; 187 }; 188 #define MFII_DMA_MAP(_mdm) ((_mdm)->mdm_map) 189 #define MFII_DMA_LEN(_mdm) ((_mdm)->mdm_size) 190 #define MFII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr) 191 #define MFII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva) 192 193 struct mfii_softc; 194 195 struct mfii_ccb { 196 void *ccb_request; 197 u_int64_t ccb_request_dva; 198 bus_addr_t ccb_request_offset; 199 200 void *ccb_mfi; 201 u_int64_t ccb_mfi_dva; 202 bus_addr_t ccb_mfi_offset; 203 204 struct mfi_sense *ccb_sense; 205 u_int64_t ccb_sense_dva; 206 bus_addr_t ccb_sense_offset; 207 208 struct mfii_sge *ccb_sgl; 209 u_int64_t ccb_sgl_dva; 210 bus_addr_t ccb_sgl_offset; 211 u_int ccb_sgl_len; 212 213 struct mfii_request_descr ccb_req; 214 215 bus_dmamap_t ccb_dmamap; 216 217 /* data for sgl */ 218 void *ccb_data; 219 size_t ccb_len; 220 221 int ccb_direction; 222 #define MFII_DATA_NONE 0 223 #define MFII_DATA_IN 1 224 #define MFII_DATA_OUT 2 225 226 void *ccb_cookie; 227 void (*ccb_done)(struct mfii_softc *, 228 struct mfii_ccb *); 229 230 u_int32_t ccb_flags; 231 #define MFI_CCB_F_ERR (1<<0) 232 u_int ccb_smid; 233 u_int ccb_refcnt; 234 SIMPLEQ_ENTRY(mfii_ccb) ccb_link; 235 }; 236 SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb); 237 238 struct mfii_pd_dev_handles { 239 struct smr_entry pd_smr; 240 uint16_t pd_handles[MFI_MAX_PD]; 241 }; 242 243 struct mfii_pd_softc { 244 struct scsibus_softc *pd_scsibus; 245 struct mfii_pd_dev_handles *pd_dev_handles; 246 uint8_t pd_timeout; 247 }; 248 249 struct mfii_iop { 250 int bar; 251 int num_sge_loc; 252 #define MFII_IOP_NUM_SGE_LOC_ORIG 0 253 #define MFII_IOP_NUM_SGE_LOC_35 1 254 u_int16_t ldio_ctx_reg_lock_flags; 255 u_int8_t ldio_req_type; 256 u_int8_t ldio_ctx_type_nseg; 257 u_int8_t sge_flag_chain; 258 u_int8_t sge_flag_eol; 259 }; 260 261 struct mfii_softc { 262 struct device sc_dev; 263 const struct mfii_iop *sc_iop; 264 265 pci_chipset_tag_t sc_pc; 266 pcitag_t sc_tag; 267 268 bus_space_tag_t sc_iot; 269 bus_space_handle_t sc_ioh; 270 bus_size_t sc_ios; 271 bus_dma_tag_t sc_dmat; 272 273 void *sc_ih; 274 275 struct mutex sc_ccb_mtx; 276 struct mutex sc_post_mtx; 277 278 u_int sc_max_fw_cmds; 279 u_int sc_max_cmds; 280 u_int sc_max_sgl; 281 282 u_int sc_reply_postq_depth; 283 u_int sc_reply_postq_index; 284 struct mutex sc_reply_postq_mtx; 285 struct mfii_dmamem *sc_reply_postq; 286 287 struct mfii_dmamem *sc_requests; 288 struct mfii_dmamem *sc_mfi; 289 struct mfii_dmamem *sc_sense; 290 struct mfii_dmamem *sc_sgl; 291 292 struct mfii_ccb *sc_ccb; 293 struct mfii_ccb_list sc_ccb_freeq; 294 295 struct mfii_ccb *sc_aen_ccb; 296 struct task sc_aen_task; 297 298 struct mutex sc_abort_mtx; 299 struct mfii_ccb_list sc_abort_list; 300 struct task sc_abort_task; 301 302 struct scsibus_softc *sc_scsibus; 303 struct mfii_pd_softc *sc_pd; 304 struct scsi_iopool sc_iopool; 305 306 /* save some useful information for logical drives that is missing 307 * in sc_ld_list 308 */ 309 struct { 310 char ld_dev[16]; /* device name sd? */ 311 } sc_ld[MFI_MAX_LD]; 312 int sc_target_lds[MFI_MAX_LD]; 313 314 /* scsi ioctl from sd device */ 315 int (*sc_ioctl)(struct device *, u_long, caddr_t); 316 317 /* bio */ 318 struct mfi_conf *sc_cfg; 319 struct mfi_ctrl_info sc_info; 320 struct mfi_ld_list sc_ld_list; 321 struct mfi_ld_details *sc_ld_details; /* array to all logical disks */ 322 int sc_no_pd; /* used physical disks */ 323 int sc_ld_sz; /* sizeof sc_ld_details */ 324 325 /* mgmt lock */ 326 struct rwlock sc_lock; 327 328 /* sensors */ 329 struct ksensordev sc_sensordev; 330 struct ksensor *sc_bbu; 331 struct ksensor *sc_bbu_status; 332 struct ksensor *sc_sensors; 333 }; 334 335 #ifdef MFII_DEBUG 336 #define DPRINTF(x...) do { if (mfii_debug) printf(x); } while(0) 337 #define DNPRINTF(n,x...) do { if (mfii_debug & n) printf(x); } while(0) 338 #define MFII_D_CMD 0x0001 339 #define MFII_D_INTR 0x0002 340 #define MFII_D_MISC 0x0004 341 #define MFII_D_DMA 0x0008 342 #define MFII_D_IOCTL 0x0010 343 #define MFII_D_RW 0x0020 344 #define MFII_D_MEM 0x0040 345 #define MFII_D_CCB 0x0080 346 uint32_t mfii_debug = 0 347 /* | MFII_D_CMD */ 348 /* | MFII_D_INTR */ 349 | MFII_D_MISC 350 /* | MFII_D_DMA */ 351 /* | MFII_D_IOCTL */ 352 /* | MFII_D_RW */ 353 /* | MFII_D_MEM */ 354 /* | MFII_D_CCB */ 355 ; 356 #else 357 #define DPRINTF(x...) 358 #define DNPRINTF(n,x...) 359 #endif 360 361 int mfii_match(struct device *, void *, void *); 362 void mfii_attach(struct device *, struct device *, void *); 363 int mfii_detach(struct device *, int); 364 int mfii_activate(struct device *, int); 365 366 const struct cfattach mfii_ca = { 367 sizeof(struct mfii_softc), 368 mfii_match, 369 mfii_attach, 370 mfii_detach, 371 mfii_activate, 372 }; 373 374 struct cfdriver mfii_cd = { 375 NULL, 376 "mfii", 377 DV_DULL 378 }; 379 380 void mfii_scsi_cmd(struct scsi_xfer *); 381 void mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *); 382 int mfii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int); 383 int mfii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *); 384 385 const struct scsi_adapter mfii_switch = { 386 mfii_scsi_cmd, NULL, NULL, NULL, mfii_scsi_ioctl 387 }; 388 389 void mfii_pd_scsi_cmd(struct scsi_xfer *); 390 int mfii_pd_scsi_probe(struct scsi_link *); 391 392 const struct scsi_adapter mfii_pd_switch = { 393 mfii_pd_scsi_cmd, NULL, mfii_pd_scsi_probe, NULL, NULL, 394 }; 395 396 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname) 397 398 u_int32_t mfii_read(struct mfii_softc *, bus_size_t); 399 void mfii_write(struct mfii_softc *, bus_size_t, u_int32_t); 400 401 struct mfii_dmamem * mfii_dmamem_alloc(struct mfii_softc *, size_t); 402 void mfii_dmamem_free(struct mfii_softc *, 403 struct mfii_dmamem *); 404 405 void * mfii_get_ccb(void *); 406 void mfii_put_ccb(void *, void *); 407 int mfii_init_ccb(struct mfii_softc *); 408 void mfii_scrub_ccb(struct mfii_ccb *); 409 410 int mfii_reset_hard(struct mfii_softc *); 411 int mfii_transition_firmware(struct mfii_softc *); 412 int mfii_initialise_firmware(struct mfii_softc *); 413 int mfii_get_info(struct mfii_softc *); 414 int mfii_syspd(struct mfii_softc *); 415 416 void mfii_start(struct mfii_softc *, struct mfii_ccb *); 417 void mfii_done(struct mfii_softc *, struct mfii_ccb *); 418 int mfii_poll(struct mfii_softc *, struct mfii_ccb *); 419 void mfii_poll_done(struct mfii_softc *, struct mfii_ccb *); 420 int mfii_exec(struct mfii_softc *, struct mfii_ccb *); 421 void mfii_exec_done(struct mfii_softc *, struct mfii_ccb *); 422 int mfii_my_intr(struct mfii_softc *); 423 int mfii_intr(void *); 424 void mfii_postq(struct mfii_softc *); 425 426 int mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *, 427 void *, int); 428 int mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *, 429 void *, int); 430 431 int mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *); 432 433 int mfii_mgmt(struct mfii_softc *, uint32_t, 434 const union mfi_mbox *, void *, size_t, int); 435 int mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *, 436 uint32_t, const union mfi_mbox *, void *, size_t, 437 int); 438 void mfii_empty_done(struct mfii_softc *, struct mfii_ccb *); 439 440 int mfii_scsi_cmd_io(struct mfii_softc *, 441 struct scsi_xfer *); 442 int mfii_scsi_cmd_cdb(struct mfii_softc *, 443 struct scsi_xfer *); 444 int mfii_pd_scsi_cmd_cdb(struct mfii_softc *, 445 struct scsi_xfer *); 446 void mfii_scsi_cmd_tmo(void *); 447 448 int mfii_dev_handles_update(struct mfii_softc *sc); 449 void mfii_dev_handles_smr(void *pd_arg); 450 451 void mfii_abort_task(void *); 452 void mfii_abort(struct mfii_softc *, struct mfii_ccb *, 453 uint16_t, uint16_t, uint8_t, uint32_t); 454 void mfii_scsi_cmd_abort_done(struct mfii_softc *, 455 struct mfii_ccb *); 456 457 int mfii_aen_register(struct mfii_softc *); 458 void mfii_aen_start(struct mfii_softc *, struct mfii_ccb *, 459 struct mfii_dmamem *, uint32_t); 460 void mfii_aen_done(struct mfii_softc *, struct mfii_ccb *); 461 void mfii_aen(void *); 462 void mfii_aen_unregister(struct mfii_softc *); 463 464 void mfii_aen_pd_insert(struct mfii_softc *, 465 const struct mfi_evtarg_pd_address *); 466 void mfii_aen_pd_remove(struct mfii_softc *, 467 const struct mfi_evtarg_pd_address *); 468 void mfii_aen_pd_state_change(struct mfii_softc *, 469 const struct mfi_evtarg_pd_state *); 470 void mfii_aen_ld_update(struct mfii_softc *); 471 472 #if NBIO > 0 473 int mfii_ioctl(struct device *, u_long, caddr_t); 474 int mfii_bio_getitall(struct mfii_softc *); 475 int mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *); 476 int mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *); 477 int mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *); 478 int mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *); 479 int mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *); 480 int mfii_ioctl_setstate(struct mfii_softc *, 481 struct bioc_setstate *); 482 int mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *); 483 int mfii_bio_hs(struct mfii_softc *, int, int, void *); 484 485 #ifndef SMALL_KERNEL 486 static const char *mfi_bbu_indicators[] = { 487 "pack missing", 488 "voltage low", 489 "temp high", 490 "charge active", 491 "discharge active", 492 "learn cycle req'd", 493 "learn cycle active", 494 "learn cycle failed", 495 "learn cycle timeout", 496 "I2C errors", 497 "replace pack", 498 "low capacity", 499 "periodic learn req'd" 500 }; 501 502 void mfii_init_ld_sensor(struct mfii_softc *, int); 503 void mfii_refresh_ld_sensor(struct mfii_softc *, int); 504 int mfii_create_sensors(struct mfii_softc *); 505 void mfii_refresh_sensors(void *); 506 void mfii_bbu(struct mfii_softc *); 507 #endif /* SMALL_KERNEL */ 508 #endif /* NBIO > 0 */ 509 510 /* 511 * mfii boards support asynchronous (and non-polled) completion of 512 * dcmds by proxying them through a passthru mpii command that points 513 * at a dcmd frame. since the passthru command is submitted like 514 * the scsi commands using an SMID in the request descriptor, 515 * ccb_request memory * must contain the passthru command because 516 * that is what the SMID refers to. this means ccb_request cannot 517 * contain the dcmd. rather than allocating separate dma memory to 518 * hold the dcmd, we reuse the sense memory buffer for it. 519 */ 520 521 void mfii_dcmd_start(struct mfii_softc *, 522 struct mfii_ccb *); 523 524 static inline void 525 mfii_dcmd_scrub(struct mfii_ccb *ccb) 526 { 527 memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense)); 528 } 529 530 static inline struct mfi_dcmd_frame * 531 mfii_dcmd_frame(struct mfii_ccb *ccb) 532 { 533 CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense)); 534 return ((struct mfi_dcmd_frame *)ccb->ccb_sense); 535 } 536 537 static inline void 538 mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags) 539 { 540 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense), 541 ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags); 542 } 543 544 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP) 545 546 const struct mfii_iop mfii_iop_thunderbolt = { 547 MFII_BAR, 548 MFII_IOP_NUM_SGE_LOC_ORIG, 549 0, 550 MFII_REQ_TYPE_LDIO, 551 0, 552 MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA, 553 0 554 }; 555 556 /* 557 * a lot of these values depend on us not implementing fastpath yet. 558 */ 559 const struct mfii_iop mfii_iop_25 = { 560 MFII_BAR, 561 MFII_IOP_NUM_SGE_LOC_ORIG, 562 MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */ 563 MFII_REQ_TYPE_NO_LOCK, 564 MFII_RAID_CTX_TYPE_CUDA | 0x1, 565 MFII_SGE_CHAIN_ELEMENT, 566 MFII_SGE_END_OF_LIST 567 }; 568 569 const struct mfii_iop mfii_iop_35 = { 570 MFII_BAR_35, 571 MFII_IOP_NUM_SGE_LOC_35, 572 MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */ 573 MFII_REQ_TYPE_NO_LOCK, 574 MFII_RAID_CTX_TYPE_CUDA | 0x1, 575 MFII_SGE_CHAIN_ELEMENT, 576 MFII_SGE_END_OF_LIST 577 }; 578 579 struct mfii_device { 580 pcireg_t mpd_vendor; 581 pcireg_t mpd_product; 582 const struct mfii_iop *mpd_iop; 583 }; 584 585 const struct mfii_device mfii_devices[] = { 586 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_2208, 587 &mfii_iop_thunderbolt }, 588 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3008, 589 &mfii_iop_25 }, 590 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3108, 591 &mfii_iop_25 }, 592 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3404, 593 &mfii_iop_35 }, 594 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3504, 595 &mfii_iop_35 }, 596 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3408, 597 &mfii_iop_35 }, 598 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3508, 599 &mfii_iop_35 }, 600 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3416, 601 &mfii_iop_35 }, 602 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_3516, 603 &mfii_iop_35 } 604 }; 605 606 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *); 607 608 const struct mfii_iop * 609 mfii_find_iop(struct pci_attach_args *pa) 610 { 611 const struct mfii_device *mpd; 612 int i; 613 614 for (i = 0; i < nitems(mfii_devices); i++) { 615 mpd = &mfii_devices[i]; 616 617 if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) && 618 mpd->mpd_product == PCI_PRODUCT(pa->pa_id)) 619 return (mpd->mpd_iop); 620 } 621 622 return (NULL); 623 } 624 625 int 626 mfii_match(struct device *parent, void *match, void *aux) 627 { 628 return ((mfii_find_iop(aux) != NULL) ? 1 : 0); 629 } 630 631 void 632 mfii_attach(struct device *parent, struct device *self, void *aux) 633 { 634 struct mfii_softc *sc = (struct mfii_softc *)self; 635 struct pci_attach_args *pa = aux; 636 pcireg_t memtype; 637 pci_intr_handle_t ih; 638 struct scsibus_attach_args saa; 639 u_int32_t status, scpad2, scpad3; 640 int chain_frame_sz, nsge_in_io, nsge_in_chain, i; 641 642 /* init sc */ 643 sc->sc_iop = mfii_find_iop(aux); 644 sc->sc_dmat = pa->pa_dmat; 645 SIMPLEQ_INIT(&sc->sc_ccb_freeq); 646 mtx_init(&sc->sc_ccb_mtx, IPL_BIO); 647 mtx_init(&sc->sc_post_mtx, IPL_BIO); 648 mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO); 649 scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb); 650 651 rw_init(&sc->sc_lock, "mfii_lock"); 652 653 sc->sc_aen_ccb = NULL; 654 task_set(&sc->sc_aen_task, mfii_aen, sc); 655 656 mtx_init(&sc->sc_abort_mtx, IPL_BIO); 657 SIMPLEQ_INIT(&sc->sc_abort_list); 658 task_set(&sc->sc_abort_task, mfii_abort_task, sc); 659 660 /* wire up the bus shizz */ 661 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar); 662 if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0, 663 &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) { 664 printf(": unable to map registers\n"); 665 return; 666 } 667 668 /* disable interrupts */ 669 mfii_write(sc, MFI_OMSK, 0xffffffff); 670 671 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) { 672 printf(": unable to map interrupt\n"); 673 goto pci_unmap; 674 } 675 printf(": %s\n", pci_intr_string(pa->pa_pc, ih)); 676 677 /* lets get started */ 678 if (mfii_transition_firmware(sc)) 679 goto pci_unmap; 680 681 /* determine max_cmds (refer to the Linux megaraid_sas driver) */ 682 scpad3 = mfii_read(sc, MFII_OSP3); 683 status = mfii_fw_state(sc); 684 sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK; 685 if (sc->sc_max_fw_cmds == 0) 686 sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK; 687 /* 688 * reduce max_cmds by 1 to ensure that the reply queue depth does not 689 * exceed FW supplied max_fw_cmds. 690 */ 691 sc->sc_max_cmds = min(sc->sc_max_fw_cmds, 1024) - 1; 692 693 /* determine max_sgl (refer to the Linux megaraid_sas driver) */ 694 scpad2 = mfii_read(sc, MFII_OSP2); 695 chain_frame_sz = 696 ((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) * 697 ((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO); 698 if (chain_frame_sz < MFII_CHAIN_FRAME_MIN) 699 chain_frame_sz = MFII_CHAIN_FRAME_MIN; 700 701 nsge_in_io = (MFII_REQUEST_SIZE - 702 sizeof(struct mpii_msg_scsi_io) - 703 sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge); 704 nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge); 705 706 /* round down to nearest power of two */ 707 sc->sc_max_sgl = 1; 708 while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain)) 709 sc->sc_max_sgl <<= 1; 710 711 DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n", 712 DEVNAME(sc), status, scpad2, scpad3); 713 DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n", 714 DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds); 715 DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, " 716 "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain, 717 sc->sc_max_sgl); 718 719 /* sense memory */ 720 CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE); 721 sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE); 722 if (sc->sc_sense == NULL) { 723 printf("%s: unable to allocate sense memory\n", DEVNAME(sc)); 724 goto pci_unmap; 725 } 726 727 /* reply post queue */ 728 sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16); 729 730 sc->sc_reply_postq = mfii_dmamem_alloc(sc, 731 sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr)); 732 if (sc->sc_reply_postq == NULL) 733 goto free_sense; 734 735 memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff, 736 MFII_DMA_LEN(sc->sc_reply_postq)); 737 738 /* MPII request frame array */ 739 sc->sc_requests = mfii_dmamem_alloc(sc, 740 MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1)); 741 if (sc->sc_requests == NULL) 742 goto free_reply_postq; 743 744 /* MFI command frame array */ 745 sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE); 746 if (sc->sc_mfi == NULL) 747 goto free_requests; 748 749 /* MPII SGL array */ 750 sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds * 751 sizeof(struct mfii_sge) * sc->sc_max_sgl); 752 if (sc->sc_sgl == NULL) 753 goto free_mfi; 754 755 if (mfii_init_ccb(sc) != 0) { 756 printf("%s: could not init ccb list\n", DEVNAME(sc)); 757 goto free_sgl; 758 } 759 760 /* kickstart firmware with all addresses and pointers */ 761 if (mfii_initialise_firmware(sc) != 0) { 762 printf("%s: could not initialize firmware\n", DEVNAME(sc)); 763 goto free_sgl; 764 } 765 766 if (mfii_get_info(sc) != 0) { 767 printf("%s: could not retrieve controller information\n", 768 DEVNAME(sc)); 769 goto free_sgl; 770 } 771 772 printf("%s: \"%s\", firmware %s", DEVNAME(sc), 773 sc->sc_info.mci_product_name, sc->sc_info.mci_package_version); 774 if (letoh16(sc->sc_info.mci_memory_size) > 0) 775 printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size)); 776 printf("\n"); 777 778 sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO, 779 mfii_intr, sc, DEVNAME(sc)); 780 if (sc->sc_ih == NULL) 781 goto free_sgl; 782 783 saa.saa_adapter_softc = sc; 784 saa.saa_adapter = &mfii_switch; 785 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET; 786 saa.saa_adapter_buswidth = sc->sc_info.mci_max_lds; 787 saa.saa_luns = 8; 788 saa.saa_openings = sc->sc_max_cmds; 789 saa.saa_pool = &sc->sc_iopool; 790 saa.saa_quirks = saa.saa_flags = 0; 791 saa.saa_wwpn = saa.saa_wwnn = 0; 792 793 sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev, &saa, 794 scsiprint); 795 796 mfii_syspd(sc); 797 798 if (mfii_aen_register(sc) != 0) { 799 /* error printed by mfii_aen_register */ 800 goto intr_disestablish; 801 } 802 803 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list, 804 sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) { 805 printf("%s: getting list of logical disks failed\n", DEVNAME(sc)); 806 goto intr_disestablish; 807 } 808 memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds)); 809 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) { 810 int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target; 811 sc->sc_target_lds[target] = i; 812 } 813 814 /* enable interrupts */ 815 mfii_write(sc, MFI_OSTS, 0xffffffff); 816 mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID); 817 818 #if NBIO > 0 819 if (bio_register(&sc->sc_dev, mfii_ioctl) != 0) 820 panic("%s: controller registration failed", DEVNAME(sc)); 821 else 822 sc->sc_ioctl = mfii_ioctl; 823 824 #ifndef SMALL_KERNEL 825 if (mfii_create_sensors(sc) != 0) 826 printf("%s: unable to create sensors\n", DEVNAME(sc)); 827 #endif 828 #endif /* NBIO > 0 */ 829 830 return; 831 intr_disestablish: 832 pci_intr_disestablish(sc->sc_pc, sc->sc_ih); 833 free_sgl: 834 mfii_dmamem_free(sc, sc->sc_sgl); 835 free_mfi: 836 mfii_dmamem_free(sc, sc->sc_mfi); 837 free_requests: 838 mfii_dmamem_free(sc, sc->sc_requests); 839 free_reply_postq: 840 mfii_dmamem_free(sc, sc->sc_reply_postq); 841 free_sense: 842 mfii_dmamem_free(sc, sc->sc_sense); 843 pci_unmap: 844 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 845 } 846 847 static inline uint16_t 848 mfii_dev_handle(struct mfii_softc *sc, uint16_t target) 849 { 850 struct mfii_pd_dev_handles *handles; 851 uint16_t handle; 852 853 smr_read_enter(); 854 handles = SMR_PTR_GET(&sc->sc_pd->pd_dev_handles); 855 handle = handles->pd_handles[target]; 856 smr_read_leave(); 857 858 return (handle); 859 } 860 861 void 862 mfii_dev_handles_smr(void *pd_arg) 863 { 864 struct mfii_pd_dev_handles *handles = pd_arg; 865 866 free(handles, M_DEVBUF, sizeof(*handles)); 867 } 868 869 int 870 mfii_dev_handles_update(struct mfii_softc *sc) 871 { 872 struct mfii_ld_map *lm; 873 struct mfii_pd_dev_handles *handles, *old_handles; 874 int i; 875 int rv = 0; 876 877 lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO); 878 879 rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm), 880 SCSI_DATA_IN|SCSI_NOSLEEP); 881 882 if (rv != 0) { 883 rv = EIO; 884 goto free_lm; 885 } 886 887 handles = malloc(sizeof(*handles), M_DEVBUF, M_WAITOK); 888 smr_init(&handles->pd_smr); 889 for (i = 0; i < MFI_MAX_PD; i++) 890 handles->pd_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle; 891 892 /* commit the updated info */ 893 sc->sc_pd->pd_timeout = lm->mlm_pd_timeout; 894 old_handles = SMR_PTR_GET_LOCKED(&sc->sc_pd->pd_dev_handles); 895 SMR_PTR_SET_LOCKED(&sc->sc_pd->pd_dev_handles, handles); 896 897 if (old_handles != NULL) 898 smr_call(&old_handles->pd_smr, mfii_dev_handles_smr, old_handles); 899 900 free_lm: 901 free(lm, M_TEMP, sizeof(*lm)); 902 903 return (rv); 904 } 905 906 int 907 mfii_syspd(struct mfii_softc *sc) 908 { 909 struct scsibus_attach_args saa; 910 911 sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO); 912 if (sc->sc_pd == NULL) 913 return (1); 914 915 if (mfii_dev_handles_update(sc) != 0) 916 goto free_pdsc; 917 918 saa.saa_adapter = &mfii_pd_switch; 919 saa.saa_adapter_softc = sc; 920 saa.saa_adapter_buswidth = MFI_MAX_PD; 921 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET; 922 saa.saa_luns = 8; 923 saa.saa_openings = sc->sc_max_cmds - 1; 924 saa.saa_pool = &sc->sc_iopool; 925 saa.saa_quirks = saa.saa_flags = 0; 926 saa.saa_wwpn = saa.saa_wwnn = 0; 927 928 sc->sc_pd->pd_scsibus = (struct scsibus_softc *) 929 config_found(&sc->sc_dev, &saa, scsiprint); 930 931 return (0); 932 933 free_pdsc: 934 free(sc->sc_pd, M_DEVBUF, sizeof(*sc->sc_pd)); 935 return (1); 936 } 937 938 int 939 mfii_detach(struct device *self, int flags) 940 { 941 struct mfii_softc *sc = (struct mfii_softc *)self; 942 943 if (sc->sc_ih == NULL) 944 return (0); 945 946 #ifndef SMALL_KERNEL 947 if (sc->sc_sensors) { 948 sensordev_deinstall(&sc->sc_sensordev); 949 free(sc->sc_sensors, M_DEVBUF, 950 MFI_MAX_LD * sizeof(struct ksensor)); 951 } 952 953 if (sc->sc_bbu) { 954 free(sc->sc_bbu, M_DEVBUF, 4 * sizeof(*sc->sc_bbu)); 955 } 956 957 if (sc->sc_bbu_status) { 958 free(sc->sc_bbu_status, M_DEVBUF, 959 sizeof(*sc->sc_bbu_status) * sizeof(mfi_bbu_indicators)); 960 } 961 #endif /* SMALL_KERNEL */ 962 963 mfii_aen_unregister(sc); 964 pci_intr_disestablish(sc->sc_pc, sc->sc_ih); 965 mfii_dmamem_free(sc, sc->sc_sgl); 966 mfii_dmamem_free(sc, sc->sc_mfi); 967 mfii_dmamem_free(sc, sc->sc_requests); 968 mfii_dmamem_free(sc, sc->sc_reply_postq); 969 mfii_dmamem_free(sc, sc->sc_sense); 970 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 971 972 return (0); 973 } 974 975 static void 976 mfii_flush_cache(struct mfii_softc *sc, struct mfii_ccb *ccb) 977 { 978 #if 0 979 union mfi_mbox mbox = { 980 .b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE, 981 }; 982 int rv; 983 984 mfii_scrub_ccb(ccb); 985 rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox, 986 NULL, 0, SCSI_NOSLEEP); 987 if (rv != 0) { 988 printf("%s: unable to flush cache\n", DEVNAME(sc)); 989 return; 990 } 991 #endif 992 } 993 994 static void 995 mfii_shutdown(struct mfii_softc *sc, struct mfii_ccb *ccb) 996 { 997 #if 0 998 int rv; 999 1000 mfii_scrub_ccb(ccb); 1001 rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, NULL, 1002 NULL, 0, SCSI_POLL); 1003 if (rv != 0) { 1004 printf("%s: unable to shutdown controller\n", DEVNAME(sc)); 1005 return; 1006 } 1007 #endif 1008 } 1009 1010 static void 1011 mfii_powerdown(struct mfii_softc *sc) 1012 { 1013 struct mfii_ccb *ccb; 1014 1015 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 1016 if (ccb == NULL) { 1017 printf("%s: unable to allocate ccb for shutdown\n", 1018 DEVNAME(sc)); 1019 return; 1020 } 1021 1022 mfii_flush_cache(sc, ccb); 1023 mfii_shutdown(sc, ccb); 1024 scsi_io_put(&sc->sc_iopool, ccb); 1025 } 1026 1027 int 1028 mfii_activate(struct device *self, int act) 1029 { 1030 struct mfii_softc *sc = (struct mfii_softc *)self; 1031 int rv; 1032 1033 switch (act) { 1034 case DVACT_POWERDOWN: 1035 rv = config_activate_children(&sc->sc_dev, act); 1036 mfii_powerdown(sc); 1037 break; 1038 default: 1039 rv = config_activate_children(&sc->sc_dev, act); 1040 break; 1041 } 1042 1043 return (rv); 1044 } 1045 1046 u_int32_t 1047 mfii_read(struct mfii_softc *sc, bus_size_t r) 1048 { 1049 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 1050 BUS_SPACE_BARRIER_READ); 1051 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r)); 1052 } 1053 1054 void 1055 mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v) 1056 { 1057 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v); 1058 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 1059 BUS_SPACE_BARRIER_WRITE); 1060 } 1061 1062 struct mfii_dmamem * 1063 mfii_dmamem_alloc(struct mfii_softc *sc, size_t size) 1064 { 1065 struct mfii_dmamem *m; 1066 int nsegs; 1067 1068 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO); 1069 if (m == NULL) 1070 return (NULL); 1071 1072 m->mdm_size = size; 1073 1074 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 1075 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0) 1076 goto mdmfree; 1077 1078 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1, 1079 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) 1080 goto destroy; 1081 1082 if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva, 1083 BUS_DMA_NOWAIT) != 0) 1084 goto free; 1085 1086 if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL, 1087 BUS_DMA_NOWAIT) != 0) 1088 goto unmap; 1089 1090 return (m); 1091 1092 unmap: 1093 bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size); 1094 free: 1095 bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1); 1096 destroy: 1097 bus_dmamap_destroy(sc->sc_dmat, m->mdm_map); 1098 mdmfree: 1099 free(m, M_DEVBUF, sizeof *m); 1100 1101 return (NULL); 1102 } 1103 1104 void 1105 mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m) 1106 { 1107 bus_dmamap_unload(sc->sc_dmat, m->mdm_map); 1108 bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size); 1109 bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1); 1110 bus_dmamap_destroy(sc->sc_dmat, m->mdm_map); 1111 free(m, M_DEVBUF, sizeof *m); 1112 } 1113 1114 void 1115 mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb) 1116 { 1117 struct mpii_msg_scsi_io *io = ccb->ccb_request; 1118 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1); 1119 struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1); 1120 1121 io->function = MFII_FUNCTION_PASSTHRU_IO; 1122 io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io; 1123 io->chain_offset = io->sgl_offset0 / 4; 1124 1125 htolem64(&sge->sg_addr, ccb->ccb_sense_dva); 1126 htolem32(&sge->sg_len, sizeof(*ccb->ccb_sense)); 1127 sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA; 1128 1129 ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI; 1130 ccb->ccb_req.smid = letoh16(ccb->ccb_smid); 1131 1132 mfii_start(sc, ccb); 1133 } 1134 1135 int 1136 mfii_aen_register(struct mfii_softc *sc) 1137 { 1138 struct mfi_evt_log_info mel; 1139 struct mfii_ccb *ccb; 1140 struct mfii_dmamem *mdm; 1141 int rv; 1142 1143 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 1144 if (ccb == NULL) { 1145 printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc)); 1146 return (ENOMEM); 1147 } 1148 1149 memset(&mel, 0, sizeof(mel)); 1150 mfii_scrub_ccb(ccb); 1151 1152 rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL, 1153 &mel, sizeof(mel), SCSI_DATA_IN|SCSI_NOSLEEP); 1154 if (rv != 0) { 1155 scsi_io_put(&sc->sc_iopool, ccb); 1156 printf("%s: unable to get event info\n", DEVNAME(sc)); 1157 return (EIO); 1158 } 1159 1160 mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail)); 1161 if (mdm == NULL) { 1162 scsi_io_put(&sc->sc_iopool, ccb); 1163 printf("%s: unable to allocate event data\n", DEVNAME(sc)); 1164 return (ENOMEM); 1165 } 1166 1167 /* replay all the events from boot */ 1168 mfii_aen_start(sc, ccb, mdm, lemtoh32(&mel.mel_boot_seq_num)); 1169 1170 return (0); 1171 } 1172 1173 void 1174 mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb, 1175 struct mfii_dmamem *mdm, uint32_t seq) 1176 { 1177 struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb); 1178 struct mfi_frame_header *hdr = &dcmd->mdf_header; 1179 union mfi_sgl *sgl = &dcmd->mdf_sgl; 1180 union mfi_evt_class_locale mec; 1181 1182 mfii_scrub_ccb(ccb); 1183 mfii_dcmd_scrub(ccb); 1184 memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm)); 1185 1186 ccb->ccb_cookie = mdm; 1187 ccb->ccb_done = mfii_aen_done; 1188 sc->sc_aen_ccb = ccb; 1189 1190 mec.mec_members.class = MFI_EVT_CLASS_DEBUG; 1191 mec.mec_members.reserved = 0; 1192 mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL); 1193 1194 hdr->mfh_cmd = MFI_CMD_DCMD; 1195 hdr->mfh_sg_count = 1; 1196 hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64); 1197 htolem32(&hdr->mfh_data_len, MFII_DMA_LEN(mdm)); 1198 dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT); 1199 htolem32(&dcmd->mdf_mbox.w[0], seq); 1200 htolem32(&dcmd->mdf_mbox.w[1], mec.mec_word); 1201 htolem64(&sgl->sg64[0].addr, MFII_DMA_DVA(mdm)); 1202 htolem32(&sgl->sg64[0].len, MFII_DMA_LEN(mdm)); 1203 1204 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm), 1205 0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD); 1206 1207 mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1208 mfii_dcmd_start(sc, ccb); 1209 } 1210 1211 void 1212 mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb) 1213 { 1214 KASSERT(sc->sc_aen_ccb == ccb); 1215 1216 /* defer to a thread with KERNEL_LOCK so we can run autoconf */ 1217 task_add(systq, &sc->sc_aen_task); 1218 } 1219 1220 void 1221 mfii_aen(void *arg) 1222 { 1223 struct mfii_softc *sc = arg; 1224 struct mfii_ccb *ccb = sc->sc_aen_ccb; 1225 struct mfii_dmamem *mdm = ccb->ccb_cookie; 1226 const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm); 1227 uint32_t code; 1228 1229 mfii_dcmd_sync(sc, ccb, 1230 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1231 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm), 1232 0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD); 1233 1234 code = lemtoh32(&med->med_code); 1235 1236 #if 0 1237 log(LOG_DEBUG, "%s (seq %u, code %08x) %s\n", DEVNAME(sc), 1238 lemtoh32(&med->med_seq_num), code, med->med_description); 1239 #endif 1240 1241 switch (code) { 1242 case MFI_EVT_PD_INSERTED_EXT: 1243 if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS) 1244 break; 1245 1246 mfii_aen_pd_insert(sc, &med->args.pd_address); 1247 break; 1248 case MFI_EVT_PD_REMOVED_EXT: 1249 if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS) 1250 break; 1251 1252 mfii_aen_pd_remove(sc, &med->args.pd_address); 1253 break; 1254 1255 case MFI_EVT_PD_STATE_CHANGE: 1256 if (med->med_arg_type != MFI_EVT_ARGS_PD_STATE) 1257 break; 1258 1259 mfii_aen_pd_state_change(sc, &med->args.pd_state); 1260 break; 1261 1262 case MFI_EVT_LD_CREATED: 1263 case MFI_EVT_LD_DELETED: 1264 mfii_aen_ld_update(sc); 1265 break; 1266 1267 default: 1268 break; 1269 } 1270 1271 mfii_aen_start(sc, ccb, mdm, lemtoh32(&med->med_seq_num) + 1); 1272 } 1273 1274 void 1275 mfii_aen_pd_insert(struct mfii_softc *sc, 1276 const struct mfi_evtarg_pd_address *pd) 1277 { 1278 #if 0 1279 printf("%s: pd inserted ext\n", DEVNAME(sc)); 1280 printf("%s: device_id %04x encl_id: %04x type %x\n", DEVNAME(sc), 1281 lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id), 1282 pd->scsi_dev_type); 1283 printf("%s: connected %02x addrs %016llx %016llx\n", DEVNAME(sc), 1284 pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]), 1285 lemtoh64(&pd->sas_addr[1])); 1286 #endif 1287 1288 if (mfii_dev_handles_update(sc) != 0) /* refresh map */ 1289 return; 1290 1291 scsi_probe_target(sc->sc_pd->pd_scsibus, lemtoh16(&pd->device_id)); 1292 } 1293 1294 void 1295 mfii_aen_pd_remove(struct mfii_softc *sc, 1296 const struct mfi_evtarg_pd_address *pd) 1297 { 1298 #if 0 1299 printf("%s: pd removed ext\n", DEVNAME(sc)); 1300 printf("%s: device_id %04x encl_id: %04x type %u\n", DEVNAME(sc), 1301 lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id), 1302 pd->scsi_dev_type); 1303 printf("%s: connected %02x addrs %016llx %016llx\n", DEVNAME(sc), 1304 pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]), 1305 lemtoh64(&pd->sas_addr[1])); 1306 #endif 1307 uint16_t target = lemtoh16(&pd->device_id); 1308 1309 scsi_activate(sc->sc_pd->pd_scsibus, target, -1, DVACT_DEACTIVATE); 1310 1311 /* the firmware will abort outstanding commands for us */ 1312 1313 scsi_detach_target(sc->sc_pd->pd_scsibus, target, DETACH_FORCE); 1314 } 1315 1316 void 1317 mfii_aen_pd_state_change(struct mfii_softc *sc, 1318 const struct mfi_evtarg_pd_state *state) 1319 { 1320 uint16_t target = lemtoh16(&state->pd.mep_device_id); 1321 1322 if (state->prev_state == htole32(MFI_PD_SYSTEM) && 1323 state->new_state != htole32(MFI_PD_SYSTEM)) { 1324 /* it's been pulled or configured for raid */ 1325 1326 scsi_activate(sc->sc_pd->pd_scsibus, target, -1, 1327 DVACT_DEACTIVATE); 1328 /* outstanding commands will simply complete or get aborted */ 1329 scsi_detach_target(sc->sc_pd->pd_scsibus, target, 1330 DETACH_FORCE); 1331 1332 } else if (state->prev_state == htole32(MFI_PD_UNCONFIG_GOOD) && 1333 state->new_state == htole32(MFI_PD_SYSTEM)) { 1334 /* the firmware is handing the disk over */ 1335 1336 scsi_probe_target(sc->sc_pd->pd_scsibus, target); 1337 } 1338 } 1339 1340 void 1341 mfii_aen_ld_update(struct mfii_softc *sc) 1342 { 1343 int i, state, target, old, nld; 1344 int newlds[MFI_MAX_LD]; 1345 1346 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list, 1347 sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) { 1348 DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n", 1349 DEVNAME(sc)); 1350 return; 1351 } 1352 1353 memset(newlds, -1, sizeof(newlds)); 1354 1355 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) { 1356 state = sc->sc_ld_list.mll_list[i].mll_state; 1357 target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target; 1358 DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n", 1359 DEVNAME(sc), target, state); 1360 newlds[target] = i; 1361 } 1362 1363 for (i = 0; i < MFI_MAX_LD; i++) { 1364 old = sc->sc_target_lds[i]; 1365 nld = newlds[i]; 1366 1367 if (old == -1 && nld != -1) { 1368 DNPRINTF(MFII_D_MISC, "%s: attaching target %d\n", 1369 DEVNAME(sc), i); 1370 1371 scsi_probe_target(sc->sc_scsibus, i); 1372 1373 #ifndef SMALL_KERNEL 1374 mfii_init_ld_sensor(sc, nld); 1375 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]); 1376 #endif 1377 } else if (nld == -1 && old != -1) { 1378 DNPRINTF(MFII_D_MISC, "%s: detaching target %d\n", 1379 DEVNAME(sc), i); 1380 1381 scsi_activate(sc->sc_scsibus, i, -1, 1382 DVACT_DEACTIVATE); 1383 scsi_detach_target(sc->sc_scsibus, i, 1384 DETACH_FORCE); 1385 #ifndef SMALL_KERNEL 1386 sensor_detach(&sc->sc_sensordev, &sc->sc_sensors[i]); 1387 #endif 1388 } 1389 } 1390 1391 memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds)); 1392 } 1393 1394 void 1395 mfii_aen_unregister(struct mfii_softc *sc) 1396 { 1397 /* XXX */ 1398 } 1399 1400 int 1401 mfii_reset_hard(struct mfii_softc *sc) 1402 { 1403 u_int16_t i; 1404 1405 mfii_write(sc, MFI_OSTS, 0); 1406 1407 /* enable diagnostic register */ 1408 mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH); 1409 mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1); 1410 mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2); 1411 mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3); 1412 mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4); 1413 mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5); 1414 mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6); 1415 1416 delay(100); 1417 1418 if ((mfii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) { 1419 printf("%s: failed to enable diagnostic read/write\n", 1420 DEVNAME(sc)); 1421 return(1); 1422 } 1423 1424 /* reset ioc */ 1425 mfii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER); 1426 1427 /* 240 milliseconds */ 1428 delay(240000); 1429 1430 for (i = 0; i < 30000; i++) { 1431 if ((mfii_read(sc, MPII_HOSTDIAG) & 1432 MPII_HOSTDIAG_RESET_ADAPTER) == 0) 1433 break; 1434 delay(10000); 1435 } 1436 if (i >= 30000) { 1437 printf("%s: failed to reset device\n", DEVNAME(sc)); 1438 return (1); 1439 } 1440 1441 /* disable diagnostic register */ 1442 mfii_write(sc, MPII_WRITESEQ, 0xff); 1443 1444 return(0); 1445 } 1446 1447 int 1448 mfii_transition_firmware(struct mfii_softc *sc) 1449 { 1450 int32_t fw_state, cur_state; 1451 int max_wait, i, reset_on_fault = 1; 1452 1453 fw_state = mfii_fw_state(sc) & MFI_STATE_MASK; 1454 1455 while (fw_state != MFI_STATE_READY) { 1456 cur_state = fw_state; 1457 switch (fw_state) { 1458 case MFI_STATE_FAULT: 1459 if (!reset_on_fault) { 1460 printf("%s: firmware fault\n", DEVNAME(sc)); 1461 return (1); 1462 } 1463 printf("%s: firmware fault; attempting full device " 1464 "reset, this can take some time\n", DEVNAME(sc)); 1465 if (mfii_reset_hard(sc)) 1466 return (1); 1467 max_wait = 20; 1468 reset_on_fault = 0; 1469 break; 1470 case MFI_STATE_WAIT_HANDSHAKE: 1471 mfii_write(sc, MFI_SKINNY_IDB, 1472 MFI_INIT_CLEAR_HANDSHAKE); 1473 max_wait = 2; 1474 break; 1475 case MFI_STATE_OPERATIONAL: 1476 mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY); 1477 max_wait = 10; 1478 break; 1479 case MFI_STATE_BB_INIT: 1480 max_wait = 20; 1481 break; 1482 case MFI_STATE_UNDEFINED: 1483 case MFI_STATE_FW_INIT: 1484 case MFI_STATE_FW_INIT_2: 1485 case MFI_STATE_DEVICE_SCAN: 1486 case MFI_STATE_FLUSH_CACHE: 1487 max_wait = 40; 1488 break; 1489 case MFI_STATE_BOOT_MESSAGE_PENDING: 1490 mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_HOTPLUG); 1491 max_wait = 10; 1492 break; 1493 default: 1494 printf("%s: unknown firmware state %#x\n", 1495 DEVNAME(sc), fw_state); 1496 return (1); 1497 } 1498 for (i = 0; i < (max_wait * 10); i++) { 1499 fw_state = mfii_fw_state(sc) & MFI_STATE_MASK; 1500 if (fw_state == cur_state) 1501 DELAY(100000); 1502 else 1503 break; 1504 } 1505 if (fw_state == cur_state) { 1506 printf("%s: firmware stuck in state %#x\n", 1507 DEVNAME(sc), fw_state); 1508 return (1); 1509 } else { 1510 DPRINTF("%s: firmware state change %#x -> %#x after " 1511 "%d iterations\n", 1512 DEVNAME(sc), cur_state, fw_state, i); 1513 } 1514 } 1515 1516 return (0); 1517 } 1518 1519 int 1520 mfii_get_info(struct mfii_softc *sc) 1521 { 1522 int i, rv; 1523 1524 rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info, 1525 sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP); 1526 1527 if (rv != 0) 1528 return (rv); 1529 1530 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) { 1531 DPRINTF("%s: active FW %s Version %s date %s time %s\n", 1532 DEVNAME(sc), 1533 sc->sc_info.mci_image_component[i].mic_name, 1534 sc->sc_info.mci_image_component[i].mic_version, 1535 sc->sc_info.mci_image_component[i].mic_build_date, 1536 sc->sc_info.mci_image_component[i].mic_build_time); 1537 } 1538 1539 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) { 1540 DPRINTF("%s: pending FW %s Version %s date %s time %s\n", 1541 DEVNAME(sc), 1542 sc->sc_info.mci_pending_image_component[i].mic_name, 1543 sc->sc_info.mci_pending_image_component[i].mic_version, 1544 sc->sc_info.mci_pending_image_component[i].mic_build_date, 1545 sc->sc_info.mci_pending_image_component[i].mic_build_time); 1546 } 1547 1548 DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n", 1549 DEVNAME(sc), 1550 sc->sc_info.mci_max_arms, 1551 sc->sc_info.mci_max_spans, 1552 sc->sc_info.mci_max_arrays, 1553 sc->sc_info.mci_max_lds, 1554 sc->sc_info.mci_product_name); 1555 1556 DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n", 1557 DEVNAME(sc), 1558 sc->sc_info.mci_serial_number, 1559 sc->sc_info.mci_hw_present, 1560 sc->sc_info.mci_current_fw_time, 1561 sc->sc_info.mci_max_cmds, 1562 sc->sc_info.mci_max_sg_elements); 1563 1564 DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n", 1565 DEVNAME(sc), 1566 sc->sc_info.mci_max_request_size, 1567 sc->sc_info.mci_lds_present, 1568 sc->sc_info.mci_lds_degraded, 1569 sc->sc_info.mci_lds_offline, 1570 sc->sc_info.mci_pd_present); 1571 1572 DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n", 1573 DEVNAME(sc), 1574 sc->sc_info.mci_pd_disks_present, 1575 sc->sc_info.mci_pd_disks_pred_failure, 1576 sc->sc_info.mci_pd_disks_failed); 1577 1578 DPRINTF("%s: nvram %d mem %d flash %d\n", 1579 DEVNAME(sc), 1580 sc->sc_info.mci_nvram_size, 1581 sc->sc_info.mci_memory_size, 1582 sc->sc_info.mci_flash_size); 1583 1584 DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n", 1585 DEVNAME(sc), 1586 sc->sc_info.mci_ram_correctable_errors, 1587 sc->sc_info.mci_ram_uncorrectable_errors, 1588 sc->sc_info.mci_cluster_allowed, 1589 sc->sc_info.mci_cluster_active); 1590 1591 DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n", 1592 DEVNAME(sc), 1593 sc->sc_info.mci_max_strips_per_io, 1594 sc->sc_info.mci_raid_levels, 1595 sc->sc_info.mci_adapter_ops, 1596 sc->sc_info.mci_ld_ops); 1597 1598 DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n", 1599 DEVNAME(sc), 1600 sc->sc_info.mci_stripe_sz_ops.min, 1601 sc->sc_info.mci_stripe_sz_ops.max, 1602 sc->sc_info.mci_pd_ops, 1603 sc->sc_info.mci_pd_mix_support); 1604 1605 DPRINTF("%s: ecc_bucket %d pckg_prop %s\n", 1606 DEVNAME(sc), 1607 sc->sc_info.mci_ecc_bucket_count, 1608 sc->sc_info.mci_package_version); 1609 1610 DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n", 1611 DEVNAME(sc), 1612 sc->sc_info.mci_properties.mcp_seq_num, 1613 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval, 1614 sc->sc_info.mci_properties.mcp_intr_throttle_cnt, 1615 sc->sc_info.mci_properties.mcp_intr_throttle_timeout); 1616 1617 DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n", 1618 DEVNAME(sc), 1619 sc->sc_info.mci_properties.mcp_rebuild_rate, 1620 sc->sc_info.mci_properties.mcp_patrol_read_rate, 1621 sc->sc_info.mci_properties.mcp_bgi_rate, 1622 sc->sc_info.mci_properties.mcp_cc_rate); 1623 1624 DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n", 1625 DEVNAME(sc), 1626 sc->sc_info.mci_properties.mcp_recon_rate, 1627 sc->sc_info.mci_properties.mcp_cache_flush_interval, 1628 sc->sc_info.mci_properties.mcp_spinup_drv_cnt, 1629 sc->sc_info.mci_properties.mcp_spinup_delay, 1630 sc->sc_info.mci_properties.mcp_cluster_enable); 1631 1632 DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n", 1633 DEVNAME(sc), 1634 sc->sc_info.mci_properties.mcp_coercion_mode, 1635 sc->sc_info.mci_properties.mcp_alarm_enable, 1636 sc->sc_info.mci_properties.mcp_disable_auto_rebuild, 1637 sc->sc_info.mci_properties.mcp_disable_battery_warn, 1638 sc->sc_info.mci_properties.mcp_ecc_bucket_size); 1639 1640 DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n", 1641 DEVNAME(sc), 1642 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate, 1643 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion, 1644 sc->sc_info.mci_properties.mcp_expose_encl_devices); 1645 1646 DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n", 1647 DEVNAME(sc), 1648 sc->sc_info.mci_pci.mip_vendor, 1649 sc->sc_info.mci_pci.mip_device, 1650 sc->sc_info.mci_pci.mip_subvendor, 1651 sc->sc_info.mci_pci.mip_subdevice); 1652 1653 DPRINTF("%s: type %#x port_count %d port_addr ", 1654 DEVNAME(sc), 1655 sc->sc_info.mci_host.mih_type, 1656 sc->sc_info.mci_host.mih_port_count); 1657 1658 for (i = 0; i < 8; i++) 1659 DPRINTF("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]); 1660 DPRINTF("\n"); 1661 1662 DPRINTF("%s: type %.x port_count %d port_addr ", 1663 DEVNAME(sc), 1664 sc->sc_info.mci_device.mid_type, 1665 sc->sc_info.mci_device.mid_port_count); 1666 1667 for (i = 0; i < 8; i++) 1668 DPRINTF("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]); 1669 DPRINTF("\n"); 1670 1671 return (0); 1672 } 1673 1674 int 1675 mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb) 1676 { 1677 struct mfi_frame_header *hdr = ccb->ccb_request; 1678 u_int64_t r; 1679 int to = 0, rv = 0; 1680 1681 #ifdef DIAGNOSTIC 1682 if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL) 1683 panic("mfii_mfa_poll called with cookie or done set"); 1684 #endif 1685 1686 hdr->mfh_context = ccb->ccb_smid; 1687 hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS; 1688 hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 1689 1690 r = MFII_REQ_MFA(ccb->ccb_request_dva); 1691 memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req)); 1692 1693 mfii_start(sc, ccb); 1694 1695 for (;;) { 1696 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests), 1697 ccb->ccb_request_offset, MFII_REQUEST_SIZE, 1698 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1699 1700 if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS) 1701 break; 1702 1703 if (to++ > 5000) { /* XXX 5 seconds busywait sucks */ 1704 printf("%s: timeout on ccb %d\n", DEVNAME(sc), 1705 ccb->ccb_smid); 1706 ccb->ccb_flags |= MFI_CCB_F_ERR; 1707 rv = 1; 1708 break; 1709 } 1710 1711 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests), 1712 ccb->ccb_request_offset, MFII_REQUEST_SIZE, 1713 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1714 1715 delay(1000); 1716 } 1717 1718 if (ccb->ccb_len > 0) { 1719 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 1720 0, ccb->ccb_dmamap->dm_mapsize, 1721 (ccb->ccb_direction == MFII_DATA_IN) ? 1722 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1723 1724 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap); 1725 } 1726 1727 return (rv); 1728 } 1729 1730 int 1731 mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb) 1732 { 1733 void (*done)(struct mfii_softc *, struct mfii_ccb *); 1734 void *cookie; 1735 int rv = 1; 1736 1737 done = ccb->ccb_done; 1738 cookie = ccb->ccb_cookie; 1739 1740 ccb->ccb_done = mfii_poll_done; 1741 ccb->ccb_cookie = &rv; 1742 1743 mfii_start(sc, ccb); 1744 1745 do { 1746 delay(10); 1747 mfii_postq(sc); 1748 } while (rv == 1); 1749 1750 ccb->ccb_cookie = cookie; 1751 done(sc, ccb); 1752 1753 return (0); 1754 } 1755 1756 void 1757 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb) 1758 { 1759 int *rv = ccb->ccb_cookie; 1760 1761 *rv = 0; 1762 } 1763 1764 int 1765 mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb) 1766 { 1767 struct mutex m; 1768 1769 mtx_init(&m, IPL_BIO); 1770 1771 #ifdef DIAGNOSTIC 1772 if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL) 1773 panic("mfii_exec called with cookie or done set"); 1774 #endif 1775 1776 ccb->ccb_cookie = &m; 1777 ccb->ccb_done = mfii_exec_done; 1778 1779 mfii_start(sc, ccb); 1780 1781 mtx_enter(&m); 1782 while (ccb->ccb_cookie != NULL) 1783 msleep_nsec(ccb, &m, PRIBIO, "mfiiexec", INFSLP); 1784 mtx_leave(&m); 1785 1786 return (0); 1787 } 1788 1789 void 1790 mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb) 1791 { 1792 struct mutex *m = ccb->ccb_cookie; 1793 1794 mtx_enter(m); 1795 ccb->ccb_cookie = NULL; 1796 wakeup_one(ccb); 1797 mtx_leave(m); 1798 } 1799 1800 int 1801 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox, 1802 void *buf, size_t len, int flags) 1803 { 1804 struct mfii_ccb *ccb; 1805 int rv; 1806 1807 ccb = scsi_io_get(&sc->sc_iopool, flags); 1808 if (ccb == NULL) 1809 return (ENOMEM); 1810 1811 mfii_scrub_ccb(ccb); 1812 rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, flags); 1813 scsi_io_put(&sc->sc_iopool, ccb); 1814 1815 return (rv); 1816 } 1817 1818 int 1819 mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc, 1820 const union mfi_mbox *mbox, void *buf, size_t len, int flags) 1821 { 1822 struct mpii_msg_scsi_io *io = ccb->ccb_request; 1823 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1); 1824 struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1); 1825 struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi; 1826 struct mfi_frame_header *hdr = &dcmd->mdf_header; 1827 u_int8_t *dma_buf = NULL; 1828 int rv = EIO; 1829 1830 if (cold) 1831 flags |= SCSI_NOSLEEP; 1832 1833 if (buf != NULL) { 1834 dma_buf = dma_alloc(len, PR_WAITOK); 1835 if (dma_buf == NULL) 1836 return (ENOMEM); 1837 } 1838 1839 ccb->ccb_data = dma_buf; 1840 ccb->ccb_len = len; 1841 switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 1842 case SCSI_DATA_IN: 1843 ccb->ccb_direction = MFII_DATA_IN; 1844 hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ); 1845 break; 1846 case SCSI_DATA_OUT: 1847 ccb->ccb_direction = MFII_DATA_OUT; 1848 hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE); 1849 memcpy(dma_buf, buf, len); 1850 break; 1851 case 0: 1852 ccb->ccb_direction = MFII_DATA_NONE; 1853 hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE); 1854 break; 1855 } 1856 1857 if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, 1858 ISSET(flags, SCSI_NOSLEEP)) != 0) { 1859 rv = ENOMEM; 1860 goto done; 1861 } 1862 1863 hdr->mfh_cmd = MFI_CMD_DCMD; 1864 hdr->mfh_context = ccb->ccb_smid; 1865 hdr->mfh_data_len = htole32(len); 1866 hdr->mfh_sg_count = len ? ccb->ccb_dmamap->dm_nsegs : 0; 1867 1868 dcmd->mdf_opcode = opc; 1869 /* handle special opcodes */ 1870 if (mbox != NULL) 1871 memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox)); 1872 1873 io->function = MFII_FUNCTION_PASSTHRU_IO; 1874 1875 if (len) { 1876 io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4; 1877 io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16; 1878 htolem64(&sge->sg_addr, ccb->ccb_mfi_dva); 1879 htolem32(&sge->sg_len, MFI_FRAME_SIZE); 1880 sge->sg_flags = 1881 MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA; 1882 } 1883 1884 ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI; 1885 ccb->ccb_req.smid = letoh16(ccb->ccb_smid); 1886 1887 if (ISSET(flags, SCSI_NOSLEEP)) { 1888 ccb->ccb_done = mfii_empty_done; 1889 mfii_poll(sc, ccb); 1890 } else 1891 mfii_exec(sc, ccb); 1892 1893 if (hdr->mfh_cmd_status == MFI_STAT_OK) { 1894 rv = 0; 1895 1896 if (ccb->ccb_direction == MFII_DATA_IN) 1897 memcpy(buf, dma_buf, len); 1898 } 1899 1900 done: 1901 if (buf != NULL) 1902 dma_free(dma_buf, len); 1903 1904 return (rv); 1905 } 1906 1907 void 1908 mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb) 1909 { 1910 return; 1911 } 1912 1913 int 1914 mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb, 1915 void *sglp, int nosleep) 1916 { 1917 union mfi_sgl *sgl = sglp; 1918 bus_dmamap_t dmap = ccb->ccb_dmamap; 1919 int error; 1920 int i; 1921 1922 if (ccb->ccb_len == 0) 1923 return (0); 1924 1925 error = bus_dmamap_load(sc->sc_dmat, dmap, 1926 ccb->ccb_data, ccb->ccb_len, NULL, 1927 nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK); 1928 if (error) { 1929 printf("%s: error %d loading dmamap\n", DEVNAME(sc), error); 1930 return (1); 1931 } 1932 1933 for (i = 0; i < dmap->dm_nsegs; i++) { 1934 sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr); 1935 sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len); 1936 } 1937 1938 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 1939 ccb->ccb_direction == MFII_DATA_OUT ? 1940 BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD); 1941 1942 return (0); 1943 } 1944 1945 void 1946 mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb) 1947 { 1948 u_long *r = (u_long *)&ccb->ccb_req; 1949 1950 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests), 1951 ccb->ccb_request_offset, MFII_REQUEST_SIZE, 1952 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1953 1954 #if defined(__LP64__) 1955 bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r); 1956 #else 1957 mtx_enter(&sc->sc_post_mtx); 1958 bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]); 1959 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 1960 MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE); 1961 1962 bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]); 1963 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 1964 MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE); 1965 mtx_leave(&sc->sc_post_mtx); 1966 #endif 1967 } 1968 1969 void 1970 mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb) 1971 { 1972 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests), 1973 ccb->ccb_request_offset, MFII_REQUEST_SIZE, 1974 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1975 1976 if (ccb->ccb_sgl_len > 0) { 1977 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl), 1978 ccb->ccb_sgl_offset, ccb->ccb_sgl_len, 1979 BUS_DMASYNC_POSTWRITE); 1980 } 1981 1982 if (ccb->ccb_len > 0) { 1983 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 1984 0, ccb->ccb_dmamap->dm_mapsize, 1985 (ccb->ccb_direction == MFII_DATA_IN) ? 1986 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1987 1988 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap); 1989 } 1990 1991 ccb->ccb_done(sc, ccb); 1992 } 1993 1994 int 1995 mfii_initialise_firmware(struct mfii_softc *sc) 1996 { 1997 struct mpii_msg_iocinit_request *iiq; 1998 struct mfii_dmamem *m; 1999 struct mfii_ccb *ccb; 2000 struct mfi_init_frame *init; 2001 int rv; 2002 2003 m = mfii_dmamem_alloc(sc, sizeof(*iiq)); 2004 if (m == NULL) 2005 return (1); 2006 2007 iiq = MFII_DMA_KVA(m); 2008 memset(iiq, 0, sizeof(*iiq)); 2009 2010 iiq->function = MPII_FUNCTION_IOC_INIT; 2011 iiq->whoinit = MPII_WHOINIT_HOST_DRIVER; 2012 2013 iiq->msg_version_maj = 0x02; 2014 iiq->msg_version_min = 0x00; 2015 iiq->hdr_version_unit = 0x10; 2016 iiq->hdr_version_dev = 0x0; 2017 2018 iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4); 2019 2020 iiq->reply_descriptor_post_queue_depth = 2021 htole16(sc->sc_reply_postq_depth); 2022 iiq->reply_free_queue_depth = htole16(0); 2023 2024 htolem32(&iiq->sense_buffer_address_high, 2025 MFII_DMA_DVA(sc->sc_sense) >> 32); 2026 2027 htolem32(&iiq->reply_descriptor_post_queue_address_lo, 2028 MFII_DMA_DVA(sc->sc_reply_postq)); 2029 htolem32(&iiq->reply_descriptor_post_queue_address_hi, 2030 MFII_DMA_DVA(sc->sc_reply_postq) >> 32); 2031 2032 htolem32(&iiq->system_request_frame_base_address_lo, 2033 MFII_DMA_DVA(sc->sc_requests)); 2034 htolem32(&iiq->system_request_frame_base_address_hi, 2035 MFII_DMA_DVA(sc->sc_requests) >> 32); 2036 2037 iiq->timestamp = htole64(getuptime()); 2038 2039 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2040 if (ccb == NULL) { 2041 /* shouldn't ever run out of ccbs during attach */ 2042 return (1); 2043 } 2044 mfii_scrub_ccb(ccb); 2045 init = ccb->ccb_request; 2046 2047 init->mif_header.mfh_cmd = MFI_CMD_INIT; 2048 init->mif_header.mfh_data_len = htole32(sizeof(*iiq)); 2049 init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m)); 2050 2051 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq), 2052 0, MFII_DMA_LEN(sc->sc_reply_postq), 2053 BUS_DMASYNC_PREREAD); 2054 2055 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m), 2056 0, sizeof(*iiq), BUS_DMASYNC_PREREAD); 2057 2058 rv = mfii_mfa_poll(sc, ccb); 2059 2060 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m), 2061 0, sizeof(*iiq), BUS_DMASYNC_POSTREAD); 2062 2063 scsi_io_put(&sc->sc_iopool, ccb); 2064 mfii_dmamem_free(sc, m); 2065 2066 return (rv); 2067 } 2068 2069 int 2070 mfii_my_intr(struct mfii_softc *sc) 2071 { 2072 u_int32_t status; 2073 2074 status = mfii_read(sc, MFI_OSTS); 2075 if (ISSET(status, 0x1)) { 2076 mfii_write(sc, MFI_OSTS, status); 2077 return (1); 2078 } 2079 2080 return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0); 2081 } 2082 2083 int 2084 mfii_intr(void *arg) 2085 { 2086 struct mfii_softc *sc = arg; 2087 2088 if (!mfii_my_intr(sc)) 2089 return (0); 2090 2091 mfii_postq(sc); 2092 2093 return (1); 2094 } 2095 2096 void 2097 mfii_postq(struct mfii_softc *sc) 2098 { 2099 struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs); 2100 struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq); 2101 struct mpii_reply_descr *rdp; 2102 struct mfii_ccb *ccb; 2103 int rpi = 0; 2104 2105 mtx_enter(&sc->sc_reply_postq_mtx); 2106 2107 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq), 2108 0, MFII_DMA_LEN(sc->sc_reply_postq), 2109 BUS_DMASYNC_POSTREAD); 2110 2111 for (;;) { 2112 rdp = &postq[sc->sc_reply_postq_index]; 2113 if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) == 2114 MPII_REPLY_DESCR_UNUSED) 2115 break; 2116 if (rdp->data == 0xffffffff) { 2117 /* 2118 * ioc is still writing to the reply post queue 2119 * race condition - bail! 2120 */ 2121 break; 2122 } 2123 2124 ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1]; 2125 SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link); 2126 memset(rdp, 0xff, sizeof(*rdp)); 2127 2128 sc->sc_reply_postq_index++; 2129 sc->sc_reply_postq_index %= sc->sc_reply_postq_depth; 2130 rpi = 1; 2131 } 2132 2133 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq), 2134 0, MFII_DMA_LEN(sc->sc_reply_postq), 2135 BUS_DMASYNC_PREREAD); 2136 2137 if (rpi) 2138 mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index); 2139 2140 mtx_leave(&sc->sc_reply_postq_mtx); 2141 2142 while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) { 2143 SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link); 2144 mfii_done(sc, ccb); 2145 } 2146 } 2147 2148 void 2149 mfii_scsi_cmd(struct scsi_xfer *xs) 2150 { 2151 struct scsi_link *link = xs->sc_link; 2152 struct mfii_softc *sc = link->bus->sb_adapter_softc; 2153 struct mfii_ccb *ccb = xs->io; 2154 2155 mfii_scrub_ccb(ccb); 2156 ccb->ccb_cookie = xs; 2157 ccb->ccb_done = mfii_scsi_cmd_done; 2158 ccb->ccb_data = xs->data; 2159 ccb->ccb_len = xs->datalen; 2160 2161 timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs); 2162 2163 switch (xs->cmd.opcode) { 2164 case READ_COMMAND: 2165 case READ_10: 2166 case READ_12: 2167 case READ_16: 2168 case WRITE_COMMAND: 2169 case WRITE_10: 2170 case WRITE_12: 2171 case WRITE_16: 2172 if (mfii_scsi_cmd_io(sc, xs) != 0) 2173 goto stuffup; 2174 2175 break; 2176 2177 default: 2178 if (mfii_scsi_cmd_cdb(sc, xs) != 0) 2179 goto stuffup; 2180 break; 2181 } 2182 2183 xs->error = XS_NOERROR; 2184 xs->resid = 0; 2185 2186 if (ISSET(xs->flags, SCSI_POLL)) { 2187 if (mfii_poll(sc, ccb) != 0) 2188 goto stuffup; 2189 return; 2190 } 2191 2192 ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */ 2193 timeout_add_msec(&xs->stimeout, xs->timeout); 2194 mfii_start(sc, ccb); 2195 2196 return; 2197 2198 stuffup: 2199 xs->error = XS_DRIVER_STUFFUP; 2200 scsi_done(xs); 2201 } 2202 2203 void 2204 mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb) 2205 { 2206 struct scsi_xfer *xs = ccb->ccb_cookie; 2207 struct mpii_msg_scsi_io *io = ccb->ccb_request; 2208 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1); 2209 u_int refs = 1; 2210 2211 if (timeout_del(&xs->stimeout)) 2212 refs = 2; 2213 2214 switch (ctx->status) { 2215 case MFI_STAT_OK: 2216 break; 2217 2218 case MFI_STAT_SCSI_DONE_WITH_ERROR: 2219 xs->error = XS_SENSE; 2220 memset(&xs->sense, 0, sizeof(xs->sense)); 2221 memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense)); 2222 break; 2223 2224 case MFI_STAT_LD_OFFLINE: 2225 case MFI_STAT_DEVICE_NOT_FOUND: 2226 xs->error = XS_SELTIMEOUT; 2227 break; 2228 2229 default: 2230 xs->error = XS_DRIVER_STUFFUP; 2231 break; 2232 } 2233 2234 if (atomic_sub_int_nv(&ccb->ccb_refcnt, refs) == 0) 2235 scsi_done(xs); 2236 } 2237 2238 int 2239 mfii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag) 2240 { 2241 struct mfii_softc *sc = link->bus->sb_adapter_softc; 2242 2243 DNPRINTF(MFII_D_IOCTL, "%s: mfii_scsi_ioctl\n", DEVNAME(sc)); 2244 2245 switch (cmd) { 2246 case DIOCGCACHE: 2247 case DIOCSCACHE: 2248 return (mfii_ioctl_cache(link, cmd, (struct dk_cache *)addr)); 2249 break; 2250 2251 default: 2252 if (sc->sc_ioctl) 2253 return (sc->sc_ioctl(&sc->sc_dev, cmd, addr)); 2254 break; 2255 } 2256 2257 return (ENOTTY); 2258 } 2259 2260 int 2261 mfii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc) 2262 { 2263 struct mfii_softc *sc = link->bus->sb_adapter_softc; 2264 int rv, wrenable, rdenable; 2265 struct mfi_ld_prop ldp; 2266 union mfi_mbox mbox; 2267 2268 if (mfii_get_info(sc)) { 2269 rv = EIO; 2270 goto done; 2271 } 2272 2273 if (sc->sc_target_lds[link->target] == -1) { 2274 rv = EIO; 2275 goto done; 2276 } 2277 2278 memset(&mbox, 0, sizeof(mbox)); 2279 mbox.b[0] = link->target; 2280 rv = mfii_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, &mbox, &ldp, sizeof(ldp), 2281 SCSI_DATA_IN); 2282 if (rv != 0) 2283 goto done; 2284 2285 if (sc->sc_info.mci_memory_size > 0) { 2286 wrenable = ISSET(ldp.mlp_cur_cache_policy, 2287 MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0; 2288 rdenable = ISSET(ldp.mlp_cur_cache_policy, 2289 MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0; 2290 } else { 2291 wrenable = ISSET(ldp.mlp_diskcache_policy, 2292 MR_LD_DISK_CACHE_ENABLE)? 1 : 0; 2293 rdenable = 0; 2294 } 2295 2296 if (cmd == DIOCGCACHE) { 2297 dc->wrcache = wrenable; 2298 dc->rdcache = rdenable; 2299 goto done; 2300 } /* else DIOCSCACHE */ 2301 2302 if (((dc->wrcache) ? 1 : 0) == wrenable && 2303 ((dc->rdcache) ? 1 : 0) == rdenable) 2304 goto done; 2305 2306 memset(&mbox, 0, sizeof(mbox)); 2307 mbox.b[0] = ldp.mlp_ld.mld_target; 2308 mbox.b[1] = ldp.mlp_ld.mld_res; 2309 mbox.s[1] = ldp.mlp_ld.mld_seq; 2310 2311 if (sc->sc_info.mci_memory_size > 0) { 2312 if (dc->rdcache) 2313 SET(ldp.mlp_cur_cache_policy, 2314 MR_LD_CACHE_ALLOW_READ_CACHE); 2315 else 2316 CLR(ldp.mlp_cur_cache_policy, 2317 MR_LD_CACHE_ALLOW_READ_CACHE); 2318 if (dc->wrcache) 2319 SET(ldp.mlp_cur_cache_policy, 2320 MR_LD_CACHE_ALLOW_WRITE_CACHE); 2321 else 2322 CLR(ldp.mlp_cur_cache_policy, 2323 MR_LD_CACHE_ALLOW_WRITE_CACHE); 2324 } else { 2325 if (dc->rdcache) { 2326 rv = EOPNOTSUPP; 2327 goto done; 2328 } 2329 if (dc->wrcache) 2330 ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE; 2331 else 2332 ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE; 2333 } 2334 2335 rv = mfii_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, &mbox, &ldp, sizeof(ldp), 2336 SCSI_DATA_OUT); 2337 done: 2338 return (rv); 2339 } 2340 2341 int 2342 mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs) 2343 { 2344 struct scsi_link *link = xs->sc_link; 2345 struct mfii_ccb *ccb = xs->io; 2346 struct mpii_msg_scsi_io *io = ccb->ccb_request; 2347 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1); 2348 int segs; 2349 2350 io->dev_handle = htole16(link->target); 2351 io->function = MFII_FUNCTION_LDIO_REQUEST; 2352 io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva); 2353 io->sgl_flags = htole16(0x02); /* XXX */ 2354 io->sense_buffer_length = sizeof(xs->sense); 2355 io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4; 2356 io->data_length = htole32(xs->datalen); 2357 io->io_flags = htole16(xs->cmdlen); 2358 switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 2359 case SCSI_DATA_IN: 2360 ccb->ccb_direction = MFII_DATA_IN; 2361 io->direction = MPII_SCSIIO_DIR_READ; 2362 break; 2363 case SCSI_DATA_OUT: 2364 ccb->ccb_direction = MFII_DATA_OUT; 2365 io->direction = MPII_SCSIIO_DIR_WRITE; 2366 break; 2367 default: 2368 ccb->ccb_direction = MFII_DATA_NONE; 2369 io->direction = MPII_SCSIIO_DIR_NONE; 2370 break; 2371 } 2372 memcpy(io->cdb, &xs->cmd, xs->cmdlen); 2373 2374 ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg; 2375 ctx->timeout_value = htole16(0x14); /* XXX */ 2376 ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags); 2377 ctx->virtual_disk_target_id = htole16(link->target); 2378 2379 if (mfii_load_ccb(sc, ccb, ctx + 1, 2380 ISSET(xs->flags, SCSI_NOSLEEP)) != 0) 2381 return (1); 2382 2383 segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs; 2384 switch (sc->sc_iop->num_sge_loc) { 2385 case MFII_IOP_NUM_SGE_LOC_ORIG: 2386 ctx->num_sge = segs; 2387 break; 2388 case MFII_IOP_NUM_SGE_LOC_35: 2389 /* 12 bit field, but we're only using the lower 8 */ 2390 ctx->span_arm = segs; 2391 break; 2392 } 2393 2394 ccb->ccb_req.flags = sc->sc_iop->ldio_req_type; 2395 ccb->ccb_req.smid = letoh16(ccb->ccb_smid); 2396 2397 return (0); 2398 } 2399 2400 int 2401 mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs) 2402 { 2403 struct scsi_link *link = xs->sc_link; 2404 struct mfii_ccb *ccb = xs->io; 2405 struct mpii_msg_scsi_io *io = ccb->ccb_request; 2406 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1); 2407 2408 io->dev_handle = htole16(link->target); 2409 io->function = MFII_FUNCTION_LDIO_REQUEST; 2410 io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva); 2411 io->sgl_flags = htole16(0x02); /* XXX */ 2412 io->sense_buffer_length = sizeof(xs->sense); 2413 io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4; 2414 io->data_length = htole32(xs->datalen); 2415 io->io_flags = htole16(xs->cmdlen); 2416 io->lun[0] = htobe16(link->lun); 2417 switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 2418 case SCSI_DATA_IN: 2419 ccb->ccb_direction = MFII_DATA_IN; 2420 io->direction = MPII_SCSIIO_DIR_READ; 2421 break; 2422 case SCSI_DATA_OUT: 2423 ccb->ccb_direction = MFII_DATA_OUT; 2424 io->direction = MPII_SCSIIO_DIR_WRITE; 2425 break; 2426 default: 2427 ccb->ccb_direction = MFII_DATA_NONE; 2428 io->direction = MPII_SCSIIO_DIR_NONE; 2429 break; 2430 } 2431 memcpy(io->cdb, &xs->cmd, xs->cmdlen); 2432 2433 ctx->virtual_disk_target_id = htole16(link->target); 2434 2435 if (mfii_load_ccb(sc, ccb, ctx + 1, 2436 ISSET(xs->flags, SCSI_NOSLEEP)) != 0) 2437 return (1); 2438 2439 ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs; 2440 2441 ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI; 2442 ccb->ccb_req.smid = letoh16(ccb->ccb_smid); 2443 2444 return (0); 2445 } 2446 2447 void 2448 mfii_pd_scsi_cmd(struct scsi_xfer *xs) 2449 { 2450 struct scsi_link *link = xs->sc_link; 2451 struct mfii_softc *sc = link->bus->sb_adapter_softc; 2452 struct mfii_ccb *ccb = xs->io; 2453 2454 mfii_scrub_ccb(ccb); 2455 ccb->ccb_cookie = xs; 2456 ccb->ccb_done = mfii_scsi_cmd_done; 2457 ccb->ccb_data = xs->data; 2458 ccb->ccb_len = xs->datalen; 2459 2460 timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs); 2461 2462 xs->error = mfii_pd_scsi_cmd_cdb(sc, xs); 2463 if (xs->error != XS_NOERROR) 2464 goto done; 2465 2466 xs->resid = 0; 2467 2468 if (ISSET(xs->flags, SCSI_POLL)) { 2469 if (mfii_poll(sc, ccb) != 0) 2470 goto stuffup; 2471 return; 2472 } 2473 2474 ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */ 2475 timeout_add_msec(&xs->stimeout, xs->timeout); 2476 mfii_start(sc, ccb); 2477 2478 return; 2479 2480 stuffup: 2481 xs->error = XS_DRIVER_STUFFUP; 2482 done: 2483 scsi_done(xs); 2484 } 2485 2486 int 2487 mfii_pd_scsi_probe(struct scsi_link *link) 2488 { 2489 struct mfii_softc *sc = link->bus->sb_adapter_softc; 2490 struct mfi_pd_details mpd; 2491 union mfi_mbox mbox; 2492 int rv; 2493 2494 if (link->lun > 0) 2495 return (0); 2496 2497 memset(&mbox, 0, sizeof(mbox)); 2498 mbox.s[0] = htole16(link->target); 2499 2500 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd), 2501 SCSI_DATA_IN|SCSI_NOSLEEP); 2502 if (rv != 0) 2503 return (EIO); 2504 2505 if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM)) 2506 return (ENXIO); 2507 2508 return (0); 2509 } 2510 2511 int 2512 mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs) 2513 { 2514 struct scsi_link *link = xs->sc_link; 2515 struct mfii_ccb *ccb = xs->io; 2516 struct mpii_msg_scsi_io *io = ccb->ccb_request; 2517 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1); 2518 uint16_t dev_handle; 2519 2520 dev_handle = mfii_dev_handle(sc, link->target); 2521 if (dev_handle == htole16(0xffff)) 2522 return (XS_SELTIMEOUT); 2523 2524 io->dev_handle = dev_handle; 2525 io->function = 0; 2526 io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva); 2527 io->sgl_flags = htole16(0x02); /* XXX */ 2528 io->sense_buffer_length = sizeof(xs->sense); 2529 io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4; 2530 io->data_length = htole32(xs->datalen); 2531 io->io_flags = htole16(xs->cmdlen); 2532 io->lun[0] = htobe16(link->lun); 2533 switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 2534 case SCSI_DATA_IN: 2535 ccb->ccb_direction = MFII_DATA_IN; 2536 io->direction = MPII_SCSIIO_DIR_READ; 2537 break; 2538 case SCSI_DATA_OUT: 2539 ccb->ccb_direction = MFII_DATA_OUT; 2540 io->direction = MPII_SCSIIO_DIR_WRITE; 2541 break; 2542 default: 2543 ccb->ccb_direction = MFII_DATA_NONE; 2544 io->direction = MPII_SCSIIO_DIR_NONE; 2545 break; 2546 } 2547 memcpy(io->cdb, &xs->cmd, xs->cmdlen); 2548 2549 ctx->virtual_disk_target_id = htole16(link->target); 2550 ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD; 2551 ctx->timeout_value = sc->sc_pd->pd_timeout; 2552 2553 if (mfii_load_ccb(sc, ccb, ctx + 1, 2554 ISSET(xs->flags, SCSI_NOSLEEP)) != 0) 2555 return (XS_DRIVER_STUFFUP); 2556 2557 ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs; 2558 2559 ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI; 2560 ccb->ccb_req.smid = letoh16(ccb->ccb_smid); 2561 ccb->ccb_req.dev_handle = dev_handle; 2562 2563 return (XS_NOERROR); 2564 } 2565 2566 int 2567 mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp, 2568 int nosleep) 2569 { 2570 struct mpii_msg_request *req = ccb->ccb_request; 2571 struct mfii_sge *sge = NULL, *nsge = sglp; 2572 struct mfii_sge *ce = NULL; 2573 bus_dmamap_t dmap = ccb->ccb_dmamap; 2574 u_int space; 2575 int i; 2576 2577 int error; 2578 2579 if (ccb->ccb_len == 0) 2580 return (0); 2581 2582 error = bus_dmamap_load(sc->sc_dmat, dmap, 2583 ccb->ccb_data, ccb->ccb_len, NULL, 2584 nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK); 2585 if (error) { 2586 printf("%s: error %d loading dmamap\n", DEVNAME(sc), error); 2587 return (1); 2588 } 2589 2590 space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) / 2591 sizeof(*nsge); 2592 if (dmap->dm_nsegs > space) { 2593 space--; 2594 2595 ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge); 2596 memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len); 2597 2598 ce = nsge + space; 2599 ce->sg_addr = htole64(ccb->ccb_sgl_dva); 2600 ce->sg_len = htole32(ccb->ccb_sgl_len); 2601 ce->sg_flags = sc->sc_iop->sge_flag_chain; 2602 2603 req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16; 2604 } 2605 2606 for (i = 0; i < dmap->dm_nsegs; i++) { 2607 if (nsge == ce) 2608 nsge = ccb->ccb_sgl; 2609 2610 sge = nsge; 2611 2612 sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr); 2613 sge->sg_len = htole32(dmap->dm_segs[i].ds_len); 2614 sge->sg_flags = MFII_SGE_ADDR_SYSTEM; 2615 2616 nsge = sge + 1; 2617 } 2618 sge->sg_flags |= sc->sc_iop->sge_flag_eol; 2619 2620 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 2621 ccb->ccb_direction == MFII_DATA_OUT ? 2622 BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD); 2623 2624 if (ccb->ccb_sgl_len > 0) { 2625 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl), 2626 ccb->ccb_sgl_offset, ccb->ccb_sgl_len, 2627 BUS_DMASYNC_PREWRITE); 2628 } 2629 2630 return (0); 2631 } 2632 2633 void 2634 mfii_scsi_cmd_tmo(void *xsp) 2635 { 2636 struct scsi_xfer *xs = xsp; 2637 struct scsi_link *link = xs->sc_link; 2638 struct mfii_softc *sc = link->bus->sb_adapter_softc; 2639 struct mfii_ccb *ccb = xs->io; 2640 2641 mtx_enter(&sc->sc_abort_mtx); 2642 SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link); 2643 mtx_leave(&sc->sc_abort_mtx); 2644 2645 task_add(systqmp, &sc->sc_abort_task); 2646 } 2647 2648 void 2649 mfii_abort_task(void *scp) 2650 { 2651 struct mfii_softc *sc = scp; 2652 struct mfii_ccb *list; 2653 2654 mtx_enter(&sc->sc_abort_mtx); 2655 list = SIMPLEQ_FIRST(&sc->sc_abort_list); 2656 SIMPLEQ_INIT(&sc->sc_abort_list); 2657 mtx_leave(&sc->sc_abort_mtx); 2658 2659 while (list != NULL) { 2660 struct mfii_ccb *ccb = list; 2661 struct scsi_xfer *xs = ccb->ccb_cookie; 2662 struct scsi_link *link = xs->sc_link; 2663 2664 uint16_t dev_handle; 2665 struct mfii_ccb *accb; 2666 2667 list = SIMPLEQ_NEXT(ccb, ccb_link); 2668 2669 dev_handle = mfii_dev_handle(sc, link->target); 2670 if (dev_handle == htole16(0xffff)) { 2671 /* device is gone */ 2672 if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0) 2673 scsi_done(xs); 2674 continue; 2675 } 2676 2677 accb = scsi_io_get(&sc->sc_iopool, 0); 2678 mfii_scrub_ccb(accb); 2679 mfii_abort(sc, accb, dev_handle, ccb->ccb_smid, 2680 MPII_SCSI_TASK_ABORT_TASK, 2681 htole32(MFII_TASK_MGMT_FLAGS_PD)); 2682 2683 accb->ccb_cookie = ccb; 2684 accb->ccb_done = mfii_scsi_cmd_abort_done; 2685 2686 mfii_start(sc, accb); 2687 } 2688 } 2689 2690 void 2691 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle, 2692 uint16_t smid, uint8_t type, uint32_t flags) 2693 { 2694 struct mfii_task_mgmt *msg; 2695 struct mpii_msg_scsi_task_request *req; 2696 2697 msg = accb->ccb_request; 2698 req = &msg->mpii_request; 2699 req->dev_handle = dev_handle; 2700 req->function = MPII_FUNCTION_SCSI_TASK_MGMT; 2701 req->task_type = type; 2702 htolem16(&req->task_mid, smid); 2703 msg->flags = flags; 2704 2705 accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI; 2706 accb->ccb_req.smid = letoh16(accb->ccb_smid); 2707 } 2708 2709 void 2710 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb) 2711 { 2712 struct mfii_ccb *ccb = accb->ccb_cookie; 2713 struct scsi_xfer *xs = ccb->ccb_cookie; 2714 2715 /* XXX check accb completion? */ 2716 2717 scsi_io_put(&sc->sc_iopool, accb); 2718 2719 if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0) 2720 scsi_done(xs); 2721 } 2722 2723 void * 2724 mfii_get_ccb(void *cookie) 2725 { 2726 struct mfii_softc *sc = cookie; 2727 struct mfii_ccb *ccb; 2728 2729 mtx_enter(&sc->sc_ccb_mtx); 2730 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq); 2731 if (ccb != NULL) 2732 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link); 2733 mtx_leave(&sc->sc_ccb_mtx); 2734 2735 return (ccb); 2736 } 2737 2738 void 2739 mfii_scrub_ccb(struct mfii_ccb *ccb) 2740 { 2741 ccb->ccb_cookie = NULL; 2742 ccb->ccb_done = NULL; 2743 ccb->ccb_flags = 0; 2744 ccb->ccb_data = NULL; 2745 ccb->ccb_direction = 0; 2746 ccb->ccb_len = 0; 2747 ccb->ccb_sgl_len = 0; 2748 ccb->ccb_refcnt = 1; 2749 2750 memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req)); 2751 memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE); 2752 memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE); 2753 } 2754 2755 void 2756 mfii_put_ccb(void *cookie, void *io) 2757 { 2758 struct mfii_softc *sc = cookie; 2759 struct mfii_ccb *ccb = io; 2760 2761 mtx_enter(&sc->sc_ccb_mtx); 2762 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link); 2763 mtx_leave(&sc->sc_ccb_mtx); 2764 } 2765 2766 int 2767 mfii_init_ccb(struct mfii_softc *sc) 2768 { 2769 struct mfii_ccb *ccb; 2770 u_int8_t *request = MFII_DMA_KVA(sc->sc_requests); 2771 u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi); 2772 u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense); 2773 u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl); 2774 u_int i; 2775 int error; 2776 2777 sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb), 2778 M_DEVBUF, M_WAITOK|M_ZERO); 2779 2780 for (i = 0; i < sc->sc_max_cmds; i++) { 2781 ccb = &sc->sc_ccb[i]; 2782 2783 /* create a dma map for transfer */ 2784 error = bus_dmamap_create(sc->sc_dmat, 2785 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0, 2786 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap); 2787 if (error) { 2788 printf("%s: cannot create ccb dmamap (%d)\n", 2789 DEVNAME(sc), error); 2790 goto destroy; 2791 } 2792 2793 /* select i + 1'th request. 0 is reserved for events */ 2794 ccb->ccb_smid = i + 1; 2795 ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1); 2796 ccb->ccb_request = request + ccb->ccb_request_offset; 2797 ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) + 2798 ccb->ccb_request_offset; 2799 2800 /* select i'th MFI command frame */ 2801 ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i; 2802 ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset; 2803 ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) + 2804 ccb->ccb_mfi_offset; 2805 2806 /* select i'th sense */ 2807 ccb->ccb_sense_offset = MFI_SENSE_SIZE * i; 2808 ccb->ccb_sense = (struct mfi_sense *)(sense + 2809 ccb->ccb_sense_offset); 2810 ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) + 2811 ccb->ccb_sense_offset; 2812 2813 /* select i'th sgl */ 2814 ccb->ccb_sgl_offset = sizeof(struct mfii_sge) * 2815 sc->sc_max_sgl * i; 2816 ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset); 2817 ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) + 2818 ccb->ccb_sgl_offset; 2819 2820 /* add ccb to queue */ 2821 mfii_put_ccb(sc, ccb); 2822 } 2823 2824 return (0); 2825 2826 destroy: 2827 /* free dma maps and ccb memory */ 2828 while ((ccb = mfii_get_ccb(sc)) != NULL) 2829 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 2830 2831 free(sc->sc_ccb, M_DEVBUF, 0); 2832 2833 return (1); 2834 } 2835 2836 #if NBIO > 0 2837 int 2838 mfii_ioctl(struct device *dev, u_long cmd, caddr_t addr) 2839 { 2840 struct mfii_softc *sc = (struct mfii_softc *)dev; 2841 int error = 0; 2842 2843 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc)); 2844 2845 rw_enter_write(&sc->sc_lock); 2846 2847 switch (cmd) { 2848 case BIOCINQ: 2849 DNPRINTF(MFII_D_IOCTL, "inq\n"); 2850 error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr); 2851 break; 2852 2853 case BIOCVOL: 2854 DNPRINTF(MFII_D_IOCTL, "vol\n"); 2855 error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr); 2856 break; 2857 2858 case BIOCDISK: 2859 DNPRINTF(MFII_D_IOCTL, "disk\n"); 2860 error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr); 2861 break; 2862 2863 case BIOCALARM: 2864 DNPRINTF(MFII_D_IOCTL, "alarm\n"); 2865 error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr); 2866 break; 2867 2868 case BIOCBLINK: 2869 DNPRINTF(MFII_D_IOCTL, "blink\n"); 2870 error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr); 2871 break; 2872 2873 case BIOCSETSTATE: 2874 DNPRINTF(MFII_D_IOCTL, "setstate\n"); 2875 error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr); 2876 break; 2877 2878 case BIOCPATROL: 2879 DNPRINTF(MFII_D_IOCTL, "patrol\n"); 2880 error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr); 2881 break; 2882 2883 default: 2884 DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n"); 2885 error = ENOTTY; 2886 } 2887 2888 rw_exit_write(&sc->sc_lock); 2889 2890 return (error); 2891 } 2892 2893 int 2894 mfii_bio_getitall(struct mfii_softc *sc) 2895 { 2896 int i, d, rv = EINVAL; 2897 size_t size; 2898 union mfi_mbox mbox; 2899 struct mfi_conf *cfg = NULL; 2900 struct mfi_ld_details *ld_det = NULL; 2901 2902 /* get info */ 2903 if (mfii_get_info(sc)) { 2904 DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n", 2905 DEVNAME(sc)); 2906 goto done; 2907 } 2908 2909 /* send single element command to retrieve size for full structure */ 2910 cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO); 2911 if (cfg == NULL) 2912 goto done; 2913 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg), 2914 SCSI_DATA_IN)) { 2915 free(cfg, M_DEVBUF, sizeof *cfg); 2916 goto done; 2917 } 2918 2919 size = cfg->mfc_size; 2920 free(cfg, M_DEVBUF, sizeof *cfg); 2921 2922 /* memory for read config */ 2923 cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 2924 if (cfg == NULL) 2925 goto done; 2926 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN)) { 2927 free(cfg, M_DEVBUF, size); 2928 goto done; 2929 } 2930 2931 /* replace current pointer with new one */ 2932 if (sc->sc_cfg) 2933 free(sc->sc_cfg, M_DEVBUF, 0); 2934 sc->sc_cfg = cfg; 2935 2936 /* get all ld info */ 2937 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list, 2938 sizeof(sc->sc_ld_list), SCSI_DATA_IN)) 2939 goto done; 2940 2941 /* get memory for all ld structures */ 2942 size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details); 2943 if (sc->sc_ld_sz != size) { 2944 if (sc->sc_ld_details) 2945 free(sc->sc_ld_details, M_DEVBUF, 0); 2946 2947 ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 2948 if (ld_det == NULL) 2949 goto done; 2950 sc->sc_ld_sz = size; 2951 sc->sc_ld_details = ld_det; 2952 } 2953 2954 /* find used physical disks */ 2955 size = sizeof(struct mfi_ld_details); 2956 for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) { 2957 memset(&mbox, 0, sizeof(mbox)); 2958 mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target; 2959 if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox, &sc->sc_ld_details[i], size, 2960 SCSI_DATA_IN)) 2961 goto done; 2962 2963 d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span * 2964 sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth; 2965 } 2966 sc->sc_no_pd = d; 2967 2968 rv = 0; 2969 done: 2970 return (rv); 2971 } 2972 2973 int 2974 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi) 2975 { 2976 int rv = EINVAL; 2977 struct mfi_conf *cfg = NULL; 2978 2979 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc)); 2980 2981 if (mfii_bio_getitall(sc)) { 2982 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n", 2983 DEVNAME(sc)); 2984 goto done; 2985 } 2986 2987 /* count unused disks as volumes */ 2988 if (sc->sc_cfg == NULL) 2989 goto done; 2990 cfg = sc->sc_cfg; 2991 2992 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present; 2993 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs; 2994 #if notyet 2995 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs + 2996 (bi->bi_nodisk - sc->sc_no_pd); 2997 #endif 2998 /* tell bio who we are */ 2999 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev)); 3000 3001 rv = 0; 3002 done: 3003 return (rv); 3004 } 3005 3006 int 3007 mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv) 3008 { 3009 int i, per, target, rv = EINVAL; 3010 struct scsi_link *link; 3011 struct device *dev; 3012 3013 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n", 3014 DEVNAME(sc), bv->bv_volid); 3015 3016 /* we really could skip and expect that inq took care of it */ 3017 if (mfii_bio_getitall(sc)) { 3018 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n", 3019 DEVNAME(sc)); 3020 goto done; 3021 } 3022 3023 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) { 3024 /* go do hotspares & unused disks */ 3025 rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv); 3026 goto done; 3027 } 3028 3029 i = bv->bv_volid; 3030 target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target; 3031 link = scsi_get_link(sc->sc_scsibus, target, 0); 3032 if (link == NULL) { 3033 strlcpy(bv->bv_dev, "cache", sizeof(bv->bv_dev)); 3034 } else { 3035 dev = link->device_softc; 3036 if (dev == NULL) 3037 goto done; 3038 3039 strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev)); 3040 } 3041 3042 switch(sc->sc_ld_list.mll_list[i].mll_state) { 3043 case MFI_LD_OFFLINE: 3044 bv->bv_status = BIOC_SVOFFLINE; 3045 break; 3046 3047 case MFI_LD_PART_DEGRADED: 3048 case MFI_LD_DEGRADED: 3049 bv->bv_status = BIOC_SVDEGRADED; 3050 break; 3051 3052 case MFI_LD_ONLINE: 3053 bv->bv_status = BIOC_SVONLINE; 3054 break; 3055 3056 default: 3057 bv->bv_status = BIOC_SVINVALID; 3058 DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n", 3059 DEVNAME(sc), 3060 sc->sc_ld_list.mll_list[i].mll_state); 3061 } 3062 3063 /* additional status can modify MFI status */ 3064 switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) { 3065 case MFI_LD_PROG_CC: 3066 bv->bv_status = BIOC_SVSCRUB; 3067 per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress; 3068 bv->bv_percent = (per * 100) / 0xffff; 3069 bv->bv_seconds = 3070 sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds; 3071 break; 3072 3073 case MFI_LD_PROG_BGI: 3074 bv->bv_status = BIOC_SVSCRUB; 3075 per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress; 3076 bv->bv_percent = (per * 100) / 0xffff; 3077 bv->bv_seconds = 3078 sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds; 3079 break; 3080 3081 case MFI_LD_PROG_FGI: 3082 case MFI_LD_PROG_RECONSTRUCT: 3083 /* nothing yet */ 3084 break; 3085 } 3086 3087 if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01) 3088 bv->bv_cache = BIOC_CVWRITEBACK; 3089 else 3090 bv->bv_cache = BIOC_CVWRITETHROUGH; 3091 3092 /* 3093 * The RAID levels are determined per the SNIA DDF spec, this is only 3094 * a subset that is valid for the MFI controller. 3095 */ 3096 bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid; 3097 if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1) 3098 bv->bv_level *= 10; 3099 3100 bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span * 3101 sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth; 3102 3103 bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */ 3104 3105 rv = 0; 3106 done: 3107 return (rv); 3108 } 3109 3110 int 3111 mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd) 3112 { 3113 struct mfi_conf *cfg; 3114 struct mfi_array *ar; 3115 struct mfi_ld_cfg *ld; 3116 struct mfi_pd_details *pd; 3117 struct mfi_pd_list *pl; 3118 struct mfi_pd_progress *mfp; 3119 struct mfi_progress *mp; 3120 struct scsi_inquiry_data *inqbuf; 3121 char vend[8+16+4+1], *vendp; 3122 int i, rv = EINVAL; 3123 int arr, vol, disk, span; 3124 union mfi_mbox mbox; 3125 3126 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n", 3127 DEVNAME(sc), bd->bd_diskid); 3128 3129 /* we really could skip and expect that inq took care of it */ 3130 if (mfii_bio_getitall(sc)) { 3131 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n", 3132 DEVNAME(sc)); 3133 return (rv); 3134 } 3135 cfg = sc->sc_cfg; 3136 3137 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK); 3138 pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK); 3139 3140 ar = cfg->mfc_array; 3141 vol = bd->bd_volid; 3142 if (vol >= cfg->mfc_no_ld) { 3143 /* do hotspares */ 3144 rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd); 3145 goto freeme; 3146 } 3147 3148 /* calculate offset to ld structure */ 3149 ld = (struct mfi_ld_cfg *)( 3150 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) + 3151 cfg->mfc_array_size * cfg->mfc_no_array); 3152 3153 /* use span 0 only when raid group is not spanned */ 3154 if (ld[vol].mlc_parm.mpa_span_depth > 1) 3155 span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span; 3156 else 3157 span = 0; 3158 arr = ld[vol].mlc_span[span].mls_index; 3159 3160 /* offset disk into pd list */ 3161 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span; 3162 3163 if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) { 3164 /* disk is missing but succeed command */ 3165 bd->bd_status = BIOC_SDFAILED; 3166 rv = 0; 3167 3168 /* try to find an unused disk for the target to rebuild */ 3169 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl), 3170 SCSI_DATA_IN)) 3171 goto freeme; 3172 3173 for (i = 0; i < pl->mpl_no_pd; i++) { 3174 if (pl->mpl_address[i].mpa_scsi_type != 0) 3175 continue; 3176 3177 memset(&mbox, 0, sizeof(mbox)); 3178 mbox.s[0] = pl->mpl_address[i].mpa_pd_id; 3179 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), 3180 SCSI_DATA_IN)) 3181 continue; 3182 3183 if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD || 3184 pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) 3185 break; 3186 } 3187 3188 if (i == pl->mpl_no_pd) 3189 goto freeme; 3190 } else { 3191 memset(&mbox, 0, sizeof(mbox)); 3192 mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id; 3193 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), 3194 SCSI_DATA_IN)) { 3195 bd->bd_status = BIOC_SDINVALID; 3196 goto freeme; 3197 } 3198 } 3199 3200 /* get the remaining fields */ 3201 bd->bd_channel = pd->mpd_enc_idx; 3202 bd->bd_target = pd->mpd_enc_slot; 3203 3204 /* get status */ 3205 switch (pd->mpd_fw_state){ 3206 case MFI_PD_UNCONFIG_GOOD: 3207 case MFI_PD_UNCONFIG_BAD: 3208 bd->bd_status = BIOC_SDUNUSED; 3209 break; 3210 3211 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */ 3212 bd->bd_status = BIOC_SDHOTSPARE; 3213 break; 3214 3215 case MFI_PD_OFFLINE: 3216 bd->bd_status = BIOC_SDOFFLINE; 3217 break; 3218 3219 case MFI_PD_FAILED: 3220 bd->bd_status = BIOC_SDFAILED; 3221 break; 3222 3223 case MFI_PD_REBUILD: 3224 bd->bd_status = BIOC_SDREBUILD; 3225 break; 3226 3227 case MFI_PD_ONLINE: 3228 bd->bd_status = BIOC_SDONLINE; 3229 break; 3230 3231 case MFI_PD_COPYBACK: 3232 case MFI_PD_SYSTEM: 3233 default: 3234 bd->bd_status = BIOC_SDINVALID; 3235 break; 3236 } 3237 3238 bd->bd_size = pd->mpd_size * 512; /* bytes per block */ 3239 3240 inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data; 3241 vendp = inqbuf->vendor; 3242 memcpy(vend, vendp, sizeof vend - 1); 3243 vend[sizeof vend - 1] = '\0'; 3244 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor)); 3245 3246 /* XXX find a way to retrieve serial nr from drive */ 3247 /* XXX find a way to get bd_procdev */ 3248 3249 mfp = &pd->mpd_progress; 3250 if (mfp->mfp_in_prog & MFI_PD_PROG_PR) { 3251 mp = &mfp->mfp_patrol_read; 3252 bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff; 3253 bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds; 3254 } 3255 3256 rv = 0; 3257 freeme: 3258 free(pd, M_DEVBUF, sizeof *pd); 3259 free(pl, M_DEVBUF, sizeof *pl); 3260 3261 return (rv); 3262 } 3263 3264 int 3265 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba) 3266 { 3267 uint32_t opc, flags = 0; 3268 int rv = 0; 3269 int8_t ret; 3270 3271 switch(ba->ba_opcode) { 3272 case BIOC_SADISABLE: 3273 opc = MR_DCMD_SPEAKER_DISABLE; 3274 break; 3275 3276 case BIOC_SAENABLE: 3277 opc = MR_DCMD_SPEAKER_ENABLE; 3278 break; 3279 3280 case BIOC_SASILENCE: 3281 opc = MR_DCMD_SPEAKER_SILENCE; 3282 break; 3283 3284 case BIOC_GASTATUS: 3285 opc = MR_DCMD_SPEAKER_GET; 3286 flags = SCSI_DATA_IN; 3287 break; 3288 3289 case BIOC_SATEST: 3290 opc = MR_DCMD_SPEAKER_TEST; 3291 break; 3292 3293 default: 3294 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid " 3295 "opcode %x\n", DEVNAME(sc), ba->ba_opcode); 3296 return (EINVAL); 3297 } 3298 3299 if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), flags)) 3300 rv = EINVAL; 3301 else 3302 if (ba->ba_opcode == BIOC_GASTATUS) 3303 ba->ba_status = ret; 3304 else 3305 ba->ba_status = 0; 3306 3307 return (rv); 3308 } 3309 3310 int 3311 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb) 3312 { 3313 int i, found, rv = EINVAL; 3314 union mfi_mbox mbox; 3315 uint32_t cmd; 3316 struct mfi_pd_list *pd; 3317 3318 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc), 3319 bb->bb_status); 3320 3321 /* channel 0 means not in an enclosure so can't be blinked */ 3322 if (bb->bb_channel == 0) 3323 return (EINVAL); 3324 3325 pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK); 3326 3327 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd), SCSI_DATA_IN)) 3328 goto done; 3329 3330 for (i = 0, found = 0; i < pd->mpl_no_pd; i++) 3331 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index && 3332 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) { 3333 found = 1; 3334 break; 3335 } 3336 3337 if (!found) 3338 goto done; 3339 3340 memset(&mbox, 0, sizeof(mbox)); 3341 mbox.s[0] = pd->mpl_address[i].mpa_pd_id; 3342 3343 switch (bb->bb_status) { 3344 case BIOC_SBUNBLINK: 3345 cmd = MR_DCMD_PD_UNBLINK; 3346 break; 3347 3348 case BIOC_SBBLINK: 3349 cmd = MR_DCMD_PD_BLINK; 3350 break; 3351 3352 case BIOC_SBALARM: 3353 default: 3354 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid " 3355 "opcode %x\n", DEVNAME(sc), bb->bb_status); 3356 goto done; 3357 } 3358 3359 3360 if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, 0) == 0) 3361 rv = 0; 3362 3363 done: 3364 free(pd, M_DEVBUF, sizeof *pd); 3365 return (rv); 3366 } 3367 3368 static int 3369 mfii_makegood(struct mfii_softc *sc, uint16_t pd_id) 3370 { 3371 struct mfii_foreign_scan_info *fsi; 3372 struct mfi_pd_details *pd; 3373 union mfi_mbox mbox; 3374 int rv; 3375 3376 fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK); 3377 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK); 3378 3379 memset(&mbox, 0, sizeof mbox); 3380 mbox.s[0] = pd_id; 3381 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN); 3382 if (rv != 0) 3383 goto done; 3384 3385 if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) { 3386 mbox.s[0] = pd_id; 3387 mbox.s[1] = pd->mpd_pd.mfp_seq; 3388 mbox.b[4] = MFI_PD_UNCONFIG_GOOD; 3389 rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0); 3390 if (rv != 0) 3391 goto done; 3392 } 3393 3394 memset(&mbox, 0, sizeof mbox); 3395 mbox.s[0] = pd_id; 3396 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN); 3397 if (rv != 0) 3398 goto done; 3399 3400 if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) { 3401 rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL, fsi, sizeof(*fsi), 3402 SCSI_DATA_IN); 3403 if (rv != 0) 3404 goto done; 3405 3406 if (fsi->count > 0) { 3407 rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL, NULL, 0, 0); 3408 if (rv != 0) 3409 goto done; 3410 } 3411 } 3412 3413 memset(&mbox, 0, sizeof mbox); 3414 mbox.s[0] = pd_id; 3415 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN); 3416 if (rv != 0) 3417 goto done; 3418 3419 if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD || 3420 pd->mpd_ddf_state & MFI_DDF_FOREIGN) 3421 rv = ENXIO; 3422 3423 done: 3424 free(fsi, M_DEVBUF, sizeof *fsi); 3425 free(pd, M_DEVBUF, sizeof *pd); 3426 3427 return (rv); 3428 } 3429 3430 static int 3431 mfii_makespare(struct mfii_softc *sc, uint16_t pd_id) 3432 { 3433 struct mfi_hotspare *hs; 3434 struct mfi_pd_details *pd; 3435 union mfi_mbox mbox; 3436 size_t size; 3437 int rv = EINVAL; 3438 3439 /* we really could skip and expect that inq took care of it */ 3440 if (mfii_bio_getitall(sc)) { 3441 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n", 3442 DEVNAME(sc)); 3443 return (rv); 3444 } 3445 size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array; 3446 3447 hs = malloc(size, M_DEVBUF, M_WAITOK); 3448 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK); 3449 3450 memset(&mbox, 0, sizeof mbox); 3451 mbox.s[0] = pd_id; 3452 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), 3453 SCSI_DATA_IN); 3454 if (rv != 0) 3455 goto done; 3456 3457 memset(hs, 0, size); 3458 hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id; 3459 hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq; 3460 rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size, SCSI_DATA_OUT); 3461 3462 done: 3463 free(hs, M_DEVBUF, size); 3464 free(pd, M_DEVBUF, sizeof *pd); 3465 3466 return (rv); 3467 } 3468 3469 int 3470 mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs) 3471 { 3472 struct mfi_pd_details *pd; 3473 struct mfi_pd_list *pl; 3474 int i, found, rv = EINVAL; 3475 union mfi_mbox mbox; 3476 3477 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc), 3478 bs->bs_status); 3479 3480 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK); 3481 pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK); 3482 3483 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl), SCSI_DATA_IN)) 3484 goto done; 3485 3486 for (i = 0, found = 0; i < pl->mpl_no_pd; i++) 3487 if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index && 3488 bs->bs_target == pl->mpl_address[i].mpa_enc_slot) { 3489 found = 1; 3490 break; 3491 } 3492 3493 if (!found) 3494 goto done; 3495 3496 memset(&mbox, 0, sizeof(mbox)); 3497 mbox.s[0] = pl->mpl_address[i].mpa_pd_id; 3498 3499 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN)) 3500 goto done; 3501 3502 mbox.s[0] = pl->mpl_address[i].mpa_pd_id; 3503 mbox.s[1] = pd->mpd_pd.mfp_seq; 3504 3505 switch (bs->bs_status) { 3506 case BIOC_SSONLINE: 3507 mbox.b[4] = MFI_PD_ONLINE; 3508 break; 3509 3510 case BIOC_SSOFFLINE: 3511 mbox.b[4] = MFI_PD_OFFLINE; 3512 break; 3513 3514 case BIOC_SSHOTSPARE: 3515 mbox.b[4] = MFI_PD_HOTSPARE; 3516 break; 3517 3518 case BIOC_SSREBUILD: 3519 if (pd->mpd_fw_state != MFI_PD_OFFLINE) { 3520 if ((rv = mfii_makegood(sc, 3521 pl->mpl_address[i].mpa_pd_id))) 3522 goto done; 3523 3524 if ((rv = mfii_makespare(sc, 3525 pl->mpl_address[i].mpa_pd_id))) 3526 goto done; 3527 3528 memset(&mbox, 0, sizeof(mbox)); 3529 mbox.s[0] = pl->mpl_address[i].mpa_pd_id; 3530 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), 3531 SCSI_DATA_IN); 3532 if (rv != 0) 3533 goto done; 3534 3535 /* rebuilding might be started by mfii_makespare() */ 3536 if (pd->mpd_fw_state == MFI_PD_REBUILD) { 3537 rv = 0; 3538 goto done; 3539 } 3540 3541 mbox.s[0] = pl->mpl_address[i].mpa_pd_id; 3542 mbox.s[1] = pd->mpd_pd.mfp_seq; 3543 } 3544 mbox.b[4] = MFI_PD_REBUILD; 3545 break; 3546 3547 default: 3548 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid " 3549 "opcode %x\n", DEVNAME(sc), bs->bs_status); 3550 goto done; 3551 } 3552 3553 3554 rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0); 3555 done: 3556 free(pd, M_DEVBUF, sizeof *pd); 3557 free(pl, M_DEVBUF, sizeof *pl); 3558 return (rv); 3559 } 3560 3561 int 3562 mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp) 3563 { 3564 uint32_t opc; 3565 int rv = 0; 3566 struct mfi_pr_properties prop; 3567 struct mfi_pr_status status; 3568 uint32_t time, exec_freq; 3569 3570 switch (bp->bp_opcode) { 3571 case BIOC_SPSTOP: 3572 case BIOC_SPSTART: 3573 if (bp->bp_opcode == BIOC_SPSTART) 3574 opc = MR_DCMD_PR_START; 3575 else 3576 opc = MR_DCMD_PR_STOP; 3577 if (mfii_mgmt(sc, opc, NULL, NULL, 0, SCSI_DATA_IN)) 3578 return (EINVAL); 3579 break; 3580 3581 case BIOC_SPMANUAL: 3582 case BIOC_SPDISABLE: 3583 case BIOC_SPAUTO: 3584 /* Get device's time. */ 3585 opc = MR_DCMD_TIME_SECS_GET; 3586 if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN)) 3587 return (EINVAL); 3588 3589 opc = MR_DCMD_PR_GET_PROPERTIES; 3590 if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN)) 3591 return (EINVAL); 3592 3593 switch (bp->bp_opcode) { 3594 case BIOC_SPMANUAL: 3595 prop.op_mode = MFI_PR_OPMODE_MANUAL; 3596 break; 3597 case BIOC_SPDISABLE: 3598 prop.op_mode = MFI_PR_OPMODE_DISABLED; 3599 break; 3600 case BIOC_SPAUTO: 3601 if (bp->bp_autoival != 0) { 3602 if (bp->bp_autoival == -1) 3603 /* continuously */ 3604 exec_freq = 0xffffffffU; 3605 else if (bp->bp_autoival > 0) 3606 exec_freq = bp->bp_autoival; 3607 else 3608 return (EINVAL); 3609 prop.exec_freq = exec_freq; 3610 } 3611 if (bp->bp_autonext != 0) { 3612 if (bp->bp_autonext < 0) 3613 return (EINVAL); 3614 else 3615 prop.next_exec = time + bp->bp_autonext; 3616 } 3617 prop.op_mode = MFI_PR_OPMODE_AUTO; 3618 break; 3619 } 3620 3621 opc = MR_DCMD_PR_SET_PROPERTIES; 3622 if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_OUT)) 3623 return (EINVAL); 3624 3625 break; 3626 3627 case BIOC_GPSTATUS: 3628 opc = MR_DCMD_PR_GET_PROPERTIES; 3629 if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN)) 3630 return (EINVAL); 3631 3632 opc = MR_DCMD_PR_GET_STATUS; 3633 if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status), SCSI_DATA_IN)) 3634 return (EINVAL); 3635 3636 /* Get device's time. */ 3637 opc = MR_DCMD_TIME_SECS_GET; 3638 if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN)) 3639 return (EINVAL); 3640 3641 switch (prop.op_mode) { 3642 case MFI_PR_OPMODE_AUTO: 3643 bp->bp_mode = BIOC_SPMAUTO; 3644 bp->bp_autoival = prop.exec_freq; 3645 bp->bp_autonext = prop.next_exec; 3646 bp->bp_autonow = time; 3647 break; 3648 case MFI_PR_OPMODE_MANUAL: 3649 bp->bp_mode = BIOC_SPMMANUAL; 3650 break; 3651 case MFI_PR_OPMODE_DISABLED: 3652 bp->bp_mode = BIOC_SPMDISABLED; 3653 break; 3654 default: 3655 printf("%s: unknown patrol mode %d\n", 3656 DEVNAME(sc), prop.op_mode); 3657 break; 3658 } 3659 3660 switch (status.state) { 3661 case MFI_PR_STATE_STOPPED: 3662 bp->bp_status = BIOC_SPSSTOPPED; 3663 break; 3664 case MFI_PR_STATE_READY: 3665 bp->bp_status = BIOC_SPSREADY; 3666 break; 3667 case MFI_PR_STATE_ACTIVE: 3668 bp->bp_status = BIOC_SPSACTIVE; 3669 break; 3670 case MFI_PR_STATE_ABORTED: 3671 bp->bp_status = BIOC_SPSABORTED; 3672 break; 3673 default: 3674 printf("%s: unknown patrol state %d\n", 3675 DEVNAME(sc), status.state); 3676 break; 3677 } 3678 3679 break; 3680 3681 default: 3682 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid " 3683 "opcode %x\n", DEVNAME(sc), bp->bp_opcode); 3684 return (EINVAL); 3685 } 3686 3687 return (rv); 3688 } 3689 3690 int 3691 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs) 3692 { 3693 struct mfi_conf *cfg; 3694 struct mfi_hotspare *hs; 3695 struct mfi_pd_details *pd; 3696 struct bioc_disk *sdhs; 3697 struct bioc_vol *vdhs; 3698 struct scsi_inquiry_data *inqbuf; 3699 char vend[8+16+4+1], *vendp; 3700 int i, rv = EINVAL; 3701 uint32_t size; 3702 union mfi_mbox mbox; 3703 3704 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid); 3705 3706 if (!bio_hs) 3707 return (EINVAL); 3708 3709 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK); 3710 3711 /* send single element command to retrieve size for full structure */ 3712 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK); 3713 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg), SCSI_DATA_IN)) 3714 goto freeme; 3715 3716 size = cfg->mfc_size; 3717 free(cfg, M_DEVBUF, sizeof *cfg); 3718 3719 /* memory for read config */ 3720 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO); 3721 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN)) 3722 goto freeme; 3723 3724 /* calculate offset to hs structure */ 3725 hs = (struct mfi_hotspare *)( 3726 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) + 3727 cfg->mfc_array_size * cfg->mfc_no_array + 3728 cfg->mfc_ld_size * cfg->mfc_no_ld); 3729 3730 if (volid < cfg->mfc_no_ld) 3731 goto freeme; /* not a hotspare */ 3732 3733 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs)) 3734 goto freeme; /* not a hotspare */ 3735 3736 /* offset into hotspare structure */ 3737 i = volid - cfg->mfc_no_ld; 3738 3739 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d " 3740 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld, 3741 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id); 3742 3743 /* get pd fields */ 3744 memset(&mbox, 0, sizeof(mbox)); 3745 mbox.s[0] = hs[i].mhs_pd.mfp_id; 3746 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), 3747 SCSI_DATA_IN)) { 3748 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n", 3749 DEVNAME(sc)); 3750 goto freeme; 3751 } 3752 3753 switch (type) { 3754 case MFI_MGMT_VD: 3755 vdhs = bio_hs; 3756 vdhs->bv_status = BIOC_SVONLINE; 3757 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */ 3758 vdhs->bv_level = -1; /* hotspare */ 3759 vdhs->bv_nodisk = 1; 3760 break; 3761 3762 case MFI_MGMT_SD: 3763 sdhs = bio_hs; 3764 sdhs->bd_status = BIOC_SDHOTSPARE; 3765 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */ 3766 sdhs->bd_channel = pd->mpd_enc_idx; 3767 sdhs->bd_target = pd->mpd_enc_slot; 3768 inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data; 3769 vendp = inqbuf->vendor; 3770 memcpy(vend, vendp, sizeof vend - 1); 3771 vend[sizeof vend - 1] = '\0'; 3772 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor)); 3773 break; 3774 3775 default: 3776 goto freeme; 3777 } 3778 3779 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc)); 3780 rv = 0; 3781 freeme: 3782 free(pd, M_DEVBUF, sizeof *pd); 3783 free(cfg, M_DEVBUF, 0); 3784 3785 return (rv); 3786 } 3787 3788 #ifndef SMALL_KERNEL 3789 3790 #define MFI_BBU_SENSORS 4 3791 3792 void 3793 mfii_bbu(struct mfii_softc *sc) 3794 { 3795 struct mfi_bbu_status bbu; 3796 u_int32_t status; 3797 u_int32_t mask; 3798 u_int32_t soh_bad; 3799 int i; 3800 3801 if (mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu, 3802 sizeof(bbu), SCSI_DATA_IN) != 0) { 3803 for (i = 0; i < MFI_BBU_SENSORS; i++) { 3804 sc->sc_bbu[i].value = 0; 3805 sc->sc_bbu[i].status = SENSOR_S_UNKNOWN; 3806 } 3807 for (i = 0; i < nitems(mfi_bbu_indicators); i++) { 3808 sc->sc_bbu_status[i].value = 0; 3809 sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN; 3810 } 3811 return; 3812 } 3813 3814 switch (bbu.battery_type) { 3815 case MFI_BBU_TYPE_IBBU: 3816 mask = MFI_BBU_STATE_BAD_IBBU; 3817 soh_bad = 0; 3818 break; 3819 case MFI_BBU_TYPE_BBU: 3820 mask = MFI_BBU_STATE_BAD_BBU; 3821 soh_bad = (bbu.detail.bbu.is_SOH_good == 0); 3822 break; 3823 3824 case MFI_BBU_TYPE_NONE: 3825 default: 3826 sc->sc_bbu[0].value = 0; 3827 sc->sc_bbu[0].status = SENSOR_S_CRIT; 3828 for (i = 1; i < MFI_BBU_SENSORS; i++) { 3829 sc->sc_bbu[i].value = 0; 3830 sc->sc_bbu[i].status = SENSOR_S_UNKNOWN; 3831 } 3832 for (i = 0; i < nitems(mfi_bbu_indicators); i++) { 3833 sc->sc_bbu_status[i].value = 0; 3834 sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN; 3835 } 3836 return; 3837 } 3838 3839 status = letoh32(bbu.fw_status); 3840 3841 sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1; 3842 sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT : 3843 SENSOR_S_OK; 3844 3845 sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000; 3846 sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000; 3847 sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000; 3848 for (i = 1; i < MFI_BBU_SENSORS; i++) 3849 sc->sc_bbu[i].status = SENSOR_S_UNSPEC; 3850 3851 for (i = 0; i < nitems(mfi_bbu_indicators); i++) { 3852 sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0; 3853 sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC; 3854 } 3855 } 3856 3857 void 3858 mfii_refresh_ld_sensor(struct mfii_softc *sc, int ld) 3859 { 3860 struct ksensor *sensor; 3861 int target; 3862 3863 target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target; 3864 sensor = &sc->sc_sensors[target]; 3865 3866 switch(sc->sc_ld_list.mll_list[ld].mll_state) { 3867 case MFI_LD_OFFLINE: 3868 sensor->value = SENSOR_DRIVE_FAIL; 3869 sensor->status = SENSOR_S_CRIT; 3870 break; 3871 3872 case MFI_LD_PART_DEGRADED: 3873 case MFI_LD_DEGRADED: 3874 sensor->value = SENSOR_DRIVE_PFAIL; 3875 sensor->status = SENSOR_S_WARN; 3876 break; 3877 3878 case MFI_LD_ONLINE: 3879 sensor->value = SENSOR_DRIVE_ONLINE; 3880 sensor->status = SENSOR_S_OK; 3881 break; 3882 3883 default: 3884 sensor->value = 0; /* unknown */ 3885 sensor->status = SENSOR_S_UNKNOWN; 3886 break; 3887 } 3888 } 3889 3890 void 3891 mfii_init_ld_sensor(struct mfii_softc *sc, int ld) 3892 { 3893 struct device *dev; 3894 struct scsi_link *link; 3895 struct ksensor *sensor; 3896 int target; 3897 3898 target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target; 3899 sensor = &sc->sc_sensors[target]; 3900 3901 link = scsi_get_link(sc->sc_scsibus, target, 0); 3902 if (link == NULL) { 3903 strlcpy(sensor->desc, "cache", sizeof(sensor->desc)); 3904 } else { 3905 dev = link->device_softc; 3906 if (dev != NULL) 3907 strlcpy(sensor->desc, dev->dv_xname, 3908 sizeof(sensor->desc)); 3909 } 3910 sensor->type = SENSOR_DRIVE; 3911 mfii_refresh_ld_sensor(sc, ld); 3912 } 3913 3914 int 3915 mfii_create_sensors(struct mfii_softc *sc) 3916 { 3917 int i, target; 3918 3919 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc), 3920 sizeof(sc->sc_sensordev.xname)); 3921 3922 if (ISSET(letoh32(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) { 3923 sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu), 3924 M_DEVBUF, M_WAITOK | M_ZERO); 3925 3926 sc->sc_bbu[0].type = SENSOR_INDICATOR; 3927 sc->sc_bbu[0].status = SENSOR_S_UNKNOWN; 3928 strlcpy(sc->sc_bbu[0].desc, "bbu ok", 3929 sizeof(sc->sc_bbu[0].desc)); 3930 sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]); 3931 3932 sc->sc_bbu[1].type = SENSOR_VOLTS_DC; 3933 sc->sc_bbu[1].status = SENSOR_S_UNSPEC; 3934 sc->sc_bbu[2].type = SENSOR_AMPS; 3935 sc->sc_bbu[2].status = SENSOR_S_UNSPEC; 3936 sc->sc_bbu[3].type = SENSOR_TEMP; 3937 sc->sc_bbu[3].status = SENSOR_S_UNSPEC; 3938 for (i = 1; i < MFI_BBU_SENSORS; i++) { 3939 strlcpy(sc->sc_bbu[i].desc, "bbu", 3940 sizeof(sc->sc_bbu[i].desc)); 3941 sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]); 3942 } 3943 3944 sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) * 3945 sizeof(mfi_bbu_indicators), M_DEVBUF, M_WAITOK | M_ZERO); 3946 3947 for (i = 0; i < nitems(mfi_bbu_indicators); i++) { 3948 sc->sc_bbu_status[i].type = SENSOR_INDICATOR; 3949 sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC; 3950 strlcpy(sc->sc_bbu_status[i].desc, 3951 mfi_bbu_indicators[i], 3952 sizeof(sc->sc_bbu_status[i].desc)); 3953 3954 sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]); 3955 } 3956 } 3957 3958 sc->sc_sensors = mallocarray(MFI_MAX_LD, sizeof(struct ksensor), 3959 M_DEVBUF, M_NOWAIT | M_ZERO); 3960 if (sc->sc_sensors == NULL) 3961 return (1); 3962 3963 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) { 3964 mfii_init_ld_sensor(sc, i); 3965 target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target; 3966 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[target]); 3967 } 3968 3969 if (sensor_task_register(sc, mfii_refresh_sensors, 10) == NULL) 3970 goto bad; 3971 3972 sensordev_install(&sc->sc_sensordev); 3973 3974 return (0); 3975 3976 bad: 3977 free(sc->sc_sensors, M_DEVBUF, 3978 MFI_MAX_LD * sizeof(struct ksensor)); 3979 3980 return (1); 3981 } 3982 3983 void 3984 mfii_refresh_sensors(void *arg) 3985 { 3986 struct mfii_softc *sc = arg; 3987 int i; 3988 3989 rw_enter_write(&sc->sc_lock); 3990 if (sc->sc_bbu != NULL) 3991 mfii_bbu(sc); 3992 3993 mfii_bio_getitall(sc); 3994 rw_exit_write(&sc->sc_lock); 3995 3996 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) 3997 mfii_refresh_ld_sensor(sc, i); 3998 } 3999 #endif /* SMALL_KERNEL */ 4000 #endif /* NBIO > 0 */ 4001