1 /* $NetBSD: mfii.c,v 1.28 2022/09/29 10:27:02 bouyer Exp $ */
2 /* $OpenBSD: mfii.c,v 1.58 2018/08/14 05:22:21 jmatthew Exp $ */
3
4 /*
5 * Copyright (c) 2018 Manuel Bouyer <Manuel.Bouyer@lip6.fr>
6 * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: mfii.c,v 1.28 2022/09/29 10:27:02 bouyer Exp $");
23
24 #include "bio.h"
25
26 #include <sys/atomic.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/ioctl.h>
31 #include <sys/device.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/cpu.h>
35 #include <sys/conf.h>
36 #include <sys/kauth.h>
37 #include <sys/workqueue.h>
38 #include <sys/malloc.h>
39
40 #include <uvm/uvm_param.h>
41
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcivar.h>
44
45 #include <sys/bus.h>
46
47 #include <dev/sysmon/sysmonvar.h>
48 #include <sys/envsys.h>
49
50 #include <dev/scsipi/scsipi_all.h>
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_spc.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsi_disk.h>
55 #include <dev/scsipi/scsiconf.h>
56
57 #if NBIO > 0
58 #include <dev/biovar.h>
59 #endif /* NBIO > 0 */
60
61 #include <dev/ic/mfireg.h>
62 #include <dev/pci/mpiireg.h>
63
64 #define MFII_BAR 0x14
65 #define MFII_BAR_35 0x10
66 #define MFII_PCI_MEMSIZE 0x2000 /* 8k */
67
68 #define MFII_OSTS_INTR_VALID 0x00000009
69 #define MFII_RPI 0x6c /* reply post host index */
70 #define MFII_OSP2 0xb4 /* outbound scratch pad 2 */
71 #define MFII_OSP3 0xb8 /* outbound scratch pad 3 */
72
73 #define MFII_REQ_TYPE_SCSI MPII_REQ_DESCR_SCSI_IO
74 #define MFII_REQ_TYPE_LDIO (0x7 << 1)
75 #define MFII_REQ_TYPE_MFA (0x1 << 1)
76 #define MFII_REQ_TYPE_NO_LOCK (0x2 << 1)
77 #define MFII_REQ_TYPE_HI_PRI (0x6 << 1)
78
79 #define MFII_REQ_MFA(_a) htole64((_a) | MFII_REQ_TYPE_MFA)
80
81 #define MFII_FUNCTION_PASSTHRU_IO (0xf0)
82 #define MFII_FUNCTION_LDIO_REQUEST (0xf1)
83
84 #define MFII_MAX_CHAIN_UNIT 0x00400000
85 #define MFII_MAX_CHAIN_MASK 0x000003E0
86 #define MFII_MAX_CHAIN_SHIFT 5
87
88 #define MFII_256K_IO 128
89 #define MFII_1MB_IO (MFII_256K_IO * 4)
90
91 #define MFII_CHAIN_FRAME_MIN 1024
92
/*
 * Request descriptor posted to the controller to start a command.
 * The smid selects the MPII request frame for the command (see the
 * dcmd proxy comment further down); layout is fixed by the hardware
 * interface, hence __packed.
 */
struct mfii_request_descr {
	u_int8_t	flags;
	u_int8_t	msix_index;
	u_int16_t	smid;		/* system message id: request frame index */

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
101
102 #define MFII_RAID_CTX_IO_TYPE_SYSPD (0x1 << 4)
103 #define MFII_RAID_CTX_TYPE_CUDA (0x2 << 4)
104
/*
 * RAID context appended to the MPII SCSI I/O request frame for commands
 * routed through the RAID firmware (cf. the Linux megaraid_sas driver).
 * Layout is dictated by the firmware interface: keep the reserved fields
 * and __packed.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
/* reg_lock_flags values for pre-3.5 (CUDA-style) controllers */
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

/* same field, reinterpreted as routing flags on 3.5-generation IOPs */
#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended status from firmware */
	u_int8_t	status;		/* command completion status */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
135
/*
 * IEEE-style scatter/gather element: 64-bit bus address plus length.
 * sg_flags uses the MFII_SGE_* values defined below to select the
 * address space and to mark chain elements / end of list.
 */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;
143
144 #define MFII_SGE_ADDR_MASK (0x03)
145 #define MFII_SGE_ADDR_SYSTEM (0x00)
146 #define MFII_SGE_ADDR_IOCDDR (0x01)
147 #define MFII_SGE_ADDR_IOCPLB (0x02)
148 #define MFII_SGE_ADDR_IOCPLBNTA (0x03)
149 #define MFII_SGE_END_OF_LIST (0x40)
150 #define MFII_SGE_CHAIN_ELEMENT (0x80)
151
152 #define MFII_REQUEST_SIZE 256
153
154 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
155
156 #define MFII_MAX_ROW 32
157 #define MFII_MAX_ARRAY 128
158
/* one array's row of physical-drive references (part of the LD map) */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;
162
/* per-physical-disk device handle entry of the LD map (see mfii_ld_map) */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;	/* handle currently in use */
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;
169
/*
 * Logical-drive map returned by the MR_DCMD_LD_MAP_GET_INFO dcmd:
 * target-id-to-LD translation, per-array physical-drive maps and
 * per-PD device handles (used by the disabled dev-handle code below).
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
181
/*
 * Task-management (e.g. abort) request/reply pair.  The raw 128-byte
 * arrays pad each union to the full frame size; the mpii_* members give
 * structured access to the same bytes.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
198
/* We currently don't know the full details of the following struct */
struct mfii_foreign_scan_cfg {
	char data[24];
} __packed;

/* result of a foreign-configuration scan reported by the firmware */
struct mfii_foreign_scan_info {
	uint32_t count; /* Number of foreign configs found */
	struct mfii_foreign_scan_cfg cfgs[8];
} __packed;
208
209 #define MFII_MAX_LD_EXT 256
210
/*
 * Extended logical-disk list, describing up to MFII_MAX_LD_EXT (256)
 * volumes.  Retrieved with MR_DCMD_LD_GET_LIST; the extended format is
 * requested via the mailbox when the adapter supports it (see
 * mfii_attach() and sc_max256vd).
 */
struct mfii_ld_list_ext {
	uint32_t		mll_no_ld;	/* number of valid entries */
	uint32_t		mll_res;
	struct {
		struct mfi_ld	mll_ld;
		uint8_t		mll_state;	/* states are the same as MFI_ */
		uint8_t		mll_res2;
		uint8_t		mll_res3;
		uint8_t		mll_res4;
		uint64_t	mll_size;
	}			mll_list[MFII_MAX_LD_EXT];
} __packed;
223
/*
 * A chunk of bus_dma(9) memory shared with the controller: the dma map,
 * its single backing segment, the allocation size and the kernel
 * mapping.  The accessor macros below yield the map, the length, the
 * device (bus) address of segment 0 and the kva respectively.
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	void			*mdm_kva;
};
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
234
235 struct mfii_softc;
236
/* direction of a command's data phase, from the host's point of view */
typedef enum mfii_direction {
	MFII_DATA_NONE = 0,	/* no data transfer */
	MFII_DATA_IN,		/* device -> host */
	MFII_DATA_OUT		/* host -> device */
} mfii_direction_t;
242
/*
 * Command control block.  Tracks one outstanding command and points at
 * this command's slots in the shared DMA areas (MPII request frame, MFI
 * frame, sense buffer and SGL), each pre-computed as a kva / device
 * address / offset triple at mfii_init_ccb() time.
 */
struct mfii_ccb {
	struct mfii_softc	*ccb_sc;
	void			*ccb_request;		/* MPII request frame, kva */
	u_int64_t		ccb_request_dva;	/* same, bus address */
	bus_addr_t		ccb_request_offset;	/* offset into sc_requests */

	void			*ccb_mfi;		/* MFI frame slot in sc_mfi */
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	/* sense slot in sc_sense; also reused as the dcmd frame, see below */
	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;		/* SGL slot in sc_sgl */
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;		/* descriptor used to post */

	bus_dmamap_t		ccb_dmamap64;		/* data map on the 64-bit tag */
	bus_dmamap_t		ccb_dmamap32;		/* data map on the 32-bit tag */
	bool			ccb_dma64;		/* true if the 64-bit map is in use */

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	mfii_direction_t	ccb_direction;

	void			*ccb_cookie;		/* caller context for ccb_done */
	kmutex_t		ccb_mtx;
	kcondvar_t		ccb_cv;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);	/* completion callback */

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;		/* hw command id, see mfii_request_descr */
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;		/* free-list / abort-list linkage */
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
286
/*
 * Parameters that differ between controller generations (Thunderbolt,
 * Fury/Invader, Ventura, Aero...).  One const instance per generation
 * is defined further down and selected by PCI id in mfii_find_iop().
 */
struct mfii_iop {
	int bar;			/* PCI BAR holding the registers */
	int num_sge_loc;
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;		/* request descriptor type for LD I/O */
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;	/* sg_flags marking a chain element */
	u_int8_t sge_flag_eol;		/* sg_flags marking end of list */
	u_int8_t iop_flag;		/* quirk bits, see below */
#define MFII_IOP_QUIRK_REGREAD		0x01
#define MFII_IOP_HAS_32BITDESC_BIT	0x02
};
301
/*
 * Per-controller state: hardware access, DMA resources, the ccb pool
 * and everything needed by the scsipi, bio(4) and envsys glue.
 */
struct mfii_softc {
	device_t		sc_dev;
	struct scsipi_channel	sc_chan;
	struct scsipi_adapter	sc_adapt;

	const struct mfii_iop	*sc_iop;	/* per-generation parameters */
	u_int			sc_iop_flag;
#define MFII_IOP_DESC_32BIT	0x01		/* atomic 32-bit descriptors in use */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;
	bus_dma_tag_t		sc_dmat64;	/* 64-bit tag when available, else alias of sc_dmat */
	bool			sc_64bit_dma;

	void			*sc_ih;		/* interrupt handle; NULL until attach succeeds */

	kmutex_t		sc_ccb_mtx;
	kmutex_t		sc_post_mtx;

	u_int			sc_max_fw_cmds;	/* command count advertised by firmware */
	u_int			sc_max_cmds;	/* commands we actually use (capped, minus 1) */
	u_int			sc_max_sgl;	/* max s/g entries per command */

	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	kmutex_t		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	struct mfii_dmamem	*sc_requests;	/* MPII request frames */
	struct mfii_dmamem	*sc_mfi;	/* MFI command frames */
	struct mfii_dmamem	*sc_sense;	/* sense buffers (double as dcmd frames) */
	struct mfii_dmamem	*sc_sgl;	/* scatter/gather lists */

	struct mfii_ccb		*sc_ccb;	/* ccb array */
	struct mfii_ccb_list	sc_ccb_freeq;

	struct mfii_ccb		*sc_aen_ccb;	/* ccb dedicated to async event notification */
	struct workqueue	*sc_aen_wq;
	struct work		sc_aen_work;

	kmutex_t		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct workqueue	*sc_abort_wq;
	struct work		sc_abort_work;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		bool		ld_present;
		char		ld_dev[16];	/* device name sd? */
		int		ld_target_id;
	}			sc_ld[MFII_MAX_LD_EXT];
	/* SCSI target id -> index into sc_ld_list, -1 when unused */
	int			sc_target_lds[MFII_MAX_LD_EXT];
	bool			sc_max256vd;	/* firmware supports 256 logical disks */

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfii_ld_list_ext	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details;	/* array to all logical disks */
	int			sc_no_pd;	/* used physical disks */
	int			sc_ld_sz;	/* sizeof sc_ld_details */

	/* mgmt lock */
	kmutex_t		sc_lock;
	bool			sc_running;

	/* sensors */
	struct sysmon_envsys	*sc_sme;
	envsys_data_t		*sc_sensors;
	bool			sc_bbuok;

	device_t		sc_child;	/* attached scsibus, if any */
};
382
383 // #define MFII_DEBUG
384 #ifdef MFII_DEBUG
385 #define DPRINTF(x...) do { if (mfii_debug) printf(x); } while(0)
386 #define DNPRINTF(n,x...) do { if (mfii_debug & n) printf(x); } while(0)
387 #define MFII_D_CMD 0x0001
388 #define MFII_D_INTR 0x0002
389 #define MFII_D_MISC 0x0004
390 #define MFII_D_DMA 0x0008
391 #define MFII_D_IOCTL 0x0010
392 #define MFII_D_RW 0x0020
393 #define MFII_D_MEM 0x0040
394 #define MFII_D_CCB 0x0080
395 uint32_t mfii_debug = 0
396 /* | MFII_D_CMD */
397 /* | MFII_D_INTR */
398 | MFII_D_MISC
399 /* | MFII_D_DMA */
400 /* | MFII_D_IOCTL */
401 /* | MFII_D_RW */
402 /* | MFII_D_MEM */
403 /* | MFII_D_CCB */
404 ;
405 #else
406 #define DPRINTF(x...)
407 #define DNPRINTF(n,x...)
408 #endif
409
410 static int mfii_match(device_t, cfdata_t, void *);
411 static void mfii_attach(device_t, device_t, void *);
412 static int mfii_detach(device_t, int);
413 static int mfii_rescan(device_t, const char *, const int *);
414 static void mfii_childdetached(device_t, device_t);
415 static bool mfii_suspend(device_t, const pmf_qual_t *);
416 static bool mfii_resume(device_t, const pmf_qual_t *);
417 static bool mfii_shutdown(device_t, int);
418
419
420 CFATTACH_DECL3_NEW(mfii, sizeof(struct mfii_softc),
421 mfii_match, mfii_attach, mfii_detach, NULL, mfii_rescan,
422 mfii_childdetached, DVF_DETACH_SHUTDOWN);
423
424 static void mfii_scsipi_request(struct scsipi_channel *,
425 scsipi_adapter_req_t, void *);
426 static void mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
427
428 #define DEVNAME(_sc) (device_xname((_sc)->sc_dev))
429
430 static u_int32_t mfii_read(struct mfii_softc *, bus_size_t);
431 static void mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
432
433 static struct mfii_dmamem * mfii_dmamem_alloc(struct mfii_softc *, size_t);
434 static void mfii_dmamem_free(struct mfii_softc *,
435 struct mfii_dmamem *);
436
437 static struct mfii_ccb * mfii_get_ccb(struct mfii_softc *);
438 static void mfii_put_ccb(struct mfii_softc *, struct mfii_ccb *);
439 static int mfii_init_ccb(struct mfii_softc *);
440 static void mfii_scrub_ccb(struct mfii_ccb *);
441
442 static int mfii_transition_firmware(struct mfii_softc *);
443 static int mfii_initialise_firmware(struct mfii_softc *);
444 static int mfii_get_info(struct mfii_softc *);
445
446 static void mfii_start(struct mfii_softc *, struct mfii_ccb *);
447 static void mfii_start64(struct mfii_softc *, struct mfii_ccb *);
448 static void mfii_start_common(struct mfii_softc *,
449 struct mfii_ccb *, bool);
450 static void mfii_done(struct mfii_softc *, struct mfii_ccb *);
451 static int mfii_poll(struct mfii_softc *, struct mfii_ccb *);
452 static void mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
453 static int mfii_exec(struct mfii_softc *, struct mfii_ccb *);
454 static void mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
455 static int mfii_my_intr(struct mfii_softc *);
456 static int mfii_intr(void *);
457 static void mfii_postq(struct mfii_softc *);
458
459 static int mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
460 void *, int);
461 static int mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
462 void *, int);
463
464 static int mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
465
466 static int mfii_mgmt(struct mfii_softc *, uint32_t,
467 const union mfi_mbox *, void *, size_t,
468 mfii_direction_t, bool);
469 static int mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
470 uint32_t, const union mfi_mbox *, void *, size_t,
471 mfii_direction_t, bool);
472 static void mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
473
474 static int mfii_scsi_cmd_io(struct mfii_softc *,
475 struct mfii_ccb *, struct scsipi_xfer *);
476 static int mfii_scsi_cmd_cdb(struct mfii_softc *,
477 struct mfii_ccb *, struct scsipi_xfer *);
478 static void mfii_scsi_cmd_tmo(void *);
479
480 static void mfii_abort_task(struct work *, void *);
481 static void mfii_abort(struct mfii_softc *, struct mfii_ccb *,
482 uint16_t, uint16_t, uint8_t, uint32_t);
483 static void mfii_scsi_cmd_abort_done(struct mfii_softc *,
484 struct mfii_ccb *);
485
486 static int mfii_aen_register(struct mfii_softc *);
487 static void mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
488 struct mfii_dmamem *, uint32_t);
489 static void mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
490 static void mfii_aen(struct work *, void *);
491 static void mfii_aen_unregister(struct mfii_softc *);
492
493 static void mfii_aen_pd_insert(struct mfii_softc *,
494 const struct mfi_evtarg_pd_address *);
495 static void mfii_aen_pd_remove(struct mfii_softc *,
496 const struct mfi_evtarg_pd_address *);
497 static void mfii_aen_pd_state_change(struct mfii_softc *,
498 const struct mfi_evtarg_pd_state *);
499 static void mfii_aen_ld_update(struct mfii_softc *);
500
501 #if NBIO > 0
502 static int mfii_ioctl(device_t, u_long, void *);
503 static int mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
504 static int mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
505 static int mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
506 static int mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
507 static int mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
508 static int mfii_ioctl_setstate(struct mfii_softc *,
509 struct bioc_setstate *);
510 static int mfii_bio_hs(struct mfii_softc *, int, int, void *);
511 static int mfii_bio_getitall(struct mfii_softc *);
512 #endif /* NBIO > 0 */
513
514 #if 0
515 static const char *mfi_bbu_indicators[] = {
516 "pack missing",
517 "voltage low",
518 "temp high",
519 "charge active",
520 "discharge active",
521 "learn cycle req'd",
522 "learn cycle active",
523 "learn cycle failed",
524 "learn cycle timeout",
525 "I2C errors",
526 "replace pack",
527 "low capacity",
528 "periodic learn req'd"
529 };
530 #endif
531
532 static void mfii_init_ld_sensor(struct mfii_softc *, envsys_data_t *, int);
533 static void mfii_refresh_ld_sensor(struct mfii_softc *, envsys_data_t *);
534 static void mfii_attach_sensor(struct mfii_softc *, envsys_data_t *);
535 static int mfii_create_sensors(struct mfii_softc *);
536 static int mfii_destroy_sensors(struct mfii_softc *);
537 static void mfii_refresh_sensor(struct sysmon_envsys *, envsys_data_t *);
538 static void mfii_bbu(struct mfii_softc *, envsys_data_t *);
539
540 /*
541 * mfii boards support asynchronous (and non-polled) completion of
542 * dcmds by proxying them through a passthru mpii command that points
543 * at a dcmd frame. since the passthru command is submitted like
544 * the scsi commands using an SMID in the request descriptor,
545 * ccb_request memory * must contain the passthru command because
546 * that is what the SMID refers to. this means ccb_request cannot
547 * contain the dcmd. rather than allocating separate dma memory to
548 * hold the dcmd, we reuse the sense memory buffer for it.
549 */
550
551 static void mfii_dcmd_start(struct mfii_softc *, struct mfii_ccb *);
552
/*
 * Zero this ccb's dcmd frame for reuse.  dcmds are proxied through the
 * sense memory buffer (see the comment above), so clearing the sense
 * slot clears the dcmd.
 */
static inline void
mfii_dcmd_scrub(struct mfii_ccb *ccb)
{
	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
}
558
/*
 * Return the ccb's sense buffer reinterpreted as an MFI dcmd frame.
 * The CTASSERT proves the frame fits inside the sense slot.
 */
static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb *ccb)
{
	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
}
565
/*
 * bus_dmamap_sync() the slice of the shared sense area that backs this
 * ccb's dcmd frame, with the given PRE/POST flags.
 */
static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}
572
573 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
574
575 static const struct mfii_iop mfii_iop_thunderbolt = {
576 MFII_BAR,
577 MFII_IOP_NUM_SGE_LOC_ORIG,
578 0,
579 MFII_REQ_TYPE_LDIO,
580 0,
581 MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
582 0,
583 0
584 };
585
586 /*
587 * a lot of these values depend on us not implementing fastpath yet.
588 */
589 static const struct mfii_iop mfii_iop_25 = {
590 MFII_BAR,
591 MFII_IOP_NUM_SGE_LOC_ORIG,
592 MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
593 MFII_REQ_TYPE_NO_LOCK,
594 MFII_RAID_CTX_TYPE_CUDA | 0x1,
595 MFII_SGE_CHAIN_ELEMENT,
596 MFII_SGE_END_OF_LIST,
597 0
598 };
599
600 static const struct mfii_iop mfii_iop_35 = {
601 MFII_BAR_35,
602 MFII_IOP_NUM_SGE_LOC_35,
603 MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
604 MFII_REQ_TYPE_NO_LOCK,
605 MFII_RAID_CTX_TYPE_CUDA | 0x1,
606 MFII_SGE_CHAIN_ELEMENT,
607 MFII_SGE_END_OF_LIST,
608 0
609 };
610
611 static const struct mfii_iop mfii_iop_aero = {
612 MFII_BAR_35,
613 MFII_IOP_NUM_SGE_LOC_35,
614 MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
615 MFII_REQ_TYPE_NO_LOCK,
616 MFII_RAID_CTX_TYPE_CUDA | 0x1,
617 MFII_SGE_CHAIN_ELEMENT,
618 MFII_SGE_END_OF_LIST,
619 MFII_IOP_QUIRK_REGREAD | MFII_IOP_HAS_32BITDESC_BIT
620 };
621
/*
 * PCI id to IOP-parameter mapping.  The comments name the LSI/Broadcom
 * controller product families each id belongs to.
 */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};

static const struct mfii_device mfii_devices[] = {
	/* Fusion */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	/* Fury */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	/* Invader */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	/* Intruder */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3316,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3324,
	    &mfii_iop_25 },
	/* Cutlass */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_32XX_1,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_32XX_2,
	    &mfii_iop_25 },
	/* Crusader */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	/* Ventura */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 },
	/* Tomcat */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	/* Harpoon */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	/* Aero */
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_39XX_2,
	    &mfii_iop_aero },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_39XX_3,
	    &mfii_iop_aero },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_2,
	    &mfii_iop_aero },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_3,
	    &mfii_iop_aero }
};
674
675 static const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
676
677 static const struct mfii_iop *
mfii_find_iop(struct pci_attach_args * pa)678 mfii_find_iop(struct pci_attach_args *pa)
679 {
680 const struct mfii_device *mpd;
681 int i;
682
683 for (i = 0; i < __arraycount(mfii_devices); i++) {
684 mpd = &mfii_devices[i];
685
686 if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
687 mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
688 return (mpd->mpd_iop);
689 }
690
691 return (NULL);
692 }
693
694 static int
mfii_match(device_t parent,cfdata_t match,void * aux)695 mfii_match(device_t parent, cfdata_t match, void *aux)
696 {
697 return ((mfii_find_iop(aux) != NULL) ? 2 : 0);
698 }
699
700 static void
mfii_attach(device_t parent,device_t self,void * aux)701 mfii_attach(device_t parent, device_t self, void *aux)
702 {
703 struct mfii_softc *sc = device_private(self);
704 struct pci_attach_args *pa = aux;
705 pcireg_t memtype;
706 pci_intr_handle_t *ihp;
707 char intrbuf[PCI_INTRSTR_LEN];
708 const char *intrstr;
709 u_int32_t status, scpad2, scpad3;
710 int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
711 struct scsipi_adapter *adapt = &sc->sc_adapt;
712 struct scsipi_channel *chan = &sc->sc_chan;
713 union mfi_mbox mbox;
714
715 /* init sc */
716 sc->sc_dev = self;
717 sc->sc_iop = mfii_find_iop(aux);
718 sc->sc_dmat = pa->pa_dmat;
719 if (pci_dma64_available(pa)) {
720 sc->sc_dmat64 = pa->pa_dmat64;
721 sc->sc_64bit_dma = 1;
722 } else {
723 sc->sc_dmat64 = pa->pa_dmat;
724 sc->sc_64bit_dma = 0;
725 }
726 SIMPLEQ_INIT(&sc->sc_ccb_freeq);
727 mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
728 mutex_init(&sc->sc_post_mtx, MUTEX_DEFAULT, IPL_BIO);
729 mutex_init(&sc->sc_reply_postq_mtx, MUTEX_DEFAULT, IPL_BIO);
730
731 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
732
733 sc->sc_aen_ccb = NULL;
734 snprintf(intrbuf, sizeof(intrbuf) - 1, "%saen", device_xname(self));
735 workqueue_create(&sc->sc_aen_wq, intrbuf, mfii_aen, sc,
736 PRI_BIO, IPL_BIO, WQ_MPSAFE);
737
738 snprintf(intrbuf, sizeof(intrbuf) - 1, "%sabrt", device_xname(self));
739 workqueue_create(&sc->sc_abort_wq, intrbuf, mfii_abort_task,
740 sc, PRI_BIO, IPL_BIO, WQ_MPSAFE);
741
742 mutex_init(&sc->sc_abort_mtx, MUTEX_DEFAULT, IPL_BIO);
743 SIMPLEQ_INIT(&sc->sc_abort_list);
744
745 /* wire up the bus shizz */
746 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
747 memtype |= PCI_MAPREG_MEM_TYPE_32BIT;
748 if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
749 &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)) {
750 aprint_error(": unable to map registers\n");
751 return;
752 }
753
754 /* disable interrupts */
755 mfii_write(sc, MFI_OMSK, 0xffffffff);
756
757 if (pci_intr_alloc(pa, &ihp, NULL, 0)) {
758 aprint_error(": unable to map interrupt\n");
759 goto pci_unmap;
760 }
761 intrstr = pci_intr_string(pa->pa_pc, ihp[0], intrbuf, sizeof(intrbuf));
762 pci_intr_setattr(pa->pa_pc, &ihp[0], PCI_INTR_MPSAFE, true);
763
764 /* lets get started */
765 if (mfii_transition_firmware(sc))
766 goto pci_unmap;
767 sc->sc_running = true;
768
769 /* determine max_cmds (refer to the Linux megaraid_sas driver) */
770 scpad3 = mfii_read(sc, MFII_OSP3);
771 status = mfii_fw_state(sc);
772 sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
773 if (sc->sc_max_fw_cmds == 0)
774 sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
775 /*
776 * reduce max_cmds by 1 to ensure that the reply queue depth does not
777 * exceed FW supplied max_fw_cmds.
778 */
779 sc->sc_max_cmds = uimin(sc->sc_max_fw_cmds, 1024) - 1;
780
781 /* determine max_sgl (refer to the Linux megaraid_sas driver) */
782 scpad2 = mfii_read(sc, MFII_OSP2);
783 chain_frame_sz =
784 ((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
785 ((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
786 if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
787 chain_frame_sz = MFII_CHAIN_FRAME_MIN;
788
789 nsge_in_io = (MFII_REQUEST_SIZE -
790 sizeof(struct mpii_msg_scsi_io) -
791 sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
792 nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
793
794 /* round down to nearest power of two */
795 sc->sc_max_sgl = 1;
796 while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
797 sc->sc_max_sgl <<= 1;
798
799 /* Check for atomic(32bit) descriptor */
800 if (((sc->sc_iop->iop_flag & MFII_IOP_HAS_32BITDESC_BIT) != 0) &&
801 ((scpad2 & MFI_STATE_ATOMIC_DESCRIPTOR) != 0))
802 sc->sc_iop_flag |= MFII_IOP_DESC_32BIT;
803
804 DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
805 DEVNAME(sc), status, scpad2, scpad3);
806 DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
807 DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
808 DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
809 "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
810 sc->sc_max_sgl);
811
812 /* sense memory */
813 CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
814 sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
815 if (sc->sc_sense == NULL) {
816 aprint_error(": unable to allocate sense memory\n");
817 goto pci_unmap;
818 }
819
820 /* reply post queue */
821 sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
822
823 sc->sc_reply_postq = mfii_dmamem_alloc(sc,
824 sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
825 if (sc->sc_reply_postq == NULL)
826 goto free_sense;
827
828 memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
829 MFII_DMA_LEN(sc->sc_reply_postq));
830
831 /* MPII request frame array */
832 sc->sc_requests = mfii_dmamem_alloc(sc,
833 MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
834 if (sc->sc_requests == NULL)
835 goto free_reply_postq;
836
837 /* MFI command frame array */
838 sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
839 if (sc->sc_mfi == NULL)
840 goto free_requests;
841
842 /* MPII SGL array */
843 sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
844 sizeof(struct mfii_sge) * sc->sc_max_sgl);
845 if (sc->sc_sgl == NULL)
846 goto free_mfi;
847
848 if (mfii_init_ccb(sc) != 0) {
849 aprint_error(": could not init ccb list\n");
850 goto free_sgl;
851 }
852
853 /* kickstart firmware with all addresses and pointers */
854 if (mfii_initialise_firmware(sc) != 0) {
855 aprint_error(": could not initialize firmware\n");
856 goto free_sgl;
857 }
858
859 mutex_enter(&sc->sc_lock);
860 if (mfii_get_info(sc) != 0) {
861 mutex_exit(&sc->sc_lock);
862 aprint_error(": could not retrieve controller information\n");
863 goto free_sgl;
864 }
865 mutex_exit(&sc->sc_lock);
866
867 aprint_normal(": \"%s\", firmware %s",
868 sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
869 if (le16toh(sc->sc_info.mci_memory_size) > 0) {
870 aprint_normal(", %uMB cache",
871 le16toh(sc->sc_info.mci_memory_size));
872 }
873 aprint_normal("\n");
874 aprint_naive("\n");
875
876 sc->sc_ih = pci_intr_establish_xname(sc->sc_pc, ihp[0], IPL_BIO,
877 mfii_intr, sc, DEVNAME(sc));
878 if (sc->sc_ih == NULL) {
879 aprint_error_dev(self, "can't establish interrupt");
880 if (intrstr)
881 aprint_error(" at %s", intrstr);
882 aprint_error("\n");
883 goto free_sgl;
884 }
885 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
886
887 for (i = 0; i < sc->sc_info.mci_lds_present; i++)
888 sc->sc_ld[i].ld_present = 1;
889
890 sc->sc_max256vd =
891 (sc->sc_info.mci_adapter_ops3 & MFI_INFO_AOPS3_SUPP_MAX_EXT_LDS) ?
892 true : false;
893
894 if (sc->sc_max256vd)
895 aprint_verbose_dev(self, "Max 256 VD support\n");
896
897 memset(adapt, 0, sizeof(*adapt));
898 adapt->adapt_dev = sc->sc_dev;
899 adapt->adapt_nchannels = 1;
900 /* keep a few commands for management */
901 if (sc->sc_max_cmds > 4)
902 adapt->adapt_openings = sc->sc_max_cmds - 4;
903 else
904 adapt->adapt_openings = sc->sc_max_cmds;
905 adapt->adapt_max_periph = adapt->adapt_openings;
906 adapt->adapt_request = mfii_scsipi_request;
907 adapt->adapt_minphys = minphys;
908 adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE;
909
910 memset(chan, 0, sizeof(*chan));
911 chan->chan_adapter = adapt;
912 chan->chan_bustype = &scsi_sas_bustype;
913 chan->chan_channel = 0;
914 chan->chan_flags = 0;
915 chan->chan_nluns = 8;
916 chan->chan_ntargets = sc->sc_info.mci_max_lds;
917 chan->chan_id = sc->sc_info.mci_max_lds;
918
919 mfii_rescan(sc->sc_dev, NULL, NULL);
920
921 if (mfii_aen_register(sc) != 0) {
922 /* error printed by mfii_aen_register */
923 goto intr_disestablish;
924 }
925
926 memset(&mbox, 0, sizeof(mbox));
927 if (sc->sc_max256vd)
928 mbox.b[0] = 1;
929 mutex_enter(&sc->sc_lock);
930 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
931 sizeof(sc->sc_ld_list), MFII_DATA_IN, true) != 0) {
932 mutex_exit(&sc->sc_lock);
933 aprint_error_dev(self,
934 "getting list of logical disks failed\n");
935 goto intr_disestablish;
936 }
937 mutex_exit(&sc->sc_lock);
938 memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
939 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
940 int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
941 sc->sc_target_lds[target] = i;
942 sc->sc_ld[i].ld_target_id = target;
943 }
944
945 /* enable interrupts */
946 mfii_write(sc, MFI_OSTS, 0xffffffff);
947 mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
948
949 #if NBIO > 0
950 if (bio_register(sc->sc_dev, mfii_ioctl) != 0)
951 panic("%s: controller registration failed", DEVNAME(sc));
952 #endif /* NBIO > 0 */
953
954 if (mfii_create_sensors(sc) != 0)
955 aprint_error_dev(self, "unable to create sensors\n");
956
957 if (!pmf_device_register1(sc->sc_dev, mfii_suspend, mfii_resume,
958 mfii_shutdown))
959 aprint_error_dev(self, "couldn't establish power handler\n");
960 return;
961 intr_disestablish:
962 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
963 free_sgl:
964 mfii_dmamem_free(sc, sc->sc_sgl);
965 free_mfi:
966 mfii_dmamem_free(sc, sc->sc_mfi);
967 free_requests:
968 mfii_dmamem_free(sc, sc->sc_requests);
969 free_reply_postq:
970 mfii_dmamem_free(sc, sc->sc_reply_postq);
971 free_sense:
972 mfii_dmamem_free(sc, sc->sc_sense);
973 pci_unmap:
974 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
975 }
976
/*
 * Disabled code, apparently carried over from the OpenBSD driver:
 * physical-disk device-handle tracking built on srp(9) (shared
 * reference pointers), which has no NetBSD equivalent.  It also
 * references sc->sc_pd, which the NetBSD softc does not have.
 */
#if 0
struct srp_gc mfii_dev_handles_gc =
    SRP_GC_INITIALIZER(mfii_dev_handles_dtor, NULL);

static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct srp_ref sr;
	uint16_t *map, handle;

	map = srp_enter(&sr, &sc->sc_pd->pd_dev_handles);
	handle = map[target];
	srp_leave(&sr);

	return (handle);
}

static int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	uint16_t *dev_handles = NULL;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    MFII_DATA_IN, false);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	dev_handles = mallocarray(MFI_MAX_PD, sizeof(*dev_handles),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MFI_MAX_PD; i++)
		dev_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	srp_update_locked(&mfii_dev_handles_gc,
	    &sc->sc_pd->pd_dev_handles, dev_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}

static void
mfii_dev_handles_dtor(void *null, void *v)
{
	uint16_t *dev_handles = v;

	free(dev_handles, M_DEVBUF, sizeof(*dev_handles) * MFI_MAX_PD);
}
#endif /* 0 */
1037
/*
 * Autoconf detach: tear down children, sensors, bio registration, the
 * firmware, the interrupt handler and all DMA resources set up by
 * attach.  Returns 0 on success or the error from child detach.
 */
static int
mfii_detach(device_t self, int flags)
{
	struct mfii_softc *sc = device_private(self);
	int error;

	/* No interrupt handler means attach never completed; nothing to undo. */
	if (sc->sc_ih == NULL)
		return (0);

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

	mfii_destroy_sensors(sc);
#if NBIO > 0
	bio_unregister(sc->sc_dev);
#endif
	/* Shut the firmware down cleanly, then mask all interrupts. */
	mfii_shutdown(sc->sc_dev, 0);
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	/* Release DMA memory in reverse order of allocation in attach. */
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
1068
1069 static int
mfii_rescan(device_t self,const char * ifattr,const int * locators)1070 mfii_rescan(device_t self, const char *ifattr, const int *locators)
1071 {
1072 struct mfii_softc *sc = device_private(self);
1073
1074 if (sc->sc_child != NULL)
1075 return 0;
1076
1077 sc->sc_child = config_found(self, &sc->sc_chan, scsiprint,
1078 CFARGS_NONE);
1079 return 0;
1080 }
1081
1082 static void
mfii_childdetached(device_t self,device_t child)1083 mfii_childdetached(device_t self, device_t child)
1084 {
1085 struct mfii_softc *sc = device_private(self);
1086
1087 KASSERT(self == sc->sc_dev);
1088 KASSERT(child == sc->sc_child);
1089
1090 if (child == sc->sc_child)
1091 sc->sc_child = NULL;
1092 }
1093
/* pmf suspend hook. */
static bool
mfii_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented; returning false vetoes the suspend. */
	return false;
}
1100
/* pmf resume hook. */
static bool
mfii_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented; returning false reports resume failure. */
	return false;
}
1107
1108 static bool
mfii_shutdown(device_t dev,int how)1109 mfii_shutdown(device_t dev, int how)
1110 {
1111 struct mfii_softc *sc = device_private(dev);
1112 struct mfii_ccb *ccb;
1113 union mfi_mbox mbox;
1114 bool rv = true;
1115
1116 memset(&mbox, 0, sizeof(mbox));
1117
1118 mutex_enter(&sc->sc_lock);
1119 DNPRINTF(MFII_D_MISC, "%s: mfii_shutdown\n", DEVNAME(sc));
1120 ccb = mfii_get_ccb(sc);
1121 if (ccb == NULL)
1122 return false;
1123 mutex_enter(&sc->sc_ccb_mtx);
1124 if (sc->sc_running) {
1125 sc->sc_running = 0; /* prevent new commands */
1126 mutex_exit(&sc->sc_ccb_mtx);
1127 #if 0 /* XXX why does this hang ? */
1128 mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1129 mfii_scrub_ccb(ccb);
1130 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
1131 NULL, 0, MFII_DATA_NONE, true)) {
1132 aprint_error_dev(dev,
1133 "shutdown: cache flush failed\n");
1134 rv = false;
1135 goto fail;
1136 }
1137 printf("ok1\n");
1138 #endif
1139 mbox.b[0] = 0;
1140 mfii_scrub_ccb(ccb);
1141 if (mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, &mbox,
1142 NULL, 0, MFII_DATA_NONE, true)) {
1143 aprint_error_dev(dev, "shutdown: "
1144 "firmware shutdown failed\n");
1145 rv = false;
1146 goto fail;
1147 }
1148 } else {
1149 mutex_exit(&sc->sc_ccb_mtx);
1150 }
1151 fail:
1152 mfii_put_ccb(sc, ccb);
1153 mutex_exit(&sc->sc_lock);
1154 return rv;
1155 }
1156
/* Register read function without retry */
static inline u_int32_t
mfii_read_wor(struct mfii_softc *sc, bus_size_t r)
{
	/* Order this read against earlier accesses to the register window. */
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
1165
1166 static u_int32_t
mfii_read(struct mfii_softc * sc,bus_size_t r)1167 mfii_read(struct mfii_softc *sc, bus_size_t r)
1168 {
1169 uint32_t rv;
1170 int i = 0;
1171
1172 if ((sc->sc_iop->iop_flag & MFII_IOP_QUIRK_REGREAD) != 0) {
1173 do {
1174 rv = mfii_read_wor(sc, r);
1175 i++;
1176 } while ((rv == 0) && (i < 3));
1177 } else
1178 rv = mfii_read_wor(sc, r);
1179
1180 return rv;
1181 }
1182
/* Register write, with a barrier to push it out to the device. */
static void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1190
/*
 * Allocate a zeroed, single-segment, page-aligned DMA-able memory
 * chunk and map it into kernel VA.  Returns NULL on failure; on
 * success the caller owns the mfii_dmamem and must release it with
 * mfii_dmamem_free().
 */
static struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_WAITOK | M_ZERO);
	m->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	memset(m->mdm_kva, 0, size);
	return (m);

	/* Unwind in reverse order of the steps above. */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF);

	return (NULL);
}
1230
/* Release everything acquired by mfii_dmamem_alloc(), in reverse order. */
static void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF);
}
1240
/*
 * Post a DCMD prepared in the ccb's sense area as an MPII pass-through
 * request: the single chain SGE points the firmware at the MFI frame.
 */
static void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32-bit words; chain_offset in 16-byte units. */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	sge->sg_addr = htole64(ccb->ccb_sense_dva);
	sge->sg_len = htole32(sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	/*
	 * NOTE(review): le16toh here looks like it should be htole16;
	 * both are the same byte swap so behavior is identical.
	 */
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1261
/*
 * Prime asynchronous event notification (AEN): fetch the event-log
 * sequence numbers and start the first EVENT_WAIT command, replaying
 * everything since boot.  On success the ccb and the event DMA buffer
 * are handed over to the AEN state machine and never returned to the
 * pools.  Returns 0, ENOMEM or EIO.
 */
static int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = mfii_get_ccb(sc);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), MFII_DATA_IN, true);
	if (rv != 0) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev, "unable to get event info\n");
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		mfii_put_ccb(sc, ccb);
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate event data\n");
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, le32toh(mel.mel_boot_seq_num));

	return (0);
}
1300
/*
 * (Re)arm the AEN machinery: build an MR_DCMD_CTRL_EVENT_WAIT DCMD
 * that asks the firmware for the next event at or after sequence
 * number "seq", with the event detail DMAed into mdm, then post it.
 */
static void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	/* The completion path retrieves the event buffer via the cookie. */
	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* Ask for every event class and locale. */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	hdr->mfh_data_len = htole32(MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mdf_mbox.w[0] = htole32(seq);
	dcmd->mdf_mbox.w[1] = htole32(mec.mec_word);
	sgl->sg64[0].addr = htole64(MFII_DMA_DVA(mdm));
	sgl->sg64[0].len = htole32(MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1338
/* Completion handler for the AEN EVENT_WAIT command (interrupt context). */
static void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/*
	 * defer to a thread with KERNEL_LOCK so we can run autoconf
	 * We shouldn't have more than one AEN command pending at a time,
	 * so no need to lock
	 */
	if (sc->sc_running)
		workqueue_enqueue(sc->sc_aen_wq, &sc->sc_aen_work, NULL);
}
1352
/*
 * AEN workqueue handler: runs in thread context after the firmware
 * completes the pending EVENT_WAIT.  Syncs the DMA buffers, dispatches
 * the event, then rearms the AEN with the next sequence number.
 */
static void
mfii_aen(struct work *wk, void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	DNPRINTF(MFII_D_MISC, "%s: %u %08x %02x %s\n", DEVNAME(sc),
	    le32toh(med->med_seq_num), le32toh(med->med_code),
	    med->med_arg_type, med->med_description);

	/* Only act on events whose argument type matches expectations. */
	switch (le32toh(med->med_code)) {
	case MR_EVT_PD_INSERTED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MR_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MR_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MR_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MR_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MR_EVT_LD_CREATED:
	case MR_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* Rearm: wait for the event following the one just handled. */
	mfii_aen_start(sc, ccb, mdm, le32toh(med->med_seq_num) + 1);
}
1402
/* AEN handler: log the insertion of a physical disk. */
static void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
	printf("%s: physical disk inserted id %d enclosure %d\n", DEVNAME(sc),
	    le16toh(pd->device_id), le16toh(pd->encl_id));
}
1410
/* AEN handler: log the removal of a physical disk. */
static void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
	printf("%s: physical disk removed id %d enclosure %d\n", DEVNAME(sc),
	    le16toh(pd->device_id), le16toh(pd->encl_id));
}
1418
/* AEN handler: physical-disk state changes are currently ignored. */
static void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	/* nothing to do */
}
1425
/*
 * AEN handler for LD created/deleted: refetch the logical-disk list
 * and diff it against sc_target_lds, attaching sensors for new drives
 * and detaching targets/sensors for removed ones.
 */
static void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	union mfi_mbox mbox;
	int i, target, old, nld;
	int newlds[MFII_MAX_LD_EXT];

	memset(&mbox, 0, sizeof(mbox));
	/* mbox.b[0] = 1 requests the extended (256 volume) list. */
	if (sc->sc_max256vd)
		mbox.b[0] = 1;
	mutex_enter(&sc->sc_lock);
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false) != 0) {
		mutex_exit(&sc->sc_lock);
		DNPRINTF(MFII_D_MISC,
		    "%s: getting list of logical disks failed\n", DEVNAME(sc));
		return;
	}
	mutex_exit(&sc->sc_lock);

	/* Build the new target -> ld-index map; -1 means no drive. */
	memset(newlds, -1, sizeof(newlds));

	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, sc->sc_ld_list.mll_list[i].mll_state);
		newlds[target] = i;
		sc->sc_ld[i].ld_target_id = target;
	}

	/* Diff old vs. new map per target id. */
	for (i = 0; i < MFII_MAX_LD_EXT; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			/*
			 * NOTE(review): message prints i (the target id)
			 * as the drive number and nld (the list index) as
			 * the target; ordering inherited from OpenBSD --
			 * confirm intent before changing.
			 */
			printf("%s: logical drive %d added (target %d)\n",
			    DEVNAME(sc), i, nld);

			// XXX scsi_probe_target(sc->sc_scsibus, i);

			mfii_init_ld_sensor(sc, &sc->sc_sensors[i], i);
			mfii_attach_sensor(sc, &sc->sc_sensors[i]);
		} else if (nld == -1 && old != -1) {
			printf("%s: logical drive %d removed (target %d)\n",
			    DEVNAME(sc), i, old);

			scsipi_target_detach(&sc->sc_chan, i, 0, DETACH_FORCE);
			sysmon_envsys_sensor_detach(sc->sc_sme,
			    &sc->sc_sensors[i]);
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1480
/* Tear down AEN; currently a stub (the pending EVENT_WAIT is not aborted). */
static void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1486
/*
 * Drive the firmware state machine until it reaches MFI_STATE_READY.
 * For each intermediate state, optionally kick the IOP doorbell and
 * then poll (100ms steps) up to a per-state timeout for the state to
 * change.  Returns 0 on READY, 1 on fault/timeout/unknown state.
 */
static int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* Poll every 100ms, so max_wait is in seconds. */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1540
/*
 * Fetch the controller information structure (MR_DCMD_CTRL_GET_INFO)
 * into sc->sc_info and, when driver debugging is enabled, dump its
 * interesting fields.  Returns 0 on success or the mgmt error.
 */
static int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), MFII_DATA_IN, true);

	if (rv != 0)
		return (rv);

	/* Everything below is debug output only. */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ",
		    sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0" PRIx64 " ",
		    sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1697
/*
 * Submit a legacy MFI frame via the 64-bit descriptor registers and
 * busy-wait for its completion (up to ~5s), watching the command
 * status byte in the frame.  Used only for MFI_CMD_INIT during
 * attach.  Returns 0 on completion, 1 on timeout.
 */
static int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header *hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* Build the MFA request descriptor from the frame's DVA. */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	/*
	 * Even if the Aero card supports 32bit descriptor, 64bit descriptor
	 * access is required for MFI_CMD_INIT.
	 * Currently, mfii_mfa_poll() is called for MFI_CMD_INIT only.
	 */
	mfii_start64(sc, ccb);

	for (;;) {
		/* Resync so we observe the firmware's status update. */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* Unload the data buffer, if any, now that the command is done. */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	return (rv);
}
1758
/*
 * Submit a request and busy-wait for completion by manually draining
 * the reply post queue.  The ccb's done hook and cookie are saved,
 * temporarily replaced with a flag-clearing hook, and the original
 * done routine is invoked once the command finishes.
 */
static int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	/* mfii_poll_done() clears rv when our ccb completes. */
	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1784
1785 static void
mfii_poll_done(struct mfii_softc * sc,struct mfii_ccb * ccb)1786 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1787 {
1788 int *rv = ccb->ccb_cookie;
1789
1790 *rv = 0;
1791 }
1792
/*
 * Submit a request and sleep until it completes.  ccb_cookie doubles
 * as the "still pending" flag: mfii_exec_done() clears it and signals
 * the ccb's condvar.
 */
static int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = ccb;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mutex_enter(&ccb->ccb_mtx);
	while (ccb->ccb_cookie != NULL)
		cv_wait(&ccb->ccb_cv, &ccb->ccb_mtx);
	mutex_exit(&ccb->ccb_mtx);

	return (0);
}
1813
/* Completion hook for mfii_exec(): clear the pending flag and wake waiter. */
static void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	mutex_enter(&ccb->ccb_mtx);
	ccb->ccb_cookie = NULL;
	cv_signal(&ccb->ccb_cv);
	mutex_exit(&ccb->ccb_mtx);
}
1822
1823 static int
mfii_mgmt(struct mfii_softc * sc,uint32_t opc,const union mfi_mbox * mbox,void * buf,size_t len,mfii_direction_t dir,bool poll)1824 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1825 void *buf, size_t len, mfii_direction_t dir, bool poll)
1826 {
1827 struct mfii_ccb *ccb;
1828 int rv;
1829
1830 KASSERT(mutex_owned(&sc->sc_lock));
1831 if (!sc->sc_running)
1832 return EAGAIN;
1833
1834 ccb = mfii_get_ccb(sc);
1835 if (ccb == NULL)
1836 return (ENOMEM);
1837
1838 mfii_scrub_ccb(ccb);
1839 rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, dir, poll);
1840 mfii_put_ccb(sc, ccb);
1841
1842 return (rv);
1843 }
1844
/*
 * Build and submit an MFI DCMD management command on the given ccb.
 * The MFI frame is delivered to the firmware as an MPII pass-through
 * request carrying a single chain SGE that points at the frame.
 * Returns 0 when the firmware reports MFI_STAT_OK, ENOMEM when the
 * data buffer cannot be mapped, EIO otherwise.
 */
static int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, mfii_direction_t dir,
    bool poll)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	int rv = EIO;

	/* Interrupts are not available during early boot; force polling. */
	if (cold)
		poll = true;

	ccb->ccb_data = buf;
	ccb->ccb_len = len;
	ccb->ccb_direction = dir;
	switch (dir) {
	case MFII_DATA_IN:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case MFII_DATA_OUT:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		break;
	case MFII_DATA_NONE:
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	/* Load the data buffer into the MFI frame's 32-bit SGL. */
	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl, poll) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap32->dm_nsegs;
	KASSERT(!ccb->ccb_dma64);

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 counts 32-bit words; chain_offset 16-byte chunks. */
	io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
	io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;

	sge->sg_addr = htole64(ccb->ccb_mfi_dva);
	sge->sg_len = htole32(MFI_FRAME_SIZE);
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	if (poll) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	/* The firmware writes the result into the frame header. */
	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;
	}

done:
	return (rv);
}
1915
/*
 * No-op completion hook: polled mgmt commands read the result from the
 * MFI frame header instead, so nothing needs to happen here.
 */
static void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
}
1921
/*
 * Map the ccb's data buffer for DMA and fill the legacy 32-bit MFI
 * scatter/gather list at sglp.  A zero-length buffer is a no-op.
 * Returns 0 on success, 1 if the dmamap load fails.
 */
static int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap32;
	int error;
	int i;

	KASSERT(!ccb->ccb_dma64);
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* One 32-bit SGE per DMA segment, little-endian for the firmware. */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1954
1955 static void
mfii_start(struct mfii_softc * sc,struct mfii_ccb * ccb)1956 mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
1957 {
1958
1959 mfii_start_common(sc, ccb,
1960 ((sc->sc_iop_flag & MFII_IOP_DESC_32BIT) != 0) ? true : false);
1961 }
1962
1963 static void
mfii_start64(struct mfii_softc * sc,struct mfii_ccb * ccb)1964 mfii_start64(struct mfii_softc *sc, struct mfii_ccb *ccb)
1965 {
1966
1967 mfii_start_common(sc, ccb, false);
1968 }
1969
/*
 * Write the ccb's 64-bit request descriptor to the inbound queue port.
 * do32 uses the single 32-bit port; otherwise on LP64 a single 8-byte
 * write is used, and on 32-bit platforms the two halves are written
 * under sc_post_mtx so they stay paired.
 */
static void
mfii_start_common(struct mfii_softc *sc, struct mfii_ccb *ccb, bool do32)
{
	uint32_t *r = (uint32_t *)&ccb->ccb_req;

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (do32)
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_ISQP, r[0]);
	else {
#if defined(__LP64__)
		uint64_t buf;

		buf = ((uint64_t)r[1] << 32) | r[0];
		bus_space_write_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, buf);
#else
		mutex_enter(&sc->sc_post_mtx);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
		bus_space_barrier(sc->sc_iot, sc->sc_ioh,
		    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);
		mutex_exit(&sc->sc_post_mtx);
#endif
	}
}
1997
/*
 * Common completion path: sync the request frame and any SGL/data DMA
 * maps for CPU access, unload the data map, then run the ccb's done
 * hook.
 */
static void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	/* A ccb uses either the 64-bit or the 32-bit data map, not both. */
	if (ccb->ccb_dma64) {
		KASSERT(ccb->ccb_len > 0);
		bus_dmamap_sync(sc->sc_dmat64, ccb->ccb_dmamap64,
		    0, ccb->ccb_dmamap64->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat64, ccb->ccb_dmamap64);
	} else if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap32,
		    0, ccb->ccb_dmamap32->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap32);
	}

	ccb->ccb_done(sc, ccb);
}
2030
2031 static int
mfii_initialise_firmware(struct mfii_softc * sc)2032 mfii_initialise_firmware(struct mfii_softc *sc)
2033 {
2034 struct mpii_msg_iocinit_request *iiq;
2035 struct mfii_dmamem *m;
2036 struct mfii_ccb *ccb;
2037 struct mfi_init_frame *init;
2038 int rv;
2039
2040 m = mfii_dmamem_alloc(sc, sizeof(*iiq));
2041 if (m == NULL)
2042 return (1);
2043
2044 iiq = MFII_DMA_KVA(m);
2045 memset(iiq, 0, sizeof(*iiq));
2046
2047 iiq->function = MPII_FUNCTION_IOC_INIT;
2048 iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
2049
2050 iiq->msg_version_maj = 0x02;
2051 iiq->msg_version_min = 0x00;
2052 iiq->hdr_version_unit = 0x10;
2053 iiq->hdr_version_dev = 0x0;
2054
2055 iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
2056
2057 iiq->reply_descriptor_post_queue_depth =
2058 htole16(sc->sc_reply_postq_depth);
2059 iiq->reply_free_queue_depth = htole16(0);
2060
2061 iiq->sense_buffer_address_high = htole32(
2062 MFII_DMA_DVA(sc->sc_sense) >> 32);
2063
2064 iiq->reply_descriptor_post_queue_address_lo =
2065 htole32(MFII_DMA_DVA(sc->sc_reply_postq));
2066 iiq->reply_descriptor_post_queue_address_hi =
2067 htole32(MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
2068
2069 iiq->system_request_frame_base_address_lo =
2070 htole32(MFII_DMA_DVA(sc->sc_requests));
2071 iiq->system_request_frame_base_address_hi =
2072 htole32(MFII_DMA_DVA(sc->sc_requests) >> 32);
2073
2074 iiq->timestamp = htole64(time_uptime);
2075
2076 ccb = mfii_get_ccb(sc);
2077 if (ccb == NULL) {
2078 /* shouldn't ever run out of ccbs during attach */
2079 return (1);
2080 }
2081 mfii_scrub_ccb(ccb);
2082 init = ccb->ccb_request;
2083
2084 init->mif_header.mfh_cmd = MFI_CMD_INIT;
2085 init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
2086 init->mif_qinfo_new_addr_lo = htole32(MFII_DMA_DVA(m));
2087 init->mif_qinfo_new_addr_hi = htole32((uint64_t)MFII_DMA_DVA(m) >> 32);
2088
2089 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2090 0, MFII_DMA_LEN(sc->sc_reply_postq),
2091 BUS_DMASYNC_PREREAD);
2092
2093 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2094 0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
2095
2096 rv = mfii_mfa_poll(sc, ccb);
2097
2098 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2099 0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
2100
2101 mfii_put_ccb(sc, ccb);
2102 mfii_dmamem_free(sc, m);
2103
2104 return (rv);
2105 }
2106
2107 static int
mfii_my_intr(struct mfii_softc * sc)2108 mfii_my_intr(struct mfii_softc *sc)
2109 {
2110 u_int32_t status;
2111
2112 status = mfii_read(sc, MFI_OSTS);
2113
2114 DNPRINTF(MFII_D_INTR, "%s: intr status 0x%x\n", DEVNAME(sc), status);
2115 if (ISSET(status, 0x1)) {
2116 mfii_write(sc, MFI_OSTS, status);
2117 return (1);
2118 }
2119
2120 return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
2121 }
2122
/*
 * Interrupt handler: claim the interrupt and drain the reply post queue.
 */
static int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	/* not ours? */
	if (mfii_my_intr(sc) == 0)
		return (0);

	mfii_postq(sc);
	return (1);
}
2135
static void
mfii_postq(struct mfii_softc *sc)
{
	/*
	 * Drain completed commands from the reply post queue (a ring of
	 * reply descriptors written by the firmware).  Completions are
	 * collected on a local list under sc_reply_postq_mtx and only
	 * finished (mfii_done()) after the lock is dropped.
	 */
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mutex_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		DNPRINTF(MFII_D_INTR,
		    "%s: mfii_postq index %d flags 0x%x data 0x%x\n",
		    DEVNAME(sc), sc->sc_reply_postq_index, rdp->reply_flags,
		    rdp->data == 0xffffffff);
		/* an UNUSED descriptor marks the end of the valid entries */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based (0 is reserved); map back to our ccb */
		ccb = &sc->sc_ccb[le16toh(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		/* return the descriptor to the UNUSED (all-ones) state */
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* tell the firmware how far we have consumed */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mutex_exit(&sc->sc_reply_postq_mtx);

	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2191
2192 static void
mfii_scsipi_request(struct scsipi_channel * chan,scsipi_adapter_req_t req,void * arg)2193 mfii_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
2194 void *arg)
2195 {
2196 struct scsipi_periph *periph;
2197 struct scsipi_xfer *xs;
2198 struct scsipi_adapter *adapt = chan->chan_adapter;
2199 struct mfii_softc *sc = device_private(adapt->adapt_dev);
2200 struct mfii_ccb *ccb;
2201 int timeout;
2202 int target;
2203
2204 switch (req) {
2205 case ADAPTER_REQ_GROW_RESOURCES:
2206 /* Not supported. */
2207 return;
2208 case ADAPTER_REQ_SET_XFER_MODE:
2209 {
2210 struct scsipi_xfer_mode *xm = arg;
2211 xm->xm_mode = PERIPH_CAP_TQING;
2212 xm->xm_period = 0;
2213 xm->xm_offset = 0;
2214 scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
2215 return;
2216 }
2217 case ADAPTER_REQ_RUN_XFER:
2218 break;
2219 }
2220
2221 xs = arg;
2222 periph = xs->xs_periph;
2223 target = periph->periph_target;
2224
2225 if (target >= MFII_MAX_LD_EXT || !sc->sc_ld[target].ld_present ||
2226 periph->periph_lun != 0) {
2227 xs->error = XS_SELTIMEOUT;
2228 scsipi_done(xs);
2229 return;
2230 }
2231
2232 if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
2233 xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
2234 /* the cache is stable storage, don't flush */
2235 xs->error = XS_NOERROR;
2236 xs->status = SCSI_OK;
2237 xs->resid = 0;
2238 scsipi_done(xs);
2239 return;
2240 }
2241
2242 ccb = mfii_get_ccb(sc);
2243 if (ccb == NULL) {
2244 xs->error = XS_RESOURCE_SHORTAGE;
2245 scsipi_done(xs);
2246 return;
2247 }
2248 mfii_scrub_ccb(ccb);
2249 ccb->ccb_cookie = xs;
2250 ccb->ccb_done = mfii_scsi_cmd_done;
2251 ccb->ccb_data = xs->data;
2252 ccb->ccb_len = xs->datalen;
2253
2254 timeout = mstohz(xs->timeout);
2255 if (timeout == 0)
2256 timeout = 1;
2257 callout_reset(&xs->xs_callout, timeout, mfii_scsi_cmd_tmo, ccb);
2258
2259 switch (xs->cmd->opcode) {
2260 case SCSI_READ_6_COMMAND:
2261 case READ_10:
2262 case READ_12:
2263 case READ_16:
2264 case SCSI_WRITE_6_COMMAND:
2265 case WRITE_10:
2266 case WRITE_12:
2267 case WRITE_16:
2268 if (mfii_scsi_cmd_io(sc, ccb, xs) != 0)
2269 goto stuffup;
2270 break;
2271
2272 default:
2273 if (mfii_scsi_cmd_cdb(sc, ccb, xs) != 0)
2274 goto stuffup;
2275 break;
2276 }
2277
2278 xs->error = XS_NOERROR;
2279 xs->resid = 0;
2280
2281 DNPRINTF(MFII_D_CMD, "%s: start io %d cmd %d\n", DEVNAME(sc), target,
2282 xs->cmd->opcode);
2283
2284 if (xs->xs_control & XS_CTL_POLL) {
2285 if (mfii_poll(sc, ccb) != 0)
2286 goto stuffup;
2287 return;
2288 }
2289
2290 mfii_start(sc, ccb);
2291
2292 return;
2293
2294 stuffup:
2295 xs->error = XS_DRIVER_STUFFUP;
2296 scsipi_done(xs);
2297 mfii_put_ccb(sc, ccb);
2298 }
2299
static void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	/*
	 * Completion callback for SCSI commands: translate the RAID
	 * context status into a scsipi error and finish the xfer.
	 */
	struct scsipi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	/*
	 * If callout_stop() reports the callout already fired, the
	 * timeout/abort path (mfii_scsi_cmd_tmo()) owns this command;
	 * let mfii_scsi_cmd_abort_done() complete it instead.
	 */
	if (callout_stop(&xs->xs_callout) != 0)
		return;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* firmware deposited sense data in the per-ccb sense slot */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	scsipi_done(xs);
	mfii_put_ccb(sc, ccb);
}
2333
static int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	/*
	 * Build an LDIO (logical disk I/O fast path) request: an MPII
	 * SCSI IO frame immediately followed by a RAID context, then the
	 * scatter/gather list.  Returns nonzero if the data buffer could
	 * not be mapped.
	 */
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs, target;

	target = sc->sc_ld[periph->periph_target].ld_target_id;
	io->dev_handle = htole16(target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL starts right after the RAID context, in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	/* per-IOP RAID context parameters */
	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(target);

	/* map the data buffer and append the SGL after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	/* the segment count lives in a different field on newer IOPs */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	/* byte-swap to little-endian (le16toh == htole16, both swap) */
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2394
static int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	/*
	 * Build a generic (non-read/write) SCSI command for a logical
	 * disk.  Same frame layout as mfii_scsi_cmd_io() but without the
	 * fast-path RAID context parameters.  Returns nonzero if the
	 * data buffer could not be mapped.
	 */
	struct scsipi_periph *periph = xs->xs_periph;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int target;

	target = sc->sc_ld[periph->periph_target].ld_target_id;
	io->dev_handle = htole16(target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL starts right after the RAID context, in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(periph->periph_lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(target);

	/* map the data buffer and append the SGL after the context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_len == 0 || ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	/* byte-swap to little-endian (le16toh == htole16, both swap) */
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);

	return (0);
}
2444
#if 0
/*
 * NOTE(review): disabled code inherited from the OpenBSD driver for
 * exposing physical disks (system PD / JBOD pass-through) as SCSI
 * targets.  Still written against OpenBSD's scsi_link interface; kept
 * for reference until it is ported to scsipi.  Not compiled.
 */
void
mfii_pd_scsi_cmd(struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	// XXX timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->xs_control, XS_CTL_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	// XXX timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}

int
mfii_pd_scsi_probe(struct scsi_link *link)
{
	struct mfii_softc *sc = link->adapter_softc;
	struct mfi_pd_details mpd;
	union mfi_mbox mbox;
	int rv;

	if (link->lun > 0)
		return (0);

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = htole16(link->target);

	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
	    MFII_DATA_IN, true);
	if (rv != 0)
		return (EIO);

	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
		return (ENXIO);

	return (0);
}

int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct scsipi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
	case XS_CTL_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case XS_CTL_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->xs_control, XS_CTL_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap64->dm_nsegs;
	KASSERT(ccb->ccb_dma64);

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = le16toh(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
#endif
2565
static int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	/*
	 * Map the ccb's data buffer and build its scatter/gather list,
	 * starting at sglp inside the request frame.  If the segments do
	 * not all fit in the frame, the last in-frame entry is turned
	 * into a chain element pointing at the ccb's off-frame SGL
	 * region.  Returns nonzero if the dmamap load fails.
	 */
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap64;
	u_int space;
	int i;

	int error;

	/* nothing to transfer */
	if (ccb->ccb_len == 0)
		return (0);

	ccb->ccb_dma64 = true;
	error = bus_dmamap_load(sc->sc_dmat64, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* how many SGEs fit between sglp and the end of the frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is expressed in 16-byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* reached the chain element: continue in the off-frame SGL */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the last entry written */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat64, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2633
2634 static void
mfii_scsi_cmd_tmo(void * p)2635 mfii_scsi_cmd_tmo(void *p)
2636 {
2637 struct mfii_ccb *ccb = p;
2638 struct mfii_softc *sc = ccb->ccb_sc;
2639 bool start_abort;
2640
2641 printf("%s: cmd timeout ccb %p\n", DEVNAME(sc), p);
2642
2643 mutex_enter(&sc->sc_abort_mtx);
2644 start_abort = (SIMPLEQ_FIRST(&sc->sc_abort_list) == 0);
2645 SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2646 if (start_abort)
2647 workqueue_enqueue(sc->sc_abort_wq, &sc->sc_abort_work, NULL);
2648 mutex_exit(&sc->sc_abort_mtx);
2649 }
2650
static void
mfii_abort_task(struct work *wk, void *scp)
{
	/*
	 * Abort worker: atomically take over the whole abort list, then
	 * issue a task-management ABORT_TASK for each timed-out command
	 * (unless its disk has disappeared, in which case the xfer is
	 * failed immediately).
	 */
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	mutex_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mutex_exit(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsipi_xfer *xs = ccb->ccb_cookie;
		struct scsipi_periph *periph = xs->xs_periph;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		if (!sc->sc_ld[periph->periph_target].ld_present) {
			/* device is gone */
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			mfii_put_ccb(sc, ccb);
			continue;
		}

		/*
		 * NOTE(review): mfii_get_ccb() can return NULL (free list
		 * empty, or sc_running cleared during detach); that would
		 * crash in mfii_scrub_ccb() below.  TODO: confirm whether
		 * this can happen here and add handling if so.
		 */
		accb = mfii_get_ccb(sc);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, periph->periph_target, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		/* completion is handled by mfii_scsi_cmd_abort_done() */
		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2690
2691 static void
mfii_abort(struct mfii_softc * sc,struct mfii_ccb * accb,uint16_t dev_handle,uint16_t smid,uint8_t type,uint32_t flags)2692 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2693 uint16_t smid, uint8_t type, uint32_t flags)
2694 {
2695 struct mfii_task_mgmt *msg;
2696 struct mpii_msg_scsi_task_request *req;
2697
2698 msg = accb->ccb_request;
2699 req = &msg->mpii_request;
2700 req->dev_handle = dev_handle;
2701 req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2702 req->task_type = type;
2703 req->task_mid = htole16( smid);
2704 msg->flags = flags;
2705
2706 accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2707 accb->ccb_req.smid = le16toh(accb->ccb_smid);
2708 }
2709
2710 static void
mfii_scsi_cmd_abort_done(struct mfii_softc * sc,struct mfii_ccb * accb)2711 mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2712 {
2713 struct mfii_ccb *ccb = accb->ccb_cookie;
2714 struct scsipi_xfer *xs = ccb->ccb_cookie;
2715
2716 /* XXX check accb completion? */
2717
2718 mfii_put_ccb(sc, accb);
2719 printf("%s: cmd aborted ccb %p\n", DEVNAME(sc), ccb);
2720
2721 xs->error = XS_TIMEOUT;
2722 scsipi_done(xs);
2723 mfii_put_ccb(sc, ccb);
2724 }
2725
2726 static struct mfii_ccb *
mfii_get_ccb(struct mfii_softc * sc)2727 mfii_get_ccb(struct mfii_softc *sc)
2728 {
2729 struct mfii_ccb *ccb;
2730
2731 mutex_enter(&sc->sc_ccb_mtx);
2732 if (!sc->sc_running) {
2733 ccb = NULL;
2734 } else {
2735 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2736 if (ccb != NULL)
2737 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2738 }
2739 mutex_exit(&sc->sc_ccb_mtx);
2740 return (ccb);
2741 }
2742
2743 static void
mfii_scrub_ccb(struct mfii_ccb * ccb)2744 mfii_scrub_ccb(struct mfii_ccb *ccb)
2745 {
2746 ccb->ccb_cookie = NULL;
2747 ccb->ccb_done = NULL;
2748 ccb->ccb_flags = 0;
2749 ccb->ccb_data = NULL;
2750 ccb->ccb_direction = MFII_DATA_NONE;
2751 ccb->ccb_dma64 = false;
2752 ccb->ccb_len = 0;
2753 ccb->ccb_sgl_len = 0;
2754 memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2755 memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2756 memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2757 }
2758
2759 static void
mfii_put_ccb(struct mfii_softc * sc,struct mfii_ccb * ccb)2760 mfii_put_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb)
2761 {
2762 mutex_enter(&sc->sc_ccb_mtx);
2763 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
2764 mutex_exit(&sc->sc_ccb_mtx);
2765 }
2766
2767 static int
mfii_init_ccb(struct mfii_softc * sc)2768 mfii_init_ccb(struct mfii_softc *sc)
2769 {
2770 struct mfii_ccb *ccb;
2771 u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2772 u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2773 u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2774 u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2775 u_int i;
2776 int error;
2777
2778 sc->sc_ccb = malloc(sc->sc_max_cmds * sizeof(struct mfii_ccb),
2779 M_DEVBUF, M_WAITOK|M_ZERO);
2780
2781 for (i = 0; i < sc->sc_max_cmds; i++) {
2782 ccb = &sc->sc_ccb[i];
2783 ccb->ccb_sc = sc;
2784
2785 /* create a dma map for transfer */
2786 error = bus_dmamap_create(sc->sc_dmat,
2787 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2788 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap32);
2789 if (error) {
2790 printf("%s: cannot create ccb dmamap32 (%d)\n",
2791 DEVNAME(sc), error);
2792 goto destroy;
2793 }
2794 error = bus_dmamap_create(sc->sc_dmat64,
2795 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2796 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap64);
2797 if (error) {
2798 printf("%s: cannot create ccb dmamap64 (%d)\n",
2799 DEVNAME(sc), error);
2800 goto destroy32;
2801 }
2802
2803 /* select i + 1'th request. 0 is reserved for events */
2804 ccb->ccb_smid = i + 1;
2805 ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2806 ccb->ccb_request = request + ccb->ccb_request_offset;
2807 ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2808 ccb->ccb_request_offset;
2809
2810 /* select i'th MFI command frame */
2811 ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2812 ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2813 ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2814 ccb->ccb_mfi_offset;
2815
2816 /* select i'th sense */
2817 ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2818 ccb->ccb_sense = (struct mfi_sense *)(sense +
2819 ccb->ccb_sense_offset);
2820 ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2821 ccb->ccb_sense_offset;
2822
2823 /* select i'th sgl */
2824 ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2825 sc->sc_max_sgl * i;
2826 ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2827 ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2828 ccb->ccb_sgl_offset;
2829
2830 mutex_init(&ccb->ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
2831 cv_init(&ccb->ccb_cv, "mfiiexec");
2832
2833 /* add ccb to queue */
2834 mfii_put_ccb(sc, ccb);
2835 }
2836
2837 return (0);
2838
2839 destroy32:
2840 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2841 destroy:
2842 /* free dma maps and ccb memory */
2843 while ((ccb = mfii_get_ccb(sc)) != NULL) {
2844 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap32);
2845 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap64);
2846 }
2847
2848 free(sc->sc_ccb, M_DEVBUF);
2849
2850 return (1);
2851 }
2852
2853 #if NBIO > 0
2854 static int
mfii_ioctl(device_t dev,u_long cmd,void * addr)2855 mfii_ioctl(device_t dev, u_long cmd, void *addr)
2856 {
2857 struct mfii_softc *sc = device_private(dev);
2858 int error = 0;
2859
2860 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));
2861
2862 mutex_enter(&sc->sc_lock);
2863
2864 switch (cmd) {
2865 case BIOCINQ:
2866 DNPRINTF(MFII_D_IOCTL, "inq\n");
2867 error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
2868 break;
2869
2870 case BIOCVOL:
2871 DNPRINTF(MFII_D_IOCTL, "vol\n");
2872 error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
2873 break;
2874
2875 case BIOCDISK:
2876 DNPRINTF(MFII_D_IOCTL, "disk\n");
2877 error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
2878 break;
2879
2880 case BIOCALARM:
2881 DNPRINTF(MFII_D_IOCTL, "alarm\n");
2882 error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
2883 break;
2884
2885 case BIOCBLINK:
2886 DNPRINTF(MFII_D_IOCTL, "blink\n");
2887 error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
2888 break;
2889
2890 case BIOCSETSTATE:
2891 DNPRINTF(MFII_D_IOCTL, "setstate\n");
2892 error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
2893 break;
2894
2895 #if 0
2896 case BIOCPATROL:
2897 DNPRINTF(MFII_D_IOCTL, "patrol\n");
2898 error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
2899 break;
2900 #endif
2901
2902 default:
2903 DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
2904 error = ENOTTY;
2905 }
2906
2907 mutex_exit(&sc->sc_lock);
2908
2909 return (error);
2910 }
2911
static int
mfii_bio_getitall(struct mfii_softc *sc)
{
	/*
	 * Refresh the cached controller state used by the bio(4)
	 * handlers: controller info, full RAID config (sc_cfg), logical
	 * disk list (sc_ld_list) and per-LD details (sc_ld_details),
	 * plus the total count of configured physical disks (sc_no_pd).
	 * Returns 0 on success, EINVAL on any failure.
	 */
	int i, d, rv = EINVAL;
	size_t size;
	union mfi_mbox mbox;
	struct mfi_conf *cfg = NULL;
	struct mfi_ld_details *ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* the firmware reports the full config size in the header */
	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
	    MFII_DATA_IN, false)) {
		free(cfg, M_DEVBUF);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF);
	sc->sc_cfg = cfg;

	/* get all ld info */
	memset(&mbox, 0, sizeof(mbox));
	/* mbox.b[0] = 1 requests the extended (256 volume) list */
	if (sc->sc_max256vd)
		mbox.b[0] = 1;
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, &mbox, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), MFII_DATA_IN, false))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* LD count changed: reallocate the details array */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox,
		    &sc->sc_ld_details[i], size, MFII_DATA_IN, false))
			goto done;

		/* drives per span * spans = drives in this volume */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2995
2996 static int
mfii_ioctl_inq(struct mfii_softc * sc,struct bioc_inq * bi)2997 mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2998 {
2999 int rv = EINVAL;
3000 struct mfi_conf *cfg = NULL;
3001
3002 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
3003
3004 if (mfii_bio_getitall(sc)) {
3005 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3006 DEVNAME(sc));
3007 goto done;
3008 }
3009
3010 /* count unused disks as volumes */
3011 if (sc->sc_cfg == NULL)
3012 goto done;
3013 cfg = sc->sc_cfg;
3014
3015 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
3016 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
3017 #if notyet
3018 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
3019 (bi->bi_nodisk - sc->sc_no_pd);
3020 #endif
3021 /* tell bio who we are */
3022 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3023
3024 rv = 0;
3025 done:
3026 return (rv);
3027 }
3028
static int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	/*
	 * BIOCVOL: report name, state, progress, RAID level and geometry
	 * for one logical volume.  Volume ids beyond the LD list are
	 * hot spares / unused disks and are handled by mfii_bio_hs().
	 */
	int i, per, rv = EINVAL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	strlcpy(bv->bv_dev, sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_name,
	    sizeof(bv->bv_dev));

	/* map MFI LD state onto bio volume status */
	switch (sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check in progress; mp_progress is 0..0xffff */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background initialization in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

#if 0
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;
#endif

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned volumes report as RAID x0 (10, 50, 60) */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk =
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */
	bv->bv_stripe_size =
	    (512 << sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_stripe_size)
	    / 1024; /* in KB */

	rv = 0;
done:
	return (rv);
}
3127
/*
 * BIOCDISK ioctl backend: translate a (volume id, disk id) pair from
 * userland into the physical drive backing that slot of the logical
 * volume, and fill in *bd with its channel/target, status, size and
 * vendor string.  Volume ids at or past the configured logical drive
 * count are treated as hotspare queries and handed to mfii_bio_hs().
 * Returns 0 on success, EINVAL on failure.
 */
static int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf *cfg;
	struct mfi_array *ar;
	struct mfi_ld_cfg *ld;
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	struct scsipi_inquiry_data *inqbuf;
	char vend[8+16+4+1], *vendp;
	int i, rv = EINVAL;
	int arr, vol, disk, span;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* 0xffff in the array slot means no drive is present there */
	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    MFII_DATA_IN, false))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false))
				continue;

			/* an unconfigured drive can be used for rebuild */
			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no candidate found: report the failed slot as-is */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    MFII_DATA_IN, false)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/*
	 * Build the vendor string from the raw inquiry data; the
	 * vend buffer is sized 8+16+4+1 so the single copy picks up
	 * the contiguous vendor/product/revision fields at once.
	 */
	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

#if 0
	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}
#endif

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);

	return (rv);
}
3281
3282 static int
mfii_ioctl_alarm(struct mfii_softc * sc,struct bioc_alarm * ba)3283 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3284 {
3285 uint32_t opc;
3286 int rv = 0;
3287 int8_t ret;
3288 mfii_direction_t dir = MFII_DATA_NONE;
3289
3290 switch (ba->ba_opcode) {
3291 case BIOC_SADISABLE:
3292 opc = MR_DCMD_SPEAKER_DISABLE;
3293 break;
3294
3295 case BIOC_SAENABLE:
3296 opc = MR_DCMD_SPEAKER_ENABLE;
3297 break;
3298
3299 case BIOC_SASILENCE:
3300 opc = MR_DCMD_SPEAKER_SILENCE;
3301 break;
3302
3303 case BIOC_GASTATUS:
3304 opc = MR_DCMD_SPEAKER_GET;
3305 dir = MFII_DATA_IN;
3306 break;
3307
3308 case BIOC_SATEST:
3309 opc = MR_DCMD_SPEAKER_TEST;
3310 break;
3311
3312 default:
3313 DNPRINTF(MFII_D_IOCTL,
3314 "%s: mfii_ioctl_alarm biocalarm invalid opcode %x\n",
3315 DEVNAME(sc), ba->ba_opcode);
3316 return (EINVAL);
3317 }
3318
3319 if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), dir, false))
3320 rv = EINVAL;
3321 else
3322 if (ba->ba_opcode == BIOC_GASTATUS)
3323 ba->ba_status = ret;
3324 else
3325 ba->ba_status = 0;
3326
3327 return (rv);
3328 }
3329
3330 static int
mfii_ioctl_blink(struct mfii_softc * sc,struct bioc_blink * bb)3331 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3332 {
3333 int i, found, rv = EINVAL;
3334 union mfi_mbox mbox;
3335 uint32_t cmd;
3336 struct mfi_pd_list *pd;
3337
3338 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3339 bb->bb_status);
3340
3341 /* channel 0 means not in an enclosure so can't be blinked */
3342 if (bb->bb_channel == 0)
3343 return (EINVAL);
3344
3345 pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3346
3347 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd),
3348 MFII_DATA_IN, false))
3349 goto done;
3350
3351 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3352 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3353 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3354 found = 1;
3355 break;
3356 }
3357
3358 if (!found)
3359 goto done;
3360
3361 memset(&mbox, 0, sizeof(mbox));
3362 mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3363
3364 switch (bb->bb_status) {
3365 case BIOC_SBUNBLINK:
3366 cmd = MR_DCMD_PD_UNBLINK;
3367 break;
3368
3369 case BIOC_SBBLINK:
3370 cmd = MR_DCMD_PD_BLINK;
3371 break;
3372
3373 case BIOC_SBALARM:
3374 default:
3375 DNPRINTF(MFII_D_IOCTL,
3376 "%s: mfii_ioctl_blink biocblink invalid opcode %x\n",
3377 DEVNAME(sc), bb->bb_status);
3378 goto done;
3379 }
3380
3381
3382 if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, MFII_DATA_NONE, false))
3383 goto done;
3384
3385 rv = 0;
3386 done:
3387 free(pd, M_DEVBUF);
3388 return (rv);
3389 }
3390
/*
 * Prepare a physical drive for reuse in a rebuild: if the firmware
 * marked it UNCONFIG_BAD, force it back to UNCONFIG_GOOD, and if it
 * carries foreign (previously imported) configuration metadata, clear
 * it.  Returns 0 when the drive ends up unconfigured-good with no
 * foreign state, a mfii_mgmt() error or ENXIO otherwise.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details *pd;
	union mfi_mbox mbox;
	int rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/* flip the drive to unconfigured-good */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
		    MFII_DATA_NONE, false);
		if (rv != 0)
			goto done;
	}

	/* re-read the drive details after the possible state change */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		/* scan for, then drop, any foreign configuration */
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL,
		    fsi, sizeof(*fsi), MFII_DATA_IN, false);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL,
			    NULL, 0, MFII_DATA_NONE, false);
			if (rv != 0)
				goto done;
		}
	}

	/* verify the drive really ended up in the desired state */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF);
	free(pd, M_DEVBUF);

	return (rv);
}
3457
3458 static int
mfii_makespare(struct mfii_softc * sc,uint16_t pd_id)3459 mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
3460 {
3461 struct mfi_hotspare *hs;
3462 struct mfi_pd_details *pd;
3463 union mfi_mbox mbox;
3464 size_t size;
3465 int rv = EINVAL;
3466
3467 /* we really could skip and expect that inq took care of it */
3468 if (mfii_bio_getitall(sc)) {
3469 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3470 DEVNAME(sc));
3471 return (rv);
3472 }
3473 size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;
3474
3475 hs = malloc(size, M_DEVBUF, M_WAITOK);
3476 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3477
3478 memset(&mbox, 0, sizeof mbox);
3479 mbox.s[0] = pd_id;
3480 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3481 MFII_DATA_IN, false);
3482 if (rv != 0)
3483 goto done;
3484
3485 memset(hs, 0, size);
3486 hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
3487 hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
3488 rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size,
3489 MFII_DATA_OUT, false);
3490
3491 done:
3492 free(hs, M_DEVBUF);
3493 free(pd, M_DEVBUF);
3494
3495 return (rv);
3496 }
3497
/*
 * BIOCSETSTATE ioctl backend: change the state of the physical drive
 * at (bs_channel, bs_target) -- online, offline, hotspare or rebuild.
 * For rebuild, a drive that is not already offline is first made
 * unconfigured-good and turned into a spare via mfii_makegood() and
 * mfii_makespare().  Returns 0 on success, EINVAL or a mfii_mgmt()
 * error otherwise.
 */
static int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details *pd;
	struct mfi_pd_list *pl;
	int i, found, rv = EINVAL;
	union mfi_mbox mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	/* locate the drive by enclosure/slot in the physical drive list */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
	    MFII_DATA_IN, false))
		goto done;

	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    MFII_DATA_IN, false))
		goto done;

	/* SET_STATE takes the drive id, its sequence and the new state */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* refresh state/sequence after the changes above */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox,
			    pd, sizeof(*pd), MFII_DATA_IN, false);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0,
	    MFII_DATA_NONE, false);
done:
	free(pd, M_DEVBUF);
	free(pl, M_DEVBUF);
	return (rv);
}
3592
#if 0
/*
 * BIOCPATROL ioctl backend (patrol read control); currently compiled
 * out and not reachable from the ioctl dispatcher.
 *
 * NOTE(review): MR_DCMD_PR_START/STOP are issued with a NULL buffer
 * and length 0 but direction MFII_DATA_IN; MFII_DATA_NONE looks more
 * appropriate -- confirm before enabling this code.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t opc;
	int rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status status;
	uint32_t time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, MFII_DATA_IN, false))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* update the op mode in the properties just fetched */
		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					prop.next_exec =
					    time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_OUT, false))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop),
		    MFII_DATA_IN, false))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time),
		    MFII_DATA_IN, false))
			return (EINVAL);

		/* translate firmware op mode to bio(4) values */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		/* translate firmware patrol state to bio(4) values */
		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL,
		    "%s: mfii_ioctl_patrol biocpatrol invalid opcode %x\n",
		    DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
#endif
3731
3732 static int
mfii_bio_hs(struct mfii_softc * sc,int volid,int type,void * bio_hs)3733 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3734 {
3735 struct mfi_conf *cfg;
3736 struct mfi_hotspare *hs;
3737 struct mfi_pd_details *pd;
3738 struct bioc_disk *sdhs;
3739 struct bioc_vol *vdhs;
3740 struct scsipi_inquiry_data *inqbuf;
3741 char vend[8+16+4+1], *vendp;
3742 int i, rv = EINVAL;
3743 uint32_t size;
3744 union mfi_mbox mbox;
3745
3746 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3747
3748 if (!bio_hs)
3749 return (EINVAL);
3750
3751 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3752
3753 /* send single element command to retrieve size for full structure */
3754 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3755 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
3756 MFII_DATA_IN, false))
3757 goto freeme;
3758
3759 size = cfg->mfc_size;
3760 free(cfg, M_DEVBUF);
3761
3762 /* memory for read config */
3763 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3764 if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size,
3765 MFII_DATA_IN, false))
3766 goto freeme;
3767
3768 /* calculate offset to hs structure */
3769 hs = (struct mfi_hotspare *)(
3770 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3771 cfg->mfc_array_size * cfg->mfc_no_array +
3772 cfg->mfc_ld_size * cfg->mfc_no_ld);
3773
3774 if (volid < cfg->mfc_no_ld)
3775 goto freeme; /* not a hotspare */
3776
3777 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3778 goto freeme; /* not a hotspare */
3779
3780 /* offset into hotspare structure */
3781 i = volid - cfg->mfc_no_ld;
3782
3783 DNPRINTF(MFII_D_IOCTL,
3784 "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3785 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3786 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3787
3788 /* get pd fields */
3789 memset(&mbox, 0, sizeof(mbox));
3790 mbox.s[0] = hs[i].mhs_pd.mfp_id;
3791 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3792 MFII_DATA_IN, false)) {
3793 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3794 DEVNAME(sc));
3795 goto freeme;
3796 }
3797
3798 switch (type) {
3799 case MFI_MGMT_VD:
3800 vdhs = bio_hs;
3801 vdhs->bv_status = BIOC_SVONLINE;
3802 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3803 vdhs->bv_level = -1; /* hotspare */
3804 vdhs->bv_nodisk = 1;
3805 break;
3806
3807 case MFI_MGMT_SD:
3808 sdhs = bio_hs;
3809 sdhs->bd_status = BIOC_SDHOTSPARE;
3810 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3811 sdhs->bd_channel = pd->mpd_enc_idx;
3812 sdhs->bd_target = pd->mpd_enc_slot;
3813 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
3814 vendp = inqbuf->vendor;
3815 memcpy(vend, vendp, sizeof vend - 1);
3816 vend[sizeof vend - 1] = '\0';
3817 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3818 break;
3819
3820 default:
3821 goto freeme;
3822 }
3823
3824 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3825 rv = 0;
3826 freeme:
3827 free(pd, M_DEVBUF);
3828 free(cfg, M_DEVBUF);
3829
3830 return (rv);
3831 }
3832
3833 #endif /* NBIO > 0 */
3834
3835 #define MFI_BBU_SENSORS 4
3836
3837 static void
mfii_bbu(struct mfii_softc * sc,envsys_data_t * edata)3838 mfii_bbu(struct mfii_softc *sc, envsys_data_t *edata)
3839 {
3840 struct mfi_bbu_status bbu;
3841 u_int32_t status;
3842 u_int32_t mask;
3843 u_int32_t soh_bad;
3844 int rv;
3845
3846 mutex_enter(&sc->sc_lock);
3847 rv = mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3848 sizeof(bbu), MFII_DATA_IN, false);
3849 mutex_exit(&sc->sc_lock);
3850 if (rv != 0) {
3851 edata->state = ENVSYS_SINVALID;
3852 edata->value_cur = 0;
3853 return;
3854 }
3855
3856 switch (bbu.battery_type) {
3857 case MFI_BBU_TYPE_IBBU:
3858 case MFI_BBU_TYPE_IBBU09:
3859 case MFI_BBU_TYPE_CVPM02:
3860 mask = MFI_BBU_STATE_BAD_IBBU;
3861 soh_bad = 0;
3862 break;
3863 case MFI_BBU_TYPE_BBU:
3864 mask = MFI_BBU_STATE_BAD_BBU;
3865 soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3866 break;
3867
3868 case MFI_BBU_TYPE_NONE:
3869 default:
3870 edata->state = ENVSYS_SCRITICAL;
3871 edata->value_cur = 0;
3872 return;
3873 }
3874
3875 status = le32toh(bbu.fw_status) & mask;
3876 switch (edata->sensor) {
3877 case 0:
3878 edata->value_cur = (status || soh_bad) ? 0 : 1;
3879 edata->state =
3880 edata->value_cur ? ENVSYS_SVALID : ENVSYS_SCRITICAL;
3881 return;
3882 case 1:
3883 edata->value_cur = le16toh(bbu.voltage) * 1000;
3884 edata->state = ENVSYS_SVALID;
3885 return;
3886 case 2:
3887 edata->value_cur = (int16_t)le16toh(bbu.current) * 1000;
3888 edata->state = ENVSYS_SVALID;
3889 return;
3890 case 3:
3891 edata->value_cur =
3892 le16toh(bbu.temperature) * 1000000 + 273150000;
3893 edata->state = ENVSYS_SVALID;
3894 return;
3895 }
3896 }
3897
3898 static void
mfii_refresh_ld_sensor(struct mfii_softc * sc,envsys_data_t * edata)3899 mfii_refresh_ld_sensor(struct mfii_softc *sc, envsys_data_t *edata)
3900 {
3901 struct bioc_vol bv;
3902 int error;
3903
3904 memset(&bv, 0, sizeof(bv));
3905 bv.bv_volid = edata->sensor - MFI_BBU_SENSORS;
3906 mutex_enter(&sc->sc_lock);
3907 error = mfii_ioctl_vol(sc, &bv);
3908 mutex_exit(&sc->sc_lock);
3909 if (error)
3910 bv.bv_status = BIOC_SVINVALID;
3911 bio_vol_to_envsys(edata, &bv);
3912 }
3913
3914 static void
mfii_init_ld_sensor(struct mfii_softc * sc,envsys_data_t * sensor,int i)3915 mfii_init_ld_sensor(struct mfii_softc *sc, envsys_data_t *sensor, int i)
3916 {
3917 sensor->units = ENVSYS_DRIVE;
3918 sensor->state = ENVSYS_SINVALID;
3919 sensor->value_cur = ENVSYS_DRIVE_EMPTY;
3920 /* Enable monitoring for drive state changes */
3921 sensor->flags |= ENVSYS_FMONSTCHANGED;
3922 snprintf(sensor->desc, sizeof(sensor->desc), "%s:%d", DEVNAME(sc), i);
3923 }
3924
3925 static void
mfii_attach_sensor(struct mfii_softc * sc,envsys_data_t * s)3926 mfii_attach_sensor(struct mfii_softc *sc, envsys_data_t *s)
3927 {
3928 if (sysmon_envsys_sensor_attach(sc->sc_sme, s))
3929 aprint_error_dev(sc->sc_dev,
3930 "failed to attach sensor %s\n", s->desc);
3931 }
3932
3933 static int
mfii_create_sensors(struct mfii_softc * sc)3934 mfii_create_sensors(struct mfii_softc *sc)
3935 {
3936 int i, rv;
3937 const int nsensors = MFI_BBU_SENSORS + MFII_MAX_LD_EXT;
3938
3939 sc->sc_sme = sysmon_envsys_create();
3940 sc->sc_sensors = malloc(sizeof(envsys_data_t) * nsensors,
3941 M_DEVBUF, M_WAITOK | M_ZERO);
3942
3943 /* BBU */
3944 sc->sc_sensors[0].units = ENVSYS_INDICATOR;
3945 sc->sc_sensors[0].state = ENVSYS_SINVALID;
3946 sc->sc_sensors[0].value_cur = 0;
3947 sc->sc_sensors[1].units = ENVSYS_SVOLTS_DC;
3948 sc->sc_sensors[1].state = ENVSYS_SINVALID;
3949 sc->sc_sensors[1].value_cur = 0;
3950 sc->sc_sensors[2].units = ENVSYS_SAMPS;
3951 sc->sc_sensors[2].state = ENVSYS_SINVALID;
3952 sc->sc_sensors[2].value_cur = 0;
3953 sc->sc_sensors[3].units = ENVSYS_STEMP;
3954 sc->sc_sensors[3].state = ENVSYS_SINVALID;
3955 sc->sc_sensors[3].value_cur = 0;
3956
3957 if (ISSET(le32toh(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3958 sc->sc_bbuok = true;
3959 sc->sc_sensors[0].flags |= ENVSYS_FMONCRITICAL;
3960 snprintf(sc->sc_sensors[0].desc, sizeof(sc->sc_sensors[0].desc),
3961 "%s BBU state", DEVNAME(sc));
3962 snprintf(sc->sc_sensors[1].desc, sizeof(sc->sc_sensors[1].desc),
3963 "%s BBU voltage", DEVNAME(sc));
3964 snprintf(sc->sc_sensors[2].desc, sizeof(sc->sc_sensors[2].desc),
3965 "%s BBU current", DEVNAME(sc));
3966 snprintf(sc->sc_sensors[3].desc, sizeof(sc->sc_sensors[3].desc),
3967 "%s BBU temperature", DEVNAME(sc));
3968 for (i = 0; i < MFI_BBU_SENSORS; i++) {
3969 mfii_attach_sensor(sc, &sc->sc_sensors[i]);
3970 }
3971 }
3972
3973 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3974 mfii_init_ld_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS], i);
3975 mfii_attach_sensor(sc, &sc->sc_sensors[i + MFI_BBU_SENSORS]);
3976 }
3977
3978 sc->sc_sme->sme_name = DEVNAME(sc);
3979 sc->sc_sme->sme_cookie = sc;
3980 sc->sc_sme->sme_refresh = mfii_refresh_sensor;
3981 rv = sysmon_envsys_register(sc->sc_sme);
3982 if (rv) {
3983 aprint_error_dev(sc->sc_dev,
3984 "unable to register with sysmon (rv = %d)\n", rv);
3985 sysmon_envsys_destroy(sc->sc_sme);
3986 sc->sc_sme = NULL;
3987 }
3988 return rv;
3989
3990 }
3991
3992 static int
mfii_destroy_sensors(struct mfii_softc * sc)3993 mfii_destroy_sensors(struct mfii_softc *sc)
3994 {
3995 if (sc->sc_sme == NULL)
3996 return 0;
3997 sysmon_envsys_unregister(sc->sc_sme);
3998 sc->sc_sme = NULL;
3999 free(sc->sc_sensors, M_DEVBUF);
4000 return 0;
4001 }
4002
4003 static void
mfii_refresh_sensor(struct sysmon_envsys * sme,envsys_data_t * edata)4004 mfii_refresh_sensor(struct sysmon_envsys *sme, envsys_data_t *edata)
4005 {
4006 struct mfii_softc *sc = sme->sme_cookie;
4007
4008 if (edata->sensor >= MFI_BBU_SENSORS + MFII_MAX_LD_EXT)
4009 return;
4010
4011 if (edata->sensor < MFI_BBU_SENSORS) {
4012 if (sc->sc_bbuok)
4013 mfii_bbu(sc, edata);
4014 } else {
4015 mfii_refresh_ld_sensor(sc, edata);
4016 }
4017 }
4018