xref: /openbsd/sys/dev/pci/mfii.c (revision 0f9891f1)
1 /* $OpenBSD: mfii.c,v 1.91 2024/05/24 06:02:58 jsg Exp $ */
2 
3 /*
4  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bio.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/dkio.h>
26 #include <sys/pool.h>
27 #include <sys/task.h>
28 #include <sys/atomic.h>
29 #include <sys/sensors.h>
30 #include <sys/rwlock.h>
31 #include <sys/smr.h>
32 
33 #include <dev/biovar.h>
34 #include <dev/pci/pcidevs.h>
35 #include <dev/pci/pcivar.h>
36 
37 #include <machine/bus.h>
38 
39 #include <scsi/scsi_all.h>
40 #include <scsi/scsi_disk.h>
41 #include <scsi/scsiconf.h>
42 
43 #include <dev/ic/mfireg.h>
44 #include <dev/pci/mpiireg.h>
45 
46 #define	MFII_BAR		0x14
47 #define MFII_BAR_35		0x10
48 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
49 
50 #define MFII_OSTS_INTR_VALID	0x00000009
51 #define MFII_RPI		0x6c /* reply post host index */
52 #define MFII_OSP2		0xb4 /* outbound scratch pad 2 */
53 #define MFII_OSP3		0xb8 /* outbound scratch pad 3 */
54 
55 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
56 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
57 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
58 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
59 #define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)
60 
61 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
62 
63 #define MFII_FUNCTION_PASSTHRU_IO			(0xf0)
64 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
65 
66 #define MFII_MAX_CHAIN_UNIT	0x00400000
67 #define MFII_MAX_CHAIN_MASK	0x000003E0
68 #define MFII_MAX_CHAIN_SHIFT	5
69 
70 #define MFII_256K_IO		128
71 #define MFII_1MB_IO		(MFII_256K_IO * 4)
72 
73 #define MFII_CHAIN_FRAME_MIN	1024
74 
/*
 * 64-bit request descriptor posted to the controller to kick off a
 * command; flags carries the MFII_REQ_TYPE_* queue/descriptor type and
 * smid selects the request frame slot the descriptor refers to.
 */
struct mfii_request_descr {
	u_int8_t	flags;
	u_int8_t	msix_index;
	u_int16_t	smid;

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
83 
84 #define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
85 #define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)
86 
/*
 * per-command RAID context appended to the MPII scsi io frame for
 * LD/syspd requests; type_nseg and the lock/routing flags are filled
 * from the per-chip mfii_iop parameters, status/ex_status are written
 * back by the firmware on completion.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* firmware extended status */
	u_int8_t	status;		/* firmware completion status */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
117 
/*
 * IEEE-style scatter/gather element; sg_flags takes the MFII_SGE_*
 * bits below (chain element, end of list, address type).
 */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;
125 
126 #define MFII_SGE_ADDR_MASK		(0x03)
127 #define MFII_SGE_ADDR_SYSTEM		(0x00)
128 #define MFII_SGE_ADDR_IOCDDR		(0x01)
129 #define MFII_SGE_ADDR_IOCPLB		(0x02)
130 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
131 #define MFII_SGE_END_OF_LIST		(0x40)
132 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
133 
134 #define MFII_REQUEST_SIZE	256
135 
136 #define MR_DCMD_LD_MAP_GET_INFO			0x0300e101
137 
138 #define MFII_MAX_ROW		32
139 #define MFII_MAX_ARRAY		128
140 
/* per-array row -> physical disk index map from the firmware LD map */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;
144 
/* firmware device handle entry for one physical disk slot */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;	/* handle used for fast path io */
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;
151 
/*
 * layout of the MR_DCMD_LD_MAP_GET_INFO response; we only consume the
 * pd timeout and the per-pd device handles (see
 * mfii_dev_handles_update()).
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
163 
/*
 * request/reply pair for MPII task management (used by the abort
 * path); both halves are padded to 128 bytes.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
180 
/*
 * a chunk of bus dma memory: map, backing segment, size and kva.
 * accessed via the MFII_DMA_* macros below.
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
187 #define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
188 #define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
189 #define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
190 #define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
191 
192 struct mfii_softc;
193 
/*
 * per-command control block. each ccb owns a slice of the sc_requests,
 * sc_mfi, sc_sense and sc_sgl dmamem regions; the *_dva members are the
 * device addresses of those slices and the *_offset members their
 * offsets for partial bus_dmamap_sync calls.
 */
struct mfii_ccb {
	void			*ccb_request;
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	void			*ccb_mfi;
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;

	bus_dmamap_t		ccb_dmamap;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	int			ccb_direction;
#define MFII_DATA_NONE			0
#define MFII_DATA_IN			1
#define MFII_DATA_OUT			2

	/* completion callback and its argument */
	void			*ccb_cookie;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* slot id posted to the hw */
	u_int			ccb_refcnt;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
235 SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
236 
/*
 * snapshot of physical disk device handles, published via SMR so
 * readers (mfii_dev_handle()) never take a lock; superseded snapshots
 * are freed from the smr callback.
 */
struct mfii_pd_dev_handles {
	struct smr_entry	pd_smr;
	uint16_t		pd_handles[MFI_MAX_PD];
};
241 
/* state for the pass-through scsibus exposing physical disks */
struct mfii_pd_softc {
	struct scsibus_softc	*pd_scsibus;
	struct mfii_pd_dev_handles *pd_dev_handles;	/* SMR-protected */
	uint8_t			pd_timeout;
};
247 
/*
 * per-controller-generation parameters (Thunderbolt/Invader/Ventura);
 * selected by PCI id in mfii_devices[] and consulted when building
 * requests and SGLs.
 */
struct mfii_iop {
	int bar;
	int num_sge_loc;
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;
	u_int8_t sge_flag_eol;
};
259 
/* per-controller driver state */
struct mfii_softc {
	struct device		sc_dev;
	const struct mfii_iop	*sc_iop;	/* chip generation params */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;		/* interrupt handle */

	struct mutex		sc_ccb_mtx;	/* protects sc_ccb_freeq */
	struct mutex		sc_post_mtx;	/* serializes request posting */

	u_int			sc_max_fw_cmds;	/* firmware command limit */
	u_int			sc_max_cmds;	/* commands we actually use */
	u_int			sc_max_sgl;	/* sge entries per command */

	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	struct mutex		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	struct mfii_dmamem	*sc_requests;	/* MPII request frames */
	struct mfii_dmamem	*sc_mfi;	/* MFI passthru frames */
	struct mfii_dmamem	*sc_sense;	/* sense / dcmd buffers */
	struct mfii_dmamem	*sc_sgl;	/* chained SGL space */

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	struct mfii_ccb		*sc_aen_ccb;	/* outstanding AEN command */
	struct task		sc_aen_task;

	struct mutex		sc_abort_mtx;	/* protects sc_abort_list */
	struct mfii_ccb_list	sc_abort_list;
	struct task		sc_abort_task;

	struct scsibus_softc	*sc_scsibus;
	struct mfii_pd_softc	*sc_pd;		/* physical disk bus state */
	struct scsi_iopool	sc_iopool;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD];

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	struct rwlock		sc_lock;

	/* sensors */
	struct ksensordev	sc_sensordev;
	struct ksensor		*sc_bbu;
	struct ksensor		*sc_bbu_status;
	struct ksensor		*sc_sensors;
};
333 
334 #ifdef MFII_DEBUG
335 #define DPRINTF(x...)		do { if (mfii_debug) printf(x); } while(0)
336 #define DNPRINTF(n,x...)	do { if (mfii_debug & n) printf(x); } while(0)
337 #define	MFII_D_CMD		0x0001
338 #define	MFII_D_INTR		0x0002
339 #define	MFII_D_MISC		0x0004
340 #define	MFII_D_DMA		0x0008
341 #define	MFII_D_IOCTL		0x0010
342 #define	MFII_D_RW		0x0020
343 #define	MFII_D_MEM		0x0040
344 #define	MFII_D_CCB		0x0080
345 uint32_t	mfii_debug = 0
346 /*		    | MFII_D_CMD */
347 /*		    | MFII_D_INTR */
348 		    | MFII_D_MISC
349 /*		    | MFII_D_DMA */
350 /*		    | MFII_D_IOCTL */
351 /*		    | MFII_D_RW */
352 /*		    | MFII_D_MEM */
353 /*		    | MFII_D_CCB */
354 		;
355 #else
356 #define DPRINTF(x...)
357 #define DNPRINTF(n,x...)
358 #endif
359 
360 int		mfii_match(struct device *, void *, void *);
361 void		mfii_attach(struct device *, struct device *, void *);
362 int		mfii_detach(struct device *, int);
363 int		mfii_activate(struct device *, int);
364 
/* autoconf glue: match/attach/detach/activate entry points */
const struct cfattach mfii_ca = {
	sizeof(struct mfii_softc),
	mfii_match,
	mfii_attach,
	mfii_detach,
	mfii_activate,
};
372 
/* driver definition: name and device class */
struct cfdriver mfii_cd = {
	NULL,
	"mfii",
	DV_DULL
};
378 
379 void		mfii_scsi_cmd(struct scsi_xfer *);
380 void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
381 int		mfii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
382 int		mfii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
383 
/* adapter entry points for the logical drive scsibus */
const struct scsi_adapter mfii_switch = {
	mfii_scsi_cmd, NULL, NULL, NULL, mfii_scsi_ioctl
};
387 
388 void		mfii_pd_scsi_cmd(struct scsi_xfer *);
389 int		mfii_pd_scsi_probe(struct scsi_link *);
390 
/* adapter entry points for the physical disk pass-through scsibus */
const struct scsi_adapter mfii_pd_switch = {
	mfii_pd_scsi_cmd, NULL, mfii_pd_scsi_probe, NULL, NULL,
};
394 
395 #define DEVNAME(_sc)		((_sc)->sc_dev.dv_xname)
396 
397 u_int32_t		mfii_read(struct mfii_softc *, bus_size_t);
398 void			mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
399 
400 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
401 void			mfii_dmamem_free(struct mfii_softc *,
402 			    struct mfii_dmamem *);
403 
404 void *			mfii_get_ccb(void *);
405 void			mfii_put_ccb(void *, void *);
406 int			mfii_init_ccb(struct mfii_softc *);
407 void			mfii_scrub_ccb(struct mfii_ccb *);
408 
409 int			mfii_reset_hard(struct mfii_softc *);
410 int			mfii_transition_firmware(struct mfii_softc *);
411 int			mfii_initialise_firmware(struct mfii_softc *);
412 int			mfii_get_info(struct mfii_softc *);
413 int			mfii_syspd(struct mfii_softc *);
414 
415 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
416 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
417 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
418 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
419 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
420 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
421 int			mfii_my_intr(struct mfii_softc *);
422 int			mfii_intr(void *);
423 void			mfii_postq(struct mfii_softc *);
424 
425 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
426 			    void *, int);
427 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
428 			    void *, int);
429 
430 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
431 
432 int			mfii_mgmt(struct mfii_softc *, uint32_t,
433 			    const union mfi_mbox *, void *, size_t, int);
434 int			mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
435 			    uint32_t, const union mfi_mbox *, void *, size_t,
436 			    int);
437 void			mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
438 
439 int			mfii_scsi_cmd_io(struct mfii_softc *,
440 			    struct scsi_xfer *);
441 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
442 			    struct scsi_xfer *);
443 int			mfii_pd_scsi_cmd_cdb(struct mfii_softc *,
444 			    struct scsi_xfer *);
445 void			mfii_scsi_cmd_tmo(void *);
446 
447 int			mfii_dev_handles_update(struct mfii_softc *sc);
448 void			mfii_dev_handles_smr(void *pd_arg);
449 
450 void			mfii_abort_task(void *);
451 void			mfii_abort(struct mfii_softc *, struct mfii_ccb *,
452 			    uint16_t, uint16_t, uint8_t, uint32_t);
453 void			mfii_scsi_cmd_abort_done(struct mfii_softc *,
454 			    struct mfii_ccb *);
455 
456 int			mfii_aen_register(struct mfii_softc *);
457 void			mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
458 			    struct mfii_dmamem *, uint32_t);
459 void			mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
460 void			mfii_aen(void *);
461 void			mfii_aen_unregister(struct mfii_softc *);
462 
463 void			mfii_aen_pd_insert(struct mfii_softc *,
464 			    const struct mfi_evtarg_pd_address *);
465 void			mfii_aen_pd_remove(struct mfii_softc *,
466 			    const struct mfi_evtarg_pd_address *);
467 void			mfii_aen_pd_state_change(struct mfii_softc *,
468 			    const struct mfi_evtarg_pd_state *);
469 void			mfii_aen_ld_update(struct mfii_softc *);
470 
471 #if NBIO > 0
472 int		mfii_ioctl(struct device *, u_long, caddr_t);
473 int		mfii_bio_getitall(struct mfii_softc *);
474 int		mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
475 int		mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
476 int		mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
477 int		mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
478 int		mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
479 int		mfii_ioctl_setstate(struct mfii_softc *,
480 		    struct bioc_setstate *);
481 int		mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *);
482 int		mfii_bio_hs(struct mfii_softc *, int, int, void *);
483 
484 #ifndef SMALL_KERNEL
/*
 * human readable names for the battery backup unit status bits,
 * indexed by bit position in the firmware bbu status word.
 */
static const char *mfi_bbu_indicators[] = {
	"pack missing",
	"voltage low",
	"temp high",
	"charge active",
	"discharge active",
	"learn cycle req'd",
	"learn cycle active",
	"learn cycle failed",
	"learn cycle timeout",
	"I2C errors",
	"replace pack",
	"low capacity",
	"periodic learn req'd"
};
500 
501 void		mfii_init_ld_sensor(struct mfii_softc *, int);
502 void		mfii_refresh_ld_sensor(struct mfii_softc *, int);
503 int		mfii_create_sensors(struct mfii_softc *);
504 void		mfii_refresh_sensors(void *);
505 void		mfii_bbu(struct mfii_softc *);
506 #endif /* SMALL_KERNEL */
507 #endif /* NBIO > 0 */
508 
509 /*
510  * mfii boards support asynchronous (and non-polled) completion of
511  * dcmds by proxying them through a passthru mpii command that points
512  * at a dcmd frame. since the passthru command is submitted like
513  * the scsi commands using an SMID in the request descriptor,
514  * ccb_request memory * must contain the passthru command because
515  * that is what the SMID refers to. this means ccb_request cannot
516  * contain the dcmd. rather than allocating separate dma memory to
517  * hold the dcmd, we reuse the sense memory buffer for it.
518  */
519 
520 void			mfii_dcmd_start(struct mfii_softc *,
521 			    struct mfii_ccb *);
522 
523 static inline void
mfii_dcmd_scrub(struct mfii_ccb * ccb)524 mfii_dcmd_scrub(struct mfii_ccb *ccb)
525 {
526 	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
527 }
528 
529 static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb * ccb)530 mfii_dcmd_frame(struct mfii_ccb *ccb)
531 {
532 	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
533 	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
534 }
535 
536 static inline void
mfii_dcmd_sync(struct mfii_softc * sc,struct mfii_ccb * ccb,int flags)537 mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
538 {
539 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
540 	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
541 }
542 
543 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
544 
/* parameters for gen 2 (Thunderbolt, SAS2208) controllers */
const struct mfii_iop mfii_iop_thunderbolt = {
	MFII_BAR,				/* bar */
	MFII_IOP_NUM_SGE_LOC_ORIG,		/* num_sge_loc */
	0,					/* ldio_ctx_reg_lock_flags */
	MFII_REQ_TYPE_LDIO,			/* ldio_req_type */
	0,					/* ldio_ctx_type_nseg */
	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA, /* sge_flag_chain */
	0					/* sge_flag_eol */
};
554 
555 /*
556  * a lot of these values depend on us not implementing fastpath yet.
557  */
/* parameters for gen 2.5 (Invader/Fury, SAS3008/3108) controllers */
const struct mfii_iop mfii_iop_25 = {
	MFII_BAR,				/* bar */
	MFII_IOP_NUM_SGE_LOC_ORIG,		/* num_sge_loc */
	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
	MFII_REQ_TYPE_NO_LOCK,			/* ldio_req_type */
	MFII_RAID_CTX_TYPE_CUDA | 0x1,		/* ldio_ctx_type_nseg */
	MFII_SGE_CHAIN_ELEMENT,			/* sge_flag_chain */
	MFII_SGE_END_OF_LIST			/* sge_flag_eol */
};
567 
/* parameters for gen 3.5 (Ventura and later) controllers */
const struct mfii_iop mfii_iop_35 = {
	MFII_BAR_35,				/* bar */
	MFII_IOP_NUM_SGE_LOC_35,		/* num_sge_loc */
	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
	MFII_REQ_TYPE_NO_LOCK,			/* ldio_req_type */
	MFII_RAID_CTX_TYPE_CUDA | 0x1,		/* ldio_ctx_type_nseg */
	MFII_SGE_CHAIN_ELEMENT,			/* sge_flag_chain */
	MFII_SGE_END_OF_LIST			/* sge_flag_eol */
};
577 
/* entry in the supported-device table: PCI id -> iop parameters */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};
583 
/* supported controllers; searched linearly by mfii_find_iop() */
const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,   PCI_PRODUCT_SYMBIOS_MEGARAID_38XX,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,   PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_2,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,   PCI_PRODUCT_SYMBIOS_MEGARAID_39XX,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,   PCI_PRODUCT_SYMBIOS_MEGARAID_39XX_2,
	    &mfii_iop_35 }
};
612 
613 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
614 
615 const struct mfii_iop *
mfii_find_iop(struct pci_attach_args * pa)616 mfii_find_iop(struct pci_attach_args *pa)
617 {
618 	const struct mfii_device *mpd;
619 	int i;
620 
621 	for (i = 0; i < nitems(mfii_devices); i++) {
622 		mpd = &mfii_devices[i];
623 
624 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
625 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
626 			return (mpd->mpd_iop);
627 	}
628 
629 	return (NULL);
630 }
631 
632 int
mfii_match(struct device * parent,void * match,void * aux)633 mfii_match(struct device *parent, void *match, void *aux)
634 {
635 	return ((mfii_find_iop(aux) != NULL) ? 1 : 0);
636 }
637 
/*
 * bring the controller up: map registers, size the command pool and
 * SGLs from the firmware scratchpads, allocate the dma regions, init
 * the firmware, then attach the logical drive and physical disk
 * scsibusses.  the goto chain at the bottom unwinds in reverse
 * allocation order on failure.
 */
void
mfii_attach(struct device *parent, struct device *self, void *aux)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;
	struct pci_attach_args *pa = aux;
	pcireg_t memtype;
	pci_intr_handle_t ih;
	struct scsibus_attach_args saa;
	u_int32_t status, scpad2, scpad3;
	int chain_frame_sz, nsge_in_io, nsge_in_chain, i;

	/* init sc */
	sc->sc_iop = mfii_find_iop(aux);
	sc->sc_dmat = pa->pa_dmat;
	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	mtx_init(&sc->sc_post_mtx, IPL_BIO);
	mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);

	rw_init(&sc->sc_lock, "mfii_lock");

	sc->sc_aen_ccb = NULL;
	task_set(&sc->sc_aen_task, mfii_aen, sc);

	mtx_init(&sc->sc_abort_mtx, IPL_BIO);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	task_set(&sc->sc_abort_task, mfii_abort_task, sc);

	/* wire up the bus shizz */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
	if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) {
		printf(": unable to map registers\n");
		return;
	}

	/* disable interrupts */
	mfii_write(sc, MFI_OMSK, 0xffffffff);

	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": unable to map interrupt\n");
		goto pci_unmap;
	}
	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));

	/* lets get started */
	if (mfii_transition_firmware(sc))
		goto pci_unmap;

	/* determine max_cmds (refer to the Linux megaraid_sas driver) */
	scpad3 = mfii_read(sc, MFII_OSP3);
	status = mfii_fw_state(sc);
	sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
	if (sc->sc_max_fw_cmds == 0)
		sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
	/*
	 * reduce max_cmds by 1 to ensure that the reply queue depth does not
	 * exceed FW supplied max_fw_cmds.
	 */
	sc->sc_max_cmds = min(sc->sc_max_fw_cmds, 1024) - 1;

	/* determine max_sgl (refer to the Linux megaraid_sas driver) */
	scpad2 = mfii_read(sc, MFII_OSP2);
	chain_frame_sz =
		((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
		((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
	if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
		chain_frame_sz = MFII_CHAIN_FRAME_MIN;

	/* sges that fit in the io frame plus those in the chain frame */
	nsge_in_io = (MFII_REQUEST_SIZE -
		sizeof(struct mpii_msg_scsi_io) -
		sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
	nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);

	/* round down to nearest power of two */
	sc->sc_max_sgl = 1;
	while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
		sc->sc_max_sgl <<= 1;

	DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
	    DEVNAME(sc), status, scpad2, scpad3);
	DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
	    DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
	DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
	    "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
	    sc->sc_max_sgl);

	/* sense memory */
	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
	if (sc->sc_sense == NULL) {
		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
		goto pci_unmap;
	}

	/* reply post queue */
	sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);

	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
	if (sc->sc_reply_postq == NULL)
		goto free_sense;

	/* 0xff marks descriptors as unused for mfii_postq() */
	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
	    MFII_DMA_LEN(sc->sc_reply_postq));

	/* MPII request frame array */
	sc->sc_requests = mfii_dmamem_alloc(sc,
	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
	if (sc->sc_requests == NULL)
		goto free_reply_postq;

	/* MFI command frame array */
	sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
	if (sc->sc_mfi == NULL)
		goto free_requests;

	/* MPII SGL array */
	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
	if (sc->sc_sgl == NULL)
		goto free_mfi;

	if (mfii_init_ccb(sc) != 0) {
		printf("%s: could not init ccb list\n", DEVNAME(sc));
		goto free_sgl;
	}

	/* kickstart firmware with all addresses and pointers */
	if (mfii_initialise_firmware(sc) != 0) {
		printf("%s: could not initialize firmware\n", DEVNAME(sc));
		goto free_sgl;
	}

	if (mfii_get_info(sc) != 0) {
		printf("%s: could not retrieve controller information\n",
		    DEVNAME(sc));
		goto free_sgl;
	}

	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
	if (letoh16(sc->sc_info.mci_memory_size) > 0)
		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
	printf("\n");

	/*
	 * NOTE(review): sc_pc/sc_tag are never assigned in this function,
	 * yet sc_pc is passed to pci_intr_establish() here and to
	 * pci_intr_disestablish() in the unwind path and in mfii_detach().
	 * confirm they are set elsewhere or should be pa->pa_pc/pa->pa_tag.
	 */
	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
	    mfii_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL)
		goto free_sgl;

	saa.saa_adapter_softc = sc;
	saa.saa_adapter = &mfii_switch;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = sc->sc_info.mci_max_lds;
	saa.saa_luns = 8;
	saa.saa_openings = sc->sc_max_cmds;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev, &saa,
	    scsiprint);

	/* attach the physical disk pass-through bus; failure is not fatal */
	mfii_syspd(sc);

	if (mfii_aen_register(sc) != 0) {
		/* error printed by mfii_aen_register */
		goto intr_disestablish;
	}

	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
		printf("%s: getting list of logical disks failed\n", DEVNAME(sc));
		goto intr_disestablish;
	}
	/* build target id -> ld index map; -1 (all bytes) means no ld */
	memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		sc->sc_target_lds[target] = i;
	}

	/* enable interrupts */
	mfii_write(sc, MFI_OSTS, 0xffffffff);
	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);

#if NBIO > 0
	if (bio_register(&sc->sc_dev, mfii_ioctl) != 0)
		panic("%s: controller registration failed", DEVNAME(sc));
	else
		sc->sc_ioctl = mfii_ioctl;

#ifndef SMALL_KERNEL
	if (mfii_create_sensors(sc) != 0)
		printf("%s: unable to create sensors\n", DEVNAME(sc));
#endif
#endif /* NBIO > 0 */

	return;
intr_disestablish:
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
free_sgl:
	mfii_dmamem_free(sc, sc->sc_sgl);
free_mfi:
	mfii_dmamem_free(sc, sc->sc_mfi);
free_requests:
	mfii_dmamem_free(sc, sc->sc_requests);
free_reply_postq:
	mfii_dmamem_free(sc, sc->sc_reply_postq);
free_sense:
	mfii_dmamem_free(sc, sc->sc_sense);
pci_unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
}
853 
854 static inline uint16_t
mfii_dev_handle(struct mfii_softc * sc,uint16_t target)855 mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
856 {
857 	struct mfii_pd_dev_handles *handles;
858 	uint16_t handle;
859 
860 	smr_read_enter();
861 	handles = SMR_PTR_GET(&sc->sc_pd->pd_dev_handles);
862 	handle = handles->pd_handles[target];
863 	smr_read_leave();
864 
865 	return (handle);
866 }
867 
868 void
mfii_dev_handles_smr(void * pd_arg)869 mfii_dev_handles_smr(void *pd_arg)
870 {
871 	struct mfii_pd_dev_handles *handles = pd_arg;
872 
873 	free(handles, M_DEVBUF, sizeof(*handles));
874 }
875 
/*
 * fetch the firmware LD map and publish a fresh snapshot of the
 * physical disk device handles.  the old snapshot is retired via
 * smr_call() so lockless readers in mfii_dev_handle() stay safe.
 * returns 0 on success or EIO if the dcmd fails.
 */
int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	struct mfii_pd_dev_handles *handles, *old_handles;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	/* build the new snapshot before it becomes visible to readers */
	handles = malloc(sizeof(*handles), M_DEVBUF, M_WAITOK);
	smr_init(&handles->pd_smr);
	for (i = 0; i < MFI_MAX_PD; i++)
		handles->pd_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	old_handles = SMR_PTR_GET_LOCKED(&sc->sc_pd->pd_dev_handles);
	SMR_PTR_SET_LOCKED(&sc->sc_pd->pd_dev_handles, handles);

	/* old_handles is NULL on the first update from mfii_syspd() */
	if (old_handles != NULL)
		smr_call(&old_handles->pd_smr, mfii_dev_handles_smr, old_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}
912 
/*
 * attach the pass-through scsibus that exposes the physical disks
 * behind the controller.  returns 0 on success, 1 on failure (the
 * caller treats this as non-fatal).
 */
int
mfii_syspd(struct mfii_softc *sc)
{
	struct scsibus_attach_args saa;

	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc cannot fail; this check is dead */
	if (sc->sc_pd == NULL)
		return (1);

	/* prime pd_dev_handles and pd_timeout before the bus probes */
	if (mfii_dev_handles_update(sc) != 0)
		goto free_pdsc;

	saa.saa_adapter =  &mfii_pd_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_buswidth = MFI_MAX_PD;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_luns = 8;
	saa.saa_openings = sc->sc_max_cmds - 1;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
	    config_found(&sc->sc_dev, &saa, scsiprint);

	return (0);

free_pdsc:
	free(sc->sc_pd, M_DEVBUF, sizeof(*sc->sc_pd));
	return (1);
}
944 
/*
 * tear down the driver: sensors, AEN, interrupt, dma memory, and the
 * register mapping.  a NULL sc_ih means attach never completed, so
 * there is nothing to undo.
 */
int
mfii_detach(struct device *self, int flags)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;

	if (sc->sc_ih == NULL)
		return (0);

#ifndef SMALL_KERNEL
	if (sc->sc_sensors) {
		sensordev_deinstall(&sc->sc_sensordev);
		free(sc->sc_sensors, M_DEVBUF,
		    MFI_MAX_LD * sizeof(struct ksensor));
	}

	if (sc->sc_bbu) {
		free(sc->sc_bbu, M_DEVBUF, 4 * sizeof(*sc->sc_bbu));
	}

	if (sc->sc_bbu_status) {
		/*
		 * NOTE(review): sizeof(mfi_bbu_indicators) is the array's
		 * byte size, not its element count; this looks like it was
		 * meant to be nitems(mfi_bbu_indicators) — verify against
		 * the matching malloc in mfii_create_sensors().
		 */
		free(sc->sc_bbu_status, M_DEVBUF,
		    sizeof(*sc->sc_bbu_status) * sizeof(mfi_bbu_indicators));
	}
#endif /* SMALL_KERNEL */

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
981 
/*
 * flush the controller and disk caches ahead of shutdown.
 * the body is deliberately compiled out (#if 0), so this is currently
 * a no-op; the code is kept for when the dcmd path can be used here.
 */
static void
mfii_flush_cache(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#if 0
	union mfi_mbox mbox = {
		.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE,
	};
	int rv;

	mfii_scrub_ccb(ccb);
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
	    NULL, 0, SCSI_NOSLEEP);
	if (rv != 0) {
		printf("%s: unable to flush cache\n", DEVNAME(sc));
		return;
	}
#endif
}
1000 
/*
 * ask the firmware to shut the controller down.
 * the body is deliberately compiled out (#if 0), so this is currently
 * a no-op; the code is kept for when the dcmd path can be used here.
 */
static void
mfii_shutdown(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#if 0
	int rv;

	mfii_scrub_ccb(ccb);
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, NULL,
	    NULL, 0, SCSI_POLL);
	if (rv != 0) {
		printf("%s: unable to shutdown controller\n", DEVNAME(sc));
		return;
	}
#endif
}
1016 
1017 static void
mfii_powerdown(struct mfii_softc * sc)1018 mfii_powerdown(struct mfii_softc *sc)
1019 {
1020 	struct mfii_ccb *ccb;
1021 
1022 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1023 	if (ccb == NULL) {
1024 		printf("%s: unable to allocate ccb for shutdown\n",
1025 		    DEVNAME(sc));
1026 		return;
1027 	}
1028 
1029 	mfii_flush_cache(sc, ccb);
1030 	mfii_shutdown(sc, ccb);
1031 	scsi_io_put(&sc->sc_iopool, ccb);
1032 }
1033 
1034 int
mfii_activate(struct device * self,int act)1035 mfii_activate(struct device *self, int act)
1036 {
1037 	struct mfii_softc *sc = (struct mfii_softc *)self;
1038 	int rv;
1039 
1040 	switch (act) {
1041 	case DVACT_POWERDOWN:
1042 		rv = config_activate_children(&sc->sc_dev, act);
1043 		mfii_powerdown(sc);
1044 		break;
1045 	default:
1046 		rv = config_activate_children(&sc->sc_dev, act);
1047 		break;
1048 	}
1049 
1050 	return (rv);
1051 }
1052 
1053 u_int32_t
mfii_read(struct mfii_softc * sc,bus_size_t r)1054 mfii_read(struct mfii_softc *sc, bus_size_t r)
1055 {
1056 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1057 	    BUS_SPACE_BARRIER_READ);
1058 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
1059 }
1060 
1061 void
mfii_write(struct mfii_softc * sc,bus_size_t r,u_int32_t v)1062 mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
1063 {
1064 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1065 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1066 	    BUS_SPACE_BARRIER_WRITE);
1067 }
1068 
1069 struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc * sc,size_t size)1070 mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
1071 {
1072 	struct mfii_dmamem *m;
1073 	int nsegs;
1074 
1075 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1076 	if (m == NULL)
1077 		return (NULL);
1078 
1079 	m->mdm_size = size;
1080 
1081 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1082 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
1083 		goto mdmfree;
1084 
1085 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
1086 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
1087 		goto destroy;
1088 
1089 	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
1090 	    BUS_DMA_NOWAIT) != 0)
1091 		goto free;
1092 
1093 	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
1094 	    BUS_DMA_NOWAIT) != 0)
1095 		goto unmap;
1096 
1097 	return (m);
1098 
1099 unmap:
1100 	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
1101 free:
1102 	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
1103 destroy:
1104 	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
1105 mdmfree:
1106 	free(m, M_DEVBUF, sizeof *m);
1107 
1108 	return (NULL);
1109 }
1110 
/*
 * Release a DMA allocation made by mfii_dmamem_alloc(), undoing each
 * step in the reverse order: unload the map, unmap the kva, free the
 * segment, destroy the map, and free the descriptor itself.
 */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF, sizeof *m);
}
1120 
/*
 * Submit the ccb's MFI DCMD via an MPII passthru request.  The
 * request frame is laid out as io header, raid context, then SGEs;
 * a single chain SGE points at the ccb's sense-buffer DVA.  The
 * built request is posted to the hardware with mfii_start().
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* sgl_offset0 is in 32-bit words; chain_offset in 16-byte units */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	htolem64(&sge->sg_addr, ccb->ccb_sense_dva);
	htolem32(&sge->sg_len, sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1141 
1142 int
mfii_aen_register(struct mfii_softc * sc)1143 mfii_aen_register(struct mfii_softc *sc)
1144 {
1145 	struct mfi_evt_log_info mel;
1146 	struct mfii_ccb *ccb;
1147 	struct mfii_dmamem *mdm;
1148 	int rv;
1149 
1150 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1151 	if (ccb == NULL) {
1152 		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
1153 		return (ENOMEM);
1154 	}
1155 
1156 	memset(&mel, 0, sizeof(mel));
1157 	mfii_scrub_ccb(ccb);
1158 
1159 	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
1160 	    &mel, sizeof(mel), SCSI_DATA_IN|SCSI_NOSLEEP);
1161 	if (rv != 0) {
1162 		scsi_io_put(&sc->sc_iopool, ccb);
1163 		printf("%s: unable to get event info\n", DEVNAME(sc));
1164 		return (EIO);
1165 	}
1166 
1167 	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
1168 	if (mdm == NULL) {
1169 		scsi_io_put(&sc->sc_iopool, ccb);
1170 		printf("%s: unable to allocate event data\n", DEVNAME(sc));
1171 		return (ENOMEM);
1172 	}
1173 
1174 	/* replay all the events from boot */
1175 	mfii_aen_start(sc, ccb, mdm, lemtoh32(&mel.mel_boot_seq_num));
1176 
1177 	return (0);
1178 }
1179 
/*
 * (Re)arm the asynchronous event wait: build an MFI DCMD frame that
 * asks the firmware to complete when an event with sequence number
 * seq (or later) occurs, with the event detail DMAed into mdm.
 * Completion is routed through mfii_aen_done().
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	/* remember the event buffer and completion for this long-lived ccb */
	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* subscribe to all locales at debug class */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	htolem32(&hdr->mfh_data_len, MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	/* mailbox: starting sequence number and class/locale filter */
	htolem32(&dcmd->mdf_mbox.w[0], seq);
	htolem32(&dcmd->mdf_mbox.w[1], mec.mec_word);
	htolem64(&sgl->sg64[0].addr, MFII_DMA_DVA(mdm));
	htolem32(&sgl->sg64[0].len, MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1217 
/*
 * Completion callback for the AEN dcmd.  Event handling may attach or
 * detach devices, so the real work is deferred to the systq task.
 */
void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/* defer to a thread with KERNEL_LOCK so we can run autoconf */
	task_add(systq, &sc->sc_aen_task);
}
1226 
/*
 * Task handler for a completed AEN: sync the event detail out of DMA
 * memory, dispatch on the event code, then re-arm the event wait with
 * the next sequence number.
 */
void
mfii_aen(void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);
	uint32_t code;

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	code = lemtoh32(&med->med_code);

#if 0
	log(LOG_DEBUG, "%s (seq %u, code %08x) %s\n", DEVNAME(sc),
	    lemtoh32(&med->med_seq_num), code, med->med_description);
#endif

	switch (code) {
	case MFI_EVT_PD_INSERTED_EXT:
		/* only trust events that carry a pd address argument */
		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MFI_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MFI_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MFI_EVT_LD_CREATED:
	case MFI_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		/* events we don't care about */
		break;
	}

	/* wait for the next event */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&med->med_seq_num) + 1);
}
1280 
/*
 * AEN: a physical disk was inserted.  Refresh the device handle map
 * first, then probe the new target on the passthru scsibus.
 */
void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd inserted ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %x\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif

	/* without a current handle map the probe could go to the
	 * wrong device, so give up if the refresh fails */
	if (mfii_dev_handles_update(sc) != 0) /* refresh map */
		return;

	scsi_probe_target(sc->sc_pd->pd_scsibus, lemtoh16(&pd->device_id));
}
1300 
/*
 * AEN: a physical disk was removed.  Deactivate and force-detach the
 * corresponding target on the passthru scsibus.
 */
void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd removed ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %u\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif
	uint16_t target = lemtoh16(&pd->device_id);

	scsi_activate(sc->sc_pd->pd_scsibus, target, -1, DVACT_DEACTIVATE);

	/* the firmware will abort outstanding commands for us */

	scsi_detach_target(sc->sc_pd->pd_scsibus, target, DETACH_FORCE);
}
1322 
1323 void
mfii_aen_pd_state_change(struct mfii_softc * sc,const struct mfi_evtarg_pd_state * state)1324 mfii_aen_pd_state_change(struct mfii_softc *sc,
1325     const struct mfi_evtarg_pd_state *state)
1326 {
1327 	uint16_t target = lemtoh16(&state->pd.mep_device_id);
1328 
1329 	if (state->prev_state == htole32(MFI_PD_SYSTEM) &&
1330 	    state->new_state != htole32(MFI_PD_SYSTEM)) {
1331 		/* it's been pulled or configured for raid */
1332 
1333 		scsi_activate(sc->sc_pd->pd_scsibus, target, -1,
1334 		    DVACT_DEACTIVATE);
1335 		/* outstanding commands will simply complete or get aborted */
1336 		scsi_detach_target(sc->sc_pd->pd_scsibus, target,
1337 		    DETACH_FORCE);
1338 
1339 	} else if (state->prev_state == htole32(MFI_PD_UNCONFIG_GOOD) &&
1340 	    state->new_state == htole32(MFI_PD_SYSTEM)) {
1341 		/* the firmware is handing the disk over */
1342 
1343 		scsi_probe_target(sc->sc_pd->pd_scsibus, target);
1344 	}
1345 }
1346 
/*
 * AEN: a logical disk was created or deleted.  Fetch the current LD
 * list, diff it against the cached sc_target_lds map, and probe or
 * force-detach scsibus targets (and their sensors) to match.
 */
void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, state, target, old, nld;
	int newlds[MFI_MAX_LD];

	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}

	/* -1 means "no LD at this target"; memset with -1 sets all bits */
	memset(newlds, -1, sizeof(newlds));

	/* build the new target -> LD index map */
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		state = sc->sc_ld_list.mll_list[i].mll_state;
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, state);
		newlds[target] = i;
	}

	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			/* new LD appeared at this target */
			DNPRINTF(MFII_D_MISC, "%s: attaching target %d\n",
			    DEVNAME(sc), i);

			scsi_probe_target(sc->sc_scsibus, i);

#ifndef SMALL_KERNEL
			mfii_init_ld_sensor(sc, nld);
			sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		} else if (nld == -1 && old != -1) {
			/* LD at this target went away */
			DNPRINTF(MFII_D_MISC, "%s: detaching target %d\n",
			    DEVNAME(sc), i);

			scsi_activate(sc->sc_scsibus, i, -1,
			    DVACT_DEACTIVATE);
			scsi_detach_target(sc->sc_scsibus, i,
			    DETACH_FORCE);
#ifndef SMALL_KERNEL
			sensor_detach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1400 
/*
 * Tear down asynchronous event notification.  Not implemented yet;
 * called from mfii_detach().
 */
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1406 
1407 int
mfii_reset_hard(struct mfii_softc * sc)1408 mfii_reset_hard(struct mfii_softc *sc)
1409 {
1410 	u_int16_t		i;
1411 
1412 	mfii_write(sc, MFI_OSTS, 0);
1413 
1414 	/* enable diagnostic register */
1415 	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
1416 	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
1417 	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
1418 	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
1419 	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
1420 	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
1421 	mfii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);
1422 
1423 	delay(100);
1424 
1425 	if ((mfii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
1426 		printf("%s: failed to enable diagnostic read/write\n",
1427 		    DEVNAME(sc));
1428 		return(1);
1429 	}
1430 
1431 	/* reset ioc */
1432 	mfii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);
1433 
1434 	/* 240 milliseconds */
1435 	delay(240000);
1436 
1437 	for (i = 0; i < 30000; i++) {
1438 		if ((mfii_read(sc, MPII_HOSTDIAG) &
1439 		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
1440 			break;
1441 		delay(10000);
1442 	}
1443 	if (i >= 30000) {
1444 		printf("%s: failed to reset device\n", DEVNAME(sc));
1445 		return (1);
1446 	}
1447 
1448 	/* disable diagnostic register */
1449 	mfii_write(sc, MPII_WRITESEQ, 0xff);
1450 
1451 	return(0);
1452 }
1453 
/*
 * Drive the firmware state machine until it reaches MFI_STATE_READY.
 * Each intermediate state gets a bounded wait (max_wait is in
 * seconds, polled every 100ms); a FAULT state triggers one attempt at
 * a full hard reset.  Returns 0 once the firmware is ready, 1 on
 * fault, timeout or unknown state.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i, reset_on_fault = 1;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			if (!reset_on_fault) {
				printf("%s: firmware fault\n", DEVNAME(sc));
				return (1);
			}
			printf("%s: firmware fault; attempting full device "
			    "reset, this can take some time\n", DEVNAME(sc));
			if (mfii_reset_hard(sc))
				return (1);
			max_wait = 20;
			/* only attempt the hard reset once */
			reset_on_fault = 0;
			break;
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_BB_INIT:
			max_wait = 20;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_FW_INIT:
		case MFI_STATE_FW_INIT_2:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 40;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_HOTPLUG);
			max_wait = 10;
			break;
		default:
			printf("%s: unknown firmware state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* wait for the state to change, up to max_wait seconds */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		} else {
			DPRINTF("%s: firmware state change %#x -> %#x after "
			    "%d iterations\n",
			    DEVNAME(sc), cur_state, fw_state, i);
		}
	}

	return (0);
}
1525 
/*
 * Fetch the controller information structure into sc->sc_info via a
 * polled DCMD, and (in debug kernels) dump the interesting fields.
 * Returns 0 on success or the error from mfii_mgmt().
 */
int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0)
		return (rv);

	/* everything below is debug output only */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1680 
/*
 * Issue a command through the legacy MFI (MFA) interface and busy-wait
 * for its completion; used before the MPII reply queues are usable.
 * The frame's status byte is seeded with MFI_STAT_INVALID_STATUS and
 * polled (1ms steps, ~5s total) until the firmware overwrites it.
 * Returns non-zero on timeout, with MFI_CCB_F_ERR set on the ccb.
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* post the request descriptor in MFA format */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* make the firmware's status byte visible to the cpu */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		/* hand the frame back to the device before waiting again */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	return (rv);
}
1736 
1737 int
mfii_poll(struct mfii_softc * sc,struct mfii_ccb * ccb)1738 mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
1739 {
1740 	void (*done)(struct mfii_softc *, struct mfii_ccb *);
1741 	void *cookie;
1742 	int rv = 1;
1743 
1744 	done = ccb->ccb_done;
1745 	cookie = ccb->ccb_cookie;
1746 
1747 	ccb->ccb_done = mfii_poll_done;
1748 	ccb->ccb_cookie = &rv;
1749 
1750 	mfii_start(sc, ccb);
1751 
1752 	do {
1753 		delay(10);
1754 		mfii_postq(sc);
1755 	} while (rv == 1);
1756 
1757 	ccb->ccb_cookie = cookie;
1758 	done(sc, ccb);
1759 
1760 	return (0);
1761 }
1762 
1763 void
mfii_poll_done(struct mfii_softc * sc,struct mfii_ccb * ccb)1764 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1765 {
1766 	int *rv = ccb->ccb_cookie;
1767 
1768 	*rv = 0;
1769 }
1770 
/*
 * Issue a ccb and sleep until it completes.  The handshake uses a
 * mutex on our stack handed to mfii_exec_done() via ccb_cookie:
 * completion clears the cookie under the mutex and wakes us.  Only
 * usable from contexts that may sleep.
 */
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex m;

	mtx_init(&m, IPL_BIO);

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = &m;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mtx_enter(&m);
	/* loop guards against spurious wakeups */
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &m, PRIBIO, "mfiiexec", INFSLP);
	mtx_leave(&m);

	return (0);
}
1795 
/*
 * Completion side of mfii_exec(): clear the cookie under the shared
 * stack mutex and wake the sleeping submitter.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex *m = ccb->ccb_cookie;

	mtx_enter(m);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(m);
}
1806 
1807 int
mfii_mgmt(struct mfii_softc * sc,uint32_t opc,const union mfi_mbox * mbox,void * buf,size_t len,int flags)1808 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1809     void *buf, size_t len, int flags)
1810 {
1811 	struct mfii_ccb *ccb;
1812 	int rv;
1813 
1814 	ccb = scsi_io_get(&sc->sc_iopool, flags);
1815 	if (ccb == NULL)
1816 		return (ENOMEM);
1817 
1818 	mfii_scrub_ccb(ccb);
1819 	rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, flags);
1820 	scsi_io_put(&sc->sc_iopool, ccb);
1821 
1822 	return (rv);
1823 }
1824 
1825 int
mfii_do_mgmt(struct mfii_softc * sc,struct mfii_ccb * ccb,uint32_t opc,const union mfi_mbox * mbox,void * buf,size_t len,int flags)1826 mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
1827     const union mfi_mbox *mbox, void *buf, size_t len, int flags)
1828 {
1829 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
1830 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1831 	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
1832 	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
1833 	struct mfi_frame_header *hdr = &dcmd->mdf_header;
1834 	u_int8_t *dma_buf = NULL;
1835 	int rv = EIO;
1836 
1837 	if (cold)
1838 		flags |= SCSI_NOSLEEP;
1839 
1840 	if (buf != NULL) {
1841 		dma_buf = dma_alloc(len, PR_WAITOK);
1842 		if (dma_buf == NULL)
1843 			return (ENOMEM);
1844 	}
1845 
1846 	ccb->ccb_data = dma_buf;
1847 	ccb->ccb_len = len;
1848 	switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1849 	case SCSI_DATA_IN:
1850 		ccb->ccb_direction = MFII_DATA_IN;
1851 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
1852 		break;
1853 	case SCSI_DATA_OUT:
1854 		ccb->ccb_direction = MFII_DATA_OUT;
1855 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
1856 		memcpy(dma_buf, buf, len);
1857 		break;
1858 	case 0:
1859 		ccb->ccb_direction = MFII_DATA_NONE;
1860 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
1861 		break;
1862 	}
1863 
1864 	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
1865 	    ISSET(flags, SCSI_NOSLEEP)) != 0) {
1866 		rv = ENOMEM;
1867 		goto done;
1868 	}
1869 
1870 	hdr->mfh_cmd = MFI_CMD_DCMD;
1871 	hdr->mfh_context = ccb->ccb_smid;
1872 	hdr->mfh_data_len = htole32(len);
1873 	hdr->mfh_sg_count = len ? ccb->ccb_dmamap->dm_nsegs : 0;
1874 
1875 	dcmd->mdf_opcode = opc;
1876 	/* handle special opcodes */
1877 	if (mbox != NULL)
1878 		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));
1879 
1880 	io->function = MFII_FUNCTION_PASSTHRU_IO;
1881 
1882 	if (len) {
1883 		io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
1884 		io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;
1885 		htolem64(&sge->sg_addr, ccb->ccb_mfi_dva);
1886 		htolem32(&sge->sg_len, MFI_FRAME_SIZE);
1887 		sge->sg_flags =
1888 		    MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;
1889 	}
1890 
1891 	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
1892 	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
1893 
1894 	if (ISSET(flags, SCSI_NOSLEEP)) {
1895 		ccb->ccb_done = mfii_empty_done;
1896 		mfii_poll(sc, ccb);
1897 	} else
1898 		mfii_exec(sc, ccb);
1899 
1900 	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
1901 		rv = 0;
1902 
1903 		if (ccb->ccb_direction == MFII_DATA_IN)
1904 			memcpy(buf, dma_buf, len);
1905 	}
1906 
1907 done:
1908 	if (buf != NULL)
1909 		dma_free(dma_buf, len);
1910 
1911 	return (rv);
1912 }
1913 
/*
 * No-op completion used for polled management commands, where
 * mfii_poll() synchronises on the ccb itself.
 */
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	/* nothing to do */
}
1919 
1920 int
mfii_load_mfa(struct mfii_softc * sc,struct mfii_ccb * ccb,void * sglp,int nosleep)1921 mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
1922     void *sglp, int nosleep)
1923 {
1924 	union mfi_sgl *sgl = sglp;
1925 	bus_dmamap_t dmap = ccb->ccb_dmamap;
1926 	int error;
1927 	int i;
1928 
1929 	if (ccb->ccb_len == 0)
1930 		return (0);
1931 
1932 	error = bus_dmamap_load(sc->sc_dmat, dmap,
1933 	    ccb->ccb_data, ccb->ccb_len, NULL,
1934 	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
1935 	if (error) {
1936 		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
1937 		return (1);
1938 	}
1939 
1940 	for (i = 0; i < dmap->dm_nsegs; i++) {
1941 		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
1942 		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
1943 	}
1944 
1945 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1946 	    ccb->ccb_direction == MFII_DATA_OUT ?
1947 	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
1948 
1949 	return (0);
1950 }
1951 
/*
 * Post a ccb's 64-bit request descriptor to the inbound queue.  On
 * LP64 a single raw 8-byte write is used; otherwise the two 32-bit
 * halves are written separately (IQPL then IQPH) under sc_post_mtx so
 * the halves of concurrent posts cannot interleave.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_long *r = (u_long *)&ccb->ccb_req;

	/* hand the request frame to the device before ringing it */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mtx_enter(&sc->sc_post_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_post_mtx);
#endif
}
1975 
/*
 * Common completion path for a ccb: sync the request frame, the SGL
 * area (if used) and the data buffer back to the cpu, unload the data
 * dmamap, then invoke the ccb's completion callback.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
2000 
2001 int
mfii_initialise_firmware(struct mfii_softc * sc)2002 mfii_initialise_firmware(struct mfii_softc *sc)
2003 {
2004 	struct mpii_msg_iocinit_request *iiq;
2005 	struct mfii_dmamem *m;
2006 	struct mfii_ccb *ccb;
2007 	struct mfi_init_frame *init;
2008 	int rv;
2009 
2010 	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
2011 	if (m == NULL)
2012 		return (1);
2013 
2014 	iiq = MFII_DMA_KVA(m);
2015 	memset(iiq, 0, sizeof(*iiq));
2016 
2017 	iiq->function = MPII_FUNCTION_IOC_INIT;
2018 	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
2019 
2020 	iiq->msg_version_maj = 0x02;
2021 	iiq->msg_version_min = 0x00;
2022 	iiq->hdr_version_unit = 0x10;
2023 	iiq->hdr_version_dev = 0x0;
2024 
2025 	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
2026 
2027 	iiq->reply_descriptor_post_queue_depth =
2028 	    htole16(sc->sc_reply_postq_depth);
2029 	iiq->reply_free_queue_depth = htole16(0);
2030 
2031 	htolem32(&iiq->sense_buffer_address_high,
2032 	    MFII_DMA_DVA(sc->sc_sense) >> 32);
2033 
2034 	htolem32(&iiq->reply_descriptor_post_queue_address_lo,
2035 	    MFII_DMA_DVA(sc->sc_reply_postq));
2036 	htolem32(&iiq->reply_descriptor_post_queue_address_hi,
2037 	    MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
2038 
2039 	htolem32(&iiq->system_request_frame_base_address_lo,
2040 	    MFII_DMA_DVA(sc->sc_requests));
2041 	htolem32(&iiq->system_request_frame_base_address_hi,
2042 	    MFII_DMA_DVA(sc->sc_requests) >> 32);
2043 
2044 	iiq->timestamp = htole64(getuptime());
2045 
2046 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2047 	if (ccb == NULL) {
2048 		/* shouldn't ever run out of ccbs during attach */
2049 		return (1);
2050 	}
2051 	mfii_scrub_ccb(ccb);
2052 	init = ccb->ccb_request;
2053 
2054 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
2055 	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
2056 	init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m));
2057 
2058 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
2059 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
2060 	    BUS_DMASYNC_PREREAD);
2061 
2062 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2063 	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
2064 
2065 	rv = mfii_mfa_poll(sc, ccb);
2066 
2067 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
2068 	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
2069 
2070 	scsi_io_put(&sc->sc_iopool, ccb);
2071 	mfii_dmamem_free(sc, m);
2072 
2073 	return (rv);
2074 }
2075 
2076 int
mfii_my_intr(struct mfii_softc * sc)2077 mfii_my_intr(struct mfii_softc *sc)
2078 {
2079 	u_int32_t status;
2080 
2081 	status = mfii_read(sc, MFI_OSTS);
2082 	if (ISSET(status, 0x1)) {
2083 		mfii_write(sc, MFI_OSTS, status);
2084 		return (1);
2085 	}
2086 
2087 	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
2088 }
2089 
int
mfii_intr(void *arg)
{
	/*
	 * Interrupt handler: claim the interrupt if it is ours and
	 * drain the reply post queue.
	 */
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc) == 0)
		return (0);

	mfii_postq(sc);
	return (1);
}
2102 
void
mfii_postq(struct mfii_softc *sc)
{
	/*
	 * Drain the reply post queue: collect every completed ccb the
	 * controller has posted, acknowledge our progress via the reply
	 * post host index register, and run the completions.
	 */
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mtx_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		/* an unused descriptor means we have caught up */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smids are 1 based, slot 0 is reserved for events */
		ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		/* return the descriptor to the all-ones "unused" pattern */
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* only touch the register if we actually consumed descriptors */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mtx_leave(&sc->sc_reply_postq_mtx);

	/* run the completions without holding the queue mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2154 
void
mfii_scsi_cmd(struct scsi_xfer *xs)
{
	/*
	 * Midlayer entry point for commands to logical disks. Reads and
	 * writes take the LD I/O fast path; everything else is passed
	 * through as a raw cdb.
	 */
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	switch (xs->cmd.opcode) {
	case READ_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		if (mfii_scsi_cmd_io(sc, xs) != 0)
			goto stuffup;

		break;

	default:
		if (mfii_scsi_cmd_cdb(sc, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	/* polled commands complete synchronously and skip the timeout */
	if (ISSET(xs->flags, SCSI_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
2209 
void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	/*
	 * Completion callback for scsi commands: translate the firmware
	 * status in the raid context into a midlayer error code and
	 * finish the xfer once both the chip and timeout references
	 * have been dropped.
	 */
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	u_int refs = 1;

	/* if the timeout was still pending, drop its reference too */
	if (timeout_del(&xs->stimeout))
		refs = 2;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* the device returned sense data; hand it to the midlayer */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (atomic_sub_int_nv(&ccb->ccb_refcnt, refs) == 0)
		scsi_done(xs);
}
2244 
2245 int
mfii_scsi_ioctl(struct scsi_link * link,u_long cmd,caddr_t addr,int flag)2246 mfii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2247 {
2248 	struct mfii_softc	*sc = link->bus->sb_adapter_softc;
2249 
2250 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_scsi_ioctl\n", DEVNAME(sc));
2251 
2252 	switch (cmd) {
2253 	case DIOCGCACHE:
2254 	case DIOCSCACHE:
2255 		return (mfii_ioctl_cache(link, cmd, (struct dk_cache *)addr));
2256 		break;
2257 
2258 	default:
2259 		if (sc->sc_ioctl)
2260 			return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
2261 		break;
2262 	}
2263 
2264 	return (ENOTTY);
2265 }
2266 
int
mfii_ioctl_cache(struct scsi_link *link, u_long cmd,  struct dk_cache *dc)
{
	/*
	 * Get (DIOCGCACHE) or set (DIOCSCACHE) the cache policy of the
	 * logical disk behind this link. On controllers with cache
	 * memory the LD cache policy is used; otherwise the physical
	 * disk write cache is the only knob available.
	 */
	struct mfii_softc	*sc = link->bus->sb_adapter_softc;
	int			 rv, wrenable, rdenable;
	struct mfi_ld_prop	 ldp;
	union mfi_mbox		 mbox;

	if (mfii_get_info(sc)) {
		rv = EIO;
		goto done;
	}

	/* make sure a logical disk actually lives at this target */
	if (sc->sc_target_lds[link->target] == -1) {
		rv = EIO;
		goto done;
	}

	/* fetch the current properties for this ld */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = link->target;
	rv = mfii_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/* derive the current enable bits from whichever policy applies */
	if (sc->sc_info.mci_memory_size > 0) {
		wrenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
		rdenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
	} else {
		wrenable = ISSET(ldp.mlp_diskcache_policy,
		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
		rdenable = 0;
	}

	if (cmd == DIOCGCACHE) {
		dc->wrcache = wrenable;
		dc->rdcache = rdenable;
		goto done;
	} /* else DIOCSCACHE */

	/* nothing to do if the requested state matches the current one */
	if (((dc->wrcache) ? 1 : 0) == wrenable &&
	    ((dc->rdcache) ? 1 : 0) == rdenable)
		goto done;

	/* the set command needs the ld target/reserved/seq in the mailbox */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = ldp.mlp_ld.mld_target;
	mbox.b[1] = ldp.mlp_ld.mld_res;
	mbox.s[1] = ldp.mlp_ld.mld_seq;

	if (sc->sc_info.mci_memory_size > 0) {
		if (dc->rdcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		if (dc->wrcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
	} else {
		/* no cache memory: read caching cannot be enabled */
		if (dc->rdcache) {
			rv = EOPNOTSUPP;
			goto done;
		}
		if (dc->wrcache)
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
		else
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
	}

	rv = mfii_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_OUT);
done:
	return (rv);
}
2347 
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	/*
	 * Build an LD I/O fast path request for a read or write to a
	 * logical disk. Returns 0 on success, 1 if the dma map could
	 * not be loaded.
	 */
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* the sgl starts right after the raid context, in 32bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	/* per-iop raid context parameters */
	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(link->target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	/* the sge count lives in a different context field per iop rev */
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2406 
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	/*
	 * Build a generic (non fast path) cdb passthrough request to a
	 * logical disk. Returns 0 on success, 1 if the dma map could
	 * not be loaded.
	 */
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* the sgl starts right after the raid context, in 32bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2453 
void
mfii_pd_scsi_cmd(struct scsi_xfer *xs)
{
	/*
	 * Midlayer entry point for commands to passthrough physical
	 * disks. All commands are sent as raw cdbs via the syspd path.
	 */
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	/* mfii_pd_scsi_cmd_cdb returns an XS_* code directly */
	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	/* polled commands complete synchronously and skip the timeout */
	if (ISSET(xs->flags, SCSI_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}
2492 
2493 int
mfii_pd_scsi_probe(struct scsi_link * link)2494 mfii_pd_scsi_probe(struct scsi_link *link)
2495 {
2496 	struct mfii_softc *sc = link->bus->sb_adapter_softc;
2497 	struct mfi_pd_details mpd;
2498 	union mfi_mbox mbox;
2499 	int rv;
2500 
2501 	if (link->lun > 0)
2502 		return (0);
2503 
2504 	memset(&mbox, 0, sizeof(mbox));
2505 	mbox.s[0] = htole16(link->target);
2506 
2507 	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
2508 	    SCSI_DATA_IN|SCSI_NOSLEEP);
2509 	if (rv != 0)
2510 		return (EIO);
2511 
2512 	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
2513 		return (ENXIO);
2514 
2515 	return (0);
2516 }
2517 
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	/*
	 * Build a syspd (physical disk passthrough) request. Returns an
	 * XS_* error code: XS_NOERROR on success, XS_SELTIMEOUT if the
	 * device has no handle, XS_DRIVER_STUFFUP on dma load failure.
	 */
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	/* 0xffff means the firmware has no device at this target */
	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* the sgl starts right after the raid context, in 32bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	/* mark this as a system physical disk i/o in the raid context */
	ctx->virtual_disk_target_id = htole16(link->target);
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
2572 
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	/*
	 * Load the ccb's data buffer into its dma map and build the
	 * scatter-gather list at sglp inside the request frame. If the
	 * segments do not all fit in the frame, the last in-frame slot
	 * becomes a chain element pointing at the ccb's off-frame sgl
	 * memory where the remaining entries are written.
	 * Returns 0 on success, 1 on dma load failure.
	 */
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int space;
	int i;

	int error;

	/* nothing to map for commands without a data transfer */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* how many sges fit between sglp and the end of the frame? */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is expressed in 16 byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* when we hit the chain element, continue in the sgl memory */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* flag the last entry so the hardware knows the list ends here */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the off-frame sgl entries if we chained into them */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2639 
void
mfii_scsi_cmd_tmo(void *xsp)
{
	/*
	 * Command timeout handler. Runs in timeout (interrupt) context,
	 * so just queue the ccb for the abort task to deal with from
	 * process context.
	 */
	struct scsi_xfer *xs = xsp;
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mtx_enter(&sc->sc_abort_mtx);
	SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
	mtx_leave(&sc->sc_abort_mtx);

	task_add(systqmp, &sc->sc_abort_task);
}
2654 
void
mfii_abort_task(void *scp)
{
	/*
	 * Process-context half of command timeout handling: take the
	 * whole list of timed-out ccbs and issue an MPII task abort for
	 * each one that still has a device behind it.
	 */
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* detach the whole abort list under the mutex, then walk it */
	mtx_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mtx_leave(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsi_xfer *xs = ccb->ccb_cookie;
		struct scsi_link *link = xs->sc_link;

		uint16_t dev_handle;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		dev_handle = mfii_dev_handle(sc, link->target);
		if (dev_handle == htole16(0xffff)) {
			/* device is gone */
			if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
				scsi_done(xs);
			continue;
		}

		/* sleep for a ccb to carry the abort request */
		accb = scsi_io_get(&sc->sc_iopool, 0);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, dev_handle, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2696 
2697 void
mfii_abort(struct mfii_softc * sc,struct mfii_ccb * accb,uint16_t dev_handle,uint16_t smid,uint8_t type,uint32_t flags)2698 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2699     uint16_t smid, uint8_t type, uint32_t flags)
2700 {
2701 	struct mfii_task_mgmt *msg;
2702 	struct mpii_msg_scsi_task_request *req;
2703 
2704 	msg = accb->ccb_request;
2705 	req = &msg->mpii_request;
2706 	req->dev_handle = dev_handle;
2707 	req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2708 	req->task_type = type;
2709 	htolem16(&req->task_mid, smid);
2710 	msg->flags = flags;
2711 
2712 	accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2713 	accb->ccb_req.smid = letoh16(accb->ccb_smid);
2714 }
2715 
void
mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
{
	/*
	 * Completion callback for an abort request: return the abort
	 * ccb to the pool and drop the timeout's reference on the
	 * original command, finishing it if the chip is done too.
	 */
	struct mfii_ccb *ccb = accb->ccb_cookie;
	struct scsi_xfer *xs = ccb->ccb_cookie;

	/* XXX check accb completion? */

	scsi_io_put(&sc->sc_iopool, accb);

	if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
		scsi_done(xs);
}
2729 
void *
mfii_get_ccb(void *cookie)
{
	/*
	 * iopool backend: pop a ccb off the free list, or return NULL
	 * if none are available.
	 */
	struct mfii_softc *sc = cookie;
	struct mfii_ccb *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
	if (ccb != NULL)
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);

	return (ccb);
}
2744 
2745 void
mfii_scrub_ccb(struct mfii_ccb * ccb)2746 mfii_scrub_ccb(struct mfii_ccb *ccb)
2747 {
2748 	ccb->ccb_cookie = NULL;
2749 	ccb->ccb_done = NULL;
2750 	ccb->ccb_flags = 0;
2751 	ccb->ccb_data = NULL;
2752 	ccb->ccb_direction = 0;
2753 	ccb->ccb_len = 0;
2754 	ccb->ccb_sgl_len = 0;
2755 	ccb->ccb_refcnt = 1;
2756 
2757 	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2758 	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2759 	memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2760 }
2761 
void
mfii_put_ccb(void *cookie, void *io)
{
	/* iopool backend: return a ccb to the head of the free list. */
	struct mfii_softc *sc = cookie;
	struct mfii_ccb *ccb = io;

	mtx_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
}
2772 
int
mfii_init_ccb(struct mfii_softc *sc)
{
	/*
	 * Allocate the ccb array and carve each ccb's request frame,
	 * MFI frame, sense buffer, and sgl space out of the shared
	 * dmamem regions allocated at attach. Returns 0 on success,
	 * 1 if a per-ccb dma map cannot be created.
	 */
	struct mfii_ccb *ccb;
	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
	u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
	u_int i;
	int error;

	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
	    M_DEVBUF, M_WAITOK|M_ZERO);

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_dmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			printf("%s: cannot create ccb dmamap (%d)\n",
			    DEVNAME(sc), error);
			goto destroy;
		}

		/* select i + 1'th request. 0 is reserved for events */
		ccb->ccb_smid = i + 1;
		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
		ccb->ccb_request = request + ccb->ccb_request_offset;
		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_request_offset;

		/* select i'th MFI command frame */
		ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
		ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
		ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
		    ccb->ccb_mfi_offset;

		/* select i'th sense */
		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
		ccb->ccb_sense = (struct mfi_sense *)(sense +
		    ccb->ccb_sense_offset);
		ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
		    ccb->ccb_sense_offset;

		/* select i'th sgl */
		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
		    sc->sc_max_sgl * i;
		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
		    ccb->ccb_sgl_offset;

		/* add ccb to queue */
		mfii_put_ccb(sc, ccb);
	}

	return (0);

destroy:
	/* free dma maps and ccb memory */
	while ((ccb = mfii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	free(sc->sc_ccb, M_DEVBUF, 0);

	return (1);
}
2842 
2843 #if NBIO > 0
int
mfii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
{
	/*
	 * bio(4) ioctl entry point: dispatch to the per-command handler
	 * while holding the softc lock so bio operations are serialized.
	 */
	struct mfii_softc	*sc = (struct mfii_softc *)dev;
	int error = 0;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));

	rw_enter_write(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFII_D_IOCTL, "inq\n");
		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFII_D_IOCTL, "vol\n");
		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFII_D_IOCTL, "disk\n");
		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFII_D_IOCTL, "alarm\n");
		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFII_D_IOCTL, "blink\n");
		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFII_D_IOCTL, "setstate\n");
		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

	case BIOCPATROL:
		DNPRINTF(MFII_D_IOCTL, "patrol\n");
		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	rw_exit_write(&sc->sc_lock);

	return (error);
}
2899 
2900 int
mfii_bio_getitall(struct mfii_softc * sc)2901 mfii_bio_getitall(struct mfii_softc *sc)
2902 {
2903 	int			i, d, rv = EINVAL;
2904 	size_t			size;
2905 	union mfi_mbox		mbox;
2906 	struct mfi_conf		*cfg = NULL;
2907 	struct mfi_ld_details	*ld_det = NULL;
2908 
2909 	/* get info */
2910 	if (mfii_get_info(sc)) {
2911 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
2912 		    DEVNAME(sc));
2913 		goto done;
2914 	}
2915 
2916 	/* send single element command to retrieve size for full structure */
2917 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
2918 	if (cfg == NULL)
2919 		goto done;
2920 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
2921 	    SCSI_DATA_IN)) {
2922 		free(cfg, M_DEVBUF, sizeof *cfg);
2923 		goto done;
2924 	}
2925 
2926 	size = cfg->mfc_size;
2927 	free(cfg, M_DEVBUF, sizeof *cfg);
2928 
2929 	/* memory for read config */
2930 	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
2931 	if (cfg == NULL)
2932 		goto done;
2933 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN)) {
2934 		free(cfg, M_DEVBUF, size);
2935 		goto done;
2936 	}
2937 
2938 	/* replace current pointer with new one */
2939 	if (sc->sc_cfg)
2940 		free(sc->sc_cfg, M_DEVBUF, 0);
2941 	sc->sc_cfg = cfg;
2942 
2943 	/* get all ld info */
2944 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
2945 	    sizeof(sc->sc_ld_list), SCSI_DATA_IN))
2946 		goto done;
2947 
2948 	/* get memory for all ld structures */
2949 	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
2950 	if (sc->sc_ld_sz != size) {
2951 		if (sc->sc_ld_details)
2952 			free(sc->sc_ld_details, M_DEVBUF, 0);
2953 
2954 		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
2955 		if (ld_det == NULL)
2956 			goto done;
2957 		sc->sc_ld_sz = size;
2958 		sc->sc_ld_details = ld_det;
2959 	}
2960 
2961 	/* find used physical disks */
2962 	size = sizeof(struct mfi_ld_details);
2963 	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
2964 		memset(&mbox, 0, sizeof(mbox));
2965 		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
2966 		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox, &sc->sc_ld_details[i], size,
2967 		    SCSI_DATA_IN))
2968 			goto done;
2969 
2970 		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
2971 		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
2972 	}
2973 	sc->sc_no_pd = d;
2974 
2975 	rv = 0;
2976 done:
2977 	return (rv);
2978 }
2979 
int
mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
{
	/*
	 * BIOCINQ handler: report the controller name plus disk and
	 * volume counts from the freshly refreshed config.
	 */
	int			rv = EINVAL;
	struct mfi_conf		*cfg = NULL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));

	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* count unused disks as volumes */
	if (sc->sc_cfg == NULL)
		goto done;
	cfg = sc->sc_cfg;

	/* hotspares are reported as extra volumes */
	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
#if notyet
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
	    (bi->bi_nodisk - sc->sc_no_pd);
#endif
	/* tell bio who we are */
	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));

	rv = 0;
done:
	return (rv);
}
3012 
/*
 * BIOCVOL handler: fill in *bv for logical volume bv_volid.  Volume ids
 * at or beyond the LD count are handed to mfii_bio_hs() so hotspares and
 * unused disks show up as extra "volumes".  Status is derived from the
 * LD state, then overridden with scrub progress if a consistency check
 * or background init is in flight.  Returns 0 on success, EINVAL on
 * failure.
 */
int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int			i, per, target, rv = EINVAL;
	struct scsi_link	*link;
	struct device		*dev;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	/* report the attached sd(4) name, or "cache" if none attached */
	i = bv->bv_volid;
	target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
	link = scsi_get_link(sc->sc_scsibus, target, 0);
	if (link == NULL) {
		strlcpy(bv->bv_dev, "cache", sizeof(bv->bv_dev));
	} else {
		dev = link->device_softc;
		if (dev == NULL)
			goto done;

		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
	}

	/* map the firmware LD state onto bio's volume status */
	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check: progress is a 16-bit fraction of 0xffff */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background initialisation, same progress encoding */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	/* low bit of the current cache policy selects write-back */
	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned volumes report as RAID x0, e.g. RAID 1 -> RAID 10 */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
3116 
/*
 * BIOCDISK handler: fill in *bd for one member disk of a logical volume
 * (hotspare volids are forwarded to mfii_bio_hs()).  The disk is located
 * by walking the firmware config: volume -> span -> array -> pd id.  If
 * the slot's pd id is 0xffff the disk is missing; the command still
 * succeeds with BIOC_SDFAILED and, when one exists, an unconfigured disk
 * is reported in its place as the rebuild candidate.  Returns 0 on
 * success, EINVAL on failure.
 */
int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	struct mfi_pd_progress	*mfp;
	struct mfi_progress	*mp;
	struct scsi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	int			arr, vol, disk, span;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    SCSI_DATA_IN))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			/* skip non-disk devices (e.g. enclosure services) */
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN))
				continue;

			/* first unconfigured disk wins; pd now describes it */
			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no candidate found: return the bare BIOC_SDFAILED result */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    SCSI_DATA_IN)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* inquiry vendor field is not NUL-terminated; copy and terminate */
	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		/* patrol read progress is a 16-bit fraction of 0xffff */
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}

	rv = 0;
freeme:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);

	return (rv);
}
3270 
3271 int
mfii_ioctl_alarm(struct mfii_softc * sc,struct bioc_alarm * ba)3272 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3273 {
3274 	uint32_t		opc, flags = 0;
3275 	int			rv = 0;
3276 	int8_t			ret;
3277 
3278 	switch(ba->ba_opcode) {
3279 	case BIOC_SADISABLE:
3280 		opc = MR_DCMD_SPEAKER_DISABLE;
3281 		break;
3282 
3283 	case BIOC_SAENABLE:
3284 		opc = MR_DCMD_SPEAKER_ENABLE;
3285 		break;
3286 
3287 	case BIOC_SASILENCE:
3288 		opc = MR_DCMD_SPEAKER_SILENCE;
3289 		break;
3290 
3291 	case BIOC_GASTATUS:
3292 		opc = MR_DCMD_SPEAKER_GET;
3293 		flags = SCSI_DATA_IN;
3294 		break;
3295 
3296 	case BIOC_SATEST:
3297 		opc = MR_DCMD_SPEAKER_TEST;
3298 		break;
3299 
3300 	default:
3301 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3302 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3303 		return (EINVAL);
3304 	}
3305 
3306 	if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), flags))
3307 		rv = EINVAL;
3308 	else
3309 		if (ba->ba_opcode == BIOC_GASTATUS)
3310 			ba->ba_status = ret;
3311 		else
3312 			ba->ba_status = 0;
3313 
3314 	return (rv);
3315 }
3316 
3317 int
mfii_ioctl_blink(struct mfii_softc * sc,struct bioc_blink * bb)3318 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3319 {
3320 	int			i, found, rv = EINVAL;
3321 	union mfi_mbox		mbox;
3322 	uint32_t		cmd;
3323 	struct mfi_pd_list	*pd;
3324 
3325 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3326 	    bb->bb_status);
3327 
3328 	/* channel 0 means not in an enclosure so can't be blinked */
3329 	if (bb->bb_channel == 0)
3330 		return (EINVAL);
3331 
3332 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3333 
3334 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd), SCSI_DATA_IN))
3335 		goto done;
3336 
3337 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3338 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3339 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3340 			found = 1;
3341 			break;
3342 		}
3343 
3344 	if (!found)
3345 		goto done;
3346 
3347 	memset(&mbox, 0, sizeof(mbox));
3348 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3349 
3350 	switch (bb->bb_status) {
3351 	case BIOC_SBUNBLINK:
3352 		cmd = MR_DCMD_PD_UNBLINK;
3353 		break;
3354 
3355 	case BIOC_SBBLINK:
3356 		cmd = MR_DCMD_PD_BLINK;
3357 		break;
3358 
3359 	case BIOC_SBALARM:
3360 	default:
3361 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
3362 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
3363 		goto done;
3364 	}
3365 
3366 
3367 	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, 0) == 0)
3368 		rv = 0;
3369 
3370 done:
3371 	free(pd, M_DEVBUF, sizeof *pd);
3372 	return (rv);
3373 }
3374 
/*
 * Prepare physical disk pd_id so it can become a rebuild target:
 *  1. if the disk is UNCONFIG_BAD, set it to UNCONFIG_GOOD;
 *  2. if the disk carries foreign DDF state, scan for and clear any
 *     foreign configuration on the controller;
 *  3. re-read the disk and verify it is now UNCONFIG_GOOD with no
 *     foreign state, failing with ENXIO otherwise.
 * Returns 0 on success, ENXIO or an mfii_mgmt() error code on failure.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	int			rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/* state change carries the pd's sequence number in s[1] */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
		if (rv != 0)
			goto done;
	}

	/* re-read: the state change above may have altered the ddf state */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL, fsi, sizeof(*fsi),
		    SCSI_DATA_IN);
		if (rv != 0)
			goto done;

		/* only clear when the scan actually found foreign configs */
		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL, NULL, 0, 0);
			if (rv != 0)
				goto done;
		}
	}

	/* final check: the disk must now be clean and unconfigured */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF, sizeof *fsi);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3436 
/*
 * Turn physical disk pd_id into a hotspare via MR_DCMD_CFG_MAKE_SPARE.
 * The command payload is a struct mfi_hotspare followed by one uint16_t
 * per array in the current config; the trailing entries are left zeroed
 * here (presumably selecting no specific arrays, i.e. a global spare —
 * TODO confirm against the firmware interface).
 * Returns 0 on success, an mfii_mgmt() error code otherwise.
 */
static int
mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfi_hotspare	*hs;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	size_t			size;
	int			rv = EINVAL;

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;

	hs = malloc(size, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch the pd so we can pass its id/sequence pair back verbatim */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	memset(hs, 0, size);
	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size, SCSI_DATA_OUT);

done:
	free(hs, M_DEVBUF, size);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3475 
/*
 * BIOCSETSTATE handler: change the firmware state of the physical disk
 * in the given enclosure/slot (online, offline, hotspare, or start a
 * rebuild).  For rebuild on a non-offline disk the drive is first run
 * through mfii_makegood()/mfii_makespare(), which may itself kick off
 * the rebuild.  Returns 0 on success, EINVAL or an mfii_mgmt() error
 * code on failure.
 */
int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	int			i, found, rv = EINVAL;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl), SCSI_DATA_IN))
		goto done;

	/* locate the disk matching the requested enclosure/slot */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN))
		goto done;

	/* SET_STATE takes the pd id, its sequence number and the new state */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			/* clean the disk and make it a spare first */
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read: the state/sequence may have changed */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
done:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);
	return (rv);
}
3567 
/*
 * BIOCPATROL handler: start or stop a patrol read, switch the patrol
 * operating mode (manual / disabled / auto with interval and start
 * time), or report the current mode and run state back to bioctl(8).
 * Returns 0 on success, EINVAL on bad arguments or command failure.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t		opc;
	int			rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status	status;
	uint32_t		time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/*
		 * NOTE(review): no data buffer, yet SCSI_DATA_IN is passed
		 * with length 0 — presumably harmless; confirm against
		 * mfii_mgmt()'s handling of zero-length transfers.
		 */
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, SCSI_DATA_IN))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* read-modify-write the patrol properties */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					/* next run is relative to device time */
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_OUT))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status), SCSI_DATA_IN))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
3696 
3697 int
mfii_bio_hs(struct mfii_softc * sc,int volid,int type,void * bio_hs)3698 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3699 {
3700 	struct mfi_conf		*cfg;
3701 	struct mfi_hotspare	*hs;
3702 	struct mfi_pd_details	*pd;
3703 	struct bioc_disk	*sdhs;
3704 	struct bioc_vol		*vdhs;
3705 	struct scsi_inquiry_data *inqbuf;
3706 	char			vend[8+16+4+1], *vendp;
3707 	int			i, rv = EINVAL;
3708 	uint32_t		size;
3709 	union mfi_mbox		mbox;
3710 
3711 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3712 
3713 	if (!bio_hs)
3714 		return (EINVAL);
3715 
3716 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3717 
3718 	/* send single element command to retrieve size for full structure */
3719 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3720 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg), SCSI_DATA_IN))
3721 		goto freeme;
3722 
3723 	size = cfg->mfc_size;
3724 	free(cfg, M_DEVBUF, sizeof *cfg);
3725 
3726 	/* memory for read config */
3727 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3728 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN))
3729 		goto freeme;
3730 
3731 	/* calculate offset to hs structure */
3732 	hs = (struct mfi_hotspare *)(
3733 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3734 	    cfg->mfc_array_size * cfg->mfc_no_array +
3735 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
3736 
3737 	if (volid < cfg->mfc_no_ld)
3738 		goto freeme; /* not a hotspare */
3739 
3740 	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3741 		goto freeme; /* not a hotspare */
3742 
3743 	/* offset into hotspare structure */
3744 	i = volid - cfg->mfc_no_ld;
3745 
3746 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3747 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3748 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3749 
3750 	/* get pd fields */
3751 	memset(&mbox, 0, sizeof(mbox));
3752 	mbox.s[0] = hs[i].mhs_pd.mfp_id;
3753 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3754 	    SCSI_DATA_IN)) {
3755 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3756 		    DEVNAME(sc));
3757 		goto freeme;
3758 	}
3759 
3760 	switch (type) {
3761 	case MFI_MGMT_VD:
3762 		vdhs = bio_hs;
3763 		vdhs->bv_status = BIOC_SVONLINE;
3764 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3765 		vdhs->bv_level = -1; /* hotspare */
3766 		vdhs->bv_nodisk = 1;
3767 		break;
3768 
3769 	case MFI_MGMT_SD:
3770 		sdhs = bio_hs;
3771 		sdhs->bd_status = BIOC_SDHOTSPARE;
3772 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3773 		sdhs->bd_channel = pd->mpd_enc_idx;
3774 		sdhs->bd_target = pd->mpd_enc_slot;
3775 		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
3776 		vendp = inqbuf->vendor;
3777 		memcpy(vend, vendp, sizeof vend - 1);
3778 		vend[sizeof vend - 1] = '\0';
3779 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3780 		break;
3781 
3782 	default:
3783 		goto freeme;
3784 	}
3785 
3786 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3787 	rv = 0;
3788 freeme:
3789 	free(pd, M_DEVBUF, sizeof *pd);
3790 	free(cfg, M_DEVBUF, 0);
3791 
3792 	return (rv);
3793 }
3794 
3795 #ifndef SMALL_KERNEL
3796 
3797 #define MFI_BBU_SENSORS 4
3798 
/*
 * Refresh the battery backup unit sensors from MR_DCMD_BBU_GET_STATUS.
 * Sensor slots (set up in mfii_create_sensors()): [0] health indicator,
 * [1] voltage, [2] current, [3] temperature; sc_bbu_status[] mirrors the
 * firmware status bits, one indicator per bit.  On command failure or
 * when no battery is present all sensors are invalidated.
 */
void
mfii_bbu(struct mfii_softc *sc)
{
	struct mfi_bbu_status bbu;
	u_int32_t status;
	u_int32_t mask;
	u_int32_t soh_bad;
	int i;

	if (mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
	    sizeof(bbu), SCSI_DATA_IN) != 0) {
		for (i = 0; i < MFI_BBU_SENSORS; i++) {
			sc->sc_bbu[i].value = 0;
			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
		}
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].value = 0;
			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
		}
		return;
	}

	/* which fw_status bits count as "bad" depends on the battery type */
	switch (bbu.battery_type) {
	case MFI_BBU_TYPE_IBBU:
		mask = MFI_BBU_STATE_BAD_IBBU;
		soh_bad = 0;
		break;
	case MFI_BBU_TYPE_BBU:
		mask = MFI_BBU_STATE_BAD_BBU;
		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
		break;

	case MFI_BBU_TYPE_NONE:
	default:
		/* no battery: flag the health indicator critical */
		sc->sc_bbu[0].value = 0;
		sc->sc_bbu[0].status = SENSOR_S_CRIT;
		for (i = 1; i < MFI_BBU_SENSORS; i++) {
			sc->sc_bbu[i].value = 0;
			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
		}
		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
			sc->sc_bbu_status[i].value = 0;
			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
		}
		return;
	}

	status = letoh32(bbu.fw_status);

	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
	    SENSOR_S_OK;

	/* scale to the sensor framework units: uV, uA, and uK (temp in C) */
	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
	for (i = 1; i < MFI_BBU_SENSORS; i++)
		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;

	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
	}
}
3863 
3864 void
mfii_refresh_ld_sensor(struct mfii_softc * sc,int ld)3865 mfii_refresh_ld_sensor(struct mfii_softc *sc, int ld)
3866 {
3867 	struct ksensor *sensor;
3868 	int target;
3869 
3870 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3871 	sensor = &sc->sc_sensors[target];
3872 
3873 	switch(sc->sc_ld_list.mll_list[ld].mll_state) {
3874 	case MFI_LD_OFFLINE:
3875 		sensor->value = SENSOR_DRIVE_FAIL;
3876 		sensor->status = SENSOR_S_CRIT;
3877 		break;
3878 
3879 	case MFI_LD_PART_DEGRADED:
3880 	case MFI_LD_DEGRADED:
3881 		sensor->value = SENSOR_DRIVE_PFAIL;
3882 		sensor->status = SENSOR_S_WARN;
3883 		break;
3884 
3885 	case MFI_LD_ONLINE:
3886 		sensor->value = SENSOR_DRIVE_ONLINE;
3887 		sensor->status = SENSOR_S_OK;
3888 		break;
3889 
3890 	default:
3891 		sensor->value = 0; /* unknown */
3892 		sensor->status = SENSOR_S_UNKNOWN;
3893 		break;
3894 	}
3895 }
3896 
3897 void
mfii_init_ld_sensor(struct mfii_softc * sc,int ld)3898 mfii_init_ld_sensor(struct mfii_softc *sc, int ld)
3899 {
3900 	struct device		*dev;
3901 	struct scsi_link	*link;
3902 	struct ksensor		*sensor;
3903 	int			target;
3904 
3905 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3906 	sensor = &sc->sc_sensors[target];
3907 
3908 	link = scsi_get_link(sc->sc_scsibus, target, 0);
3909 	if (link == NULL) {
3910 		strlcpy(sensor->desc, "cache", sizeof(sensor->desc));
3911 	} else {
3912 		dev = link->device_softc;
3913 		if (dev != NULL)
3914 			strlcpy(sensor->desc, dev->dv_xname,
3915 			    sizeof(sensor->desc));
3916 	}
3917 	sensor->type = SENSOR_DRIVE;
3918 	mfii_refresh_ld_sensor(sc, ld);
3919 }
3920 
3921 int
mfii_create_sensors(struct mfii_softc * sc)3922 mfii_create_sensors(struct mfii_softc *sc)
3923 {
3924 	int			i, target;
3925 
3926 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3927 	    sizeof(sc->sc_sensordev.xname));
3928 
3929 	if (ISSET(letoh32(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3930 		sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu),
3931 		    M_DEVBUF, M_WAITOK | M_ZERO);
3932 
3933 		sc->sc_bbu[0].type = SENSOR_INDICATOR;
3934 		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
3935 		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
3936 		    sizeof(sc->sc_bbu[0].desc));
3937 		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
3938 
3939 		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
3940 		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
3941 		sc->sc_bbu[2].type = SENSOR_AMPS;
3942 		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
3943 		sc->sc_bbu[3].type = SENSOR_TEMP;
3944 		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
3945 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
3946 			strlcpy(sc->sc_bbu[i].desc, "bbu",
3947 			    sizeof(sc->sc_bbu[i].desc));
3948 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
3949 		}
3950 
3951 		sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) *
3952 		    sizeof(mfi_bbu_indicators), M_DEVBUF, M_WAITOK | M_ZERO);
3953 
3954 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3955 			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
3956 			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3957 			strlcpy(sc->sc_bbu_status[i].desc,
3958 			    mfi_bbu_indicators[i],
3959 			    sizeof(sc->sc_bbu_status[i].desc));
3960 
3961 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
3962 		}
3963 	}
3964 
3965 	sc->sc_sensors = mallocarray(MFI_MAX_LD, sizeof(struct ksensor),
3966 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3967 	if (sc->sc_sensors == NULL)
3968 		return (1);
3969 
3970 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3971 		mfii_init_ld_sensor(sc, i);
3972 		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
3973 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[target]);
3974 	}
3975 
3976 	if (sensor_task_register(sc, mfii_refresh_sensors, 10) == NULL)
3977 		goto bad;
3978 
3979 	sensordev_install(&sc->sc_sensordev);
3980 
3981 	return (0);
3982 
3983 bad:
3984 	free(sc->sc_sensors, M_DEVBUF,
3985 	    MFI_MAX_LD * sizeof(struct ksensor));
3986 
3987 	return (1);
3988 }
3989 
3990 void
mfii_refresh_sensors(void * arg)3991 mfii_refresh_sensors(void *arg)
3992 {
3993 	struct mfii_softc	*sc = arg;
3994 	int			i;
3995 
3996 	rw_enter_write(&sc->sc_lock);
3997 	if (sc->sc_bbu != NULL)
3998 		mfii_bbu(sc);
3999 
4000 	mfii_bio_getitall(sc);
4001 	rw_exit_write(&sc->sc_lock);
4002 
4003 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++)
4004 		mfii_refresh_ld_sensor(sc, i);
4005 }
4006 #endif /* SMALL_KERNEL */
4007 #endif /* NBIO > 0 */
4008