xref: /dragonfly/sys/dev/disk/mpt/mpt_raid.c (revision 0dace59e)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  *
42  * $FreeBSD: src/sys/dev/mpt/mpt_raid.c,v 1.30 2011/07/29 18:38:31 marius Exp $
43  */
44 
45 #include <dev/disk/mpt/mpt.h>
46 #include <dev/disk/mpt/mpt_raid.h>
47 
48 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
49 #include "dev/disk/mpt/mpilib/mpi_raid.h"
50 
51 #include <bus/cam/cam.h>
52 #include <bus/cam/cam_ccb.h>
53 #include <bus/cam/cam_sim.h>
54 #include <bus/cam/cam_xpt_sim.h>
55 #include <bus/cam/cam_periph.h>
56 
57 #include <sys/callout.h>
58 #include <sys/kthread.h>
59 #include <sys/sysctl.h>
60 
61 #include <machine/stdarg.h>
62 
/*
 * Per-request RAID action result, stored directly after the request
 * message in the request buffer (see REQ_TO_RAID_ACTION_RESULT).
 * Holds the action-specific data and completion status copied out of
 * the IOC's reply frame by mpt_raid_reply_frame_handler().
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;	/* resync progress */
		uint32_t		new_settings;		/* volume settings */
		uint8_t			phys_disk_num;		/* created disk # */
	} action_data;
	uint16_t			action_status;
};
72 
/* Locate the mpt_raid_action_result stored just past the request message. */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* IOC status code with the log-info availability flag masked off. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
77 
78 static mpt_probe_handler_t	mpt_raid_probe;
79 static mpt_attach_handler_t	mpt_raid_attach;
80 static mpt_enable_handler_t	mpt_raid_enable;
81 static mpt_event_handler_t	mpt_raid_event;
82 static mpt_shutdown_handler_t	mpt_raid_shutdown;
83 static mpt_reset_handler_t	mpt_raid_ioc_reset;
84 static mpt_detach_handler_t	mpt_raid_detach;
85 
/*
 * Hooks registered with the core MPT driver; the core invokes these at
 * the corresponding points in the controller's life cycle.
 */
static struct mpt_personality mpt_raid_personality =
{
	.name		= "mpt_raid",
	.probe		= mpt_raid_probe,
	.attach		= mpt_raid_attach,
	.enable		= mpt_raid_enable,
	.event		= mpt_raid_event,
	.reset		= mpt_raid_ioc_reset,
	.shutdown	= mpt_raid_shutdown,
	.detach		= mpt_raid_detach,
};
97 
98 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
99 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
100 
101 static mpt_reply_handler_t mpt_raid_reply_handler;
102 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
103 					MSG_DEFAULT_REPLY *reply_frame);
104 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
105 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
106 static void mpt_raid_thread(void *arg);
107 static timeout_t mpt_raid_timer;
108 #if 0
109 static void mpt_enable_vol(struct mpt_softc *mpt,
110 			   struct mpt_raid_volume *mpt_vol, int enable);
111 #endif
112 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
113 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
114     struct cam_path *);
115 static void mpt_raid_sysctl_attach(struct mpt_softc *);
116 
117 static const char *mpt_vol_type(struct mpt_raid_volume *vol);
118 static const char *mpt_vol_state(struct mpt_raid_volume *vol);
119 static const char *mpt_disk_state(struct mpt_raid_disk *disk);
120 static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
121     const char *fmt, ...) __printflike(3, 4);
122 static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
123     const char *fmt, ...) __printflike(3, 4);
124 
125 static int mpt_issue_raid_req(struct mpt_softc *mpt,
126     struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
127     u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
128     int write, int wait);
129 
130 static int mpt_refresh_raid_data(struct mpt_softc *mpt);
131 static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);
132 
133 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
134 
135 static const char *
136 mpt_vol_type(struct mpt_raid_volume *vol)
137 {
138 	switch (vol->config_page->VolumeType) {
139 	case MPI_RAID_VOL_TYPE_IS:
140 		return ("RAID-0");
141 	case MPI_RAID_VOL_TYPE_IME:
142 		return ("RAID-1E");
143 	case MPI_RAID_VOL_TYPE_IM:
144 		return ("RAID-1");
145 	default:
146 		return ("Unknown");
147 	}
148 }
149 
150 static const char *
151 mpt_vol_state(struct mpt_raid_volume *vol)
152 {
153 	switch (vol->config_page->VolumeStatus.State) {
154 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
155 		return ("Optimal");
156 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
157 		return ("Degraded");
158 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
159 		return ("Failed");
160 	default:
161 		return ("Unknown");
162 	}
163 }
164 
165 static const char *
166 mpt_disk_state(struct mpt_raid_disk *disk)
167 {
168 	switch (disk->config_page.PhysDiskStatus.State) {
169 	case MPI_PHYSDISK0_STATUS_ONLINE:
170 		return ("Online");
171 	case MPI_PHYSDISK0_STATUS_MISSING:
172 		return ("Missing");
173 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
174 		return ("Incompatible");
175 	case MPI_PHYSDISK0_STATUS_FAILED:
176 		return ("Failed");
177 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
178 		return ("Initializing");
179 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
180 		return ("Offline Requested");
181 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
182 		return ("Failed per Host Request");
183 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
184 		return ("Offline");
185 	default:
186 		return ("Unknown");
187 	}
188 }
189 
/*
 * printf-style message prefixed with the unit, volume index, and the
 * volume's bus/ID so log lines can be tied to a specific volume.
 */
static void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	__va_list ap;

	/* Volume index is derived from the position in the raid_volumes array. */
	kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	__va_start(ap, fmt);
	kvprintf(fmt, ap);
	__va_end(ap);
}
203 
/*
 * printf-style message prefixed with identifying information for a
 * physical disk: volume ID and member number when the disk belongs to
 * a volume, otherwise its raw bus/target address.
 */
static void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	__va_list ap;

	if (disk->volume != NULL) {
		kprintf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	__va_start(ap, fmt);
	kvprintf(fmt, ap);
	__va_end(ap);
}
224 
225 static void
226 mpt_raid_async(void *callback_arg, u_int32_t code,
227 	       struct cam_path *path, void *arg)
228 {
229 	struct mpt_softc *mpt;
230 
231 	mpt = (struct mpt_softc*)callback_arg;
232 	switch (code) {
233 	case AC_FOUND_DEVICE:
234 	{
235 		struct ccb_getdev *cgd;
236 		struct mpt_raid_volume *mpt_vol;
237 
238 		cgd = (struct ccb_getdev *)arg;
239 		if (cgd == NULL) {
240 			break;
241 		}
242 
243 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
244 			 cgd->ccb_h.target_id);
245 
246 		RAID_VOL_FOREACH(mpt, mpt_vol) {
247 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
248 				continue;
249 
250 			if (mpt_vol->config_page->VolumeID
251 			 == cgd->ccb_h.target_id) {
252 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
253 				break;
254 			}
255 		}
256 	}
257 	default:
258 		break;
259 	}
260 }
261 
262 static int
263 mpt_raid_probe(struct mpt_softc *mpt)
264 {
265 
266 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
267 		return (ENODEV);
268 	}
269 	return (0);
270 }
271 
272 static int
273 mpt_raid_attach(struct mpt_softc *mpt)
274 {
275 	struct ccb_setasync csa;
276 	mpt_handler_t	 handler;
277 	int		 error;
278 
279 	mpt_callout_init(mpt, &mpt->raid_timer);
280 
281 	error = mpt_spawn_raid_thread(mpt);
282 	if (error != 0) {
283 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
284 		goto cleanup;
285 	}
286 
287 	MPT_LOCK(mpt);
288 	handler.reply_handler = mpt_raid_reply_handler;
289 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
290 				     &raid_handler_id);
291 	if (error != 0) {
292 		mpt_prt(mpt, "Unable to register RAID haandler!\n");
293 		goto cleanup;
294 	}
295 
296 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
297 	csa.ccb_h.func_code = XPT_SASYNC_CB;
298 	csa.event_enable = AC_FOUND_DEVICE;
299 	csa.callback = mpt_raid_async;
300 	csa.callback_arg = mpt;
301 	xpt_action((union ccb *)&csa);
302 	if (csa.ccb_h.status != CAM_REQ_CMP) {
303 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
304 			"CAM async handler.\n");
305 	}
306 	MPT_UNLOCK(mpt);
307 
308 	mpt_raid_sysctl_attach(mpt);
309 	return (0);
310 cleanup:
311 	MPT_UNLOCK(mpt);
312 	mpt_raid_detach(mpt);
313 	return (error);
314 }
315 
/*
 * Personality enable hook: RAID support needs no additional enabling
 * beyond what attach already did.
 */
static int
mpt_raid_enable(struct mpt_softc *mpt)
{

	return (0);
}
322 
/*
 * Personality detach hook: tear down everything mpt_raid_attach() set
 * up — the refresh timer, the monitoring thread, the reply handler,
 * and the CAM async registration (disabled by setting event_enable=0).
 * Also used as the error-unwind path from mpt_raid_attach().
 */
static void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	callout_stop(&mpt->raid_timer);

	MPT_LOCK(mpt);
	mpt_terminate_raid_thread(mpt);
	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
	MPT_UNLOCK(mpt);
}
344 
/*
 * Personality IOC-reset hook; no RAID-specific recovery is performed
 * yet.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{

	/* Nothing to do yet. */
}
351 
/*
 * Human-readable text for each MPI_EVENT_RAID_RC_* reason code,
 * indexed by the code itself; mpt_raid_event() bounds-checks against
 * NUM_ELEMENTS() before indexing.
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
367 
/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications from the IOC.
 * Locates the affected volume and/or physical disk, marks stale state
 * for refresh, bumps raid_rescan when membership may have changed,
 * logs the event, and wakes the RAID monitoring thread.  Returns 1
 * when the event was consumed, 0 for events this handler does not own.
 */
static int
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int i;
	int print_event;

	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
		return (0);
	}

	raid_event = (EVENT_DATA_RAID *)&msg->Data;

	/* Find the active volume matching the event's bus/ID, if any. */
	mpt_vol = NULL;
	vol_pg = NULL;
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;

			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
				break;
		}
		if (i >= mpt->ioc_page2->MaxVolumes) {
			mpt_vol = NULL;
			vol_pg = NULL;
		}
	}

	/* A PhysDiskNum of 0xFF means no physical disk is referenced. */
	mpt_disk = NULL;
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
			mpt_disk = NULL;
		}
	}

	print_event = 1;
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
		break;
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
			} else {
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 * from our output.
				 */
				print_event = 0;
			}
		}
		break;
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		mpt->raid_rescan++;
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
		}
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		mpt->raid_rescan++;
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
		}
		break;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		break;
	}

	if (print_event) {
		/* Prefix the message with the most specific object we found. */
		if (mpt_disk != NULL) {
			mpt_disk_prt(mpt, mpt_disk, "%s", "");
		} else if (mpt_vol != NULL) {
			mpt_vol_prt(mpt, mpt_vol, "%s", "");
		} else {
			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
				raid_event->VolumeID);

			if (raid_event->PhysDiskNum != 0xFF)
				mpt_prtc(mpt, ":%d): ",
					 raid_event->PhysDiskNum);
			else
				mpt_prtc(mpt, "): ");
		}

		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
				 raid_event->ReasonCode);
		else
			mpt_prtc(mpt, "%s\n",
				 raid_event_txt[raid_event->ReasonCode]);
	}

	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "%s", "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	}

	mpt_raid_wakeup(mpt);
	return (1);
}
498 
499 static void
500 mpt_raid_shutdown(struct mpt_softc *mpt)
501 {
502 	struct mpt_raid_volume *mpt_vol;
503 
504 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
505 		return;
506 	}
507 
508 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
509 	RAID_VOL_FOREACH(mpt, mpt_vol) {
510 		mpt_verify_mwce(mpt, mpt_vol);
511 	}
512 }
513 
/*
 * Reply handler for RAID action requests.  Delegates reply-frame
 * parsing, marks the request done, and either wakes a sleeping waiter
 * or frees the request.  Always returns TRUE (reply consumed).
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	/* A waiter owns the request; otherwise release it here. */
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
544 
545 /*
546  * Parse additional completion information in the reply
547  * frame for RAID I/O requests.
548  */
549 static int
550 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
551     MSG_DEFAULT_REPLY *reply_frame)
552 {
553 	MSG_RAID_ACTION_REPLY *reply;
554 	struct mpt_raid_action_result *action_result;
555 	MSG_RAID_ACTION_REQUEST *rap;
556 
557 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
558 	req->IOCStatus = le16toh(reply->IOCStatus);
559 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
560 
561 	switch (rap->Action) {
562 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
563 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
564 		break;
565 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
566 		mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
567 		break;
568 	default:
569 		break;
570 	}
571 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
572 	memcpy(&action_result->action_data, &reply->ActionData,
573 	    sizeof(action_result->action_data));
574 	action_result->action_status = le16toh(reply->ActionStatus);
575 	return (TRUE);
576 }
577 
/*
 * Utility routine to perform a RAID action command.
 */
/*
 * Build and send a RAID action request to the IOC.
 *
 * vol/disk identify the target (disk may be NULL; 0xFF then marks "no
 * physical disk").  Action/ActionDataWord are the MPI action and its
 * inline argument.  addr/len/write describe an optional data buffer
 * attached via a single simple SGE.  When wait is set, sleep-free
 * polls up to 2 seconds for completion and returns the mpt_wait_req()
 * result; otherwise returns 0 immediately after queuing.
 */
static int
mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
		   int write, int wait)
{
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;

	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = Action;
	rap->ActionDataWord = htole32(ActionDataWord);
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = vol->config_page->VolumeID;
	rap->VolumeBus = vol->config_page->VolumeBus;
	if (disk != NULL)
		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
	else
		rap->PhysDiskNum = 0xFF;
	/* Single simple SGE terminating the list, direction per 'write'. */
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	se->Address = htole32(addr);
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	se->FlagsLength = htole32(se->FlagsLength);
	/* Route the reply back to our handler via the context field. */
	rap->MsgContext = htole32(req->index | raid_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	if (wait) {
		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
				     /*sleep_ok*/FALSE, /*time_ms*/2000));
	} else {
		return (0);
	}
}
621 
622 /*************************** RAID Status Monitoring ***************************/
/*
 * Create the RAID monitoring kthread.  The physical-disk SIM queue is
 * frozen first and is released by the thread after its first data
 * refresh (or here on spawn failure).  Returns 0 or an errno.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	MPT_LOCK(mpt);
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	MPT_UNLOCK(mpt);
	error = mpt_kthread_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
	    "mpt_raid%d", mpt->unit);
	if (error != 0) {
		/* Thread never ran; undo the freeze ourselves. */
		MPT_LOCK(mpt);
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
		MPT_UNLOCK(mpt);
	}
	return (error);
}
648 
/*
 * Ask the RAID monitoring thread to exit and wait until it has.
 * Called with the MPT lock held (from mpt_raid_detach()).
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	/* The thread sleeps on &mpt->raid_volumes; wake it to see the flag. */
	wakeup(&mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, 0, "thtrm", 0);
}
664 
665 static void
666 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
667 {
668     xpt_free_path(ccb->ccb_h.path);
669     kfree(ccb, M_TEMP);
670 }
671 
/*
 * RAID monitoring thread body.  Loops until shutdwn_raid is set:
 * sleeps until woken (mpt_raid_wakeup), refreshes RAID data, releases
 * the physical-disk SIM queue after the first successful refresh, and
 * triggers a CAM bus rescan when membership changed.  Runs with the
 * MPT lock held except around the CCB allocation.
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		if (mpt->raid_wakeup == 0) {
			mpt_sleep(mpt, &mpt->raid_volumes, 0, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		if (mpt_refresh_raid_data(mpt)) {
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
			continue;
		}

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			xpt_release_simq(mpt->phydisk_sim, TRUE);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			int error;

			mpt->raid_rescan = 0;
			/* Drop the lock across the sleeping allocation. */
			MPT_UNLOCK(mpt);

			ccb = kmalloc(sizeof(union ccb), M_TEMP,
			    M_WAITOK | M_ZERO);

			MPT_LOCK(mpt);
			error = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				kfree(ccb, M_TEMP);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				    5/*priority (low)*/);
				ccb->ccb_h.func_code = XPT_SCAN_BUS;
				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
				ccb->crcn.flags = CAM_FLAG_NONE;
				xpt_action(ccb);

				/* scan is now in progress */
			}
		}
	}
	/* Clear the handle and wake mpt_terminate_raid_thread()'s interlock. */
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
	mpt_kthread_exit(0);
}
738 
/*
 * Compiled-out (unfinished) physical-disk quiesce support: issue a
 * QUIESCE_PHYS_IO action and complete the CCB from a timeout/callback.
 */
#if 0
static void
mpt_raid_quiesce_timeout(void *arg)
{

	/* Complete the CCB with error */
	/* COWWWW */
}

static timeout_t mpt_raid_quiesce_timeout;
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		/* Fire-and-forget; completion arrives via the reply handler. */
		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
#endif
798 
799 /* XXX Ignores that there may be multiple busses/IOCs involved. */
800 cam_status
801 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
802 {
803 	struct mpt_raid_disk *mpt_disk;
804 
805 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
806 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
807 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
808 		*tgt = mpt_disk->config_page.PhysDiskID;
809 		return (0);
810 	}
811 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
812 		 ccb->ccb_h.target_id);
813 	return (-1);
814 }
815 
816 /* XXX Ignores that there may be multiple busses/IOCs involved. */
817 int
818 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
819 {
820 	struct mpt_raid_disk *mpt_disk;
821 	int i;
822 
823 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
824 		return (0);
825 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
826 		mpt_disk = &mpt->raid_disks[i];
827 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
828 		    mpt_disk->config_page.PhysDiskID == tgt)
829 			return (1);
830 	}
831 	return (0);
832 
833 }
834 
835 /* XXX Ignores that there may be multiple busses/IOCs involved. */
836 int
837 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
838 {
839 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
840 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
841 
842 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
843 		return (0);
844 	}
845 	ioc_vol = mpt->ioc_page2->RaidVolume;
846 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
847 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
848 		if (ioc_vol->VolumeID == tgt) {
849 			return (1);
850 		}
851 	}
852 	return (0);
853 }
854 
/*
 * Compiled-out helper to enable/disable a volume via a RAID action.
 */
#if 0
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): req is not freed on this path; the
		 * request may still be outstanding in the IOC.
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
907 
/*
 * Reconcile a volume's member write-cache-enable (MWCE) setting with
 * the driver's configured policy, issuing a CHANGE_VOLUME_SETTINGS
 * action when they differ.  The cached config page is updated only
 * after the IOC reports success.
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarily when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/* Toggle WCE just long enough to snapshot the desired settings word. */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): req is deliberately not freed here; the
		 * request may still be outstanding in the IOC.
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* IOC accepted the change; commit it to our cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
990 
/*
 * Reconcile a volume's resync rate and resync-priority setting with
 * the driver's configured rate.  Two independent cases: the numeric
 * ResyncRate differs (SET_RESYNC_RATE action), or the high-priority
 * settings bit disagrees with the configured rate's implied priority
 * (rates >= 128 imply high priority; CHANGE_VOLUME_SETTINGS action).
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/*
			 * NOTE(review): req not freed; it may still be
			 * outstanding in the IOC.
			 */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/* Toggle the priority bit just to snapshot the desired word. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/* NOTE(review): req not freed; see above. */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			/* Commit the accepted change to our cached page. */
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1081 
1082 static void
1083 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1084 		       struct cam_path *path)
1085 {
1086 	struct ccb_relsim crs;
1087 
1088 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1089 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1090 	crs.ccb_h.flags = CAM_DEV_QFREEZE;
1091 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1092 	crs.openings = mpt->raid_queue_depth;
1093 	xpt_action((union ccb *)&crs);
1094 	if (crs.ccb_h.status != CAM_REQ_CMP)
1095 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1096 			    "with CAM status %#x\n", crs.ccb_h.status);
1097 }
1098 
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/*
	 * Decode the volume settings word one bit at a time; each case
	 * label is a single-bit mask, so (Settings & i) matches a label
	 * only when that bit is set.
	 */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		/* A power-of-two pool mask means exactly one pool bit set. */
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
		U8 f, s;

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		/* Print the member's pass-thru bus address. */
		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
			 pt_bus, disk_pg->PhysDiskID);
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/* Integrated Mirroring: member 0 is the primary. */
			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
			    "Primary" : "Secondary");
		} else {
			mpt_prtc(mpt, "Stripe Position %d",
				 mpt_disk->member_number);
		}
		/* Decode the per-disk status flags and state. */
		f = disk_pg->PhysDiskStatus.Flags;
		s = disk_pg->PhysDiskStatus.State;
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
			mpt_prtc(mpt, " Out of Sync");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
			mpt_prtc(mpt, " Quiesced");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
			mpt_prtc(mpt, " Inactive");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Optimal");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Non-Optimal");
		}
		switch (s) {
		case MPI_PHYSDISK0_STATUS_ONLINE:
			mpt_prtc(mpt, " Online");
			break;
		case MPI_PHYSDISK0_STATUS_MISSING:
			mpt_prtc(mpt, " Missing");
			break;
		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
			mpt_prtc(mpt, " Incompatible");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED:
			mpt_prtc(mpt, " Failed");
			break;
		case MPI_PHYSDISK0_STATUS_INITIALIZING:
			mpt_prtc(mpt, " Initializing");
			break;
		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
			mpt_prtc(mpt, " Requested Offline");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
			mpt_prtc(mpt, " Requested Failed");
			break;
		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		default:
			mpt_prtc(mpt, " Offline Other (%x)", s);
			break;
		}
		mpt_prtc(mpt, "\n");
	}
}
1206 
1207 static void
1208 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1209 {
1210 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1211 	int rd_bus = cam_sim_bus(mpt->sim);
1212 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1213 	u_int i;
1214 
1215 	disk_pg = &mpt_disk->config_page;
1216 	mpt_disk_prt(mpt, mpt_disk,
1217 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1218 		     device_get_nameunit(mpt->dev), rd_bus,
1219 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1220 		     pt_bus, (int)(mpt_disk - mpt->raid_disks));
1221 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1222 		return;
1223 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1224 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1225 		   ? ":" : "s:");
1226 	for (i = 0; i < 8; i++) {
1227 		u_int mask;
1228 
1229 		mask = 0x1 << i;
1230 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1231 			continue;
1232 		mpt_prtc(mpt, " %d", i);
1233 	}
1234 	mpt_prtc(mpt, "\n");
1235 }
1236 
1237 static void
1238 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1239 		      IOC_3_PHYS_DISK *ioc_disk)
1240 {
1241 	int rv;
1242 
1243 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1244 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1245 				 &mpt_disk->config_page.Header,
1246 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1247 	if (rv != 0) {
1248 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1249 			"Failed to read RAID Disk Hdr(%d)\n",
1250 			ioc_disk->PhysDiskNum);
1251 		return;
1252 	}
1253 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1254 				   &mpt_disk->config_page.Header,
1255 				   sizeof(mpt_disk->config_page),
1256 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1257 	if (rv != 0)
1258 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1259 			"Failed to read RAID Disk Page(%d)\n",
1260 			ioc_disk->PhysDiskNum);
1261 	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1262 }
1263 
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
    CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	/* Cached page is about to be refreshed; mark it stale. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;

	/* Fetch the volume's config page header, then the page itself. */
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}

	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}
	/* Byte-swap the freshly read page into host order. */
	mpt2host_config_page_raid_vol_0(vol_pg);

	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/*
			 * NOTE(review): for IM (mirror) volumes the map
			 * value is apparently 1-based; normalize to
			 * 0-based here — confirm against the MPI spec.
			 */
			mpt_disk->member_number--;
		}
	}

	/* Only resyncing volumes need a progress-indicator fetch. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
		mpt_free_request(mpt, req);
		return;
	}

	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		/* Cache the resync progress for later status reporting. */
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
	} else {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1342 
1343 /*
1344  * Update in-core information about RAID support.  We update any entries
 * that didn't previously exist or have been marked as needing to
1346  * be updated by our event handler.  Interesting changes are displayed
1347  * to the console.
1348  */
1349 static int
1350 mpt_refresh_raid_data(struct mpt_softc *mpt)
1351 {
1352 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1353 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1354 	IOC_3_PHYS_DISK *ioc_disk;
1355 	IOC_3_PHYS_DISK *ioc_last_disk;
1356 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1357 	size_t len;
1358 	int rv;
1359 	int i;
1360 	u_int nonopt_volumes;
1361 
1362 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1363 		return (0);
1364 	}
1365 
1366 	/*
1367 	 * Mark all items as unreferenced by the configuration.
1368 	 * This allows us to find, report, and discard stale
1369 	 * entries.
1370 	 */
1371 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1372 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1373 	}
1374 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1375 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1376 	}
1377 
1378 	/*
1379 	 * Get Physical Disk information.
1380 	 */
1381 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1382 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1383 				   &mpt->ioc_page3->Header, len,
1384 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1385 	if (rv) {
1386 		mpt_prt(mpt,
1387 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1388 		return (-1);
1389 	}
1390 	mpt2host_config_page_ioc3(mpt->ioc_page3);
1391 
1392 	ioc_disk = mpt->ioc_page3->PhysDisk;
1393 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1394 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1395 		struct mpt_raid_disk *mpt_disk;
1396 
1397 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1398 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1399 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1400 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1401 
1402 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1403 
1404 		}
1405 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1406 		mpt->raid_rescan++;
1407 	}
1408 
1409 	/*
1410 	 * Refresh volume data.
1411 	 */
1412 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1413 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1414 				   &mpt->ioc_page2->Header, len,
1415 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1416 	if (rv) {
1417 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1418 			"Failed to read IOC Page 2\n");
1419 		return (-1);
1420 	}
1421 	mpt2host_config_page_ioc2(mpt->ioc_page2);
1422 
1423 	ioc_vol = mpt->ioc_page2->RaidVolume;
1424 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1425 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1426 		struct mpt_raid_volume *mpt_vol;
1427 
1428 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1429 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1430 		vol_pg = mpt_vol->config_page;
1431 		if (vol_pg == NULL)
1432 			continue;
1433 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1434 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1435 		 || (vol_pg->VolumeStatus.Flags
1436 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1437 
1438 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1439 		}
1440 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1441 	}
1442 
1443 	nonopt_volumes = 0;
1444 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1445 		struct mpt_raid_volume *mpt_vol;
1446 		uint64_t total;
1447 		uint64_t left;
1448 		int m;
1449 		u_int prio;
1450 
1451 		mpt_vol = &mpt->raid_volumes[i];
1452 
1453 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1454 			continue;
1455 		}
1456 
1457 		vol_pg = mpt_vol->config_page;
1458 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1459 		 == MPT_RVF_ANNOUNCED) {
1460 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1461 			mpt_vol->flags = 0;
1462 			continue;
1463 		}
1464 
1465 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1466 			mpt_announce_vol(mpt, mpt_vol);
1467 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1468 		}
1469 
1470 		if (vol_pg->VolumeStatus.State !=
1471 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1472 			nonopt_volumes++;
1473 
1474 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1475 			continue;
1476 
1477 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1478 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1479 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1480 		mpt_verify_mwce(mpt, mpt_vol);
1481 
1482 		if (vol_pg->VolumeStatus.Flags == 0) {
1483 			continue;
1484 		}
1485 
1486 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1487 		for (m = 1; m <= 0x80; m <<= 1) {
1488 			switch (vol_pg->VolumeStatus.Flags & m) {
1489 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1490 				mpt_prtc(mpt, " Enabled");
1491 				break;
1492 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1493 				mpt_prtc(mpt, " Quiesced");
1494 				break;
1495 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1496 				mpt_prtc(mpt, " Re-Syncing");
1497 				break;
1498 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1499 				mpt_prtc(mpt, " Inactive");
1500 				break;
1501 			default:
1502 				break;
1503 			}
1504 		}
1505 		mpt_prtc(mpt, " )\n");
1506 
1507 		if ((vol_pg->VolumeStatus.Flags
1508 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1509 			continue;
1510 
1511 		mpt_verify_resync_rate(mpt, mpt_vol);
1512 
1513 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1514 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1515 		if (vol_pg->ResyncRate != 0) {
1516 
1517 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1518 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1519 			    prio / 1000, prio % 1000);
1520 		} else {
1521 			prio = vol_pg->VolumeSettings.Settings
1522 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1523 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1524 			    prio ? "High" : "Low");
1525 		}
1526 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1527 			    "blocks remaining\n", (uintmax_t)left,
1528 			    (uintmax_t)total);
1529 
1530 		/* Periodically report on sync progress. */
1531 		mpt_schedule_raid_refresh(mpt);
1532 	}
1533 
1534 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1535 		struct mpt_raid_disk *mpt_disk;
1536 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1537 		int m;
1538 
1539 		mpt_disk = &mpt->raid_disks[i];
1540 		disk_pg = &mpt_disk->config_page;
1541 
1542 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1543 			continue;
1544 
1545 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1546 		 == MPT_RDF_ANNOUNCED) {
1547 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1548 			mpt_disk->flags = 0;
1549 			mpt->raid_rescan++;
1550 			continue;
1551 		}
1552 
1553 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1554 
1555 			mpt_announce_disk(mpt, mpt_disk);
1556 			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1557 		}
1558 
1559 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1560 			continue;
1561 
1562 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1563 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1564 		if (disk_pg->PhysDiskStatus.Flags == 0)
1565 			continue;
1566 
1567 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1568 		for (m = 1; m <= 0x80; m <<= 1) {
1569 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1570 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1571 				mpt_prtc(mpt, " Out-Of-Sync");
1572 				break;
1573 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1574 				mpt_prtc(mpt, " Quiesced");
1575 				break;
1576 			default:
1577 				break;
1578 			}
1579 		}
1580 		mpt_prtc(mpt, " )\n");
1581 	}
1582 
1583 	mpt->raid_nonopt_volumes = nonopt_volumes;
1584 	return (0);
1585 }
1586 
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = (struct mpt_softc *)arg;

	/* Callout handler: wake the RAID thread under the softc lock. */
	MPT_LOCK(mpt);
	mpt_raid_wakeup(mpt);
	MPT_UNLOCK(mpt);
}
1597 
static void
mpt_schedule_raid_refresh(struct mpt_softc *mpt)
{

	/*
	 * Arm (or re-arm) the RAID status callout so mpt_raid_timer()
	 * fires after one sync-report interval and wakes the RAID
	 * monitoring thread for another refresh pass.
	 */
	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
		      mpt_raid_timer, mpt);
}
1605 
1606 void
1607 mpt_raid_free_mem(struct mpt_softc *mpt)
1608 {
1609 
1610 	if (mpt->raid_volumes) {
1611 		struct mpt_raid_volume *mpt_raid;
1612 		int i;
1613 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1614 			mpt_raid = &mpt->raid_volumes[i];
1615 			if (mpt_raid->config_page) {
1616 				kfree(mpt_raid->config_page, M_DEVBUF);
1617 				mpt_raid->config_page = NULL;
1618 			}
1619 		}
1620 		kfree(mpt->raid_volumes, M_DEVBUF);
1621 		mpt->raid_volumes = NULL;
1622 	}
1623 	if (mpt->raid_disks) {
1624 		kfree(mpt->raid_disks, M_DEVBUF);
1625 		mpt->raid_disks = NULL;
1626 	}
1627 	if (mpt->ioc_page2) {
1628 		kfree(mpt->ioc_page2, M_DEVBUF);
1629 		mpt->ioc_page2 = NULL;
1630 	}
1631 	if (mpt->ioc_page3) {
1632 		kfree(mpt->ioc_page3, M_DEVBUF);
1633 		mpt->ioc_page3 = NULL;
1634 	}
1635 	mpt->raid_max_volumes =  0;
1636 	mpt->raid_max_disks =  0;
1637 }
1638 
1639 static int
1640 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1641 {
1642 	struct mpt_raid_volume *mpt_vol;
1643 
1644 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1645 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1646 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1647 		return (EINVAL);
1648 
1649 	MPT_LOCK(mpt);
1650 	mpt->raid_resync_rate = rate;
1651 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1652 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1653 			continue;
1654 		}
1655 		mpt_verify_resync_rate(mpt, mpt_vol);
1656 	}
1657 	MPT_UNLOCK(mpt);
1658 	return (0);
1659 }
1660 
1661 static int
1662 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1663 {
1664 	struct mpt_raid_volume *mpt_vol;
1665 
1666 	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1667 		return (EINVAL);
1668 
1669 	MPT_LOCK(mpt);
1670 	mpt->raid_queue_depth = vol_queue_depth;
1671 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1672 		struct cam_path *path;
1673 		int error;
1674 
1675 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1676 			continue;
1677 
1678 		mpt->raid_rescan = 0;
1679 
1680 		error = xpt_create_path(&path, xpt_periph,
1681 					cam_sim_path(mpt->sim),
1682 					mpt_vol->config_page->VolumeID,
1683 					/*lun*/0);
1684 		if (error != CAM_REQ_CMP) {
1685 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1686 			continue;
1687 		}
1688 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1689 		xpt_free_path(path);
1690 	}
1691 	MPT_UNLOCK(mpt);
1692 	return (0);
1693 }
1694 
1695 static int
1696 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1697 {
1698 	struct mpt_raid_volume *mpt_vol;
1699 	int force_full_resync;
1700 
1701 	MPT_LOCK(mpt);
1702 	if (mwce == mpt->raid_mwce_setting) {
1703 		MPT_UNLOCK(mpt);
1704 		return (0);
1705 	}
1706 
1707 	/*
1708 	 * Catch MWCE being left on due to a failed shutdown.  Since
1709 	 * sysctls cannot be set by the loader, we treat the first
1710 	 * setting of this varible specially and force a full volume
1711 	 * resync if MWCE is enabled and a resync is in progress.
1712 	 */
1713 	force_full_resync = 0;
1714 	if (mpt->raid_mwce_set == 0
1715 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1716 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1717 		force_full_resync = 1;
1718 
1719 	mpt->raid_mwce_setting = mwce;
1720 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1721 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1722 		int resyncing;
1723 		int mwce;
1724 
1725 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1726 			continue;
1727 
1728 		vol_pg = mpt_vol->config_page;
1729 		resyncing = vol_pg->VolumeStatus.Flags
1730 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1731 		mwce = vol_pg->VolumeSettings.Settings
1732 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1733 		if (force_full_resync && resyncing && mwce) {
1734 
1735 			/*
1736 			 * XXX disable/enable volume should force a resync,
1737 			 *     but we'll need to queice, drain, and restart
1738 			 *     I/O to do that.
1739 			 */
1740 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1741 				    "detected.  Suggest full resync.\n");
1742 		}
1743 		mpt_verify_mwce(mpt, mpt_vol);
1744 	}
1745 	mpt->raid_mwce_set = 1;
1746 	MPT_UNLOCK(mpt);
1747 	return (0);
1748 }
1749 
/*
 * Human-readable names for the member write-cache-enable settings,
 * indexed by mpt->raid_mwce_setting and matched against user input in
 * mpt_raid_sysctl_vol_member_wce().  NOTE(review): order is assumed to
 * mirror the mpt_raid_mwce_t enum declared elsewhere — confirm.
 */
static const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1757 
1758 static int
1759 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1760 {
1761 	char inbuf[20];
1762 	struct mpt_softc *mpt;
1763 	const char *str;
1764 	int error;
1765 	u_int size;
1766 	u_int i;
1767 
1768 	mpt = (struct mpt_softc *)arg1;
1769 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1770 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1771 	if (error || !req->newptr) {
1772 		return (error);
1773 	}
1774 
1775 	size = req->newlen - req->newidx;
1776 	if (size >= sizeof(inbuf)) {
1777 		return (EINVAL);
1778 	}
1779 
1780 	error = SYSCTL_IN(req, inbuf, size);
1781 	if (error) {
1782 		return (error);
1783 	}
1784 	inbuf[size] = '\0';
1785 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1786 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1787 			return (mpt_raid_set_vol_mwce(mpt, i));
1788 		}
1789 	}
1790 	return (EINVAL);
1791 }
1792 
1793 static int
1794 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1795 {
1796 	struct mpt_softc *mpt;
1797 	u_int raid_resync_rate;
1798 	int error;
1799 
1800 	mpt = (struct mpt_softc *)arg1;
1801 	raid_resync_rate = mpt->raid_resync_rate;
1802 
1803 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1804 	if (error || !req->newptr) {
1805 		return error;
1806 	}
1807 
1808 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1809 }
1810 
1811 static int
1812 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1813 {
1814 	struct mpt_softc *mpt;
1815 	u_int raid_queue_depth;
1816 	int error;
1817 
1818 	mpt = (struct mpt_softc *)arg1;
1819 	raid_queue_depth = mpt->raid_queue_depth;
1820 
1821 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1822 	if (error || !req->newptr) {
1823 		return error;
1824 	}
1825 
1826 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1827 }
1828 
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
	/*
	 * Register the RAID tunables under the adapter's sysctl tree:
	 * member write-cache setting (string), default volume queue
	 * depth, resync rate, and a read-only count of volumes that
	 * are not in the optimal state.
	 */
	SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
			SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_member_wce, "A",
			"volume member WCE(On,Off,On-During-Rebuild,NC)");

	SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
			SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_queue_depth, "I",
			"default volume queue depth");

	SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
			SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_resync_rate, "I",
			"volume resync priority (0 == NC, 1 - 255)");
	SYSCTL_ADD_UINT(&mpt->mpt_sysctl_ctx,
			SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
			"nonoptimal_volumes", CTLFLAG_RD,
			&mpt->raid_nonopt_volumes, 0,
			"number of nonoptimal volumes");
}
1855