xref: /dragonfly/sys/dev/disk/mpt/mpt_raid.c (revision d9f85b33)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  *
42  * $FreeBSD: head/sys/dev/mpt/mpt_raid.c 260058 2013-12-29 20:41:32Z marius $
43  */
44 
45 #include <dev/disk/mpt/mpt.h>
46 #include <dev/disk/mpt/mpt_raid.h>
47 
48 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
49 #include "dev/disk/mpt/mpilib/mpi_raid.h"
50 
51 #include <bus/cam/cam.h>
52 #include <bus/cam/cam_ccb.h>
53 #include <bus/cam/cam_periph.h>
54 #include <bus/cam/cam_sim.h>
55 #include <bus/cam/cam_xpt_sim.h>
56 
57 #include <sys/callout.h>
58 #include <sys/kthread.h>
59 #include <sys/sysctl.h>
60 
61 #include <machine/stdarg.h>
62 
/*
 * Per-request scratch record capturing the outcome of a RAID action
 * command.  It lives immediately after the request message in the
 * request's virtual buffer (see REQ_TO_RAID_ACTION_RESULT).
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;	/* resync progress data */
		uint32_t		new_settings;		/* updated settings word */
		uint8_t			phys_disk_num;		/* affected phys disk */
	} action_data;
	uint16_t			action_status;	/* IOC ActionStatus (host order) */
};
72 
/*
 * The action result record is stored directly after the RAID action
 * request message within req->req_vbuf.
 */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Extract the IOC status code from a request, masking off flag bits. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
77 
78 static mpt_probe_handler_t	mpt_raid_probe;
79 static mpt_attach_handler_t	mpt_raid_attach;
80 static mpt_enable_handler_t	mpt_raid_enable;
81 static mpt_event_handler_t	mpt_raid_event;
82 static mpt_shutdown_handler_t	mpt_raid_shutdown;
83 static mpt_reset_handler_t	mpt_raid_ioc_reset;
84 static mpt_detach_handler_t	mpt_raid_detach;
85 
/*
 * Personality registration table: hooks the integrated-RAID support
 * into the core mpt(4) driver's probe/attach/event framework.
 */
static struct mpt_personality mpt_raid_personality =
{
	.name		= "mpt_raid",
	.probe		= mpt_raid_probe,
	.attach		= mpt_raid_attach,
	.enable		= mpt_raid_enable,
	.event		= mpt_raid_event,
	.reset		= mpt_raid_ioc_reset,
	.shutdown	= mpt_raid_shutdown,
	.detach		= mpt_raid_detach,
};
97 
98 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
99 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
100 
101 static mpt_reply_handler_t mpt_raid_reply_handler;
102 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
103 					MSG_DEFAULT_REPLY *reply_frame);
104 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
105 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
106 static void mpt_raid_thread(void *arg);
107 static timeout_t mpt_raid_timer;
108 #if 0
109 static void mpt_enable_vol(struct mpt_softc *mpt,
110 			   struct mpt_raid_volume *mpt_vol, int enable);
111 #endif
112 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
113 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
114     struct cam_path *);
115 static void mpt_raid_sysctl_attach(struct mpt_softc *);
116 
117 static const char *mpt_vol_type(struct mpt_raid_volume *vol);
118 static const char *mpt_vol_state(struct mpt_raid_volume *vol);
119 static const char *mpt_disk_state(struct mpt_raid_disk *disk);
120 static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
121     const char *fmt, ...) __printflike(3, 4);
122 static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
123     const char *fmt, ...) __printflike(3, 4);
124 
125 static int mpt_issue_raid_req(struct mpt_softc *mpt,
126     struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
127     u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
128     int write, int wait);
129 
130 static int mpt_refresh_raid_data(struct mpt_softc *mpt);
131 static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);
132 
133 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
134 
135 static const char *
136 mpt_vol_type(struct mpt_raid_volume *vol)
137 {
138 	switch (vol->config_page->VolumeType) {
139 	case MPI_RAID_VOL_TYPE_IS:
140 		return ("RAID-0");
141 	case MPI_RAID_VOL_TYPE_IME:
142 		return ("RAID-1E");
143 	case MPI_RAID_VOL_TYPE_IM:
144 		return ("RAID-1");
145 	default:
146 		return ("Unknown");
147 	}
148 }
149 
150 static const char *
151 mpt_vol_state(struct mpt_raid_volume *vol)
152 {
153 	switch (vol->config_page->VolumeStatus.State) {
154 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
155 		return ("Optimal");
156 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
157 		return ("Degraded");
158 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
159 		return ("Failed");
160 	default:
161 		return ("Unknown");
162 	}
163 }
164 
165 static const char *
166 mpt_disk_state(struct mpt_raid_disk *disk)
167 {
168 	switch (disk->config_page.PhysDiskStatus.State) {
169 	case MPI_PHYSDISK0_STATUS_ONLINE:
170 		return ("Online");
171 	case MPI_PHYSDISK0_STATUS_MISSING:
172 		return ("Missing");
173 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
174 		return ("Incompatible");
175 	case MPI_PHYSDISK0_STATUS_FAILED:
176 		return ("Failed");
177 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
178 		return ("Initializing");
179 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
180 		return ("Offline Requested");
181 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
182 		return ("Failed per Host Request");
183 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
184 		return ("Offline");
185 	default:
186 		return ("Unknown");
187 	}
188 }
189 
/*
 * kprintf-style message helper that prefixes output with the driver
 * unit, the volume's index, and its bus/target address.
 */
static void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	__va_list ap;

	/* The volume index is its offset within the raid_volumes array. */
	kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	__va_start(ap, fmt);
	kvprintf(fmt, ap);
	__va_end(ap);
}
203 
/*
 * kprintf-style message helper for physical disks.  A disk that belongs
 * to a volume is identified by volume ID and member number; a bare disk
 * by its physical bus and target ID.
 */
static void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	__va_list ap;

	if (disk->volume != NULL) {
		kprintf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	__va_start(ap, fmt);
	kvprintf(fmt, ap);
	__va_end(ap);
}
224 
225 static void
226 mpt_raid_async(void *callback_arg, u_int32_t code,
227 	       struct cam_path *path, void *arg)
228 {
229 	struct mpt_softc *mpt;
230 
231 	mpt = (struct mpt_softc*)callback_arg;
232 	switch (code) {
233 	case AC_FOUND_DEVICE:
234 	{
235 		struct ccb_getdev *cgd;
236 		struct mpt_raid_volume *mpt_vol;
237 
238 		cgd = (struct ccb_getdev *)arg;
239 		if (cgd == NULL) {
240 			break;
241 		}
242 
243 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
244 			 cgd->ccb_h.target_id);
245 
246 		RAID_VOL_FOREACH(mpt, mpt_vol) {
247 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
248 				continue;
249 
250 			if (mpt_vol->config_page->VolumeID
251 			 == cgd->ccb_h.target_id) {
252 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
253 				break;
254 			}
255 		}
256 	}
257 	default:
258 		break;
259 	}
260 }
261 
262 static int
263 mpt_raid_probe(struct mpt_softc *mpt)
264 {
265 
266 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
267 		return (ENODEV);
268 	}
269 	return (0);
270 }
271 
272 static int
273 mpt_raid_attach(struct mpt_softc *mpt)
274 {
275 	struct ccb_setasync csa;
276 	mpt_handler_t	 handler;
277 	int		 error;
278 
279 	mpt_callout_init(mpt, &mpt->raid_timer);
280 
281 	error = mpt_spawn_raid_thread(mpt);
282 	if (error != 0) {
283 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
284 		goto cleanup;
285 	}
286 
287 	MPT_LOCK(mpt);
288 	handler.reply_handler = mpt_raid_reply_handler;
289 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
290 				     &raid_handler_id);
291 	if (error != 0) {
292 		mpt_prt(mpt, "Unable to register RAID haandler!\n");
293 		goto cleanup;
294 	}
295 
296 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
297 	csa.ccb_h.func_code = XPT_SASYNC_CB;
298 	csa.event_enable = AC_FOUND_DEVICE;
299 	csa.callback = mpt_raid_async;
300 	csa.callback_arg = mpt;
301 	xpt_action((union ccb *)&csa);
302 	if (csa.ccb_h.status != CAM_REQ_CMP) {
303 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
304 			"CAM async handler.\n");
305 	}
306 	MPT_UNLOCK(mpt);
307 
308 	mpt_raid_sysctl_attach(mpt);
309 	return (0);
310 cleanup:
311 	MPT_UNLOCK(mpt);
312 	mpt_raid_detach(mpt);
313 	return (error);
314 }
315 
/*
 * Personality enable hook; no additional work is required beyond what
 * attach already performed.
 */
static int
mpt_raid_enable(struct mpt_softc *mpt)
{

	return (0);
}
322 
/*
 * Tear down the RAID personality: stop the refresh timer, terminate the
 * monitoring thread, unhook our reply handler, and cancel the CAM async
 * registration made at attach time.
 */
static void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	/* Stop the periodic refresh timer before taking the lock. */
	mpt_callout_drain(mpt, &mpt->raid_timer);

	MPT_LOCK(mpt);
	mpt_terminate_raid_thread(mpt);
	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	/* event_enable of 0 deregisters our async callback. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
	MPT_UNLOCK(mpt);
}
344 
/*
 * IOC reset hook for the personality framework.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{

	/* Nothing to do yet. */
}
351 
/*
 * Descriptions for integrated RAID events, indexed directly by the
 * EVENT_DATA_RAID ReasonCode; codes at or beyond the table size are
 * reported as unhandled (see mpt_raid_event).
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
367 
/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications.
 *
 * Locates the volume and/or physical disk named by the event,
 * invalidates cached configuration state and/or schedules a rescan as
 * appropriate for the reason code, logs a description of the event,
 * and wakes the RAID monitoring thread.
 *
 * Returns 0 (not consumed) for non-RAID events so other handlers may
 * process them; returns 1 once a RAID event has been handled.
 */
static int
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int i;
	int print_event;

	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
		return (0);
	}

	raid_event = (EVENT_DATA_RAID *)&msg->Data;

	/* Find the active volume matching the event's bus/ID, if any. */
	mpt_vol = NULL;
	vol_pg = NULL;
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;

			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
				break;
		}
		if (i >= mpt->ioc_page2->MaxVolumes) {
			mpt_vol = NULL;
			vol_pg = NULL;
		}
	}

	/* Find the active physical disk, if one is named (0xFF == none). */
	mpt_disk = NULL;
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
			mpt_disk = NULL;
		}
	}

	/* Invalidate cached state per reason code. */
	print_event = 1;
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
		break;
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
			} else {
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 * from our output.
				 */
				print_event = 0;
			}
		}
		break;
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		mpt->raid_rescan++;
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
		}
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		mpt->raid_rescan++;
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
		}
		break;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		break;
	}

	if (print_event) {
		/* Prefix with the most specific object we resolved. */
		if (mpt_disk != NULL) {
			mpt_disk_prt(mpt, mpt_disk, "%s", "");
		} else if (mpt_vol != NULL) {
			mpt_vol_prt(mpt, mpt_vol, "%s", "");
		} else {
			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
				raid_event->VolumeID);

			if (raid_event->PhysDiskNum != 0xFF)
				mpt_prtc(mpt, ":%d): ",
					 raid_event->PhysDiskNum);
			else
				mpt_prtc(mpt, "): ");
		}

		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
				 raid_event->ReasonCode);
		else
			mpt_prtc(mpt, "%s\n",
				 raid_event_txt[raid_event->ReasonCode]);
	}

	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "%s", "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	}

	mpt_raid_wakeup(mpt);
	return (1);
}
498 
499 static void
500 mpt_raid_shutdown(struct mpt_softc *mpt)
501 {
502 	struct mpt_raid_volume *mpt_vol;
503 
504 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
505 		return;
506 	}
507 
508 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
509 	RAID_VOL_FOREACH(mpt, mpt_vol) {
510 		mpt_verify_mwce(mpt, mpt_vol);
511 	}
512 }
513 
/*
 * Completion handler for RAID action requests.
 *
 * Parses the reply frame (when present), marks the request done, and
 * either wakes a sleeper waiting on the request or frees it when the
 * frame handler said it is no longer needed.  Always returns TRUE
 * (reply consumed).
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	/* A waiter owns the request; otherwise release it ourselves. */
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
544 
545 /*
546  * Parse additional completion information in the reply
547  * frame for RAID I/O requests.
548  */
549 static int
550 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
551     MSG_DEFAULT_REPLY *reply_frame)
552 {
553 	MSG_RAID_ACTION_REPLY *reply;
554 	struct mpt_raid_action_result *action_result;
555 	MSG_RAID_ACTION_REQUEST *rap;
556 
557 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
558 	req->IOCStatus = le16toh(reply->IOCStatus);
559 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
560 
561 	switch (rap->Action) {
562 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
563 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
564 		break;
565 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
566 		mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
567 		break;
568 	default:
569 		break;
570 	}
571 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
572 	memcpy(&action_result->action_data, &reply->ActionData,
573 	    sizeof(action_result->action_data));
574 	action_result->action_status = le16toh(reply->ActionStatus);
575 	return (TRUE);
576 }
577 
/*
 * Utility routine to perform a RAID action command.
 *
 * Builds the request for the given volume (and optional member disk),
 * attaches a single simple SGE describing the action data buffer, and
 * sends it to the IOC.  With 'wait' set, polls (non-sleeping, 2 second
 * limit) until the request completes; otherwise returns 0 right after
 * queuing it.
 */
static int
mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
		   int write, int wait)
{
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;

	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = Action;
	rap->ActionDataWord = htole32(ActionDataWord);
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = vol->config_page->VolumeID;
	rap->VolumeBus = vol->config_page->VolumeBus;
	/* PhysDiskNum 0xFF targets the volume rather than a member disk. */
	if (disk != NULL)
		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
	else
		rap->PhysDiskNum = 0xFF;
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	se->Address = htole32(addr);
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	se->FlagsLength = htole32(se->FlagsLength);
	rap->MsgContext = htole32(req->index | raid_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	if (wait) {
		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
				     /*sleep_ok*/FALSE, /*time_ms*/2000));
	} else {
		return (0);
	}
}
621 
/*************************** RAID Status Monitoring ***************************/
/*
 * Spawn the RAID monitoring kernel thread.  The physical-disk SIM queue
 * is frozen first so no I/O is accepted until the thread has taken its
 * first snapshot of the RAID pages; on spawn failure the freeze is
 * undone before the error is returned.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	MPT_LOCK(mpt);
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	MPT_UNLOCK(mpt);
	error = kthread_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, "mpt_raid%d", mpt->unit);
	if (error != 0) {
		MPT_LOCK(mpt);
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
		MPT_UNLOCK(mpt);
	}
	return (error);
}
647 
/*
 * Ask the RAID monitoring thread to exit and wait until it has done so.
 * Called with the mpt lock held (see mpt_raid_detach).
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	/* The thread idles on &mpt->raid_volumes; kick it awake. */
	wakeup(&mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, 0, "thtrm", 0);
}
663 
664 static void
665 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
666 {
667     xpt_free_path(ccb->ccb_h.path);
668     kfree(ccb, M_TEMP);
669 }
670 
/*
 * Main loop of the RAID monitoring thread.
 *
 * Sleeps until woken (by the event handler or the refresh timer),
 * refreshes the cached RAID configuration data, releases the
 * physical-disk SIM queue after the first successful refresh, and
 * rescans the bus when events flagged a membership change.  Exits once
 * shutdwn_raid is set, waking anyone blocked in
 * mpt_terminate_raid_thread().
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		if (mpt->raid_wakeup == 0) {
			mpt_sleep(mpt, &mpt->raid_volumes, 0, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		/* On refresh failure, retry via the timer. */
		if (mpt_refresh_raid_data(mpt)) {
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
			continue;
		}

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			xpt_release_simq(mpt->phydisk_sim, TRUE);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			int error;

			mpt->raid_rescan = 0;
			/* Drop the lock around the sleeping allocation. */
			MPT_UNLOCK(mpt);

			ccb = kmalloc(sizeof(union ccb), M_TEMP,
			    M_WAITOK | M_ZERO);

			MPT_LOCK(mpt);
			error = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				kfree(ccb, M_TEMP);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				/* CCB and path are freed by the callback. */
				xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				    5/*priority (low)*/);
				ccb->ccb_h.func_code = XPT_SCAN_BUS;
				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
				ccb->crcn.flags = CAM_FLAG_NONE;
				xpt_action(ccb);

				/* scan is now in progress */
			}
		}
	}
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
	kthread_exit();
}
737 
#if 0
/*
 * NOTE(review): disabled draft of per-disk quiesce support.  This whole
 * region is compiled out and would not build as-is (e.g. 'ar' below is
 * never declared).
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{

	/* Complete the CCB with error */
	/* COWWWW */
}

static timeout_t mpt_raid_quiesce_timeout;
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
#endif
797 
798 /* XXX Ignores that there may be multiple busses/IOCs involved. */
799 cam_status
800 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
801 {
802 	struct mpt_raid_disk *mpt_disk;
803 
804 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
805 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
806 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
807 		*tgt = mpt_disk->config_page.PhysDiskID;
808 		return (0);
809 	}
810 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
811 		 ccb->ccb_h.target_id);
812 	return (-1);
813 }
814 
815 /* XXX Ignores that there may be multiple busses/IOCs involved. */
816 int
817 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
818 {
819 	struct mpt_raid_disk *mpt_disk;
820 	int i;
821 
822 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
823 		return (0);
824 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
825 		mpt_disk = &mpt->raid_disks[i];
826 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
827 		    mpt_disk->config_page.PhysDiskID == tgt)
828 			return (1);
829 	}
830 	return (0);
831 
832 }
833 
834 /* XXX Ignores that there may be multiple busses/IOCs involved. */
835 int
836 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
837 {
838 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
839 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
840 
841 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
842 		return (0);
843 	}
844 	ioc_vol = mpt->ioc_page2->RaidVolume;
845 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
846 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
847 		if (ioc_vol->VolumeID == tgt) {
848 			return (1);
849 		}
850 	}
851 	return (0);
852 }
853 
#if 0
/*
 * NOTE(review): compiled out.  Enables or disables a volume through a
 * RAID action.  If this is ever re-enabled: the ETIMEDOUT path returns
 * without freeing 'req' — verify whether that is intentional (the
 * request may still be outstanding at the IOC).
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
906 
/*
 * Reconcile a volume's member write-cache-enable (MWCE) setting with
 * the driver's configured policy, issuing a CHANGE_VOLUME_SETTINGS
 * RAID action when they differ.  The cached VolumeSettings copy is
 * only updated after the IOC reports success.
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarilly when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/*
	 * Toggle the WCE bit just long enough to snapshot the desired
	 * settings word, then restore the cached copy; it is committed
	 * below only once the IOC accepts the change.
	 */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		/*
		 * NOTE(review): 'req' is not freed on this path;
		 * presumably the command may still complete later —
		 * confirm against mpt_wait_req()'s contract.
		 */
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
989 
/*
 * Reconcile a volume's resync rate and resync priority with the
 * driver's configured raid_resync_rate, issuing SET_RESYNC_RATE or
 * CHANGE_VOLUME_SETTINGS RAID actions as needed.  Cached page state is
 * only updated once the IOC reports success.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			/*
			 * NOTE(review): 'req' is not freed here; presumably
			 * the command may still complete later — confirm.
			 */
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/*
		 * Toggle the priority bit only to snapshot the desired
		 * settings word; the cache is committed after success.
		 */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			/* NOTE(review): 'req' not freed here either — confirm. */
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1080 
1081 static void
1082 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1083 		       struct cam_path *path)
1084 {
1085 	struct ccb_relsim crs;
1086 
1087 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1088 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1089 	crs.ccb_h.flags = CAM_DEV_QFREEZE;
1090 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1091 	crs.openings = mpt->raid_queue_depth;
1092 	xpt_action((union ccb *)&crs);
1093 	if (crs.ccb_h.status != CAM_REQ_CMP)
1094 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1095 			    "with CAM status %#x\n", crs.ccb_h.status);
1096 }
1097 
/*
 * Print a console summary of one RAID volume: its settings flags,
 * hot spare pool membership, and the state of each member disk.
 */
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/*
	 * Walk each bit of the 16-bit settings word.  "Settings & i"
	 * isolates one bit, so a case label only matches when that
	 * particular setting bit is set.
	 */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		/* Pluralize "Pool" when more than one pool bit is set. */
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		/* List the indices (0-7) of each pool this volume uses. */
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
		U8 f, s;

		/* raid_disks is indexed by the firmware's PhysDiskNum. */
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
			 pt_bus, disk_pg->PhysDiskID);
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/* Mirrors: member 0 is the primary side. */
			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
			    "Primary" : "Secondary");
		} else {
			mpt_prtc(mpt, "Stripe Position %d",
				 mpt_disk->member_number);
		}
		/* Decode the per-disk status flags and state byte. */
		f = disk_pg->PhysDiskStatus.Flags;
		s = disk_pg->PhysDiskStatus.State;
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
			mpt_prtc(mpt, " Out of Sync");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
			mpt_prtc(mpt, " Quiesced");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
			mpt_prtc(mpt, " Inactive");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Optimal");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Non-Optimal");
		}
		switch (s) {
		case MPI_PHYSDISK0_STATUS_ONLINE:
			mpt_prtc(mpt, " Online");
			break;
		case MPI_PHYSDISK0_STATUS_MISSING:
			mpt_prtc(mpt, " Missing");
			break;
		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
			mpt_prtc(mpt, " Incompatible");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED:
			mpt_prtc(mpt, " Failed");
			break;
		case MPI_PHYSDISK0_STATUS_INITIALIZING:
			mpt_prtc(mpt, " Initializing");
			break;
		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
			mpt_prtc(mpt, " Requested Offline");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
			mpt_prtc(mpt, " Requested Failed");
			break;
		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		default:
			mpt_prtc(mpt, " Offline Other (%x)", s);
			break;
		}
		mpt_prtc(mpt, "\n");
	}
}
1205 
1206 static void
1207 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1208 {
1209 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1210 	int rd_bus = cam_sim_bus(mpt->sim);
1211 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1212 	u_int i;
1213 
1214 	disk_pg = &mpt_disk->config_page;
1215 	mpt_disk_prt(mpt, mpt_disk,
1216 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1217 		     device_get_nameunit(mpt->dev), rd_bus,
1218 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1219 		     pt_bus, (int)(mpt_disk - mpt->raid_disks));
1220 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1221 		return;
1222 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1223 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1224 		   ? ":" : "s:");
1225 	for (i = 0; i < 8; i++) {
1226 		u_int mask;
1227 
1228 		mask = 0x1 << i;
1229 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1230 			continue;
1231 		mpt_prtc(mpt, " %d", i);
1232 	}
1233 	mpt_prtc(mpt, "\n");
1234 }
1235 
1236 static void
1237 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1238 		      IOC_3_PHYS_DISK *ioc_disk)
1239 {
1240 	int rv;
1241 
1242 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1243 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1244 				 &mpt_disk->config_page.Header,
1245 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1246 	if (rv != 0) {
1247 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1248 			"Failed to read RAID Disk Hdr(%d)\n",
1249 			ioc_disk->PhysDiskNum);
1250 		return;
1251 	}
1252 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1253 				   &mpt_disk->config_page.Header,
1254 				   sizeof(mpt_disk->config_page),
1255 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1256 	if (rv != 0)
1257 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1258 			"Failed to read RAID Disk Page(%d)\n",
1259 			ioc_disk->PhysDiskNum);
1260 	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1261 }
1262 
/*
 * Re-read the RAID volume page for one volume, refresh the member
 * disk bookkeeping, and, if a resync is in progress, fetch the
 * resync progress indicator from the IOC.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
    CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	/* Mark stale until the refresh below completes. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;

	/* Re-fetch the volume page header. */
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}

	/* Re-fetch the volume page body and convert to host order. */
	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}
	mpt2host_config_page_raid_vol_0(vol_pg);

	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/*
			 * NOTE(review): PhysDiskMap appears to be 1-based
			 * for mirrors; this makes member_number 0-based —
			 * confirm against the MPI spec.
			 */
			mpt_disk->member_number--;
		}
	}

	/* Only fetch resync progress while a resync is running. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	/* Ask the IOC for the resync progress indicator structure. */
	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
		mpt_free_request(mpt, req);
		return;
	}

	ar = REQ_TO_RAID_ACTION_RESULT(req);
	/* Accept the snapshot only if the action fully succeeded. */
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
	} else {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1341 
1342 /*
1343  * Update in-core information about RAID support.  We update any entries
1344  * that didn't previously exists or have been marked as needing to
1345  * be updated by our event handler.  Interesting changes are displayed
1346  * to the console.
1347  */
1348 static int
1349 mpt_refresh_raid_data(struct mpt_softc *mpt)
1350 {
1351 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1352 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1353 	IOC_3_PHYS_DISK *ioc_disk;
1354 	IOC_3_PHYS_DISK *ioc_last_disk;
1355 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1356 	size_t len;
1357 	int rv;
1358 	int i;
1359 	u_int nonopt_volumes;
1360 
1361 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1362 		return (0);
1363 	}
1364 
1365 	/*
1366 	 * Mark all items as unreferenced by the configuration.
1367 	 * This allows us to find, report, and discard stale
1368 	 * entries.
1369 	 */
1370 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1371 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1372 	}
1373 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1374 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1375 	}
1376 
1377 	/*
1378 	 * Get Physical Disk information.
1379 	 */
1380 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1381 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1382 				   &mpt->ioc_page3->Header, len,
1383 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1384 	if (rv) {
1385 		mpt_prt(mpt,
1386 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1387 		return (-1);
1388 	}
1389 	mpt2host_config_page_ioc3(mpt->ioc_page3);
1390 
1391 	ioc_disk = mpt->ioc_page3->PhysDisk;
1392 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1393 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1394 		struct mpt_raid_disk *mpt_disk;
1395 
1396 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1397 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1398 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1399 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1400 
1401 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1402 
1403 		}
1404 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1405 		mpt->raid_rescan++;
1406 	}
1407 
1408 	/*
1409 	 * Refresh volume data.
1410 	 */
1411 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1412 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1413 				   &mpt->ioc_page2->Header, len,
1414 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1415 	if (rv) {
1416 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1417 			"Failed to read IOC Page 2\n");
1418 		return (-1);
1419 	}
1420 	mpt2host_config_page_ioc2(mpt->ioc_page2);
1421 
1422 	ioc_vol = mpt->ioc_page2->RaidVolume;
1423 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1424 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1425 		struct mpt_raid_volume *mpt_vol;
1426 
1427 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1428 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1429 		vol_pg = mpt_vol->config_page;
1430 		if (vol_pg == NULL)
1431 			continue;
1432 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1433 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1434 		 || (vol_pg->VolumeStatus.Flags
1435 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1436 
1437 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1438 		}
1439 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1440 	}
1441 
1442 	nonopt_volumes = 0;
1443 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1444 		struct mpt_raid_volume *mpt_vol;
1445 		uint64_t total;
1446 		uint64_t left;
1447 		int m;
1448 		u_int prio;
1449 
1450 		mpt_vol = &mpt->raid_volumes[i];
1451 
1452 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1453 			continue;
1454 		}
1455 
1456 		vol_pg = mpt_vol->config_page;
1457 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1458 		 == MPT_RVF_ANNOUNCED) {
1459 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1460 			mpt_vol->flags = 0;
1461 			continue;
1462 		}
1463 
1464 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1465 			mpt_announce_vol(mpt, mpt_vol);
1466 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1467 		}
1468 
1469 		if (vol_pg->VolumeStatus.State !=
1470 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1471 			nonopt_volumes++;
1472 
1473 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1474 			continue;
1475 
1476 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1477 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1478 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1479 		mpt_verify_mwce(mpt, mpt_vol);
1480 
1481 		if (vol_pg->VolumeStatus.Flags == 0) {
1482 			continue;
1483 		}
1484 
1485 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1486 		for (m = 1; m <= 0x80; m <<= 1) {
1487 			switch (vol_pg->VolumeStatus.Flags & m) {
1488 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1489 				mpt_prtc(mpt, " Enabled");
1490 				break;
1491 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1492 				mpt_prtc(mpt, " Quiesced");
1493 				break;
1494 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1495 				mpt_prtc(mpt, " Re-Syncing");
1496 				break;
1497 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1498 				mpt_prtc(mpt, " Inactive");
1499 				break;
1500 			default:
1501 				break;
1502 			}
1503 		}
1504 		mpt_prtc(mpt, " )\n");
1505 
1506 		if ((vol_pg->VolumeStatus.Flags
1507 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1508 			continue;
1509 
1510 		mpt_verify_resync_rate(mpt, mpt_vol);
1511 
1512 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1513 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1514 		if (vol_pg->ResyncRate != 0) {
1515 
1516 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1517 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1518 			    prio / 1000, prio % 1000);
1519 		} else {
1520 			prio = vol_pg->VolumeSettings.Settings
1521 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1522 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1523 			    prio ? "High" : "Low");
1524 		}
1525 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1526 			    "blocks remaining\n", (uintmax_t)left,
1527 			    (uintmax_t)total);
1528 
1529 		/* Periodically report on sync progress. */
1530 		mpt_schedule_raid_refresh(mpt);
1531 	}
1532 
1533 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1534 		struct mpt_raid_disk *mpt_disk;
1535 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1536 		int m;
1537 
1538 		mpt_disk = &mpt->raid_disks[i];
1539 		disk_pg = &mpt_disk->config_page;
1540 
1541 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1542 			continue;
1543 
1544 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1545 		 == MPT_RDF_ANNOUNCED) {
1546 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1547 			mpt_disk->flags = 0;
1548 			mpt->raid_rescan++;
1549 			continue;
1550 		}
1551 
1552 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1553 
1554 			mpt_announce_disk(mpt, mpt_disk);
1555 			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1556 		}
1557 
1558 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1559 			continue;
1560 
1561 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1562 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1563 		if (disk_pg->PhysDiskStatus.Flags == 0)
1564 			continue;
1565 
1566 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1567 		for (m = 1; m <= 0x80; m <<= 1) {
1568 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1569 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1570 				mpt_prtc(mpt, " Out-Of-Sync");
1571 				break;
1572 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1573 				mpt_prtc(mpt, " Quiesced");
1574 				break;
1575 			default:
1576 				break;
1577 			}
1578 		}
1579 		mpt_prtc(mpt, " )\n");
1580 	}
1581 
1582 	mpt->raid_nonopt_volumes = nonopt_volumes;
1583 	return (0);
1584 }
1585 
/*
 * Callout handler: wake the RAID thread under the softc lock.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = arg;

	MPT_LOCK(mpt);
	mpt_raid_wakeup(mpt);
	MPT_UNLOCK(mpt);
}
1596 
1597 static void
1598 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1599 {
1600 
1601 	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1602 		      mpt_raid_timer, mpt);
1603 }
1604 
1605 void
1606 mpt_raid_free_mem(struct mpt_softc *mpt)
1607 {
1608 
1609 	if (mpt->raid_volumes) {
1610 		struct mpt_raid_volume *mpt_raid;
1611 		int i;
1612 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1613 			mpt_raid = &mpt->raid_volumes[i];
1614 			if (mpt_raid->config_page) {
1615 				kfree(mpt_raid->config_page, M_DEVBUF);
1616 				mpt_raid->config_page = NULL;
1617 			}
1618 		}
1619 		kfree(mpt->raid_volumes, M_DEVBUF);
1620 		mpt->raid_volumes = NULL;
1621 	}
1622 	if (mpt->raid_disks) {
1623 		kfree(mpt->raid_disks, M_DEVBUF);
1624 		mpt->raid_disks = NULL;
1625 	}
1626 	if (mpt->ioc_page2) {
1627 		kfree(mpt->ioc_page2, M_DEVBUF);
1628 		mpt->ioc_page2 = NULL;
1629 	}
1630 	if (mpt->ioc_page3) {
1631 		kfree(mpt->ioc_page3, M_DEVBUF);
1632 		mpt->ioc_page3 = NULL;
1633 	}
1634 	mpt->raid_max_volumes =  0;
1635 	mpt->raid_max_disks =  0;
1636 }
1637 
1638 static int
1639 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1640 {
1641 	struct mpt_raid_volume *mpt_vol;
1642 
1643 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1644 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1645 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1646 		return (EINVAL);
1647 
1648 	MPT_LOCK(mpt);
1649 	mpt->raid_resync_rate = rate;
1650 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1651 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1652 			continue;
1653 		}
1654 		mpt_verify_resync_rate(mpt, mpt_vol);
1655 	}
1656 	MPT_UNLOCK(mpt);
1657 	return (0);
1658 }
1659 
1660 static int
1661 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1662 {
1663 	struct mpt_raid_volume *mpt_vol;
1664 
1665 	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1666 		return (EINVAL);
1667 
1668 	MPT_LOCK(mpt);
1669 	mpt->raid_queue_depth = vol_queue_depth;
1670 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1671 		struct cam_path *path;
1672 		int error;
1673 
1674 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1675 			continue;
1676 
1677 		mpt->raid_rescan = 0;
1678 
1679 		error = xpt_create_path(&path, xpt_periph,
1680 					cam_sim_path(mpt->sim),
1681 					mpt_vol->config_page->VolumeID,
1682 					/*lun*/0);
1683 		if (error != CAM_REQ_CMP) {
1684 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1685 			continue;
1686 		}
1687 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1688 		xpt_free_path(path);
1689 	}
1690 	MPT_UNLOCK(mpt);
1691 	return (0);
1692 }
1693 
1694 static int
1695 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1696 {
1697 	struct mpt_raid_volume *mpt_vol;
1698 	int force_full_resync;
1699 
1700 	MPT_LOCK(mpt);
1701 	if (mwce == mpt->raid_mwce_setting) {
1702 		MPT_UNLOCK(mpt);
1703 		return (0);
1704 	}
1705 
1706 	/*
1707 	 * Catch MWCE being left on due to a failed shutdown.  Since
1708 	 * sysctls cannot be set by the loader, we treat the first
1709 	 * setting of this varible specially and force a full volume
1710 	 * resync if MWCE is enabled and a resync is in progress.
1711 	 */
1712 	force_full_resync = 0;
1713 	if (mpt->raid_mwce_set == 0
1714 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1715 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1716 		force_full_resync = 1;
1717 
1718 	mpt->raid_mwce_setting = mwce;
1719 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1720 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1721 		int resyncing;
1722 		int mwce;
1723 
1724 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1725 			continue;
1726 
1727 		vol_pg = mpt_vol->config_page;
1728 		resyncing = vol_pg->VolumeStatus.Flags
1729 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1730 		mwce = vol_pg->VolumeSettings.Settings
1731 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1732 		if (force_full_resync && resyncing && mwce) {
1733 
1734 			/*
1735 			 * XXX disable/enable volume should force a resync,
1736 			 *     but we'll need to queice, drain, and restart
1737 			 *     I/O to do that.
1738 			 */
1739 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1740 				    "detected.  Suggest full resync.\n");
1741 		}
1742 		mpt_verify_mwce(mpt, mpt_vol);
1743 	}
1744 	mpt->raid_mwce_set = 1;
1745 	MPT_UNLOCK(mpt);
1746 	return (0);
1747 }
1748 
/*
 * Names for the member-WCE settings accepted by the vol_member_wce
 * sysctl; the array index is passed to mpt_raid_set_vol_mwce() as
 * the mpt_raid_mwce_t value when a string matches.
 */
static const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1756 
1757 static int
1758 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1759 {
1760 	char inbuf[20];
1761 	struct mpt_softc *mpt;
1762 	const char *str;
1763 	int error;
1764 	u_int size;
1765 	u_int i;
1766 
1767 	mpt = (struct mpt_softc *)arg1;
1768 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1769 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1770 	if (error || !req->newptr) {
1771 		return (error);
1772 	}
1773 
1774 	size = req->newlen - req->newidx;
1775 	if (size >= sizeof(inbuf)) {
1776 		return (EINVAL);
1777 	}
1778 
1779 	error = SYSCTL_IN(req, inbuf, size);
1780 	if (error) {
1781 		return (error);
1782 	}
1783 	inbuf[size] = '\0';
1784 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1785 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1786 			return (mpt_raid_set_vol_mwce(mpt, i));
1787 		}
1788 	}
1789 	return (EINVAL);
1790 }
1791 
1792 static int
1793 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1794 {
1795 	struct mpt_softc *mpt;
1796 	u_int raid_resync_rate;
1797 	int error;
1798 
1799 	mpt = (struct mpt_softc *)arg1;
1800 	raid_resync_rate = mpt->raid_resync_rate;
1801 
1802 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1803 	if (error || !req->newptr) {
1804 		return error;
1805 	}
1806 
1807 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1808 }
1809 
1810 static int
1811 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1812 {
1813 	struct mpt_softc *mpt;
1814 	u_int raid_queue_depth;
1815 	int error;
1816 
1817 	mpt = (struct mpt_softc *)arg1;
1818 	raid_queue_depth = mpt->raid_queue_depth;
1819 
1820 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1821 	if (error || !req->newptr) {
1822 		return error;
1823 	}
1824 
1825 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1826 }
1827 
/*
 * Register the per-device RAID tuning sysctls under the device's
 * sysctl tree: vol_member_wce, vol_queue_depth, vol_resync_rate,
 * and the read-only nonoptimal_volumes counter.
 */
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_member_wce, "A",
			"volume member WCE(On,Off,On-During-Rebuild,NC)");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_queue_depth, "I",
			"default volume queue depth");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_resync_rate, "I",
			"volume resync priority (0 == NC, 1 - 255)");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"nonoptimal_volumes", CTLFLAG_RD,
			&mpt->raid_nonopt_volumes, 0,
			"number of nonoptimal volumes");
}
1853