xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision aa0a1e58)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
48 
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
51 
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_sim.h>
56 
57 #if __FreeBSD_version < 500000
58 #include <sys/devicestat.h>
59 #define	GIANT_REQUIRED
60 #endif
61 #include <cam/cam_periph.h>
62 
63 #include <sys/callout.h>
64 #include <sys/kthread.h>
65 #include <sys/sysctl.h>
66 
67 #include <machine/stdarg.h>
68 
69 struct mpt_raid_action_result
70 {
71 	union {
72 		MPI_RAID_VOL_INDICATOR	indicator_struct;
73 		uint32_t		new_settings;
74 		uint8_t			phys_disk_num;
75 	} action_data;
76 	uint16_t			action_status;
77 };
78 
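/*
 * The RAID action result is stashed immediately after the RAID action
 * request message in the request's virtual buffer.
 */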
79 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
80 	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
81 
82 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
83 
84 
85 static mpt_probe_handler_t	mpt_raid_probe;
86 static mpt_attach_handler_t	mpt_raid_attach;
87 static mpt_enable_handler_t	mpt_raid_enable;
88 static mpt_event_handler_t	mpt_raid_event;
89 static mpt_shutdown_handler_t	mpt_raid_shutdown;
90 static mpt_reset_handler_t	mpt_raid_ioc_reset;
91 static mpt_detach_handler_t	mpt_raid_detach;
92 
93 static struct mpt_personality mpt_raid_personality =
94 {
95 	.name		= "mpt_raid",
96 	.probe		= mpt_raid_probe,
97 	.attach		= mpt_raid_attach,
98 	.enable		= mpt_raid_enable,
99 	.event		= mpt_raid_event,
100 	.reset		= mpt_raid_ioc_reset,
101 	.shutdown	= mpt_raid_shutdown,
102 	.detach		= mpt_raid_detach,
103 };
104 
105 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
106 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
107 
108 static mpt_reply_handler_t mpt_raid_reply_handler;
109 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
110 					MSG_DEFAULT_REPLY *reply_frame);
111 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
112 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
113 static void mpt_raid_thread(void *arg);
114 static timeout_t mpt_raid_timer;
115 #if 0
116 static void mpt_enable_vol(struct mpt_softc *mpt,
117 			   struct mpt_raid_volume *mpt_vol, int enable);
118 #endif
119 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
120 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
121     struct cam_path *);
122 #if __FreeBSD_version < 500000
123 #define	mpt_raid_sysctl_attach(x)	do { } while (0)
124 #else
125 static void mpt_raid_sysctl_attach(struct mpt_softc *);
126 #endif
127 
128 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
129 
130 const char *
131 mpt_vol_type(struct mpt_raid_volume *vol)
132 {
133 	switch (vol->config_page->VolumeType) {
134 	case MPI_RAID_VOL_TYPE_IS:
135 		return ("RAID-0");
136 	case MPI_RAID_VOL_TYPE_IME:
137 		return ("RAID-1E");
138 	case MPI_RAID_VOL_TYPE_IM:
139 		return ("RAID-1");
140 	default:
141 		return ("Unknown");
142 	}
143 }
144 
145 const char *
146 mpt_vol_state(struct mpt_raid_volume *vol)
147 {
148 	switch (vol->config_page->VolumeStatus.State) {
149 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
150 		return ("Optimal");
151 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
152 		return ("Degraded");
153 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
154 		return ("Failed");
155 	default:
156 		return ("Unknown");
157 	}
158 }
159 
160 const char *
161 mpt_disk_state(struct mpt_raid_disk *disk)
162 {
163 	switch (disk->config_page.PhysDiskStatus.State) {
164 	case MPI_PHYSDISK0_STATUS_ONLINE:
165 		return ("Online");
166 	case MPI_PHYSDISK0_STATUS_MISSING:
167 		return ("Missing");
168 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
169 		return ("Incompatible");
170 	case MPI_PHYSDISK0_STATUS_FAILED:
171 		return ("Failed");
172 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
173 		return ("Initializing");
174 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
175 		return ("Offline Requested");
176 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
177 		return ("Failed per Host Request");
178 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
179 		return ("Offline");
180 	default:
181 		return ("Unknown");
182 	}
183 }
184 
185 void
186 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
187 	    const char *fmt, ...)
188 {
189 	va_list ap;
190 
191 	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
192 	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
193 	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
194 	va_start(ap, fmt);
195 	vprintf(fmt, ap);
196 	va_end(ap);
197 }
198 
199 void
200 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
201 	     const char *fmt, ...)
202 {
203 	va_list ap;
204 
205 	if (disk->volume != NULL) {
206 		printf("(%s:vol%d:%d): ",
207 		       device_get_nameunit(mpt->dev),
208 		       disk->volume->config_page->VolumeID,
209 		       disk->member_number);
210 	} else {
211 		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
212 		       disk->config_page.PhysDiskBus,
213 		       disk->config_page.PhysDiskID);
214 	}
215 	va_start(ap, fmt);
216 	vprintf(fmt, ap);
217 	va_end(ap);
218 }
219 
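/*
 * CAM async callback.  On AC_FOUND_DEVICE, match the new device against
 * our active volumes and, on a match, adjust its queue depth.
 */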
220 static void
221 mpt_raid_async(void *callback_arg, u_int32_t code,
222 	       struct cam_path *path, void *arg)
223 {
224 	struct mpt_softc *mpt;
225 
226 	mpt = (struct mpt_softc*)callback_arg;
227 	switch (code) {
228 	case AC_FOUND_DEVICE:
229 	{
230 		struct ccb_getdev *cgd;
231 		struct mpt_raid_volume *mpt_vol;
232 
233 		cgd = (struct ccb_getdev *)arg;
234 		if (cgd == NULL) {
235 			break;
236 		}
237 
238 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
239 			 cgd->ccb_h.target_id);
240 
241 		RAID_VOL_FOREACH(mpt, mpt_vol) {
242 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
243 				continue;
244 
245 			if (mpt_vol->config_page->VolumeID
246 			 == cgd->ccb_h.target_id) {
247 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
248 				break;
249 			}
250 		}
251 	}
252 	default:
253 		break;
254 	}
255 }
256 
257 int
258 mpt_raid_probe(struct mpt_softc *mpt)
259 {
260 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
261 		return (ENODEV);
262 	}
263 	return (0);
264 }
265 
266 int
267 mpt_raid_attach(struct mpt_softc *mpt)
268 {
269 	struct ccb_setasync csa;
270 	mpt_handler_t	 handler;
271 	int		 error;
272 
273 	mpt_callout_init(mpt, &mpt->raid_timer);
274 
275 	error = mpt_spawn_raid_thread(mpt);
276 	if (error != 0) {
277 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
278 		goto cleanup;
279 	}
280 
281 	MPT_LOCK(mpt);
282 	handler.reply_handler = mpt_raid_reply_handler;
283 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
284 				     &raid_handler_id);
285 	if (error != 0) {
286 		mpt_prt(mpt, "Unable to register RAID handler!\n");
287 		goto cleanup;
288 	}
289 
290 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
291 	csa.ccb_h.func_code = XPT_SASYNC_CB;
292 	csa.event_enable = AC_FOUND_DEVICE;
293 	csa.callback = mpt_raid_async;
294 	csa.callback_arg = mpt;
295 	xpt_action((union ccb *)&csa);
296 	if (csa.ccb_h.status != CAM_REQ_CMP) {
297 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
298 			"CAM async handler.\n");
299 	}
300 	MPT_UNLOCK(mpt);
301 
302 	mpt_raid_sysctl_attach(mpt);
303 	return (0);
304 cleanup:
305 	MPT_UNLOCK(mpt);
306 	mpt_raid_detach(mpt);
307 	return (error);
308 }
309 
310 int
311 mpt_raid_enable(struct mpt_softc *mpt)
312 {
313 	return (0);
314 }
315 
316 void
317 mpt_raid_detach(struct mpt_softc *mpt)
318 {
319 	struct ccb_setasync csa;
320 	mpt_handler_t handler;
321 
322 	mpt_callout_drain(mpt, &mpt->raid_timer);
323 
324 	MPT_LOCK(mpt);
325 	mpt_terminate_raid_thread(mpt);
326 	handler.reply_handler = mpt_raid_reply_handler;
327 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
328 			       raid_handler_id);
329 	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
330 	csa.ccb_h.func_code = XPT_SASYNC_CB;
331 	csa.event_enable = 0;
332 	csa.callback = mpt_raid_async;
333 	csa.callback_arg = mpt;
334 	xpt_action((union ccb *)&csa);
335 	MPT_UNLOCK(mpt);
336 }
337 
338 static void
339 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
340 {
341 	/* Nothing to do yet. */
342 }
343 
344 static const char *raid_event_txt[] =
345 {
346 	"Volume Created",
347 	"Volume Deleted",
348 	"Volume Settings Changed",
349 	"Volume Status Changed",
350 	"Volume Physical Disk Membership Changed",
351 	"Physical Disk Created",
352 	"Physical Disk Deleted",
353 	"Physical Disk Settings Changed",
354 	"Physical Disk Status Changed",
355 	"Domain Validation Required",
356 	"SMART Data Received",
357 	"Replace Action Started",
358 };
359 
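/*
 * Integrated RAID event handler.  Matches the event against our cached
 * volume and disk state, marks affected entries for refresh, schedules
 * rescans as needed, and logs the event to the console.
 */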
360 static int
361 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
362 	       MSG_EVENT_NOTIFY_REPLY *msg)
363 {
364 	EVENT_DATA_RAID *raid_event;
365 	struct mpt_raid_volume *mpt_vol;
366 	struct mpt_raid_disk *mpt_disk;
367 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
368 	int i;
369 	int print_event;
370 
371 	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
372 		return (0);
373 	}
374 
375 	raid_event = (EVENT_DATA_RAID *)&msg->Data;
376 
377 	mpt_vol = NULL;
378 	vol_pg = NULL;
379 	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
380 		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
381 			mpt_vol = &mpt->raid_volumes[i];
382 			vol_pg = mpt_vol->config_page;
383 
384 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
385 				continue;
386 
387 			if (vol_pg->VolumeID == raid_event->VolumeID
388 			 && vol_pg->VolumeBus == raid_event->VolumeBus)
389 				break;
390 		}
391 		if (i >= mpt->ioc_page2->MaxVolumes) {
392 			mpt_vol = NULL;
393 			vol_pg = NULL;
394 		}
395 	}
396 
397 	mpt_disk = NULL;
398 	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
399 		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
400 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
401 			mpt_disk = NULL;
402 		}
403 	}
404 
405 	print_event = 1;
406 	switch(raid_event->ReasonCode) {
407 	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
408 	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
409 		break;
410 	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
411 		if (mpt_vol != NULL) {
412 			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
413 				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
414 			} else {
415 				/*
416 				 * Coalesce status messages into one
417 				 * per background run of our RAID thread.
418 				 * This removes "spurious" status messages
419 				 * from our output.
420 				 */
421 				print_event = 0;
422 			}
423 		}
424 		break;
425 	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
426 	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
427 		mpt->raid_rescan++;
428 		if (mpt_vol != NULL) {
429 			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
430 		}
431 		break;
432 	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
433 	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
434 		mpt->raid_rescan++;
435 		break;
436 	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
437 	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
438 		mpt->raid_rescan++;
439 		if (mpt_disk != NULL) {
440 			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
441 		}
442 		break;
443 	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
444 		mpt->raid_rescan++;
445 		break;
446 	case MPI_EVENT_RAID_RC_SMART_DATA:
447 	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
448 		break;
449 	}
450 
451 	if (print_event) {
452 		if (mpt_disk != NULL) {
453 			mpt_disk_prt(mpt, mpt_disk, "");
454 		} else if (mpt_vol != NULL) {
455 			mpt_vol_prt(mpt, mpt_vol, "");
456 		} else {
457 			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
458 				raid_event->VolumeID);
459 
460 			if (raid_event->PhysDiskNum != 0xFF)
461 				mpt_prtc(mpt, ":%d): ",
462 					 raid_event->PhysDiskNum);
463 			else
464 				mpt_prtc(mpt, "): ");
465 		}
466 
467 		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
468 			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
469 				 raid_event->ReasonCode);
470 		else
471 			mpt_prtc(mpt, "%s\n",
472 				 raid_event_txt[raid_event->ReasonCode]);
473 	}
474 
475 	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
476 		/* XXX Use CAM's print sense for this... */
477 		if (mpt_disk != NULL)
478 			mpt_disk_prt(mpt, mpt_disk, "");
479 		else
480 			mpt_prt(mpt, "Volume(%d:%d:%d: ",
481 			    raid_event->VolumeBus, raid_event->VolumeID,
482 			    raid_event->PhysDiskNum);
483 		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
484 			 raid_event->ASC, raid_event->ASCQ);
485 	}
486 
487 	mpt_raid_wakeup(mpt);
488 	return (1);
489 }
490 
491 static void
492 mpt_raid_shutdown(struct mpt_softc *mpt)
493 {
494 	struct mpt_raid_volume *mpt_vol;
495 
496 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
497 		return;
498 	}
499 
500 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
501 	RAID_VOL_FOREACH(mpt, mpt_vol) {
502 		mpt_verify_mwce(mpt, mpt_vol);
503 	}
504 }
505 
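/*
 * Handle a completed RAID action request.  Any reply frame is parsed by
 * mpt_raid_reply_frame_handler(); the request is then marked done and
 * either handed back to a sleeping waiter or freed.
 */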
506 static int
507 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
508     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
509 {
510 	int free_req;
511 
512 	if (req == NULL)
513 		return (TRUE);
514 
515 	free_req = TRUE;
516 	if (reply_frame != NULL)
517 		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
518 #ifdef NOTYET
519 	else if (req->ccb != NULL) {
520 		/* Complete Quiesce CCB with error... */
521 	}
522 #endif
523 
524 	req->state &= ~REQ_STATE_QUEUED;
525 	req->state |= REQ_STATE_DONE;
526 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
527 
528 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
529 		wakeup(req);
530 	} else if (free_req) {
531 		mpt_free_request(mpt, req);
532 	}
533 
534 	return (TRUE);
535 }
536 
537 /*
538  * Parse additional completion information in the reply
539  * frame for RAID I/O requests.
540  */
541 static int
542 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
543     MSG_DEFAULT_REPLY *reply_frame)
544 {
545 	MSG_RAID_ACTION_REPLY *reply;
546 	struct mpt_raid_action_result *action_result;
547 	MSG_RAID_ACTION_REQUEST *rap;
548 
549 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
550 	req->IOCStatus = le16toh(reply->IOCStatus);
551 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
552 
553 	switch (rap->Action) {
554 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
555 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
556 		break;
557 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
558 		mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
559 		break;
560 	default:
561 		break;
562 	}
563 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
564 	memcpy(&action_result->action_data, &reply->ActionData,
565 	    sizeof(action_result->action_data));
566 	action_result->action_status = le16toh(reply->ActionStatus);
567 	return (TRUE);
568 }
569 
570 /*
571  * Utility routine to perform a RAID action command.
572  */
573 int
574 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
575 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
576 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
577 		   int write, int wait)
578 {
579 	MSG_RAID_ACTION_REQUEST *rap;
580 	SGE_SIMPLE32 *se;
581 
582 	rap = req->req_vbuf;
583 	memset(rap, 0, sizeof *rap);
584 	rap->Action = Action;
585 	rap->ActionDataWord = htole32(ActionDataWord);
586 	rap->Function = MPI_FUNCTION_RAID_ACTION;
587 	rap->VolumeID = vol->config_page->VolumeID;
588 	rap->VolumeBus = vol->config_page->VolumeBus;
589 	if (disk != 0)
590 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
591 	else
592 		rap->PhysDiskNum = 0xFF;
593 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
594 	se->Address = htole32(addr);
595 	MPI_pSGE_SET_LENGTH(se, len);
596 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
597 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
598 	    MPI_SGE_FLAGS_END_OF_LIST |
599 	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
600 	se->FlagsLength = htole32(se->FlagsLength);
601 	rap->MsgContext = htole32(req->index | raid_handler_id);
602 
603 	mpt_check_doorbell(mpt);
604 	mpt_send_cmd(mpt, req);
605 
606 	if (wait) {
607 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
608 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
609 	} else {
610 		return (0);
611 	}
612 }
613 
614 /*************************** RAID Status Monitoring ***************************/
615 static int
616 mpt_spawn_raid_thread(struct mpt_softc *mpt)
617 {
618 	int error;
619 
620 	/*
621 	 * Freeze out any CAM transactions until our thread
622 	 * is able to run at least once.  We need to update
623 	 * our RAID pages before accepting I/O or we may
624 	 * reject I/O to an ID we later determine is for a
625 	 * hidden physdisk.
626 	 */
627 	MPT_LOCK(mpt);
628 	xpt_freeze_simq(mpt->phydisk_sim, 1);
629 	MPT_UNLOCK(mpt);
630 	error = mpt_kthread_create(mpt_raid_thread, mpt,
631 	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
632 	    "mpt_raid%d", mpt->unit);
633 	if (error != 0) {
634 		MPT_LOCK(mpt);
635 		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
636 		MPT_UNLOCK(mpt);
637 	}
638 	return (error);
639 }
640 
641 static void
642 mpt_terminate_raid_thread(struct mpt_softc *mpt)
643 {
644 
645 	if (mpt->raid_thread == NULL) {
646 		return;
647 	}
648 	mpt->shutdwn_raid = 1;
649 	wakeup(&mpt->raid_volumes);
650 	/*
651 	 * Sleep on a slightly different location
652 	 * for this interlock just for added safety.
653 	 */
654 	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
655 }
656 
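/*
 * RAID monitoring kthread.  Sleeps until woken by an event or the
 * refresh timer, re-reads the RAID configuration pages, releases the
 * frozen physical disk SIM queue after the first successful update,
 * and requests a CAM rescan of the physical disk bus when needed.
 */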
657 static void
658 mpt_raid_thread(void *arg)
659 {
660 	struct mpt_softc *mpt;
661 	int firstrun;
662 
663 	mpt = (struct mpt_softc *)arg;
664 	firstrun = 1;
665 	MPT_LOCK(mpt);
666 	while (mpt->shutdwn_raid == 0) {
667 
668 		if (mpt->raid_wakeup == 0) {
669 			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
670 			continue;
671 		}
672 
673 		mpt->raid_wakeup = 0;
674 
675 		if (mpt_refresh_raid_data(mpt)) {
676 			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
677 			continue;
678 		}
679 
680 		/*
681 		 * Now that we have our first snapshot of RAID data,
682 		 * allow CAM to access our physical disk bus.
683 		 */
684 		if (firstrun) {
685 			firstrun = 0;
686 			MPTLOCK_2_CAMLOCK(mpt);
687 			xpt_release_simq(mpt->phydisk_sim, TRUE);
688 			CAMLOCK_2_MPTLOCK(mpt);
689 		}
690 
691 		if (mpt->raid_rescan != 0) {
692 			union ccb *ccb;
693 			int error;
694 
695 			mpt->raid_rescan = 0;
696 			MPT_UNLOCK(mpt);
697 
698 			ccb = xpt_alloc_ccb();
699 
700 			MPT_LOCK(mpt);
701 			error = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
702 			    cam_sim_path(mpt->phydisk_sim),
703 			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
704 			if (error != CAM_REQ_CMP) {
705 				xpt_free_ccb(ccb);
706 				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
707 			} else {
708 				xpt_rescan(ccb);
709 			}
710 		}
711 	}
712 	mpt->raid_thread = NULL;
713 	wakeup(&mpt->raid_thread);
714 	MPT_UNLOCK(mpt);
715 	mpt_kthread_exit(0);
716 }
717 
718 #if 0
719 static void
720 mpt_raid_quiesce_timeout(void *arg)
721 {
722 	/* Complete the CCB with error */
723 	/* COWWWW */
724 }
725 
726 static timeout_t mpt_raid_quiesce_timeout;
727 cam_status
728 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
729 		      request_t *req)
730 {
731 	union ccb *ccb;
732 
733 	ccb = req->ccb;
734 	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
735 		return (CAM_REQ_CMP);
736 
737 	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
738 		int rv;
739 
740 		mpt_disk->flags |= MPT_RDF_QUIESCING;
741 		xpt_freeze_devq(ccb->ccb_h.path, 1);
742 
743 		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
744 					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
745 					/*ActionData*/0, /*addr*/0,
746 					/*len*/0, /*write*/FALSE,
747 					/*wait*/FALSE);
748 		if (rv != 0)
749 			return (CAM_REQ_CMP_ERR);
750 
751 		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
752 #if 0
753 		if (rv == ETIMEDOUT) {
754 			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
755 				     "Quiesce Timed-out\n");
756 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
757 			return (CAM_REQ_CMP_ERR);
758 		}
759 
760 		ar = REQ_TO_RAID_ACTION_RESULT(req);
761 		if (rv != 0
762 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
763 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
764 			mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
765 				    "%d:%x:%x\n", rv, req->IOCStatus,
766 				    ar->action_status);
767 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
768 			return (CAM_REQ_CMP_ERR);
769 		}
770 #endif
771 		return (CAM_REQ_INPROG);
772 	}
773 	return (CAM_REQUEUE_REQ);
774 }
775 #endif
776 
777 /* XXX Ignores that there may be multiple busses/IOCs involved. */
778 cam_status
779 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
780 {
781 	struct mpt_raid_disk *mpt_disk;
782 
783 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
784 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
785 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
786 		*tgt = mpt_disk->config_page.PhysDiskID;
787 		return (0);
788 	}
789 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
790 		 ccb->ccb_h.target_id);
791 	return (-1);
792 }
793 
794 /* XXX Ignores that there may be multiple busses/IOCs involved. */
795 int
796 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
797 {
798 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
799 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
800 
801 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
802 		return (0);
803 	}
804 	ioc_vol = mpt->ioc_page2->RaidVolume;
805 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
806 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
807 		if (ioc_vol->VolumeID == tgt) {
808 			return (1);
809 		}
810 	}
811 	return (0);
812 }
813 
814 #if 0
815 static void
816 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
817 	       int enable)
818 {
819 	request_t *req;
820 	struct mpt_raid_action_result *ar;
821 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
822 	int enabled;
823 	int rv;
824 
825 	vol_pg = mpt_vol->config_page;
826 	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
827 
828 	/*
829 	 * If the setting matches the configuration,
830 	 * there is nothing to do.
831 	 */
832 	if ((enabled && enable)
833 	 || (!enabled && !enable))
834 		return;
835 
836 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
837 	if (req == NULL) {
838 		mpt_vol_prt(mpt, mpt_vol,
839 			    "mpt_enable_vol: Get request failed!\n");
840 		return;
841 	}
842 
843 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
844 				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
845 				       : MPI_RAID_ACTION_DISABLE_VOLUME,
846 				/*data*/0, /*addr*/0, /*len*/0,
847 				/*write*/FALSE, /*wait*/TRUE);
848 	if (rv == ETIMEDOUT) {
849 		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
850 			    "%s Volume Timed-out\n",
851 			    enable ? "Enable" : "Disable");
852 		return;
853 	}
854 	ar = REQ_TO_RAID_ACTION_RESULT(req);
855 	if (rv != 0
856 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
857 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
858 		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
859 			    enable ? "Enable" : "Disable",
860 			    rv, req->IOCStatus, ar->action_status);
861 	}
862 
863 	mpt_free_request(mpt, req);
864 }
865 #endif
866 
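/*
 * Reconcile a volume's member Write Cache Enable (WCE) setting with the
 * driver's configured policy, issuing a CHANGE_VOLUME_SETTINGS RAID
 * action when the two disagree.
 */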
867 static void
868 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
869 {
870 	request_t *req;
871 	struct mpt_raid_action_result *ar;
872 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
873 	uint32_t data;
874 	int rv;
875 	int resyncing;
876 	int mwce;
877 
878 	vol_pg = mpt_vol->config_page;
879 	resyncing = vol_pg->VolumeStatus.Flags
880 		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
881 	mwce = vol_pg->VolumeSettings.Settings
882 	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
883 
884 	/*
885 	 * If the setting matches the configuration,
886 	 * there is nothing to do.
887 	 */
888 	switch (mpt->raid_mwce_setting) {
889 	case MPT_RAID_MWCE_REBUILD_ONLY:
890 		if ((resyncing && mwce) || (!resyncing && !mwce)) {
891 			return;
892 		}
893 		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
894 		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
895 			/*
896 			 * Wait one more status update to see if
897 			 * resyncing gets enabled.  It gets disabled
898 			 * temporarily when WCE is changed.
899 			 */
900 			return;
901 		}
902 		break;
903 	case MPT_RAID_MWCE_ON:
904 		if (mwce)
905 			return;
906 		break;
907 	case MPT_RAID_MWCE_OFF:
908 		if (!mwce)
909 			return;
910 		break;
911 	case MPT_RAID_MWCE_NC:
912 		return;
913 	}
914 
915 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
916 	if (req == NULL) {
917 		mpt_vol_prt(mpt, mpt_vol,
918 			    "mpt_verify_mwce: Get request failed!\n");
919 		return;
920 	}
921 
922 	vol_pg->VolumeSettings.Settings ^=
923 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
924 	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
925 	vol_pg->VolumeSettings.Settings ^=
926 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
927 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
928 				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
929 				data, /*addr*/0, /*len*/0,
930 				/*write*/FALSE, /*wait*/TRUE);
931 	if (rv == ETIMEDOUT) {
932 		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
933 			    "Write Cache Enable Timed-out\n");
934 		return;
935 	}
936 	ar = REQ_TO_RAID_ACTION_RESULT(req);
937 	if (rv != 0
938 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
939 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
940 		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
941 			    "%d:%x:%x\n", rv, req->IOCStatus,
942 			    ar->action_status);
943 	} else {
944 		vol_pg->VolumeSettings.Settings ^=
945 		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
946 	}
947 	mpt_free_request(mpt, req);
948 }
949 
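/*
 * Reconcile a volume's resync rate and resync priority with the
 * driver's configured resync rate, issuing SET_RESYNC_RATE or
 * CHANGE_VOLUME_SETTINGS RAID actions as needed.
 */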
950 static void
951 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
952 {
953 	request_t *req;
954 	struct mpt_raid_action_result *ar;
955 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
956 	u_int prio;
957 	int rv;
958 
959 	vol_pg = mpt_vol->config_page;
960 
961 	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
962 		return;
963 
964 	/*
965 	 * If the current RAID resync rate does not
966 	 * match our configured rate, update it.
967 	 */
968 	prio = vol_pg->VolumeSettings.Settings
969 	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
970 	if (vol_pg->ResyncRate != 0
971 	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
972 
973 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
974 		if (req == NULL) {
975 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
976 				    "Get request failed!\n");
977 			return;
978 		}
979 
980 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
981 					MPI_RAID_ACTION_SET_RESYNC_RATE,
982 					mpt->raid_resync_rate, /*addr*/0,
983 					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
984 		if (rv == ETIMEDOUT) {
985 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
986 				    "Resync Rate Setting Timed-out\n");
987 			return;
988 		}
989 
990 		ar = REQ_TO_RAID_ACTION_RESULT(req);
991 		if (rv != 0
992 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
993 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
994 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
995 				    "%d:%x:%x\n", rv, req->IOCStatus,
996 				    ar->action_status);
997 		} else
998 			vol_pg->ResyncRate = mpt->raid_resync_rate;
999 		mpt_free_request(mpt, req);
1000 	} else if ((prio && mpt->raid_resync_rate < 128)
1001 		|| (!prio && mpt->raid_resync_rate >= 128)) {
1002 		uint32_t data;
1003 
1004 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1005 		if (req == NULL) {
1006 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1007 				    "Get request failed!\n");
1008 			return;
1009 		}
1010 
1011 		vol_pg->VolumeSettings.Settings ^=
1012 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1013 		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1014 		vol_pg->VolumeSettings.Settings ^=
1015 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1016 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1017 					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1018 					data, /*addr*/0, /*len*/0,
1019 					/*write*/FALSE, /*wait*/TRUE);
1020 		if (rv == ETIMEDOUT) {
1021 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1022 				    "Resync Rate Setting Timed-out\n");
1023 			return;
1024 		}
1025 		ar = REQ_TO_RAID_ACTION_RESULT(req);
1026 		if (rv != 0
1027 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1028 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1029 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1030 				    "%d:%x:%x\n", rv, req->IOCStatus,
1031 				    ar->action_status);
1032 		} else {
1033 			vol_pg->VolumeSettings.Settings ^=
1034 			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1035 		}
1036 
1037 		mpt_free_request(mpt, req);
1038 	}
1039 }
1040 
1041 static void
1042 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1043 		       struct cam_path *path)
1044 {
1045 	struct ccb_relsim crs;
1046 
1047 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1048 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1049 	crs.ccb_h.flags = CAM_DEV_QFREEZE;
1050 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1051 	crs.openings = mpt->raid_queue_depth;
1052 	xpt_action((union ccb *)&crs);
1053 	if (crs.ccb_h.status != CAM_REQ_CMP)
1054 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1055 			    "with CAM status %#x\n", crs.ccb_h.status);
1056 }
1057 
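/*
 * Print a summary of a volume's settings, hot spare pool membership,
 * and per-member disk status to the console.
 */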
1058 static void
1059 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1060 {
1061 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1062 	u_int i;
1063 
1064 	vol_pg = mpt_vol->config_page;
1065 	mpt_vol_prt(mpt, mpt_vol, "Settings (");
1066 	for (i = 1; i <= 0x8000; i <<= 1) {
1067 		switch (vol_pg->VolumeSettings.Settings & i) {
1068 		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1069 			mpt_prtc(mpt, " Member-WCE");
1070 			break;
1071 		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1072 			mpt_prtc(mpt, " Offline-On-SMART-Err");
1073 			break;
1074 		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1075 			mpt_prtc(mpt, " Hot-Plug-Spares");
1076 			break;
1077 		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1078 			mpt_prtc(mpt, " High-Priority-ReSync");
1079 			break;
1080 		default:
1081 			break;
1082 		}
1083 	}
1084 	mpt_prtc(mpt, " )\n");
1085 	if (vol_pg->VolumeSettings.HotSparePool != 0) {
1086 		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1087 			    powerof2(vol_pg->VolumeSettings.HotSparePool)
1088 			  ? ":" : "s:");
1089 		for (i = 0; i < 8; i++) {
1090 			u_int mask;
1091 
1092 			mask = 0x1 << i;
1093 			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1094 				continue;
1095 			mpt_prtc(mpt, " %d", i);
1096 		}
1097 		mpt_prtc(mpt, "\n");
1098 	}
1099 	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1100 	for (i = 0; i < vol_pg->NumPhysDisks; i++){
1101 		struct mpt_raid_disk *mpt_disk;
1102 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1103 		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1104 		U8 f, s;
1105 
1106 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1107 		disk_pg = &mpt_disk->config_page;
1108 		mpt_prtc(mpt, "      ");
1109 		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1110 			 pt_bus, disk_pg->PhysDiskID);
1111 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1112 			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1113 			    "Primary" : "Secondary");
1114 		} else {
1115 			mpt_prtc(mpt, "Stripe Position %d",
1116 				 mpt_disk->member_number);
1117 		}
1118 		f = disk_pg->PhysDiskStatus.Flags;
1119 		s = disk_pg->PhysDiskStatus.State;
1120 		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1121 			mpt_prtc(mpt, " Out of Sync");
1122 		}
1123 		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1124 			mpt_prtc(mpt, " Quiesced");
1125 		}
1126 		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1127 			mpt_prtc(mpt, " Inactive");
1128 		}
1129 		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1130 			mpt_prtc(mpt, " Was Optimal");
1131 		}
1132 		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1133 			mpt_prtc(mpt, " Was Non-Optimal");
1134 		}
1135 		switch (s) {
1136 		case MPI_PHYSDISK0_STATUS_ONLINE:
1137 			mpt_prtc(mpt, " Online");
1138 			break;
1139 		case MPI_PHYSDISK0_STATUS_MISSING:
1140 			mpt_prtc(mpt, " Missing");
1141 			break;
1142 		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1143 			mpt_prtc(mpt, " Incompatible");
1144 			break;
1145 		case MPI_PHYSDISK0_STATUS_FAILED:
1146 			mpt_prtc(mpt, " Failed");
1147 			break;
1148 		case MPI_PHYSDISK0_STATUS_INITIALIZING:
1149 			mpt_prtc(mpt, " Initializing");
1150 			break;
1151 		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1152 			mpt_prtc(mpt, " Requested Offline");
1153 			break;
1154 		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1155 			mpt_prtc(mpt, " Requested Failed");
1156 			break;
1157 		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1158 		default:
1159 			mpt_prtc(mpt, " Offline Other (%x)", s);
1160 			break;
1161 		}
1162 		mpt_prtc(mpt, "\n");
1163 	}
1164 }
1165 
1166 static void
1167 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1168 {
1169 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1170 	int rd_bus = cam_sim_bus(mpt->sim);
1171 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1172 	u_int i;
1173 
1174 	disk_pg = &mpt_disk->config_page;
1175 	mpt_disk_prt(mpt, mpt_disk,
1176 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1177 		     device_get_nameunit(mpt->dev), rd_bus,
1178 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1179 		     pt_bus, mpt_disk - mpt->raid_disks);
1180 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1181 		return;
1182 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1183 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1184 		   ? ":" : "s:");
1185 	for (i = 0; i < 8; i++) {
1186 		u_int mask;
1187 
1188 		mask = 0x1 << i;
1189 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1190 			continue;
1191 		mpt_prtc(mpt, " %d", i);
1192 	}
1193 	mpt_prtc(mpt, "\n");
1194 }
1195 
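/*
 * Re-read the physical disk configuration page for a member disk and
 * convert it to host byte order.
 */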
1196 static void
1197 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1198 		      IOC_3_PHYS_DISK *ioc_disk)
1199 {
1200 	int rv;
1201 
1202 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1203 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1204 				 &mpt_disk->config_page.Header,
1205 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1206 	if (rv != 0) {
1207 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1208 			"Failed to read RAID Disk Hdr(%d)\n",
1209 		 	ioc_disk->PhysDiskNum);
1210 		return;
1211 	}
1212 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1213 				   &mpt_disk->config_page.Header,
1214 				   sizeof(mpt_disk->config_page),
1215 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1216 	if (rv != 0)
1217 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1218 			"Failed to read RAID Disk Page(%d)\n",
1219 		 	ioc_disk->PhysDiskNum);
1220 	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1221 }
1222 
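/*
 * Re-read a volume's RAID configuration page, update member disk
 * bookkeeping, and, if a resync is in progress, fetch the resync
 * progress indicator via an INDICATOR_STRUCT RAID action.
 */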
1223 static void
1224 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1225     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1226 {
1227 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1228 	struct mpt_raid_action_result *ar;
1229 	request_t *req;
1230 	int rv;
1231 	int i;
1232 
1233 	vol_pg = mpt_vol->config_page;
1234 	mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1235 
1236 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1237 	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1238 	if (rv != 0) {
1239 		mpt_vol_prt(mpt, mpt_vol,
1240 		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1241 		    ioc_vol->VolumePageNumber);
1242 		return;
1243 	}
1244 
1245 	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1246 	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1247 	if (rv != 0) {
1248 		mpt_vol_prt(mpt, mpt_vol,
1249 		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1250 		    ioc_vol->VolumePageNumber);
1251 		return;
1252 	}
1253 	mpt2host_config_page_raid_vol_0(vol_pg);
1254 
1255 	mpt_vol->flags |= MPT_RVF_ACTIVE;
1256 
1257 	/* Update disk entry array data. */
1258 	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1259 		struct mpt_raid_disk *mpt_disk;
1260 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1261 		mpt_disk->volume = mpt_vol;
1262 		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1263 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1264 			mpt_disk->member_number--;
1265 		}
1266 	}
1267 
1268 	if ((vol_pg->VolumeStatus.Flags
1269 	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1270 		return;
1271 
1272 	req = mpt_get_request(mpt, TRUE);
1273 	if (req == NULL) {
1274 		mpt_vol_prt(mpt, mpt_vol,
1275 		    "mpt_refresh_raid_vol: Get request failed!\n");
1276 		return;
1277 	}
1278 	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1279 	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1280 	if (rv == ETIMEDOUT) {
1281 		mpt_vol_prt(mpt, mpt_vol,
1282 		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1283 		mpt_free_request(mpt, req);
1284 		return;
1285 	}
1286 
1287 	ar = REQ_TO_RAID_ACTION_RESULT(req);
1288 	if (rv == 0
1289 	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1290 	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1291 		memcpy(&mpt_vol->sync_progress,
1292 		       &ar->action_data.indicator_struct,
1293 		       sizeof(mpt_vol->sync_progress));
1294 		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1295 	} else {
1296 		mpt_vol_prt(mpt, mpt_vol,
1297 		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1298 	}
1299 	mpt_free_request(mpt, req);
1300 }
1301 
1302 /*
1303  * Update in-core information about RAID support.  We update any entries
1304  * that didn't previously exist or have been marked as needing to
1305  * be updated by our event handler.  Interesting changes are displayed
1306  * to the console.
1307  */
1308 int
1309 mpt_refresh_raid_data(struct mpt_softc *mpt)
1310 {
1311 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1312 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1313 	IOC_3_PHYS_DISK *ioc_disk;
1314 	IOC_3_PHYS_DISK *ioc_last_disk;
1315 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1316 	size_t len;
1317 	int rv;
1318 	int i;
1319 	u_int nonopt_volumes;
1320 
1321 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1322 		return (0);
1323 	}
1324 
1325 	/*
1326 	 * Mark all items as unreferenced by the configuration.
1327 	 * This allows us to find, report, and discard stale
1328 	 * entries.
1329 	 */
1330 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1331 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1332 	}
1333 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1334 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1335 	}
1336 
1337 	/*
1338 	 * Get Physical Disk information.
1339 	 */
1340 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1341 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1342 				   &mpt->ioc_page3->Header, len,
1343 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1344 	if (rv) {
1345 		mpt_prt(mpt,
1346 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1347 		return (-1);
1348 	}
1349 	mpt2host_config_page_ioc3(mpt->ioc_page3);
1350 
1351 	ioc_disk = mpt->ioc_page3->PhysDisk;
1352 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1353 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1354 		struct mpt_raid_disk *mpt_disk;
1355 
1356 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1357 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1358 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1359 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1360 
1361 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1362 
1363 		}
1364 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1365 		mpt->raid_rescan++;
1366 	}
1367 
1368 	/*
1369 	 * Refresh volume data.
1370 	 */
1371 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1372 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1373 				   &mpt->ioc_page2->Header, len,
1374 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1375 	if (rv) {
1376 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1377 			"Failed to read IOC Page 2\n");
1378 		return (-1);
1379 	}
1380 	mpt2host_config_page_ioc2(mpt->ioc_page2);
1381 
1382 	ioc_vol = mpt->ioc_page2->RaidVolume;
1383 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1384 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1385 		struct mpt_raid_volume *mpt_vol;
1386 
1387 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1388 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1389 		vol_pg = mpt_vol->config_page;
1390 		if (vol_pg == NULL)
1391 			continue;
1392 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1393 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1394 		 || (vol_pg->VolumeStatus.Flags
1395 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1396 
1397 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1398 		}
1399 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1400 	}
1401 
1402 	nonopt_volumes = 0;
1403 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1404 		struct mpt_raid_volume *mpt_vol;
1405 		uint64_t total;
1406 		uint64_t left;
1407 		int m;
1408 		u_int prio;
1409 
1410 		mpt_vol = &mpt->raid_volumes[i];
1411 
1412 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1413 			continue;
1414 		}
1415 
1416 		vol_pg = mpt_vol->config_page;
1417 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1418 		 == MPT_RVF_ANNOUNCED) {
1419 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1420 			mpt_vol->flags = 0;
1421 			continue;
1422 		}
1423 
1424 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1425 			mpt_announce_vol(mpt, mpt_vol);
1426 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1427 		}
1428 
1429 		if (vol_pg->VolumeStatus.State !=
1430 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1431 			nonopt_volumes++;
1432 
1433 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1434 			continue;
1435 
1436 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1437 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1438 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1439 		mpt_verify_mwce(mpt, mpt_vol);
1440 
1441 		if (vol_pg->VolumeStatus.Flags == 0) {
1442 			continue;
1443 		}
1444 
1445 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1446 		for (m = 1; m <= 0x80; m <<= 1) {
1447 			switch (vol_pg->VolumeStatus.Flags & m) {
1448 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1449 				mpt_prtc(mpt, " Enabled");
1450 				break;
1451 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1452 				mpt_prtc(mpt, " Quiesced");
1453 				break;
1454 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1455 				mpt_prtc(mpt, " Re-Syncing");
1456 				break;
1457 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1458 				mpt_prtc(mpt, " Inactive");
1459 				break;
1460 			default:
1461 				break;
1462 			}
1463 		}
1464 		mpt_prtc(mpt, " )\n");
1465 
1466 		if ((vol_pg->VolumeStatus.Flags
1467 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1468 			continue;
1469 
1470 		mpt_verify_resync_rate(mpt, mpt_vol);
1471 
1472 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1473 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1474 		if (vol_pg->ResyncRate != 0) {
1475 
1476 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1477 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1478 			    prio / 1000, prio % 1000);
1479 		} else {
1480 			prio = vol_pg->VolumeSettings.Settings
1481 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1482 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1483 			    prio ? "High" : "Low");
1484 		}
1485 #if __FreeBSD_version >= 500000
1486 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1487 			    "blocks remaining\n", (uintmax_t)left,
1488 			    (uintmax_t)total);
1489 #else
1490 		mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1491 			    "blocks remaining\n", (uint64_t)left,
1492 			    (uint64_t)total);
1493 #endif
1494 
1495 		/* Periodically report on sync progress. */
1496 		mpt_schedule_raid_refresh(mpt);
1497 	}
1498 
1499 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1500 		struct mpt_raid_disk *mpt_disk;
1501 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1502 		int m;
1503 
1504 		mpt_disk = &mpt->raid_disks[i];
1505 		disk_pg = &mpt_disk->config_page;
1506 
1507 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1508 			continue;
1509 
1510 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1511 		 == MPT_RDF_ANNOUNCED) {
1512 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1513 			mpt_disk->flags = 0;
1514 			mpt->raid_rescan++;
1515 			continue;
1516 		}
1517 
1518 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1519 
1520 			mpt_announce_disk(mpt, mpt_disk);
1521 			mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1522 		}
1523 
1524 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1525 			continue;
1526 
1527 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1528 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1529 		if (disk_pg->PhysDiskStatus.Flags == 0)
1530 			continue;
1531 
1532 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1533 		for (m = 1; m <= 0x80; m <<= 1) {
1534 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1535 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1536 				mpt_prtc(mpt, " Out-Of-Sync");
1537 				break;
1538 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1539 				mpt_prtc(mpt, " Quiesced");
1540 				break;
1541 			default:
1542 				break;
1543 			}
1544 		}
1545 		mpt_prtc(mpt, " )\n");
1546 	}
1547 
1548 	mpt->raid_nonopt_volumes = nonopt_volumes;
1549 	return (0);
1550 }
1551 
1552 static void
1553 mpt_raid_timer(void *arg)
1554 {
1555 	struct mpt_softc *mpt;
1556 
1557 	mpt = (struct mpt_softc *)arg;
1558 #if __FreeBSD_version < 500000
1559 	MPT_LOCK(mpt);
1560 #endif
1561 	MPT_LOCK_ASSERT(mpt);
1562 	mpt_raid_wakeup(mpt);
1563 #if __FreeBSD_version < 500000
1564 	MPT_UNLOCK(mpt);
1565 #endif
1566 }
1567 
1568 void
1569 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1570 {
1571 	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1572 		      mpt_raid_timer, mpt);
1573 }
1574 
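/*
 * Release all RAID-related allocations made during attach.
 */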
1575 void
1576 mpt_raid_free_mem(struct mpt_softc *mpt)
1577 {
1578 
1579 	if (mpt->raid_volumes) {
1580 		struct mpt_raid_volume *mpt_raid;
1581 		int i;
1582 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1583 			mpt_raid = &mpt->raid_volumes[i];
1584 			if (mpt_raid->config_page) {
1585 				free(mpt_raid->config_page, M_DEVBUF);
1586 				mpt_raid->config_page = NULL;
1587 			}
1588 		}
1589 		free(mpt->raid_volumes, M_DEVBUF);
1590 		mpt->raid_volumes = NULL;
1591 	}
1592 	if (mpt->raid_disks) {
1593 		free(mpt->raid_disks, M_DEVBUF);
1594 		mpt->raid_disks = NULL;
1595 	}
1596 	if (mpt->ioc_page2) {
1597 		free(mpt->ioc_page2, M_DEVBUF);
1598 		mpt->ioc_page2 = NULL;
1599 	}
1600 	if (mpt->ioc_page3) {
1601 		free(mpt->ioc_page3, M_DEVBUF);
1602 		mpt->ioc_page3 = NULL;
1603 	}
1604 	mpt->raid_max_volumes = 0;
1605 	mpt->raid_max_disks = 0;
1606 }
1607 
1608 #if __FreeBSD_version >= 500000
1609 static int
1610 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1611 {
1612 	struct mpt_raid_volume *mpt_vol;
1613 
1614 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1615 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1616 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1617 		return (EINVAL);
1618 
1619 	MPT_LOCK(mpt);
1620 	mpt->raid_resync_rate = rate;
1621 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1622 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1623 			continue;
1624 		}
1625 		mpt_verify_resync_rate(mpt, mpt_vol);
1626 	}
1627 	MPT_UNLOCK(mpt);
1628 	return (0);
1629 }
1630 
1631 static int
1632 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1633 {
1634 	struct mpt_raid_volume *mpt_vol;
1635 
1636 	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1637 		return (EINVAL);
1638 
1639 	MPT_LOCK(mpt);
1640 	mpt->raid_queue_depth = vol_queue_depth;
1641 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1642 		struct cam_path *path;
1643 		int error;
1644 
1645 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1646 			continue;
1647 
1648 		mpt->raid_rescan = 0;
1649 
1650 		MPTLOCK_2_CAMLOCK(mpt);
1651 		error = xpt_create_path(&path, xpt_periph,
1652 					cam_sim_path(mpt->sim),
1653 					mpt_vol->config_page->VolumeID,
1654 					/*lun*/0);
1655 		if (error != CAM_REQ_CMP) {
1656 			CAMLOCK_2_MPTLOCK(mpt);
1657 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1658 			continue;
1659 		}
1660 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1661 		xpt_free_path(path);
1662 		CAMLOCK_2_MPTLOCK(mpt);
1663 	}
1664 	MPT_UNLOCK(mpt);
1665 	return (0);
1666 }
1667 
1668 static int
1669 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1670 {
1671 	struct mpt_raid_volume *mpt_vol;
1672 	int force_full_resync;
1673 
1674 	MPT_LOCK(mpt);
1675 	if (mwce == mpt->raid_mwce_setting) {
1676 		MPT_UNLOCK(mpt);
1677 		return (0);
1678 	}
1679 
1680 	/*
1681 	 * Catch MWCE being left on due to a failed shutdown.  Since
1682 	 * sysctls cannot be set by the loader, we treat the first
1683 	 * setting of this variable specially and force a full volume
1684 	 * resync if MWCE is enabled and a resync is in progress.
1685 	 */
1686 	force_full_resync = 0;
1687 	if (mpt->raid_mwce_set == 0
1688 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1689 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1690 		force_full_resync = 1;
1691 
1692 	mpt->raid_mwce_setting = mwce;
1693 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1694 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1695 		int resyncing;
1696 		int mwce;
1697 
1698 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1699 			continue;
1700 
1701 		vol_pg = mpt_vol->config_page;
1702 		resyncing = vol_pg->VolumeStatus.Flags
1703 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1704 		mwce = vol_pg->VolumeSettings.Settings
1705 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1706 		if (force_full_resync && resyncing && mwce) {
1707 
1708 			/*
1709 			 * XXX disable/enable volume should force a resync,
1710 			 *     but we'll need to quiesce, drain, and restart
1711 			 *     I/O to do that.
1712 			 */
1713 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1714 				    "detected.  Suggest full resync.\n");
1715 		}
1716 		mpt_verify_mwce(mpt, mpt_vol);
1717 	}
1718 	mpt->raid_mwce_set = 1;
1719 	MPT_UNLOCK(mpt);
1720 	return (0);
1721 }
1722 const char *mpt_vol_mwce_strs[] =
1723 {
1724 	"On",
1725 	"Off",
1726 	"On-During-Rebuild",
1727 	"NC"
1728 };
1729 
1730 static int
1731 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1732 {
1733 	char inbuf[20];
1734 	struct mpt_softc *mpt;
1735 	const char *str;
1736 	int error;
1737 	u_int size;
1738 	u_int i;
1739 
1740 	GIANT_REQUIRED;
1741 
1742 	mpt = (struct mpt_softc *)arg1;
1743 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1744 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1745 	if (error || !req->newptr) {
1746 		return (error);
1747 	}
1748 
1749 	size = req->newlen - req->newidx;
1750 	if (size >= sizeof(inbuf)) {
1751 		return (EINVAL);
1752 	}
1753 
1754 	error = SYSCTL_IN(req, inbuf, size);
1755 	if (error) {
1756 		return (error);
1757 	}
1758 	inbuf[size] = '\0';
1759 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1760 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1761 			return (mpt_raid_set_vol_mwce(mpt, i));
1762 		}
1763 	}
1764 	return (EINVAL);
1765 }
1766 
1767 static int
1768 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1769 {
1770 	struct mpt_softc *mpt;
1771 	u_int raid_resync_rate;
1772 	int error;
1773 
1774 	GIANT_REQUIRED;
1775 
1776 	mpt = (struct mpt_softc *)arg1;
1777 	raid_resync_rate = mpt->raid_resync_rate;
1778 
1779 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1780 	if (error || !req->newptr) {
1781 		return error;
1782 	}
1783 
1784 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1785 }
1786 
1787 static int
1788 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1789 {
1790 	struct mpt_softc *mpt;
1791 	u_int raid_queue_depth;
1792 	int error;
1793 
1794 	GIANT_REQUIRED;
1795 
1796 	mpt = (struct mpt_softc *)arg1;
1797 	raid_queue_depth = mpt->raid_queue_depth;
1798 
1799 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1800 	if (error || !req->newptr) {
1801 		return error;
1802 	}
1803 
1804 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1805 }
1806 
1807 static void
1808 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1809 {
1810 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1811 	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1812 
1813 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1814 			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1815 			mpt_raid_sysctl_vol_member_wce, "A",
1816 			"volume member WCE (On, Off, On-During-Rebuild, NC)");
1817 
1818 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1819 			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1820 			mpt_raid_sysctl_vol_queue_depth, "I",
1821 			"default volume queue depth");
1822 
1823 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1824 			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1825 			mpt_raid_sysctl_vol_resync_rate, "I",
1826 			"volume resync priority (0 == NC, 1 - 255)");
1827 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1828 			"nonoptimal_volumes", CTLFLAG_RD,
1829 			&mpt->raid_nonopt_volumes, 0,
1830 			"number of nonoptimal volumes");
1831 }
1832 #endif
1833